Diffstat (limited to 'lib')
-rw-r--r-- lib/aarch32/armclang_printf.S | 19
-rw-r--r-- lib/aarch64/armclang_printf.S | 19
-rw-r--r-- lib/compiler-rt/builtins/int_lib.h | 7
-rw-r--r-- lib/cpus/aarch64/cortex_a55.S | 32
-rw-r--r-- lib/cpus/aarch64/cortex_a75.S | 8
-rw-r--r-- lib/cpus/aarch64/cortex_a76.S | 10
-rw-r--r-- lib/cpus/aarch64/denver.S | 60
-rw-r--r-- lib/cpus/aarch64/dsu_helpers.S | 70
-rw-r--r-- lib/cpus/cpu-ops.mk | 9
-rw-r--r-- lib/el3_runtime/aarch64/context_mgmt.c | 7
-rw-r--r-- lib/extensions/mpam/mpam.c | 40
-rw-r--r-- lib/extensions/ras/ras_common.c | 32
-rw-r--r-- lib/extensions/ras/std_err_record.c | 26
-rw-r--r-- lib/libc/abort.c (renamed from lib/stdlib/abort.c) | 5
-rw-r--r-- lib/libc/assert.c (renamed from lib/stdlib/assert.c) | 22
-rw-r--r-- lib/libc/exit.c | 26
-rw-r--r-- lib/libc/libc.mk | 27
-rw-r--r-- lib/libc/memchr.c | 20
-rw-r--r-- lib/libc/memcmp.c | 24
-rw-r--r-- lib/libc/memcpy.c | 18
-rw-r--r-- lib/libc/memmove.c | 31
-rw-r--r-- lib/libc/memset.c | 17
-rw-r--r-- lib/libc/printf.c | 194
-rw-r--r-- lib/libc/putchar.c | 19
-rw-r--r-- lib/libc/puts.c (renamed from lib/stdlib/puts.c) | 11
-rw-r--r-- lib/libc/snprintf.c | 140
-rw-r--r-- lib/libc/strchr.c (renamed from lib/stdlib/strchr.c) | 7
-rw-r--r-- lib/libc/strcmp.c (renamed from lib/stdlib/strcmp.c) | 26
-rw-r--r-- lib/libc/strlen.c | 17
-rw-r--r-- lib/libc/strncmp.c (renamed from lib/stdlib/strncmp.c) | 13
-rw-r--r-- lib/libc/strnlen.c (renamed from lib/stdlib/strnlen.c) | 7
-rw-r--r-- lib/libfdt/libfdt.mk | 2
-rw-r--r-- lib/pmf/pmf_main.c | 2
-rw-r--r-- lib/psci/aarch32/psci_helpers.S | 22
-rw-r--r-- lib/psci/aarch64/psci_helpers.S | 22
-rw-r--r-- lib/psci/psci_common.c | 166
-rw-r--r-- lib/psci/psci_main.c | 146
-rw-r--r-- lib/psci/psci_mem_protect.c | 25
-rw-r--r-- lib/psci/psci_off.c | 19
-rw-r--r-- lib/psci/psci_on.c | 51
-rw-r--r-- lib/psci/psci_private.h | 268
-rw-r--r-- lib/psci/psci_setup.c | 70
-rw-r--r-- lib/psci/psci_stat.c | 47
-rw-r--r-- lib/psci/psci_suspend.c | 42
-rw-r--r-- lib/psci/psci_system_off.c | 40
-rw-r--r-- lib/romlib/Makefile | 71
-rwxr-xr-x lib/romlib/gentbl.sh | 40
-rwxr-xr-x lib/romlib/genvar.sh | 36
-rwxr-xr-x lib/romlib/genwrappers.sh | 52
-rw-r--r-- lib/romlib/init.s | 30
-rw-r--r-- lib/romlib/jmptbl.i | 35
-rw-r--r-- lib/romlib/romlib.ld.S | 44
-rw-r--r-- lib/stdlib/exit.c | 14
-rw-r--r-- lib/stdlib/mem.c | 97
-rw-r--r-- lib/stdlib/printf.c | 36
-rw-r--r-- lib/stdlib/putchar.c | 24
-rw-r--r-- lib/stdlib/sscanf.c | 27
-rw-r--r-- lib/stdlib/stdlib.mk | 25
-rw-r--r-- lib/stdlib/strlen.c | 44
-rw-r--r-- lib/stdlib/subr_prf.c | 548
-rw-r--r-- lib/stdlib/timingsafe_bcmp.c | 36
-rw-r--r-- lib/xlat_tables/aarch32/xlat_tables.c | 48
-rw-r--r-- lib/xlat_tables/aarch64/xlat_tables.c | 46
-rw-r--r-- lib/xlat_tables/xlat_tables_common.c | 130
-rw-r--r-- lib/xlat_tables/xlat_tables_private.h | 12
-rw-r--r-- lib/xlat_tables_v2/aarch32/enable_mmu.S | 72
-rw-r--r-- lib/xlat_tables_v2/aarch32/xlat_tables_arch.c | 152
-rw-r--r-- lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h | 22
-rw-r--r-- lib/xlat_tables_v2/aarch64/enable_mmu.S | 36
-rw-r--r-- lib/xlat_tables_v2/aarch64/xlat_tables_arch.c | 165
-rw-r--r-- lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h | 28
-rw-r--r-- lib/xlat_tables_v2/xlat_tables.mk | 2
-rw-r--r-- lib/xlat_tables_v2/xlat_tables_context.c | 75
-rw-r--r-- lib/xlat_tables_v2/xlat_tables_core.c | 375
-rw-r--r-- lib/xlat_tables_v2/xlat_tables_private.h | 44
-rw-r--r-- lib/xlat_tables_v2/xlat_tables_utils.c | 232
76 files changed, 2515 insertions, 1968 deletions
diff --git a/lib/aarch32/armclang_printf.S b/lib/aarch32/armclang_printf.S
new file mode 100644
index 000000000..2b87bf7e0
--- /dev/null
+++ b/lib/aarch32/armclang_printf.S
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+/* Symbols needed by armclang */
+
+ .globl __0printf
+ .globl __1printf
+ .globl __2printf
+
+func __0printf
+__1printf:
+__2printf:
+ b printf
+endfunc __0printf
diff --git a/lib/aarch64/armclang_printf.S b/lib/aarch64/armclang_printf.S
new file mode 100644
index 000000000..2b87bf7e0
--- /dev/null
+++ b/lib/aarch64/armclang_printf.S
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+/* Symbols needed by armclang */
+
+ .globl __0printf
+ .globl __1printf
+ .globl __2printf
+
+func __0printf
+__1printf:
+__2printf:
+ b printf
+endfunc __0printf
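
These two identical stubs exist because armclang can lower printf calls into specialized __0printf/__1printf/__2printf entry points; the stubs fold all three back into the common printf. A hedged C-level illustration (the exact lowering rule is an assumption inferred from the "Symbols needed by armclang" comment):

    #include <stdio.h>

    int main(void)
    {
        /* Calls like these may be emitted by armclang as __0printf,
         * __1printf and __2printf respectively; the assembly stubs above
         * branch every variant to the single printf implementation. */
        printf("hello\n");       /* no conversion arguments */
        printf("%d\n", 1);       /* one argument */
        printf("%d %d\n", 1, 2); /* two arguments */
        return 0;
    }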
diff --git a/lib/compiler-rt/builtins/int_lib.h b/lib/compiler-rt/builtins/int_lib.h
index 57dfc413c..787777a1b 100644
--- a/lib/compiler-rt/builtins/int_lib.h
+++ b/lib/compiler-rt/builtins/int_lib.h
@@ -14,7 +14,7 @@
*/
/*
- * Portions copyright (c) 2017, ARM Limited and Contributors.
+ * Portions copyright (c) 2017-2018, ARM Limited and Contributors.
* All rights reserved.
*/
@@ -63,9 +63,8 @@
* Kernel and boot environment can't use normal headers,
* so use the equivalent system headers.
*/
-# include <sys/limits.h>
-# include <sys/stdint.h>
-# include <sys/types.h>
+# include <limits.h>
+# include <stdint.h>
/* Include the commonly used internal type definitions. */
#include "int_types.h"
diff --git a/lib/cpus/aarch64/cortex_a55.S b/lib/cpus/aarch64/cortex_a55.S
index 741c77347..4e9bd9f6f 100644
--- a/lib/cpus/aarch64/cortex_a55.S
+++ b/lib/cpus/aarch64/cortex_a55.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,6 +11,14 @@
#include <cpu_macros.S>
#include <plat_macros.S>
+func cortex_a55_reset_func
+ mov x19, x30
+#if ERRATA_DSU_936184
+ bl errata_dsu_936184_wa
+#endif
+ ret x19
+endfunc cortex_a55_reset_func
+
/* ---------------------------------------------
* HW will do the cache maintenance while powering down
* ---------------------------------------------
@@ -27,6 +35,26 @@ func cortex_a55_core_pwr_dwn
ret
endfunc cortex_a55_core_pwr_dwn
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A55. Must follow AAPCS & can use stack.
+ */
+func cortex_a55_errata_report
+ stp x8, x30, [sp, #-16]!
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is at x8, where
+ * "report_errata" expects it and does not corrupt it.
+ */
+ report_errata ERRATA_DSU_936184, cortex_a55, dsu_936184
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a55_errata_report
+#endif
+
/* ---------------------------------------------
* This function provides cortex_a55 specific
* register information for crash reporting.
@@ -47,5 +75,5 @@ func cortex_a55_cpu_reg_dump
endfunc cortex_a55_cpu_reg_dump
declare_cpu_ops cortex_a55, CORTEX_A55_MIDR, \
- CPU_NO_RESET_FUNC, \
+ cortex_a55_reset_func, \
cortex_a55_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index 73f566f4b..e121b7da8 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -11,6 +11,7 @@
#include <cpu_macros.S>
func cortex_a75_reset_func
+ mov x19, x30
#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
cpu_check_csv2 x0, 1f
adr x0, wa_cve_2017_5715_bpiall_vbar
@@ -26,6 +27,10 @@ func cortex_a75_reset_func
isb
#endif
+#if ERRATA_DSU_936184
+ bl errata_dsu_936184_wa
+#endif
+
#if ENABLE_AMU
/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
mrs x0, actlr_el3
@@ -49,7 +54,7 @@ func cortex_a75_reset_func
msr CPUAMCNTENSET_EL0, x0
isb
#endif
- ret
+ ret x19
endfunc cortex_a75_reset_func
func check_errata_cve_2017_5715
@@ -106,6 +111,7 @@ func cortex_a75_errata_report
*/
report_errata WORKAROUND_CVE_2017_5715, cortex_a75, cve_2017_5715
report_errata WORKAROUND_CVE_2018_3639, cortex_a75, cve_2018_3639
+ report_errata ERRATA_DSU_936184, cortex_a75, dsu_936184
ldp x8, x30, [sp], #16
ret
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
index 51d0b15e3..1697c55dc 100644
--- a/lib/cpus/aarch64/cortex_a76.S
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -207,6 +207,7 @@ func cortex_a76_disable_wa_cve_2018_3639
endfunc cortex_a76_disable_wa_cve_2018_3639
func cortex_a76_reset_func
+ mov x19, x30
#if WORKAROUND_CVE_2018_3639
mrs x0, CORTEX_A76_CPUACTLR2_EL1
orr x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
@@ -224,7 +225,11 @@ func cortex_a76_reset_func
msr vbar_el3, x0
isb
#endif
- ret
+
+#if ERRATA_DSU_936184
+ bl errata_dsu_936184_wa
+#endif
+ ret x19
endfunc cortex_a76_reset_func
/* ---------------------------------------------
@@ -258,6 +263,7 @@ func cortex_a76_errata_report
* checking functions of each errata.
*/
report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
+ report_errata ERRATA_DSU_936184, cortex_a76, dsu_936184
ldp x8, x30, [sp], #16
ret
diff --git a/lib/cpus/aarch64/denver.S b/lib/cpus/aarch64/denver.S
index f04dbd6c4..a981d02c7 100644
--- a/lib/cpus/aarch64/denver.S
+++ b/lib/cpus/aarch64/denver.S
@@ -189,6 +189,25 @@ func denver_disable_dco
ret
endfunc denver_disable_dco
+func check_errata_cve_2017_5715
+ mov x0, #ERRATA_MISSING
+#if WORKAROUND_CVE_2017_5715
+ /*
+ * Check if the CPU supports the special instruction
+ * required to flush the indirect branch predictor and
+ * RSB. Support for this operation can be determined by
+ * comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
+ */
+ mrs x1, id_afr0_el1
+ mov x2, #0x10000
+ and x1, x1, x2
+ cbz x1, 1f
+ mov x0, #ERRATA_APPLIES
+1:
+#endif
+ ret
+endfunc check_errata_cve_2017_5715
+
/* -------------------------------------------------
* The CPU Ops reset function for Denver.
* -------------------------------------------------
@@ -248,6 +267,27 @@ func denver_cluster_pwr_dwn
ret
endfunc denver_cluster_pwr_dwn
+#if REPORT_ERRATA
+ /*
+ * Errata printing function for Denver. Must follow AAPCS.
+ */
+func denver_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata WORKAROUND_CVE_2017_5715, denver, cve_2017_5715
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc denver_errata_report
+#endif
+
/* ---------------------------------------------
* This function provides Denver specific
* register information for crash reporting.
@@ -267,27 +307,37 @@ func denver_cpu_reg_dump
ret
endfunc denver_cpu_reg_dump
-declare_cpu_ops denver, DENVER_MIDR_PN0, \
+declare_cpu_ops_wa denver, DENVER_MIDR_PN0, \
denver_reset_func, \
+ check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
denver_core_pwr_dwn, \
denver_cluster_pwr_dwn
-declare_cpu_ops denver, DENVER_MIDR_PN1, \
+declare_cpu_ops_wa denver, DENVER_MIDR_PN1, \
denver_reset_func, \
+ check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
denver_core_pwr_dwn, \
denver_cluster_pwr_dwn
-declare_cpu_ops denver, DENVER_MIDR_PN2, \
+declare_cpu_ops_wa denver, DENVER_MIDR_PN2, \
denver_reset_func, \
+ check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
denver_core_pwr_dwn, \
denver_cluster_pwr_dwn
-declare_cpu_ops denver, DENVER_MIDR_PN3, \
+declare_cpu_ops_wa denver, DENVER_MIDR_PN3, \
denver_reset_func, \
+ check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
denver_core_pwr_dwn, \
denver_cluster_pwr_dwn
-declare_cpu_ops denver, DENVER_MIDR_PN4, \
+declare_cpu_ops_wa denver, DENVER_MIDR_PN4, \
denver_reset_func, \
+ check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
denver_core_pwr_dwn, \
denver_cluster_pwr_dwn
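
For clarity, the ID_AFR0_EL1 test in check_errata_cve_2017_5715 above amounts to checking that bit 16 (the low bit of the [19:16] field compared against 0b0001) is set. A minimal C sketch of that predicate, with the register value passed in rather than read via mrs:

    #include <stdbool.h>
    #include <stdint.h>

    /* True when the CPU advertises the branch-predictor/RSB flush
     * instruction, i.e. bit 16 of ID_AFR0_EL1 is set (mirrors the
     * "mov x2, #0x10000; and; cbz" sequence above). */
    static bool denver_has_bp_flush(uint64_t id_afr0_el1)
    {
        return (id_afr0_el1 & (UINT64_C(1) << 16)) != 0U;
    }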
diff --git a/lib/cpus/aarch64/dsu_helpers.S b/lib/cpus/aarch64/dsu_helpers.S
new file mode 100644
index 000000000..152a3da26
--- /dev/null
+++ b/lib/cpus/aarch64/dsu_helpers.S
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <dsu_def.h>
+#include <errata_report.h>
+
+/* -----------------------------------------------------------------------
+ * DSU erratum 936184 check function
+ * Checks the DSU variant, revision and configuration to determine if
+ * the erratum applies. Erratum applies if ACP interface is present
+ * in the DSU and revision-variant < r2p0.
+ *
+ * The erratum was fixed in r2p0.
+ *
+ * This function is called from both assembly and C environment. So it
+ * follows AAPCS.
+ *
+ * Clobbers: x0-x3
+ * -----------------------------------------------------------------------
+ */
+ .globl check_errata_dsu_936184
+ .globl errata_dsu_936184_wa
+
+func check_errata_dsu_936184
+ mov x2, #ERRATA_NOT_APPLIES
+ mov x3, #ERRATA_APPLIES
+
+ /* Erratum applies only if DSU has the ACP interface */
+ mov x0, x2
+ mrs x1, CLUSTERCFR_EL1
+ ubfx x1, x1, #CLUSTERCFR_ACP_SHIFT, #1
+ cbz x1, 1f
+
+ /* If ACP is present, check if DSU is older than r2p0 */
+ mrs x1, CLUSTERIDR_EL1
+
+ /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
+ ubfx x0, x1, #CLUSTERIDR_REV_SHIFT,\
+ #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
+ mov x1, #(0x2 << CLUSTERIDR_REV_BITS)
+ cmp x0, x1
+ csel x0, x2, x3, hs
+1:
+ ret
+endfunc check_errata_dsu_936184
+
+/* --------------------------------------------------
+ * Errata Workaround for DSU erratum #936184.
+ *
+ * Can clobber only: x0-x17
+ * --------------------------------------------------
+ */
+func errata_dsu_936184_wa
+ mov x17, x30
+ bl check_errata_dsu_936184
+ cbz x0, 1f
+
+ /* If erratum applies, we set a mask to a DSU control register */
+ mrs x0, CLUSTERACTLR_EL1
+ ldr x1, =DSU_ERRATA_936184_MASK
+ orr x0, x0, x1
+ msr CLUSTERACTLR_EL1, x0
+ isb
+1:
+ ret x17
+endfunc errata_dsu_936184_wa
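
Because the DSU variant and revision fields are adjacent in CLUSTERIDR_EL1, the check above can compare them as one value against r2p0 with a single ubfx/cmp pair. A hedged C model of that comparison; the 4-bit field widths are illustrative assumptions, not taken from the DSU TRM:

    #include <stdbool.h>
    #include <stdint.h>

    #define REV_SHIFT 0   /* assumed CLUSTERIDR_REV_SHIFT */
    #define REV_BITS  4   /* assumed CLUSTERIDR_REV_BITS */
    #define VAR_BITS  4   /* assumed CLUSTERIDR_VAR_BITS */

    static bool dsu_936184_applies(uint64_t clusteridr, bool has_acp)
    {
        /* Extract variant:revision as one field, as ubfx does above */
        uint64_t var_rev = (clusteridr >> REV_SHIFT) &
                           ((UINT64_C(1) << (REV_BITS + VAR_BITS)) - 1U);

        /* Applies only with an ACP interface and an IP older than r2p0 */
        return has_acp && (var_rev < (UINT64_C(0x2) << REV_BITS));
    }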
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 456e3e524..40a8ac7ce 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -123,6 +123,11 @@ ERRATA_A72_859971 ?=0
# only to r0p0 and r1p0 of the Ares cpu.
ERRATA_ARES_1043202 ?=1
+# Flag to apply DSU erratum 936184. This erratum applies to DSUs containing
+# the ACP interface and revision < r2p0. Applying the workaround results in
+# higher DSU power consumption on idle.
+ERRATA_DSU_936184 ?=0
+
# Process ERRATA_A53_826319 flag
$(eval $(call assert_boolean,ERRATA_A53_826319))
$(eval $(call add_define,ERRATA_A53_826319))
@@ -187,6 +192,10 @@ $(eval $(call add_define,ERRATA_A72_859971))
$(eval $(call assert_boolean,ERRATA_ARES_1043202))
$(eval $(call add_define,ERRATA_ARES_1043202))
+# Process ERRATA_DSU_936184 flag
+$(eval $(call assert_boolean,ERRATA_DSU_936184))
+$(eval $(call add_define,ERRATA_DSU_936184))
+
# Errata build flags
ifneq (${ERRATA_A53_843419},0)
TF_LDFLAGS_aarch64 += --fix-cortex-a53-843419
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index f389368d4..ee5fe4f9d 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -12,6 +12,7 @@
#include <context.h>
#include <context_mgmt.h>
#include <interrupt_mgmt.h>
+#include <mpam.h>
#include <platform.h>
#include <platform_def.h>
#include <pubsub_events.h>
@@ -104,7 +105,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
if (EP_GET_ST(ep->h.attr))
scr_el3 |= SCR_ST_BIT;
-#ifndef HANDLE_EA_EL3_FIRST
+#if !HANDLE_EA_EL3_FIRST
/*
* SCR_EL3.EA: Do not route External Abort and SError Interrupt External
* to EL3 when executing at a lower EL. When executing at EL3, External
@@ -244,6 +245,10 @@ static void enable_extensions_nonsecure(int el2_unused)
#if ENABLE_SVE_FOR_NS
sve_enable(el2_unused);
#endif
+
+#if ENABLE_MPAM_FOR_LOWER_ELS
+ mpam_enable(el2_unused);
+#endif
#endif
}
diff --git a/lib/extensions/mpam/mpam.c b/lib/extensions/mpam/mpam.c
new file mode 100644
index 000000000..e628827bf
--- /dev/null
+++ b/lib/extensions/mpam/mpam.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <mpam.h>
+#include <stdbool.h>
+
+bool mpam_supported(void)
+{
+ uint64_t features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_MPAM_SHIFT;
+
+ return ((features & ID_AA64PFR0_MPAM_MASK) != 0U);
+}
+
+void mpam_enable(int el2_unused)
+{
+ if (!mpam_supported())
+ return;
+
+ /*
+ * Enable MPAM, and disable trapping to EL3 when lower ELs access their
+ * own MPAM registers.
+ */
+ write_mpam3_el3(MPAM3_EL3_MPAMEN_BIT);
+
+ /*
+ * If EL2 is implemented but unused, disable trapping to EL2 when lower
+ * ELs access their own MPAM registers.
+ */
+ if (el2_unused != 0) {
+ write_mpam2_el2(0);
+
+ if ((read_mpamidr_el1() & MPAMIDR_HAS_HCR_BIT) != 0U)
+ write_mpamhcr_el2(0);
+ }
+}
diff --git a/lib/extensions/ras/ras_common.c b/lib/extensions/ras/ras_common.c
index 0335a7bcb..2e65eebb9 100644
--- a/lib/extensions/ras/ras_common.c
+++ b/lib/extensions/ras/ras_common.c
@@ -11,6 +11,7 @@
#include <platform.h>
#include <ras.h>
#include <ras_arch.h>
+#include <stdbool.h>
#ifndef PLAT_RAS_PRI
# error Platform must define RAS priority value
@@ -20,15 +21,15 @@
int ras_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
void *handle, uint64_t flags)
{
- unsigned int i, n_handled = 0, ret;
- int probe_data;
+ unsigned int i, n_handled = 0;
+ int probe_data, ret;
struct err_record_info *info;
const struct err_handler_data err_data = {
.version = ERR_HANDLER_VERSION,
.ea_reason = ea_reason,
.interrupt = 0,
- .syndrome = syndrome,
+ .syndrome = (uint32_t) syndrome,
.flags = flags,
.cookie = cookie,
.handle = handle
@@ -39,7 +40,7 @@ int ras_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
assert(info->handler != NULL);
/* Continue probing until the record group signals no error */
- while (1) {
+ while (true) {
if (info->probe(info, &probe_data) == 0)
break;
@@ -52,20 +53,20 @@ int ras_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
}
}
- return (n_handled != 0);
+ return (n_handled != 0U) ? 1 : 0;
}
#if ENABLE_ASSERTIONS
static void assert_interrupts_sorted(void)
{
unsigned int i, last;
- struct ras_interrupt *start = ras_interrupt_mapping.intrs;
+ struct ras_interrupt *start = ras_interrupt_mappings.intrs;
- if (ras_interrupt_mapping.num_intrs == 0)
+ if (ras_interrupt_mappings.num_intrs == 0UL)
return;
last = start[0].intr_number;
- for (i = 1; i < ras_interrupt_mapping.num_intrs; i++) {
+ for (i = 1; i < ras_interrupt_mappings.num_intrs; i++) {
assert(start[i].intr_number > last);
last = start[i].intr_number;
}
@@ -79,7 +80,7 @@ static void assert_interrupts_sorted(void)
static int ras_interrupt_handler(uint32_t intr_raw, uint32_t flags,
void *handle, void *cookie)
{
- struct ras_interrupt *ras_inrs = ras_interrupt_mapping.intrs;
+ struct ras_interrupt *ras_inrs = ras_interrupt_mappings.intrs;
struct ras_interrupt *selected = NULL;
int start, end, mid, probe_data, ret __unused;
@@ -91,10 +92,10 @@ static int ras_interrupt_handler(uint32_t intr_raw, uint32_t flags,
.handle = handle
};
- assert(ras_interrupt_mapping.num_intrs > 0);
+ assert(ras_interrupt_mappings.num_intrs > 0UL);
start = 0;
- end = ras_interrupt_mapping.num_intrs;
+ end = (int) ras_interrupt_mappings.num_intrs;
while (start <= end) {
mid = ((end + start) / 2);
if (intr_raw == ras_inrs[mid].intr_number) {
@@ -114,13 +115,14 @@ static int ras_interrupt_handler(uint32_t intr_raw, uint32_t flags,
panic();
}
-
- ret = selected->err_record->probe(selected->err_record, &probe_data);
- assert(ret != 0);
+ if (selected->err_record->probe != NULL) {
+ ret = selected->err_record->probe(selected->err_record, &probe_data);
+ assert(ret != 0);
+ }
/* Call error handler for the record group */
assert(selected->err_record->handler != NULL);
- selected->err_record->handler(selected->err_record, probe_data,
+ (void) selected->err_record->handler(selected->err_record, probe_data,
&err_data);
return 0;
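
The interrupt dispatch above relies on ras_interrupt_mappings being sorted by interrupt number (hence assert_interrupts_sorted), so a binary search can locate the owning error record. A simplified C sketch of that lookup, with the structure reduced to the one field the search uses:

    struct intr_map {
        unsigned int intr_number; /* real ras_interrupt also carries err_record */
    };

    static int find_intr(const struct intr_map *map, int count, unsigned int intr)
    {
        int lo = 0, hi = count - 1;

        while (lo <= hi) {
            int mid = (lo + hi) / 2;

            if (map[mid].intr_number == intr)
                return mid;     /* found: dispatch to this record group */
            if (map[mid].intr_number < intr)
                lo = mid + 1;
            else
                hi = mid - 1;
        }
        return -1;              /* not found: the real handler panics */
    }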
diff --git a/lib/extensions/ras/std_err_record.c b/lib/extensions/ras/std_err_record.c
index 65c007f9e..209cb7310 100644
--- a/lib/extensions/ras/std_err_record.c
+++ b/lib/extensions/ras/std_err_record.c
@@ -13,28 +13,29 @@
*/
int ser_probe_memmap(uintptr_t base, unsigned int size_num_k, int *probe_data)
{
- int num_records, num_group_regs, i;
+ unsigned int num_records, num_group_regs, i;
uint64_t gsr;
- assert(base != 0);
+ assert(base != 0UL);
/* Only 4K supported for now */
assert(size_num_k == STD_ERR_NODE_SIZE_NUM_K);
- num_records = (mmio_read_32(ERR_DEVID(base, size_num_k)) & ERR_DEVID_MASK);
+ num_records = (unsigned int)
+ (mmio_read_32(ERR_DEVID(base, size_num_k)) & ERR_DEVID_MASK);
/* A group register shows error status for 2^6 error records */
- num_group_regs = (num_records >> 6) + 1;
+ num_group_regs = (num_records >> 6U) + 1U;
/* Iterate through group registers to find a record in error */
for (i = 0; i < num_group_regs; i++) {
gsr = mmio_read_64(ERR_GSR(base, size_num_k, i));
- if (gsr == 0)
+ if (gsr == 0ULL)
continue;
/* Return the index of the record in error */
if (probe_data != NULL)
- *probe_data = ((i << 6) + __builtin_ctz(gsr));
+ *probe_data = (((int) (i << 6U)) + __builtin_ctzll(gsr));
return 1;
}
@@ -49,13 +50,14 @@ int ser_probe_memmap(uintptr_t base, unsigned int size_num_k, int *probe_data)
*/
int ser_probe_sysreg(unsigned int idx_start, unsigned int num_idx, int *probe_data)
{
- int i;
+ unsigned int i;
uint64_t status;
- unsigned int max_idx __unused = read_erridr_el1() & ERRIDR_MASK;
+ unsigned int max_idx __unused =
+ ((unsigned int) read_erridr_el1()) & ERRIDR_MASK;
assert(idx_start < max_idx);
- assert(check_u32_overflow(idx_start, num_idx) == 0);
- assert((idx_start + num_idx - 1) < max_idx);
+ assert(check_u32_overflow(idx_start, num_idx));
+ assert((idx_start + num_idx - 1U) < max_idx);
for (i = 0; i < num_idx; i++) {
/* Select the error record */
@@ -65,9 +67,9 @@ int ser_probe_sysreg(unsigned int idx_start, unsigned int num_idx, int *probe_da
status = read_erxstatus_el1();
/* Check for valid field in status */
- if (ERR_STATUS_GET_FIELD(status, V)) {
+ if (ERR_STATUS_GET_FIELD(status, V) != 0U) {
if (probe_data != NULL)
- *probe_data = i;
+ *probe_data = (int) i;
return 1;
}
}
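
Since each group status register covers 2^6 error records, the probe result computed above is simply the group index times 64 plus the position of the lowest set bit in the GSR. A small worked example:

    #include <stdint.h>

    /* E.g. group 1 with gsr == 0x8 yields record (1 << 6) + 3 == 67 */
    static int first_record_in_error(unsigned int group, uint64_t gsr)
    {
        return (int)(group << 6) + __builtin_ctzll(gsr);
    }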
diff --git a/lib/stdlib/abort.c b/lib/libc/abort.c
index 65ce4ccaf..c9d16cccc 100644
--- a/lib/stdlib/abort.c
+++ b/lib/libc/abort.c
@@ -7,10 +7,7 @@
#include <debug.h>
#include <stdlib.h>
-/*
- * This is a basic implementation. This could be improved.
- */
-void abort (void)
+void abort(void)
{
ERROR("ABORT\n");
panic();
diff --git a/lib/stdlib/assert.c b/lib/libc/assert.c
index 97fab4b0f..8fa8f7212 100644
--- a/lib/stdlib/assert.c
+++ b/lib/libc/assert.c
@@ -1,36 +1,42 @@
/*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
+#include <cdefs.h>
#include <console.h>
#include <debug.h>
#include <platform.h>
+#include <stdio.h>
/*
-* Only print the output if PLAT_LOG_LEVEL_ASSERT is higher or equal to
-* LOG_LEVEL_INFO, which is the default value for builds with DEBUG=1.
-*/
+ * Only print the output if PLAT_LOG_LEVEL_ASSERT is higher or equal to
+ * LOG_LEVEL_INFO, which is the default value for builds with DEBUG=1.
+ */
#if PLAT_LOG_LEVEL_ASSERT >= LOG_LEVEL_VERBOSE
void __assert(const char *file, unsigned int line, const char *assertion)
{
- tf_printf("ASSERT: %s:%d:%s\n", file, line, assertion);
- console_flush();
+ printf("ASSERT: %s:%d:%s\n", file, line, assertion);
+ backtrace("assert");
+ (void)console_flush();
plat_panic_handler();
}
#elif PLAT_LOG_LEVEL_ASSERT >= LOG_LEVEL_INFO
void __assert(const char *file, unsigned int line)
{
- tf_printf("ASSERT: %s:%d\n", file, line);
- console_flush();
+ printf("ASSERT: %s:%d\n", file, line);
+ backtrace("assert");
+ (void)console_flush();
plat_panic_handler();
}
#else
void __assert(void)
{
+ backtrace("assert");
+ (void)console_flush();
plat_panic_handler();
}
#endif
diff --git a/lib/libc/exit.c b/lib/libc/exit.c
new file mode 100644
index 000000000..f4ffe27cf
--- /dev/null
+++ b/lib/libc/exit.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+
+static void (*exitfun)(void);
+
+void exit(int status)
+{
+ if (exitfun != NULL)
+ (*exitfun)();
+ for (;;)
+ ;
+}
+
+int atexit(void (*fun)(void))
+{
+ if (exitfun != NULL)
+ return -1;
+ exitfun = fun;
+
+ return 0;
+}
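
Note that this atexit keeps a single handler slot rather than a table: the first registration wins and any later one fails. A short usage sketch:

    #include <stdlib.h>

    static void cleanup(void)
    {
        /* release whatever the image holds open */
    }

    int install_cleanup(void)
    {
        if (atexit(cleanup) != 0)
            return -1;          /* slot already occupied */
        /* a second atexit(cleanup) here would return -1, not chain */
        return 0;
    }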
diff --git a/lib/libc/libc.mk b/lib/libc/libc.mk
new file mode 100644
index 000000000..554f36bb7
--- /dev/null
+++ b/lib/libc/libc.mk
@@ -0,0 +1,27 @@
+#
+# Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+LIBC_SRCS := $(addprefix lib/libc/, \
+ abort.c \
+ assert.c \
+ exit.c \
+ memchr.c \
+ memcmp.c \
+ memcpy.c \
+ memmove.c \
+ memset.c \
+ printf.c \
+ putchar.c \
+ puts.c \
+ snprintf.c \
+ strchr.c \
+ strcmp.c \
+ strlen.c \
+ strncmp.c \
+ strnlen.c)
+
+INCLUDES += -Iinclude/lib/libc \
+ -Iinclude/lib/libc/$(ARCH) \
diff --git a/lib/libc/memchr.c b/lib/libc/memchr.c
new file mode 100644
index 000000000..2eba47c95
--- /dev/null
+++ b/lib/libc/memchr.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+void *memchr(const void *src, int c, size_t len)
+{
+ const char *s = src;
+
+ while (len--) {
+ if (*s == c)
+ return (void *) s;
+ s++;
+ }
+
+ return NULL;
+}
diff --git a/lib/libc/memcmp.c b/lib/libc/memcmp.c
new file mode 100644
index 000000000..a4c798b09
--- /dev/null
+++ b/lib/libc/memcmp.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+int memcmp(const void *s1, const void *s2, size_t len)
+{
+ const unsigned char *s = s1;
+ const unsigned char *d = s2;
+ unsigned char sc;
+ unsigned char dc;
+
+ while (len--) {
+ sc = *s++;
+ dc = *d++;
+ if (sc - dc)
+ return (sc - dc);
+ }
+
+ return 0;
+}
diff --git a/lib/libc/memcpy.c b/lib/libc/memcpy.c
new file mode 100644
index 000000000..fc0c9fe89
--- /dev/null
+++ b/lib/libc/memcpy.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+void *memcpy(void *dst, const void *src, size_t len)
+{
+ const char *s = src;
+ char *d = dst;
+
+ while (len--)
+ *d++ = *s++;
+
+ return dst;
+}
diff --git a/lib/libc/memmove.c b/lib/libc/memmove.c
new file mode 100644
index 000000000..63acf267f
--- /dev/null
+++ b/lib/libc/memmove.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+void *memmove(void *dst, const void *src, size_t len)
+{
+ /*
+ * The following test makes use of unsigned arithmetic overflow to
+ * more efficiently test the condition !(src <= dst && dst < src+len).
+ * It also avoids the situation where the more explicit test would give
+ * incorrect results were the calculation src+len to overflow (though
+ * that issue is probably moot, as such usage is probably undefined
+ * behaviour and a bug anyway).
+ */
+ if ((size_t)dst - (size_t)src >= len) {
+ /* destination not in source data, so can safely use memcpy */
+ return memcpy(dst, src, len);
+ } else {
+ /* copy backwards... */
+ const char *end = dst;
+ const char *s = (const char *)src + len;
+ char *d = (char *)dst + len;
+ while (d != end)
+ *--d = *--s;
+ }
+ return dst;
+}
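
The single comparison works because unsigned subtraction wraps: when dst precedes src, (size_t)dst - (size_t)src becomes a huge value that always compares >= len, so only genuine forward overlap fails the test. A self-contained check of both cases:

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
        char buf[8];

        /* dst inside [src, src+len): 1 < 4, so memmove must copy backwards */
        assert(!((size_t)(buf + 1) - (size_t)buf >= 4));
        /* dst before src: the difference wraps to near SIZE_MAX, >= 4 holds,
         * and a plain forward memcpy is safe */
        assert((size_t)buf - (size_t)(buf + 1) >= 4);
        return 0;
    }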
diff --git a/lib/libc/memset.c b/lib/libc/memset.c
new file mode 100644
index 000000000..03aa80966
--- /dev/null
+++ b/lib/libc/memset.c
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+void *memset(void *dst, int val, size_t count)
+{
+ char *ptr = dst;
+
+ while (count--)
+ *ptr++ = val;
+
+ return dst;
+}
diff --git a/lib/libc/printf.c b/lib/libc/printf.c
new file mode 100644
index 000000000..4480e94db
--- /dev/null
+++ b/lib/libc/printf.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+#include <debug.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#define get_num_va_args(_args, _lcount) \
+ (((_lcount) > 1) ? va_arg(_args, long long int) : \
+ (((_lcount) == 1) ? va_arg(_args, long int) : \
+ va_arg(_args, int)))
+
+#define get_unum_va_args(_args, _lcount) \
+ (((_lcount) > 1) ? va_arg(_args, unsigned long long int) : \
+ (((_lcount) == 1) ? va_arg(_args, unsigned long int) : \
+ va_arg(_args, unsigned int)))
+
+static int string_print(const char *str)
+{
+ int count = 0;
+
+ assert(str != NULL);
+
+ for ( ; *str != '\0'; str++) {
+ (void)putchar(*str);
+ count++;
+ }
+
+ return count;
+}
+
+static int unsigned_num_print(unsigned long long int unum, unsigned int radix,
+ char padc, int padn)
+{
+ /* Just need enough space to store 64 bit decimal integer */
+ char num_buf[20];
+ int i = 0, count = 0;
+ unsigned int rem;
+
+ do {
+ rem = unum % radix;
+ if (rem < 0xa)
+ num_buf[i] = '0' + rem;
+ else
+ num_buf[i] = 'a' + (rem - 0xa);
+ i++;
+ unum /= radix;
+ } while (unum > 0U);
+
+ if (padn > 0) {
+ while (i < padn) {
+ (void)putchar(padc);
+ count++;
+ padn--;
+ }
+ }
+
+ while (--i >= 0) {
+ (void)putchar(num_buf[i]);
+ count++;
+ }
+
+ return count;
+}
+
+/*******************************************************************
+ * Reduced format print for Trusted firmware.
+ * The following type specifiers are supported by this print
+ * %x - hexadecimal format
+ * %s - string format
+ * %d or %i - signed decimal format
+ * %u - unsigned decimal format
+ * %p - pointer format
+ *
+ * The following length specifiers are supported by this print
+ * %l - long int (64-bit on AArch64)
+ * %ll - long long int (64-bit on AArch64)
+ * %z - size_t sized integer formats (64 bit on AArch64)
+ *
+ * The following padding specifiers are supported by this print
+ * %0NN - Left-pad the number with 0s (NN is a decimal number)
+ *
+ * The function exits on any format specifier other than valid
+ * combinations of the above specifiers.
+ *******************************************************************/
+int vprintf(const char *fmt, va_list args)
+{
+ int l_count;
+ long long int num;
+ unsigned long long int unum;
+ char *str;
+ char padc = '\0'; /* Padding character */
+ int padn; /* Number of characters to pad */
+ int count = 0; /* Number of printed characters */
+
+ while (*fmt != '\0') {
+ l_count = 0;
+ padn = 0;
+
+ if (*fmt == '%') {
+ fmt++;
+ /* Check the format specifier */
+loop:
+ switch (*fmt) {
+ case 'i': /* Fall through to next one */
+ case 'd':
+ num = get_num_va_args(args, l_count);
+ if (num < 0) {
+ (void)putchar('-');
+ unum = (unsigned long long int)-num;
+ padn--;
+ } else
+ unum = (unsigned long long int)num;
+
+ count += unsigned_num_print(unum, 10,
+ padc, padn);
+ break;
+ case 's':
+ str = va_arg(args, char *);
+ count += string_print(str);
+ break;
+ case 'p':
+ unum = (uintptr_t)va_arg(args, void *);
+ if (unum > 0U) {
+ count += string_print("0x");
+ padn -= 2;
+ }
+
+ count += unsigned_num_print(unum, 16,
+ padc, padn);
+ break;
+ case 'x':
+ unum = get_unum_va_args(args, l_count);
+ count += unsigned_num_print(unum, 16,
+ padc, padn);
+ break;
+ case 'z':
+ if (sizeof(size_t) == 8U)
+ l_count = 2;
+
+ fmt++;
+ goto loop;
+ case 'l':
+ l_count++;
+ fmt++;
+ goto loop;
+ case 'u':
+ unum = get_unum_va_args(args, l_count);
+ count += unsigned_num_print(unum, 10,
+ padc, padn);
+ break;
+ case '0':
+ padc = '0';
+ padn = 0;
+ fmt++;
+
+ for (;;) {
+ char ch = *fmt;
+ if ((ch < '0') || (ch > '9')) {
+ goto loop;
+ }
+ padn = (padn * 10) + (ch - '0');
+ fmt++;
+ }
+ default:
+ /* Exit on any other format specifier */
+ return -1;
+ }
+ fmt++;
+ continue;
+ }
+ (void)putchar(*fmt);
+ fmt++;
+ count++;
+ }
+
+ return count;
+}
+
+int printf(const char *fmt, ...)
+{
+ int count;
+ va_list va;
+
+ va_start(va, fmt);
+ count = vprintf(fmt, va);
+ va_end(va);
+
+ return count;
+}
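
A few calls that stay within the reduced specifier set documented above; anything outside it makes vprintf() return -1 instead of printing:

    #include <stdio.h>

    int main(void)
    {
        printf("%s: %d (%u) at %p\n", "core", -1, 10U, (void *)0x1000);
        printf("%llx\n", 0xdeadbeefULL); /* 'll' length modifier */
        printf("%08x\n", 0xbeefU);       /* zero-padded to width 8 */
        return 0;
    }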
diff --git a/lib/libc/putchar.c b/lib/libc/putchar.c
new file mode 100644
index 000000000..0beb625b2
--- /dev/null
+++ b/lib/libc/putchar.c
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+#include <console.h>
+
+int putchar(int c)
+{
+ int res;
+ if (console_putc((unsigned char)c) >= 0)
+ res = c;
+ else
+ res = EOF;
+
+ return res;
+}
diff --git a/lib/stdlib/puts.c b/lib/libc/puts.c
index 284cf8c52..2a0ca11c6 100644
--- a/lib/stdlib/puts.c
+++ b/lib/libc/puts.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,15 +9,14 @@
int puts(const char *s)
{
int count = 0;
- while(*s) {
- if (putchar(*s++) == EOF)
+
+ while (*s != '\0') {
+ if (putchar(*s) == EOF)
return EOF;
+ s++;
count++;
}
- /* According to the puts(3) manpage, the function should write a
- * trailing newline.
- */
if (putchar('\n') == EOF)
return EOF;
diff --git a/lib/libc/snprintf.c b/lib/libc/snprintf.c
new file mode 100644
index 000000000..9bc07b2cb
--- /dev/null
+++ b/lib/libc/snprintf.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <platform.h>
+#include <stdarg.h>
+
+static void string_print(char **s, size_t n, size_t *chars_printed,
+ const char *str)
+{
+ while (*str != '\0') {
+ if (*chars_printed < n) {
+ *(*s) = *str;
+ (*s)++;
+ }
+
+ (*chars_printed)++;
+ str++;
+ }
+}
+
+static void unsigned_dec_print(char **s, size_t n, size_t *chars_printed,
+ unsigned int unum)
+{
+ /* Enough for a 32-bit unsigned decimal integer (4294967295). */
+ char num_buf[10];
+ int i = 0;
+ unsigned int rem;
+
+ do {
+ rem = unum % 10U;
+ num_buf[i++] = '0' + rem;
+ unum /= 10U;
+ } while (unum > 0U);
+
+ while (--i >= 0) {
+ if (*chars_printed < n) {
+ *(*s) = num_buf[i];
+ (*s)++;
+ }
+
+ (*chars_printed)++;
+ }
+}
+
+/*******************************************************************
+ * Reduced snprintf to be used for Trusted firmware.
+ * The following type specifiers are supported:
+ *
+ * %d or %i - signed decimal format
+ * %s - string format
+ * %u - unsigned decimal format
+ *
+ * The function panics on all other format specifiers.
+ *
+ * It returns the number of characters that would be written if the
+ * buffer was big enough. If it returns a value lower than n, the
+ * whole string has been written.
+ *******************************************************************/
+int snprintf(char *s, size_t n, const char *fmt, ...)
+{
+ va_list args;
+ int num;
+ unsigned int unum;
+ char *str;
+ size_t chars_printed = 0U;
+
+ if (n == 0U) {
+ /* There isn't space for anything. */
+ } else if (n == 1U) {
+ /* Buffer is too small to actually write anything else. */
+ *s = '\0';
+ n = 0U;
+ } else {
+ /* Reserve space for the terminator character. */
+ n--;
+ }
+
+ va_start(args, fmt);
+ while (*fmt != '\0') {
+
+ if (*fmt == '%') {
+ fmt++;
+ /* Check the format specifier. */
+ switch (*fmt) {
+ case 'i':
+ case 'd':
+ num = va_arg(args, int);
+
+ if (num < 0) {
+ if (chars_printed < n) {
+ *s = '-';
+ s++;
+ }
+ chars_printed++;
+
+ unum = (unsigned int)-num;
+ } else {
+ unum = (unsigned int)num;
+ }
+
+ unsigned_dec_print(&s, n, &chars_printed, unum);
+ break;
+ case 's':
+ str = va_arg(args, char *);
+ string_print(&s, n, &chars_printed, str);
+ break;
+ case 'u':
+ unum = va_arg(args, unsigned int);
+ unsigned_dec_print(&s, n, &chars_printed, unum);
+ break;
+ default:
+ /* Panic on any other format specifier. */
+ ERROR("snprintf: specifier with ASCII code '%d' not supported.",
+ *fmt);
+ plat_panic_handler();
+ }
+ fmt++;
+ continue;
+ }
+
+ if (chars_printed < n) {
+ *s = *fmt;
+ s++;
+ }
+
+ fmt++;
+ chars_printed++;
+ }
+
+ va_end(args);
+
+ if (n > 0U)
+ *s = '\0';
+
+ return (int)chars_printed;
+}
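
Because the return value is the length that would have been written, truncation is detected by comparing it against the buffer size, as in this sketch:

    #include <stdio.h>

    int detect_truncation(void)
    {
        char buf[8];
        int len = snprintf(buf, sizeof(buf), "cpu%u: %d", 12U, -3);

        /* "cpu12: -3" needs 9 characters, so len == 9 here and buf holds
         * the truncated "cpu12: " plus the terminator */
        return (len >= (int)sizeof(buf)) ? 1 : 0;
    }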
diff --git a/lib/stdlib/strchr.c b/lib/libc/strchr.c
index 4247dcd36..d94bb9e0f 100644
--- a/lib/stdlib/strchr.c
+++ b/lib/libc/strchr.c
@@ -1,4 +1,6 @@
/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
* Copyright (c) 1990, 1993
* The Regents of the University of California. All rights reserved.
*
@@ -10,7 +12,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
+ * 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
@@ -28,11 +30,10 @@
*/
/*
- * Portions copyright (c) 2013-2014, ARM Limited and Contributors.
+ * Portions copyright (c) 2018, ARM Limited and Contributors.
* All rights reserved.
*/
-#include <sys/cdefs.h>
#include <stddef.h>
#include <string.h>
diff --git a/lib/stdlib/strcmp.c b/lib/libc/strcmp.c
index bb86e0f2c..b742f9b90 100644
--- a/lib/stdlib/strcmp.c
+++ b/lib/libc/strcmp.c
@@ -1,4 +1,6 @@
/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
* Copyright (c) 1990, 1993
* The Regents of the University of California. All rights reserved.
*
@@ -13,7 +15,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
+ * 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
@@ -31,12 +33,10 @@
*/
/*
- * Portions copyright (c) 2014, ARM Limited and Contributors.
+ * Portions copyright (c) 2018, ARM Limited and Contributors.
* All rights reserved.
*/
-#include <sys/cdefs.h>
-#include <sys/ctype.h>
#include <string.h>
/*
@@ -47,20 +47,6 @@ strcmp(const char *s1, const char *s2)
{
while (*s1 == *s2++)
if (*s1++ == '\0')
- return 0;
- return *(const unsigned char *)s1 - *(const unsigned char *)(s2 - 1);
-}
-
-int
-strcasecmp(const char *s1, const char *s2)
-{
- const unsigned char *us1 = (const unsigned char *)s1;
- const unsigned char *us2 = (const unsigned char *)s2;
-
- while (tolower(*us1) == tolower(*us2)) {
- if (*us1++ == '\0')
- return 0;
- us2++;
- }
- return tolower(*us1) - tolower(*us2);
+ return (0);
+ return (*(const unsigned char *)s1 - *(const unsigned char *)(s2 - 1));
}
diff --git a/lib/libc/strlen.c b/lib/libc/strlen.c
new file mode 100644
index 000000000..3c2763006
--- /dev/null
+++ b/lib/libc/strlen.c
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+size_t strlen(const char *s)
+{
+ const char *cursor = s;
+
+ while (*cursor)
+ cursor++;
+
+ return cursor - s;
+}
diff --git a/lib/stdlib/strncmp.c b/lib/libc/strncmp.c
index f45f4a223..ce9e5ed4a 100644
--- a/lib/stdlib/strncmp.c
+++ b/lib/libc/strncmp.c
@@ -1,4 +1,6 @@
-/*
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
* Copyright (c) 1989, 1993
* The Regents of the University of California. All rights reserved.
*
@@ -10,7 +12,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
+ * 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
@@ -28,11 +30,10 @@
*/
/*
- * Portions copyright (c) 2014, ARM Limited and Contributors.
+ * Portions copyright (c) 2018, ARM Limited and Contributors.
* All rights reserved.
*/
-#include <sys/cdefs.h>
#include <string.h>
int
@@ -40,7 +41,7 @@ strncmp(const char *s1, const char *s2, size_t n)
{
if (n == 0)
- return 0;
+ return (0);
do {
if (*s1 != *s2++)
return (*(const unsigned char *)s1 -
@@ -48,5 +49,5 @@ strncmp(const char *s1, const char *s2, size_t n)
if (*s1++ == '\0')
break;
} while (--n != 0);
- return 0;
+ return (0);
}
diff --git a/lib/stdlib/strnlen.c b/lib/libc/strnlen.c
index d48502bdb..b944e95b4 100644
--- a/lib/stdlib/strnlen.c
+++ b/lib/libc/strnlen.c
@@ -1,4 +1,6 @@
/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
* Copyright (c) 2009 David Schultz <das@FreeBSD.org>
* All rights reserved.
*
@@ -25,11 +27,10 @@
*/
/*
- * Portions copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Portions copyright (c) 2018, ARM Limited and Contributors.
+ * All rights reserved.
*/
-#include <sys/cdefs.h>
-
#include <string.h>
size_t
diff --git a/lib/libfdt/libfdt.mk b/lib/libfdt/libfdt.mk
index d03dde204..1cbbd7852 100644
--- a/lib/libfdt/libfdt.mk
+++ b/lib/libfdt/libfdt.mk
@@ -15,3 +15,5 @@ LIBFDT_SRCS := $(addprefix lib/libfdt/, \
fdt_wip.c) \
INCLUDES += -Iinclude/lib/libfdt
+
+$(eval $(call MAKE_LIB,fdt))
diff --git a/lib/pmf/pmf_main.c b/lib/pmf/pmf_main.c
index 0208948fe..a02086008 100644
--- a/lib/pmf/pmf_main.c
+++ b/lib/pmf/pmf_main.c
@@ -178,7 +178,7 @@ int pmf_get_timestamp_smc(unsigned int tid,
*/
void __pmf_dump_timestamp(unsigned int tid, unsigned long long ts)
{
- tf_printf("PMF:cpu %u tid %u ts %llu\n",
+ printf("PMF:cpu %u tid %u ts %llu\n",
plat_my_core_pos(), tid, ts);
}
diff --git a/lib/psci/aarch32/psci_helpers.S b/lib/psci/aarch32/psci_helpers.S
index a29a29c49..63d7e7088 100644
--- a/lib/psci/aarch32/psci_helpers.S
+++ b/lib/psci/aarch32/psci_helpers.S
@@ -91,28 +91,6 @@ func psci_do_pwrup_cache_maintenance
stcopr r0, SCTLR
isb
-#if PLAT_XLAT_TABLES_DYNAMIC
- /* ---------------------------------------------
- * During warm boot the MMU is enabled with data
- * cache disabled, then the interconnect is set
- * up and finally the data cache is enabled.
- *
- * During this period, if another CPU modifies
- * the translation tables, the MMU table walker
- * may read the old entries. This is only a
- * problem for dynamic regions, the warm boot
- * code isn't affected because it is static.
- *
- * Invalidate all TLB entries loaded while the
- * CPU wasn't coherent with the rest of the
- * system.
- * ---------------------------------------------
- */
- stcopr r0, TLBIALL
- dsb ish
- isb
-#endif
-
pop {r12, pc}
endfunc psci_do_pwrup_cache_maintenance
diff --git a/lib/psci/aarch64/psci_helpers.S b/lib/psci/aarch64/psci_helpers.S
index d37ca764a..06d6636ed 100644
--- a/lib/psci/aarch64/psci_helpers.S
+++ b/lib/psci/aarch64/psci_helpers.S
@@ -115,28 +115,6 @@ func psci_do_pwrup_cache_maintenance
msr sctlr_el3, x0
isb
-#if PLAT_XLAT_TABLES_DYNAMIC
- /* ---------------------------------------------
- * During warm boot the MMU is enabled with data
- * cache disabled, then the interconnect is set
- * up and finally the data cache is enabled.
- *
- * During this period, if another CPU modifies
- * the translation tables, the MMU table walker
- * may read the old entries. This is only a
- * problem for dynamic regions, the warm boot
- * code isn't affected because it is static.
- *
- * Invalidate all TLB entries loaded while the
- * CPU wasn't coherent with the rest of the
- * system.
- * ---------------------------------------------
- */
- tlbi alle3
- dsb ish
- isb
-#endif
-
ldp x29, x30, [sp], #16
ret
endfunc psci_do_pwrup_cache_maintenance
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
index 2220a745c..ec74a8cdb 100644
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -68,9 +68,9 @@ const plat_psci_ops_t *psci_plat_pm_ops;
/******************************************************************************
* Check that the maximum power level supported by the platform makes sense
*****************************************************************************/
-CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \
- PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \
- assert_platform_max_pwrlvl_check);
+CASSERT((PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL) &&
+ (PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL),
+ assert_platform_max_pwrlvl_check);
/*
* The plat_local_state used by the platform is one of these types: RUN,
@@ -93,17 +93,25 @@ typedef enum plat_local_state_type {
STATE_TYPE_OFF
} plat_local_state_type_t;
-/* The macro used to categorize plat_local_state. */
-#define find_local_state_type(plat_local_state) \
- ((plat_local_state) ? ((plat_local_state > PLAT_MAX_RET_STATE) \
- ? STATE_TYPE_OFF : STATE_TYPE_RETN) \
- : STATE_TYPE_RUN)
+/* Function used to categorize plat_local_state. */
+static plat_local_state_type_t find_local_state_type(plat_local_state_t state)
+{
+ if (state != 0U) {
+ if (state > PLAT_MAX_RET_STATE) {
+ return STATE_TYPE_OFF;
+ } else {
+ return STATE_TYPE_RETN;
+ }
+ } else {
+ return STATE_TYPE_RUN;
+ }
+}
/******************************************************************************
* Check that the maximum retention level supported by the platform is less
* than the maximum off level.
*****************************************************************************/
-CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, \
+CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE,
assert_platform_max_off_and_retn_state_check);
/******************************************************************************
@@ -114,10 +122,10 @@ int psci_validate_power_state(unsigned int power_state,
psci_power_state_t *state_info)
{
/* Check SBZ bits in power state are zero */
- if (psci_check_power_state(power_state))
+ if (psci_check_power_state(power_state) != 0U)
return PSCI_E_INVALID_PARAMS;
- assert(psci_plat_pm_ops->validate_power_state);
+ assert(psci_plat_pm_ops->validate_power_state != NULL);
/* Validate the power_state using platform pm_ops */
return psci_plat_pm_ops->validate_power_state(power_state, state_info);
@@ -133,7 +141,7 @@ void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
* Assert that the required pm_ops hook is implemented to ensure that
* the capability detected during psci_setup() is valid.
*/
- assert(psci_plat_pm_ops->get_sys_suspend_power_state);
+ assert(psci_plat_pm_ops->get_sys_suspend_power_state != NULL);
/*
* Query the platform for the power_state required for system suspend
@@ -149,7 +157,7 @@ void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
******************************************************************************/
unsigned int psci_is_last_on_cpu(void)
{
- unsigned int cpu_idx, my_idx = plat_my_core_pos();
+ int cpu_idx, my_idx = (int) plat_my_core_pos();
for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
if (cpu_idx == my_idx) {
@@ -201,7 +209,7 @@ static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
assert(pwrlvl > PSCI_CPU_PWR_LVL);
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
- psci_req_local_pwr_states[pwrlvl - 1][cpu_idx] = req_pwr_state;
+ psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx] = req_pwr_state;
#pragma GCC diagnostic pop
}
@@ -211,8 +219,15 @@ static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
void psci_init_req_local_pwr_states(void)
{
/* Initialize the requested state of all non CPU power domains as OFF */
- memset(&psci_req_local_pwr_states, PLAT_MAX_OFF_STATE,
- sizeof(psci_req_local_pwr_states));
+ unsigned int pwrlvl;
+ int core;
+
+ for (pwrlvl = 0U; pwrlvl < PLAT_MAX_PWR_LVL; pwrlvl++) {
+ for (core = 0; core < PLATFORM_CORE_COUNT; core++) {
+ psci_req_local_pwr_states[pwrlvl][core] =
+ PLAT_MAX_OFF_STATE;
+ }
+ }
}
/******************************************************************************
@@ -224,11 +239,11 @@ void psci_init_req_local_pwr_states(void)
* assertion is added to prevent us from accessing the CPU power level.
*****************************************************************************/
static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
- unsigned int cpu_idx)
+ int cpu_idx)
{
assert(pwrlvl > PSCI_CPU_PWR_LVL);
- return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx];
+ return &psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx];
}
/*
@@ -252,7 +267,7 @@ static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
static plat_local_state_t get_non_cpu_pd_node_local_state(
unsigned int parent_idx)
{
-#if !USE_COHERENT_MEM || !HW_ASSISTED_COHERENCY
+#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY)
flush_dcache_range(
(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
sizeof(psci_non_cpu_pd_nodes[parent_idx]));
@@ -268,7 +283,7 @@ static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
plat_local_state_t state)
{
psci_non_cpu_pd_nodes[parent_idx].local_state = state;
-#if !USE_COHERENT_MEM || !HW_ASSISTED_COHERENCY
+#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY)
flush_dcache_range(
(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
sizeof(psci_non_cpu_pd_nodes[parent_idx]));
@@ -291,7 +306,7 @@ void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
/* Copy the local power state from node to state_info */
- for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+ for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
}
@@ -324,7 +339,7 @@ static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
/* Copy the local_state from state_info */
- for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
+ for (lvl = 1U; lvl <= end_pwrlvl; lvl++) {
set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
}
@@ -334,15 +349,17 @@ static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
/*******************************************************************************
* PSCI helper function to get the parent nodes corresponding to a cpu_index.
******************************************************************************/
-void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
+void psci_get_parent_pwr_domain_nodes(int cpu_idx,
unsigned int end_lvl,
- unsigned int node_index[])
+ unsigned int *node_index)
{
unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
unsigned int i;
+ unsigned int *node = node_index;
- for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) {
- *node_index++ = parent_node;
+ for (i = PSCI_CPU_PWR_LVL + 1U; i <= end_lvl; i++) {
+ *node = parent_node;
+ node++;
parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
}
}
@@ -358,7 +375,7 @@ void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
/* Reset the local_state to RUN for the non cpu power domains. */
- for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+ for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
set_non_cpu_pd_node_local_state(parent_idx,
PSCI_LOCAL_STATE_RUN);
psci_set_req_local_pwr_state(lvl,
@@ -398,7 +415,8 @@ void psci_do_state_coordination(unsigned int end_pwrlvl,
psci_power_state_t *state_info)
{
unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
- unsigned int start_idx, ncpus;
+ int start_idx;
+ unsigned int ncpus;
plat_local_state_t target_state, *req_states;
assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
@@ -406,7 +424,7 @@ void psci_do_state_coordination(unsigned int end_pwrlvl,
/* For level 0, the requested state will be equivalent
to target state */
- for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+ for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
/* First update the requested power state */
psci_set_req_local_pwr_state(lvl, cpu_idx,
@@ -428,7 +446,7 @@ void psci_do_state_coordination(unsigned int end_pwrlvl,
state_info->pwr_domain_state[lvl] = target_state;
/* Break early if the negotiated target power state is RUN */
- if (is_local_state_run(state_info->pwr_domain_state[lvl]))
+ if (is_local_state_run(state_info->pwr_domain_state[lvl]) != 0)
break;
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
@@ -440,7 +458,7 @@ void psci_do_state_coordination(unsigned int end_pwrlvl,
* We update the requested power state from state_info and then
* set the target state as RUN.
*/
- for (lvl = lvl + 1; lvl <= end_pwrlvl; lvl++) {
+ for (lvl = lvl + 1U; lvl <= end_pwrlvl; lvl++) {
psci_set_req_local_pwr_state(lvl, cpu_idx,
state_info->pwr_domain_state[lvl]);
state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
@@ -478,7 +496,7 @@ int psci_validate_suspend_req(const psci_power_state_t *state_info,
/* All power domain levels are in a RUN state to begin with */
deepest_state_type = STATE_TYPE_RUN;
- for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) {
+ for (i = (int) target_lvl; i >= (int) PSCI_CPU_PWR_LVL; i--) {
state = state_info->pwr_domain_state[i];
req_state_type = find_local_state_type(state);
@@ -507,8 +525,9 @@ int psci_validate_suspend_req(const psci_power_state_t *state_info,
* has to be invalid and max retention level has to be a valid power
* level.
*/
- if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_PWR_LVL ||
- max_retn_lvl == PSCI_INVALID_PWR_LVL))
+ if ((is_power_down_state == 0U) &&
+ ((max_off_lvl != PSCI_INVALID_PWR_LVL) ||
+ (max_retn_lvl == PSCI_INVALID_PWR_LVL)))
return PSCI_E_INVALID_PARAMS;
return PSCI_E_SUCCESS;
@@ -522,9 +541,9 @@ unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
{
int i;
- for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
- if (is_local_state_off(state_info->pwr_domain_state[i]))
- return i;
+ for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
+ if (is_local_state_off(state_info->pwr_domain_state[i]) != 0)
+ return (unsigned int) i;
}
return PSCI_INVALID_PWR_LVL;
@@ -538,9 +557,9 @@ unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
{
int i;
- for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
- if (!is_local_state_run(state_info->pwr_domain_state[i]))
- return i;
+ for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
+ if (is_local_state_run(state_info->pwr_domain_state[i]) == 0)
+ return (unsigned int) i;
}
return PSCI_INVALID_PWR_LVL;
@@ -551,14 +570,13 @@ unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
* tree that the operation should be applied to. It picks up locks in order of
* increasing power domain level in the range specified.
******************************************************************************/
-void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
- unsigned int cpu_idx)
+void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl, int cpu_idx)
{
unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
unsigned int level;
/* No locking required for level 0. Hence start locking from level 1 */
- for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
+ for (level = PSCI_CPU_PWR_LVL + 1U; level <= end_pwrlvl; level++) {
psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
}
@@ -569,18 +587,17 @@ void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
* tree that the operation should be applied to. It releases the locks in order
* of decreasing power domain level in the range specified.
******************************************************************************/
-void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
- unsigned int cpu_idx)
+void psci_release_pwr_domain_locks(unsigned int end_pwrlvl, int cpu_idx)
{
unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0};
- int level;
+ unsigned int level;
/* Get the parent nodes */
psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
/* Unlock top down. No unlocking required for level 0. */
- for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1; level--) {
- parent_idx = parent_nodes[level - 1];
+ for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1U; level--) {
+ parent_idx = parent_nodes[level - 1U];
psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
}
}
@@ -656,11 +673,12 @@ static int psci_get_ns_ep_info(entry_point_info_t *ep,
u_register_t ns_scr_el3 = read_scr_el3();
u_register_t ns_sctlr_el1 = read_sctlr_el1();
- sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
+ sctlr = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
+ read_sctlr_el2() : ns_sctlr_el1;
ee = 0;
ep_attr = NON_SECURE | EP_ST_DISABLE;
- if (sctlr & SCTLR_EE_BIT) {
+ if ((sctlr & SCTLR_EE_BIT) != 0U) {
ep_attr |= EP_EE_BIG;
ee = 1;
}
@@ -674,21 +692,22 @@ static int psci_get_ns_ep_info(entry_point_info_t *ep,
* Figure out whether the cpu enters the non-secure address space
* in aarch32 or aarch64
*/
- if (ns_scr_el3 & SCR_RW_BIT) {
+ if ((ns_scr_el3 & SCR_RW_BIT) != 0U) {
/*
* Check whether a Thumb entry point has been provided for an
* aarch64 EL
*/
- if (entrypoint & 0x1)
+ if ((entrypoint & 0x1UL) != 0UL)
return PSCI_E_INVALID_ADDRESS;
- mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;
+ mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ? MODE_EL2 : MODE_EL1;
ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
} else {
- mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
+ mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
+ MODE32_hyp : MODE32_svc;
/*
* TODO: Choose async. exception bits if HYP mode is not
@@ -715,7 +734,7 @@ int psci_validate_entry_point(entry_point_info_t *ep,
int rc;
/* Validate the entrypoint using platform psci_ops */
- if (psci_plat_pm_ops->validate_ns_entrypoint) {
+ if (psci_plat_pm_ops->validate_ns_entrypoint != NULL) {
rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
if (rc != PSCI_E_SUCCESS)
return PSCI_E_INVALID_ADDRESS;
@@ -741,7 +760,8 @@ int psci_validate_entry_point(entry_point_info_t *ep,
******************************************************************************/
void psci_warmboot_entrypoint(void)
{
- unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
+ unsigned int end_pwrlvl;
+ int cpu_idx = (int) plat_my_core_pos();
psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
/*
@@ -764,8 +784,7 @@ void psci_warmboot_entrypoint(void)
* that by the time all locks are taken, the system topology is snapshot
* and state management can be done safely.
*/
- psci_acquire_pwr_domain_locks(end_pwrlvl,
- cpu_idx);
+ psci_acquire_pwr_domain_locks(end_pwrlvl, cpu_idx);
psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
@@ -810,8 +829,7 @@ void psci_warmboot_entrypoint(void)
* This loop releases the lock corresponding to each power level
* in the reverse order to which they were acquired.
*/
- psci_release_pwr_domain_locks(end_pwrlvl,
- cpu_idx);
+ psci_release_pwr_domain_locks(end_pwrlvl, cpu_idx);
}
/*******************************************************************************
@@ -821,13 +839,13 @@ void psci_warmboot_entrypoint(void)
******************************************************************************/
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
{
- assert(pm);
+ assert(pm != NULL);
psci_spd_pm = pm;
- if (pm->svc_migrate)
+ if (pm->svc_migrate != NULL)
psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);
- if (pm->svc_migrate_info)
+ if (pm->svc_migrate_info != NULL)
psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
| define_psci_cap(PSCI_MIG_INFO_TYPE);
}
@@ -843,13 +861,13 @@ int psci_spd_migrate_info(u_register_t *mpidr)
{
int rc;
- if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info)
+ if ((psci_spd_pm == NULL) || (psci_spd_pm->svc_migrate_info == NULL))
return PSCI_E_NOT_SUPPORTED;
rc = psci_spd_pm->svc_migrate_info(mpidr);
- assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP \
- || rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED);
+ assert((rc == PSCI_TOS_UP_MIG_CAP) || (rc == PSCI_TOS_NOT_UP_MIG_CAP) ||
+ (rc == PSCI_TOS_NOT_PRESENT_MP) || (rc == PSCI_E_NOT_SUPPORTED));
return rc;
}
@@ -862,7 +880,7 @@ int psci_spd_migrate_info(u_register_t *mpidr)
void psci_print_power_domain_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
- unsigned int idx;
+ int idx;
plat_local_state_t state;
plat_local_state_type_t state_type;
@@ -908,16 +926,16 @@ void psci_print_power_domain_map(void)
*****************************************************************************/
int psci_secondaries_brought_up(void)
{
- unsigned int idx, n_valid = 0;
+ unsigned int idx, n_valid = 0U;
- for (idx = 0; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) {
+ for (idx = 0U; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) {
if (psci_cpu_pd_nodes[idx].mpidr != PSCI_INVALID_MPIDR)
n_valid++;
}
- assert(n_valid);
+ assert(n_valid > 0U);
- return (n_valid > 1);
+ return (n_valid > 1U) ? 1 : 0;
}
#if ENABLE_PLAT_COMPAT
@@ -964,8 +982,8 @@ int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
return PSCI_INVALID_DATA;
/* Sanity check to verify that the CPU is in CPU_SUSPEND */
- if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON &&
- !is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx)))
+ if ((psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON) &&
+ (!is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx))))
return psci_get_pstate_id(psci_power_state_compat[cpu_idx]);
return PSCI_INVALID_DATA;
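A recurring pattern runs through the psci_common.c hunks above: implicit truth tests become explicit comparisons against NULL, 0 or 0U, and loop counters get a single signedness with casts at the boundaries. This is consistent with MISRA C:2012-style cleanup (essentially-Boolean controlling expressions), though the commit message is not shown here. A minimal, self-contained C sketch of the before/after shape, with illustrative names:

	#include <stddef.h>

	struct pm_ops {
		void (*hook)(void);
	};

	static void notify(const struct pm_ops *ops, unsigned int flags)
	{
		/*
		 * Before this style of cleanup the test would read:
		 *     if (ops->hook && (flags & 0x1))
		 * After it, every controlling expression is an explicit
		 * comparison:
		 */
		if ((ops->hook != NULL) && ((flags & 0x1U) != 0U))
			ops->hook();
	}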
diff --git a/lib/psci/psci_main.c b/lib/psci/psci_main.c
index 607d0cd50..fd822bcfd 100644
--- a/lib/psci/psci_main.c
+++ b/lib/psci/psci_main.c
@@ -83,7 +83,7 @@ int psci_cpu_suspend(unsigned int power_state,
/* Fast path for CPU standby.*/
if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) {
- if (!psci_plat_pm_ops->cpu_standby)
+ if (psci_plat_pm_ops->cpu_standby == NULL)
return PSCI_E_INVALID_PARAMS;
/*
@@ -128,7 +128,7 @@ int psci_cpu_suspend(unsigned int power_state,
* If a power down state has been requested, we need to verify entry
* point and program entry information.
*/
- if (is_power_down_state) {
+ if (is_power_down_state != 0U) {
rc = psci_validate_entry_point(&ep, entrypoint, context_id);
if (rc != PSCI_E_SUCCESS)
return rc;
@@ -156,7 +156,7 @@ int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id)
entry_point_info_t ep;
/* Check if the current CPU is the last ON CPU in the system */
- if (!psci_is_last_on_cpu())
+ if (psci_is_last_on_cpu() == 0U)
return PSCI_E_DENIED;
/* Validate the entry point and get the entry_point_info */
@@ -171,7 +171,8 @@ int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id)
assert(psci_find_target_suspend_lvl(&state_info) == PLAT_MAX_PWR_LVL);
assert(psci_validate_suspend_req(&state_info, PSTATE_TYPE_POWERDOWN)
== PSCI_E_SUCCESS);
- assert(is_local_state_off(state_info.pwr_domain_state[PLAT_MAX_PWR_LVL]));
+ assert(is_local_state_off(
+ state_info.pwr_domain_state[PLAT_MAX_PWR_LVL]) != 0);
/*
* Do what is needed to enter the system suspend state. This function
@@ -236,7 +237,8 @@ int psci_affinity_info(u_register_t target_affinity,
* target CPU's shutdown was not seen by the current CPU's cluster, and
* so the cache may contain stale data for the target CPU.
*/
- flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+ flush_cpu_data_by_index((unsigned int)target_idx,
+ psci_svc_cpu_data.aff_info_state);
return psci_get_aff_info_state_by_idx(target_idx);
}
@@ -263,10 +265,10 @@ int psci_migrate(u_register_t target_cpu)
if (rc != PSCI_E_SUCCESS)
return PSCI_E_INVALID_PARAMS;
- assert(psci_spd_pm && psci_spd_pm->svc_migrate);
+ assert((psci_spd_pm != NULL) && (psci_spd_pm->svc_migrate != NULL));
rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu);
- assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
+ assert((rc == PSCI_E_SUCCESS) || (rc == PSCI_E_INTERN_FAIL));
return rc;
}
@@ -278,7 +280,7 @@ int psci_migrate_info_type(void)
return psci_spd_migrate_info(&resident_cpu_mpidr);
}
-long psci_migrate_info_up_cpu(void)
+u_register_t psci_migrate_info_up_cpu(void)
{
u_register_t resident_cpu_mpidr;
int rc;
@@ -288,8 +290,8 @@ long psci_migrate_info_up_cpu(void)
* psci_spd_migrate_info() returns.
*/
rc = psci_spd_migrate_info(&resident_cpu_mpidr);
- if (rc != PSCI_TOS_NOT_UP_MIG_CAP && rc != PSCI_TOS_UP_MIG_CAP)
- return PSCI_E_INVALID_PARAMS;
+ if ((rc != PSCI_TOS_NOT_UP_MIG_CAP) && (rc != PSCI_TOS_UP_MIG_CAP))
+ return (u_register_t)(register_t) PSCI_E_INVALID_PARAMS;
return resident_cpu_mpidr;
}
@@ -312,10 +314,11 @@ int psci_node_hw_state(u_register_t target_cpu,
* Dispatch this call to platform to query power controller, and pass on
* to the caller what it returns
*/
- assert(psci_plat_pm_ops->get_node_hw_state);
+ assert(psci_plat_pm_ops->get_node_hw_state != NULL);
rc = psci_plat_pm_ops->get_node_hw_state(target_cpu, power_level);
- assert((rc >= HW_ON && rc <= HW_STANDBY) || rc == PSCI_E_NOT_SUPPORTED
- || rc == PSCI_E_INVALID_PARAMS);
+ assert(((rc >= HW_ON) && (rc <= HW_STANDBY))
+ || (rc == PSCI_E_NOT_SUPPORTED)
+ || (rc == PSCI_E_INVALID_PARAMS));
return rc;
}
@@ -337,17 +340,19 @@ int psci_features(unsigned int psci_fid)
/* Check if the psci fid is supported or not */
- if (!(local_caps & define_psci_cap(psci_fid)))
+ if ((local_caps & define_psci_cap(psci_fid)) == 0U)
return PSCI_E_NOT_SUPPORTED;
/* Format the feature flags */
- if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 ||
- psci_fid == PSCI_CPU_SUSPEND_AARCH64) {
+ if ((psci_fid == PSCI_CPU_SUSPEND_AARCH32) ||
+ (psci_fid == PSCI_CPU_SUSPEND_AARCH64)) {
/*
* The trusted firmware does not support OS Initiated Mode.
*/
- return (FF_PSTATE << FF_PSTATE_SHIFT) |
- ((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT);
+ unsigned int ret = ((FF_PSTATE << FF_PSTATE_SHIFT) |
+ (((FF_SUPPORTS_OS_INIT_MODE == 1U) ? 0U : 1U)
+ << FF_MODE_SUPPORT_SHIFT));
+ return (int) ret;
}
/* Return 0 for all other fid's */
@@ -366,50 +371,62 @@ u_register_t psci_smc_handler(uint32_t smc_fid,
void *handle,
u_register_t flags)
{
+ u_register_t ret;
+
if (is_caller_secure(flags))
- return SMC_UNK;
+ return (u_register_t)SMC_UNK;
/* Check the fid against the capabilities */
- if (!(psci_caps & define_psci_cap(smc_fid)))
- return SMC_UNK;
+ if ((psci_caps & define_psci_cap(smc_fid)) == 0U)
+ return (u_register_t)SMC_UNK;
if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
/* 32-bit PSCI function, clear top parameter bits */
- x1 = (uint32_t)x1;
- x2 = (uint32_t)x2;
- x3 = (uint32_t)x3;
+ uint32_t r1 = (uint32_t)x1;
+ uint32_t r2 = (uint32_t)x2;
+ uint32_t r3 = (uint32_t)x3;
switch (smc_fid) {
case PSCI_VERSION:
- return psci_version();
+ ret = (u_register_t)psci_version();
+ break;
case PSCI_CPU_OFF:
- return psci_cpu_off();
+ ret = (u_register_t)psci_cpu_off();
+ break;
case PSCI_CPU_SUSPEND_AARCH32:
- return psci_cpu_suspend(x1, x2, x3);
+ ret = (u_register_t)psci_cpu_suspend(r1, r2, r3);
+ break;
case PSCI_CPU_ON_AARCH32:
- return psci_cpu_on(x1, x2, x3);
+ ret = (u_register_t)psci_cpu_on(r1, r2, r3);
+ break;
case PSCI_AFFINITY_INFO_AARCH32:
- return psci_affinity_info(x1, x2);
+ ret = (u_register_t)psci_affinity_info(r1, r2);
+ break;
case PSCI_MIG_AARCH32:
- return psci_migrate(x1);
+ ret = (u_register_t)psci_migrate(r1);
+ break;
case PSCI_MIG_INFO_TYPE:
- return psci_migrate_info_type();
+ ret = (u_register_t)psci_migrate_info_type();
+ break;
case PSCI_MIG_INFO_UP_CPU_AARCH32:
- return psci_migrate_info_up_cpu();
+ ret = psci_migrate_info_up_cpu();
+ break;
case PSCI_NODE_HW_STATE_AARCH32:
- return psci_node_hw_state(x1, x2);
+ ret = (u_register_t)psci_node_hw_state(r1, r2);
+ break;
case PSCI_SYSTEM_SUSPEND_AARCH32:
- return psci_system_suspend(x1, x2);
+ ret = (u_register_t)psci_system_suspend(r1, r2);
+ break;
case PSCI_SYSTEM_OFF:
psci_system_off();
@@ -422,26 +439,34 @@ u_register_t psci_smc_handler(uint32_t smc_fid,
break;
case PSCI_FEATURES:
- return psci_features(x1);
+ ret = (u_register_t)psci_features(r1);
+ break;
#if ENABLE_PSCI_STAT
case PSCI_STAT_RESIDENCY_AARCH32:
- return psci_stat_residency(x1, x2);
+ ret = psci_stat_residency(r1, r2);
+ break;
case PSCI_STAT_COUNT_AARCH32:
- return psci_stat_count(x1, x2);
+ ret = psci_stat_count(r1, r2);
+ break;
#endif
case PSCI_MEM_PROTECT:
- return psci_mem_protect(x1);
+ ret = psci_mem_protect(r1);
+ break;
case PSCI_MEM_CHK_RANGE_AARCH32:
- return psci_mem_chk_range(x1, x2);
+ ret = psci_mem_chk_range(r1, r2);
+ break;
case PSCI_SYSTEM_RESET2_AARCH32:
/* We should never return from psci_system_reset2() */
- return psci_system_reset2(x1, x2);
+ ret = psci_system_reset2(r1, r2);
+ break;
default:
+ WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid);
+ ret = (u_register_t)SMC_UNK;
break;
}
} else {
@@ -449,46 +474,61 @@ u_register_t psci_smc_handler(uint32_t smc_fid,
switch (smc_fid) {
case PSCI_CPU_SUSPEND_AARCH64:
- return psci_cpu_suspend(x1, x2, x3);
+ ret = (u_register_t)
+ psci_cpu_suspend((unsigned int)x1, x2, x3);
+ break;
case PSCI_CPU_ON_AARCH64:
- return psci_cpu_on(x1, x2, x3);
+ ret = (u_register_t)psci_cpu_on(x1, x2, x3);
+ break;
case PSCI_AFFINITY_INFO_AARCH64:
- return psci_affinity_info(x1, x2);
+ ret = (u_register_t)
+ psci_affinity_info(x1, (unsigned int)x2);
+ break;
case PSCI_MIG_AARCH64:
- return psci_migrate(x1);
+ ret = (u_register_t)psci_migrate(x1);
+ break;
case PSCI_MIG_INFO_UP_CPU_AARCH64:
- return psci_migrate_info_up_cpu();
+ ret = psci_migrate_info_up_cpu();
+ break;
case PSCI_NODE_HW_STATE_AARCH64:
- return psci_node_hw_state(x1, x2);
+ ret = (u_register_t)psci_node_hw_state(
+ x1, (unsigned int) x2);
+ break;
case PSCI_SYSTEM_SUSPEND_AARCH64:
- return psci_system_suspend(x1, x2);
+ ret = (u_register_t)psci_system_suspend(x1, x2);
+ break;
#if ENABLE_PSCI_STAT
case PSCI_STAT_RESIDENCY_AARCH64:
- return psci_stat_residency(x1, x2);
+ ret = psci_stat_residency(x1, (unsigned int) x2);
+ break;
case PSCI_STAT_COUNT_AARCH64:
- return psci_stat_count(x1, x2);
+ ret = psci_stat_count(x1, (unsigned int) x2);
+ break;
#endif
case PSCI_MEM_CHK_RANGE_AARCH64:
- return psci_mem_chk_range(x1, x2);
+ ret = psci_mem_chk_range(x1, x2);
+ break;
case PSCI_SYSTEM_RESET2_AARCH64:
/* We should never return from psci_system_reset2() */
- return psci_system_reset2(x1, x2);
+ ret = psci_system_reset2((uint32_t) x1, x2);
+ break;
default:
+ WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid);
+ ret = (u_register_t)SMC_UNK;
break;
}
}
- WARN("Unimplemented PSCI Call: 0x%x \n", smc_fid);
- return SMC_UNK;
+ return ret;
}
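Two details in the psci_main.c rework are worth spelling out. First, the handler now funnels every case through a single `ret` variable instead of returning mid-switch, which is why the unimplemented-call WARN() moved into both `default` branches. Second, negative PSCI error codes returned through the now-unsigned psci_migrate_info_up_cpu() are sign-extended first via the `(u_register_t)(register_t)` double cast. A self-contained sketch of why the intermediate signed cast matters, assuming the usual AArch64 typedefs:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t u_register_t;	/* assumed AArch64 definitions */
	typedef int64_t register_t;

	int main(void)
	{
		int err = -2;	/* PSCI_E_INVALID_PARAMS in the PSCI spec */

		/*
		 * int -> register_t sign-extends to 64 bits; the final
		 * unsigned cast then only reinterprets the bits.
		 */
		u_register_t r = (u_register_t)(register_t)err;

		printf("%llx\n", (unsigned long long)r); /* fffffffffffffffe */
		return 0;
	}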
diff --git a/lib/psci/psci_mem_protect.c b/lib/psci/psci_mem_protect.c
index fca84e905..857146b67 100644
--- a/lib/psci/psci_mem_protect.c
+++ b/lib/psci/psci_mem_protect.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,30 +9,31 @@
#include <utils.h>
#include "psci_private.h"
-int psci_mem_protect(unsigned int enable)
+u_register_t psci_mem_protect(unsigned int enable)
{
int val;
- assert(psci_plat_pm_ops->read_mem_protect);
- assert(psci_plat_pm_ops->write_mem_protect);
+ assert(psci_plat_pm_ops->read_mem_protect != NULL);
+ assert(psci_plat_pm_ops->write_mem_protect != NULL);
if (psci_plat_pm_ops->read_mem_protect(&val) < 0)
- return PSCI_E_NOT_SUPPORTED;
+ return (u_register_t) PSCI_E_NOT_SUPPORTED;
if (psci_plat_pm_ops->write_mem_protect(enable) < 0)
- return PSCI_E_NOT_SUPPORTED;
+ return (u_register_t) PSCI_E_NOT_SUPPORTED;
- return val != 0;
+ return (val != 0) ? 1U : 0U;
}
-int psci_mem_chk_range(uintptr_t base, u_register_t length)
+u_register_t psci_mem_chk_range(uintptr_t base, u_register_t length)
{
int ret;
- assert(psci_plat_pm_ops->mem_protect_chk);
+ assert(psci_plat_pm_ops->mem_protect_chk != NULL);
- if (length == 0 || check_uptr_overflow(base, length-1))
- return PSCI_E_DENIED;
+ if ((length == 0U) || check_uptr_overflow(base, length - 1U))
+ return (u_register_t) PSCI_E_DENIED;
ret = psci_plat_pm_ops->mem_protect_chk(base, length);
- return (ret < 0) ? PSCI_E_DENIED : PSCI_E_SUCCESS;
+ return (ret < 0) ?
+ (u_register_t) PSCI_E_DENIED : (u_register_t) PSCI_E_SUCCESS;
}
diff --git a/lib/psci/psci_off.c b/lib/psci/psci_off.c
index 231deea2d..944f8bff9 100644
--- a/lib/psci/psci_off.c
+++ b/lib/psci/psci_off.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -40,14 +40,15 @@ static void psci_set_power_off_state(psci_power_state_t *state_info)
******************************************************************************/
int psci_do_cpu_off(unsigned int end_pwrlvl)
{
- int rc = PSCI_E_SUCCESS, idx = plat_my_core_pos();
+ int rc = PSCI_E_SUCCESS;
+ int idx = (int) plat_my_core_pos();
psci_power_state_t state_info;
/*
* This function must only be called on platforms where the
* CPU_OFF platform hooks have been implemented.
*/
- assert(psci_plat_pm_ops->pwr_domain_off);
+ assert(psci_plat_pm_ops->pwr_domain_off != NULL);
/* Construct the psci_power_state for CPU_OFF */
psci_set_power_off_state(&state_info);
@@ -57,17 +58,16 @@ int psci_do_cpu_off(unsigned int end_pwrlvl)
* level so that by the time all locks are taken, the system topology
* is snapshot and state management can be done safely.
*/
- psci_acquire_pwr_domain_locks(end_pwrlvl,
- idx);
+ psci_acquire_pwr_domain_locks(end_pwrlvl, idx);
/*
* Call the cpu off handler registered by the Secure Payload Dispatcher
* to let it do any bookkeeping. Assume that the SPD always reports an
* E_DENIED error if the SP refuses to power down
*/
- if (psci_spd_pm && psci_spd_pm->svc_off) {
+ if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_off != NULL)) {
rc = psci_spd_pm->svc_off(0);
- if (rc)
+ if (rc != 0)
goto exit;
}
@@ -120,8 +120,7 @@ exit:
* Release the locks corresponding to each power level in the
* reverse order to which they were acquired.
*/
- psci_release_pwr_domain_locks(end_pwrlvl,
- idx);
+ psci_release_pwr_domain_locks(end_pwrlvl, idx);
/*
* Check if all actions needed to safely power down this cpu have
@@ -154,7 +153,7 @@ exit:
PMF_NO_CACHE_MAINT);
#endif
- if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi) {
+ if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi != NULL) {
/* This function must not return */
psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info);
} else {
diff --git a/lib/psci/psci_on.c b/lib/psci/psci_on.c
index 53b044ecf..f38900cf5 100644
--- a/lib/psci/psci_on.c
+++ b/lib/psci/psci_on.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -15,6 +15,19 @@
#include <stddef.h>
#include "psci_private.h"
+/*
+ * Helper functions for the CPU level spinlocks
+ */
+static inline void psci_spin_lock_cpu(int idx)
+{
+ spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock);
+}
+
+static inline void psci_spin_unlock_cpu(int idx)
+{
+ spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock);
+}
+
/*******************************************************************************
* This function checks whether a cpu which has been requested to be turned on
* is OFF to begin with.
@@ -42,22 +55,22 @@ static int cpu_on_validate_state(aff_info_state_t aff_state)
* platform handler as it can return error.
******************************************************************************/
int psci_cpu_on_start(u_register_t target_cpu,
- entry_point_info_t *ep)
+ const entry_point_info_t *ep)
{
int rc;
- unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu);
aff_info_state_t target_aff_state;
+ int target_idx = plat_core_pos_by_mpidr(target_cpu);
/* Calling function must supply valid input arguments */
- assert((int) target_idx >= 0);
+ assert(target_idx >= 0);
assert(ep != NULL);
/*
* This function must only be called on platforms where the
* CPU_ON platform hooks have been implemented.
*/
- assert(psci_plat_pm_ops->pwr_domain_on &&
- psci_plat_pm_ops->pwr_domain_on_finish);
+ assert((psci_plat_pm_ops->pwr_domain_on != NULL) &&
+ (psci_plat_pm_ops->pwr_domain_on_finish != NULL));
/* Protect against multiple CPUs trying to turn ON the same target CPU */
psci_spin_lock_cpu(target_idx);
@@ -78,7 +91,8 @@ int psci_cpu_on_start(u_register_t target_cpu,
* target CPU's shutdown was not seen by the current CPU's cluster, and
* so the cache may contain stale data for the target CPU.
*/
- flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+ flush_cpu_data_by_index((unsigned int)target_idx,
+ psci_svc_cpu_data.aff_info_state);
rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
if (rc != PSCI_E_SUCCESS)
goto exit;
@@ -88,7 +102,7 @@ int psci_cpu_on_start(u_register_t target_cpu,
* to let it do any bookkeeping. If the handler encounters an error, it's
* expected to assert within
*/
- if (psci_spd_pm && psci_spd_pm->svc_on)
+ if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on != NULL))
psci_spd_pm->svc_on(target_cpu);
/*
@@ -97,7 +111,8 @@ int psci_cpu_on_start(u_register_t target_cpu,
* turned OFF.
*/
psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
- flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+ flush_cpu_data_by_index((unsigned int)target_idx,
+ psci_svc_cpu_data.aff_info_state);
/*
* The cache line invalidation by the target CPU after setting the
@@ -109,9 +124,11 @@ int psci_cpu_on_start(u_register_t target_cpu,
if (target_aff_state != AFF_STATE_ON_PENDING) {
assert(target_aff_state == AFF_STATE_OFF);
psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
- flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+ flush_cpu_data_by_index((unsigned int)target_idx,
+ psci_svc_cpu_data.aff_info_state);
- assert(psci_get_aff_info_state_by_idx(target_idx) == AFF_STATE_ON_PENDING);
+ assert(psci_get_aff_info_state_by_idx(target_idx) ==
+ AFF_STATE_ON_PENDING);
}
/*
@@ -123,15 +140,16 @@ int psci_cpu_on_start(u_register_t target_cpu,
* steps to power on.
*/
rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
- assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
+ assert((rc == PSCI_E_SUCCESS) || (rc == PSCI_E_INTERN_FAIL));
if (rc == PSCI_E_SUCCESS)
/* Store the re-entry information for the non-secure world. */
- cm_init_context_by_index(target_idx, ep);
+ cm_init_context_by_index((unsigned int)target_idx, ep);
else {
/* Restore the state on error. */
psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
- flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+ flush_cpu_data_by_index((unsigned int)target_idx,
+ psci_svc_cpu_data.aff_info_state);
}
exit:
@@ -144,8 +162,7 @@ exit:
* are called by the common finisher routine in psci_common.c. The `state_info`
* is the psci_power_state from which this CPU has woken up from.
******************************************************************************/
-void psci_cpu_on_finish(unsigned int cpu_idx,
- psci_power_state_t *state_info)
+void psci_cpu_on_finish(int cpu_idx, const psci_power_state_t *state_info)
{
/*
* Plat. management: Perform the platform specific actions
@@ -186,7 +203,7 @@ void psci_cpu_on_finish(unsigned int cpu_idx,
* Dispatcher to let it do any bookkeeping. If the handler encounters an
* error, it's expected to assert within
*/
- if (psci_spd_pm && psci_spd_pm->svc_on_finish)
+ if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on_finish != NULL))
psci_spd_pm->svc_on_finish(0);
PUBLISH_EVENT(psci_cpu_on_finish);
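The flush_cpu_data_by_index() calls that gain (unsigned int) casts above belong to a small coherency handshake: the booting CPU writes AFF_STATE_ON_PENDING and cleans the cache line so the target CPU, whose caches may still be off, observes the update; if the target's own clean/invalidate raced ahead and destroyed that store, the state is written and flushed once more. A rough C model of the retry, with stand-ins for the TF-A cpu-data helpers:

	typedef enum {
		AFF_STATE_OFF,
		AFF_STATE_ON_PENDING,
		AFF_STATE_ON
	} aff_state_t;

	static volatile aff_state_t aff_state[4];

	/* Stand-ins for the TF-A cpu-data helpers (illustrative only). */
	static void set_state(int idx, aff_state_t s) { aff_state[idx] = s; }
	static aff_state_t get_state(int idx) { return aff_state[idx]; }
	static void flush_line(int idx) { (void)idx; /* dc cvac in TF-A */ }

	static void publish_on_pending(int idx)
	{
		set_state(idx, AFF_STATE_ON_PENDING);
		flush_line(idx);	/* make the store visible off-cluster */

		if (get_state(idx) != AFF_STATE_ON_PENDING) {
			/*
			 * The target CPU's clean/invalidate raced with the
			 * store above; write and clean once more.
			 */
			set_state(idx, AFF_STATE_ON_PENDING);
			flush_line(idx);
		}
	}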
diff --git a/lib/psci/psci_private.h b/lib/psci/psci_private.h
index d452e2ae0..82b951db6 100644
--- a/lib/psci/psci_private.h
+++ b/lib/psci/psci_private.h
@@ -4,69 +4,17 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef __PSCI_PRIVATE_H__
-#define __PSCI_PRIVATE_H__
+#ifndef PSCI_PRIVATE_H
+#define PSCI_PRIVATE_H
#include <arch.h>
+#include <arch_helpers.h>
#include <bakery_lock.h>
#include <bl_common.h>
#include <cpu_data.h>
#include <psci.h>
#include <spinlock.h>
-
-#if HW_ASSISTED_COHERENCY
-
-/*
- * On systems with hardware-assisted coherency, make PSCI cache operations NOP,
- * as PSCI participants are cache-coherent, and there's no need for explicit
- * cache maintenance operations or barriers to coordinate their state.
- */
-#define psci_flush_dcache_range(addr, size)
-#define psci_flush_cpu_data(member)
-#define psci_inv_cpu_data(member)
-
-#define psci_dsbish()
-
-/*
- * On systems where participant CPUs are cache-coherent, we can use spinlocks
- * instead of bakery locks.
- */
-#define DEFINE_PSCI_LOCK(_name) spinlock_t _name
-#define DECLARE_PSCI_LOCK(_name) extern DEFINE_PSCI_LOCK(_name)
-
-#define psci_lock_get(non_cpu_pd_node) \
- spin_lock(&psci_locks[(non_cpu_pd_node)->lock_index])
-#define psci_lock_release(non_cpu_pd_node) \
- spin_unlock(&psci_locks[(non_cpu_pd_node)->lock_index])
-
-#else
-
-/*
- * If not all PSCI participants are cache-coherent, perform cache maintenance
- * and issue barriers wherever required to coordinate state.
- */
-#define psci_flush_dcache_range(addr, size) flush_dcache_range(addr, size)
-#define psci_flush_cpu_data(member) flush_cpu_data(member)
-#define psci_inv_cpu_data(member) inv_cpu_data(member)
-
-#define psci_dsbish() dsbish()
-
-/*
- * Use bakery locks for state coordination as not all PSCI participants are
- * cache coherent.
- */
-#define DEFINE_PSCI_LOCK(_name) DEFINE_BAKERY_LOCK(_name)
-#define DECLARE_PSCI_LOCK(_name) DECLARE_BAKERY_LOCK(_name)
-
-#define psci_lock_get(non_cpu_pd_node) \
- bakery_lock_get(&psci_locks[(non_cpu_pd_node)->lock_index])
-#define psci_lock_release(non_cpu_pd_node) \
- bakery_lock_release(&psci_locks[(non_cpu_pd_node)->lock_index])
-
-#endif
-
-#define psci_lock_init(_non_cpu_pd_node, _idx) \
- ((_non_cpu_pd_node)[(_idx)].lock_index = (_idx))
+#include <stdbool.h>
/*
* The PSCI capabilities which are provided by the generic code but do not
@@ -94,37 +42,63 @@
define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64))
/*
- * Helper macros to get/set the fields of PSCI per-cpu data.
- */
-#define psci_set_aff_info_state(_aff_state) \
- set_cpu_data(psci_svc_cpu_data.aff_info_state, _aff_state)
-#define psci_get_aff_info_state() \
- get_cpu_data(psci_svc_cpu_data.aff_info_state)
-#define psci_get_aff_info_state_by_idx(_idx) \
- get_cpu_data_by_index(_idx, psci_svc_cpu_data.aff_info_state)
-#define psci_set_aff_info_state_by_idx(_idx, _aff_state) \
- set_cpu_data_by_index(_idx, psci_svc_cpu_data.aff_info_state,\
- _aff_state)
-#define psci_get_suspend_pwrlvl() \
- get_cpu_data(psci_svc_cpu_data.target_pwrlvl)
-#define psci_set_suspend_pwrlvl(_target_lvl) \
- set_cpu_data(psci_svc_cpu_data.target_pwrlvl, _target_lvl)
-#define psci_set_cpu_local_state(_state) \
- set_cpu_data(psci_svc_cpu_data.local_state, _state)
-#define psci_get_cpu_local_state() \
- get_cpu_data(psci_svc_cpu_data.local_state)
-#define psci_get_cpu_local_state_by_idx(_idx) \
- get_cpu_data_by_index(_idx, psci_svc_cpu_data.local_state)
-
-/*
- * Helper macros for the CPU level spinlocks
+ * Helper functions to get/set the fields of PSCI per-cpu data.
*/
-#define psci_spin_lock_cpu(_idx) spin_lock(&psci_cpu_pd_nodes[_idx].cpu_lock)
-#define psci_spin_unlock_cpu(_idx) spin_unlock(&psci_cpu_pd_nodes[_idx].cpu_lock)
-
-/* Helper macro to identify a CPU standby request in PSCI Suspend call */
-#define is_cpu_standby_req(_is_power_down_state, _retn_lvl) \
- (((!(_is_power_down_state)) && ((_retn_lvl) == 0)) ? 1 : 0)
+static inline void psci_set_aff_info_state(aff_info_state_t aff_state)
+{
+ set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state);
+}
+
+static inline aff_info_state_t psci_get_aff_info_state(void)
+{
+ return get_cpu_data(psci_svc_cpu_data.aff_info_state);
+}
+
+static inline aff_info_state_t psci_get_aff_info_state_by_idx(int idx)
+{
+ return get_cpu_data_by_index((unsigned int)idx,
+ psci_svc_cpu_data.aff_info_state);
+}
+
+static inline void psci_set_aff_info_state_by_idx(int idx,
+ aff_info_state_t aff_state)
+{
+ set_cpu_data_by_index((unsigned int)idx,
+ psci_svc_cpu_data.aff_info_state, aff_state);
+}
+
+static inline unsigned int psci_get_suspend_pwrlvl(void)
+{
+ return get_cpu_data(psci_svc_cpu_data.target_pwrlvl);
+}
+
+static inline void psci_set_suspend_pwrlvl(unsigned int target_lvl)
+{
+ set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl);
+}
+
+static inline void psci_set_cpu_local_state(plat_local_state_t state)
+{
+ set_cpu_data(psci_svc_cpu_data.local_state, state);
+}
+
+static inline plat_local_state_t psci_get_cpu_local_state(void)
+{
+ return get_cpu_data(psci_svc_cpu_data.local_state);
+}
+
+static inline plat_local_state_t psci_get_cpu_local_state_by_idx(int idx)
+{
+ return get_cpu_data_by_index((unsigned int)idx,
+ psci_svc_cpu_data.local_state);
+}
+
+/* Helper function to identify a CPU standby request in PSCI Suspend call */
+static inline bool is_cpu_standby_req(unsigned int is_power_down_state,
+ unsigned int retn_lvl)
+{
+ return (is_power_down_state == 0U) && (retn_lvl == 0U);
+}
/*******************************************************************************
* The following two data structures implement the power domain tree. The tree
@@ -138,7 +112,7 @@ typedef struct non_cpu_pwr_domain_node {
* Index of the first CPU power domain node level 0 which has this node
* as its parent.
*/
- unsigned int cpu_start_idx;
+ int cpu_start_idx;
/*
* Number of CPU power domains which are siblings of the domain indexed
@@ -180,6 +154,95 @@ typedef struct cpu_pwr_domain_node {
} cpu_pd_node_t;
/*******************************************************************************
+ * The following are helpers and declarations of locks.
+ ******************************************************************************/
+#if HW_ASSISTED_COHERENCY
+/*
+ * On systems where participant CPUs are cache-coherent, we can use spinlocks
+ * instead of bakery locks.
+ */
+#define DEFINE_PSCI_LOCK(_name) spinlock_t _name
+#define DECLARE_PSCI_LOCK(_name) extern DEFINE_PSCI_LOCK(_name)
+
+/* One lock is required per non-CPU power domain node */
+DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
+
+/*
+ * On systems with hardware-assisted coherency, make PSCI cache operations NOP,
+ * as PSCI participants are cache-coherent, and there's no need for explicit
+ * cache maintenance operations or barriers to coordinate their state.
+ */
+static inline void psci_flush_dcache_range(uintptr_t __unused addr,
+ size_t __unused size)
+{
+ /* Empty */
+}
+
+#define psci_flush_cpu_data(member)
+#define psci_inv_cpu_data(member)
+
+static inline void psci_dsbish(void)
+{
+ /* Empty */
+}
+
+static inline void psci_lock_get(non_cpu_pd_node_t *non_cpu_pd_node)
+{
+ spin_lock(&psci_locks[non_cpu_pd_node->lock_index]);
+}
+
+static inline void psci_lock_release(non_cpu_pd_node_t *non_cpu_pd_node)
+{
+ spin_unlock(&psci_locks[non_cpu_pd_node->lock_index]);
+}
+
+#else /* if HW_ASSISTED_COHERENCY == 0 */
+/*
+ * Use bakery locks for state coordination as not all PSCI participants are
+ * cache coherent.
+ */
+#define DEFINE_PSCI_LOCK(_name) DEFINE_BAKERY_LOCK(_name)
+#define DECLARE_PSCI_LOCK(_name) DECLARE_BAKERY_LOCK(_name)
+
+/* One lock is required per non-CPU power domain node */
+DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
+
+/*
+ * If not all PSCI participants are cache-coherent, perform cache maintenance
+ * and issue barriers wherever required to coordinate state.
+ */
+static inline void psci_flush_dcache_range(uintptr_t addr, size_t size)
+{
+ flush_dcache_range(addr, size);
+}
+
+#define psci_flush_cpu_data(member) flush_cpu_data(member)
+#define psci_inv_cpu_data(member) inv_cpu_data(member)
+
+static inline void psci_dsbish(void)
+{
+ dsbish();
+}
+
+static inline void psci_lock_get(non_cpu_pd_node_t *non_cpu_pd_node)
+{
+ bakery_lock_get(&psci_locks[non_cpu_pd_node->lock_index]);
+}
+
+static inline void psci_lock_release(non_cpu_pd_node_t *non_cpu_pd_node)
+{
+ bakery_lock_release(&psci_locks[non_cpu_pd_node->lock_index]);
+}
+
+#endif /* HW_ASSISTED_COHERENCY */
+
+static inline void psci_lock_init(non_cpu_pd_node_t *non_cpu_pd_node,
+ unsigned char idx)
+{
+ non_cpu_pd_node[idx].lock_index = idx;
+}
+
+/*******************************************************************************
* Data prototypes
******************************************************************************/
extern const plat_psci_ops_t *psci_plat_pm_ops;
@@ -187,9 +250,6 @@ extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
extern unsigned int psci_caps;
-/* One lock is required per non-CPU power domain node */
-DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
-
/*******************************************************************************
* SPD's power management hooks registered with PSCI
******************************************************************************/
@@ -208,15 +268,13 @@ void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
psci_power_state_t *target_state);
int psci_validate_entry_point(entry_point_info_t *ep,
uintptr_t entrypoint, u_register_t context_id);
-void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
+void psci_get_parent_pwr_domain_nodes(int cpu_idx,
unsigned int end_lvl,
- unsigned int node_index[]);
+ unsigned int *node_index);
void psci_do_state_coordination(unsigned int end_pwrlvl,
psci_power_state_t *state_info);
-void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
- unsigned int cpu_idx);
-void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
- unsigned int cpu_idx);
+void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl, int cpu_idx);
+void psci_release_pwr_domain_locks(unsigned int end_pwrlvl, int cpu_idx);
int psci_validate_suspend_req(const psci_power_state_t *state_info,
unsigned int is_power_down_state);
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
@@ -236,22 +294,20 @@ void prepare_cpu_pwr_dwn(unsigned int power_level);
/* Private exported functions from psci_on.c */
int psci_cpu_on_start(u_register_t target_cpu,
- entry_point_info_t *ep);
+ const entry_point_info_t *ep);
-void psci_cpu_on_finish(unsigned int cpu_idx,
- psci_power_state_t *state_info);
+void psci_cpu_on_finish(int cpu_idx, const psci_power_state_t *state_info);
/* Private exported functions from psci_off.c */
int psci_do_cpu_off(unsigned int end_pwrlvl);
/* Private exported functions from psci_suspend.c */
-void psci_cpu_suspend_start(entry_point_info_t *ep,
+void psci_cpu_suspend_start(const entry_point_info_t *ep,
unsigned int end_pwrlvl,
psci_power_state_t *state_info,
unsigned int is_power_down_state);
-void psci_cpu_suspend_finish(unsigned int cpu_idx,
- psci_power_state_t *state_info);
+void psci_cpu_suspend_finish(int cpu_idx, const psci_power_state_t *state_info);
/* Private exported functions from psci_helpers.S */
void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
@@ -260,7 +316,7 @@ void psci_do_pwrup_cache_maintenance(void);
/* Private exported functions from psci_system_off.c */
void __dead2 psci_system_off(void);
void __dead2 psci_system_reset(void);
-int psci_system_reset2(uint32_t reset_type, u_register_t cookie);
+u_register_t psci_system_reset2(uint32_t reset_type, u_register_t cookie);
/* Private exported functions from psci_stat.c */
void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
@@ -273,7 +329,7 @@ u_register_t psci_stat_count(u_register_t target_cpu,
unsigned int power_state);
/* Private exported functions from psci_mem_protect.c */
-int psci_mem_protect(unsigned int enable);
-int psci_mem_chk_range(uintptr_t base, u_register_t length);
+u_register_t psci_mem_protect(unsigned int enable);
+u_register_t psci_mem_chk_range(uintptr_t base, u_register_t length);
-#endif /* __PSCI_PRIVATE_H__ */
+#endif /* PSCI_PRIVATE_H */
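Most of the psci_private.h churn converts function-like macros into static inline functions. Behaviour is unchanged; the gain is that arguments are now type-checked, and the lock helpers can take a typed non_cpu_pd_node_t pointer instead of pasting whatever expression the caller supplies. A generic before/after sketch with illustrative names:

	static unsigned int current_pwrlvl;

	/* Old: the macro accepts anything that textually expands. */
	#define set_pwrlvl_macro(_lvl)	(current_pwrlvl = (_lvl))

	/* New: the argument type is checked at every call site. */
	static inline void set_pwrlvl(unsigned int lvl)
	{
		current_pwrlvl = lvl;
	}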
diff --git a/lib/psci/psci_setup.c b/lib/psci/psci_setup.c
index c00bd94ac..e59e163ea 100644
--- a/lib/psci/psci_setup.c
+++ b/lib/psci/psci_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -32,9 +32,9 @@ unsigned int psci_caps;
* Function which initializes the 'psci_non_cpu_pd_nodes' or the
* 'psci_cpu_pd_nodes' corresponding to the power level.
******************************************************************************/
-static void psci_init_pwr_domain_node(unsigned int node_idx,
+static void psci_init_pwr_domain_node(unsigned char node_idx,
unsigned int parent_idx,
- unsigned int level)
+ unsigned char level)
{
if (level > PSCI_CPU_PWR_LVL) {
psci_non_cpu_pd_nodes[node_idx].level = level;
@@ -82,15 +82,15 @@ static void psci_init_pwr_domain_node(unsigned int node_idx,
*******************************************************************************/
static void psci_update_pwrlvl_limits(void)
{
- int j;
+ int j, cpu_idx;
unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
- unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;
+ unsigned int temp_index[PLAT_MAX_PWR_LVL];
for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
psci_get_parent_pwr_domain_nodes(cpu_idx,
- PLAT_MAX_PWR_LVL,
+ (unsigned int)PLAT_MAX_PWR_LVL,
temp_index);
- for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
+ for (j = (int) PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
if (temp_index[j] != nodes_idx[j]) {
nodes_idx[j] = temp_index[j];
psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
@@ -109,9 +109,10 @@ static void psci_update_pwrlvl_limits(void)
******************************************************************************/
static void populate_power_domain_tree(const unsigned char *topology)
{
- unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
- unsigned int node_index = 0, parent_node_index = 0, num_children;
- int level = PLAT_MAX_PWR_LVL;
+ unsigned int i, j = 0U, num_nodes_at_lvl = 1U, num_nodes_at_next_lvl;
+ unsigned int node_index = 0U, num_children;
+ int parent_node_index = 0;
+ int level = (int) PLAT_MAX_PWR_LVL;
/*
* For each level the inputs are:
@@ -122,8 +123,8 @@ static void populate_power_domain_tree(const unsigned char *topology)
* - Index of first free entry in psci_non_cpu_pd_nodes[] or
* psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
*/
- while (level >= PSCI_CPU_PWR_LVL) {
- num_nodes_at_next_lvl = 0;
+ while (level >= (int) PSCI_CPU_PWR_LVL) {
+ num_nodes_at_next_lvl = 0U;
/*
* For each entry (parent node) at this level in the plat_array:
* - Find the number of children
@@ -132,16 +133,16 @@ static void populate_power_domain_tree(const unsigned char *topology)
* - Increment parent_node_index to point to the next parent
* - Accumulate the number of children at next level.
*/
- for (i = 0; i < num_nodes_at_lvl; i++) {
+ for (i = 0U; i < num_nodes_at_lvl; i++) {
assert(parent_node_index <=
PSCI_NUM_NON_CPU_PWR_DOMAINS);
num_children = topology[parent_node_index];
for (j = node_index;
- j < node_index + num_children; j++)
- psci_init_pwr_domain_node(j,
+ j < (node_index + num_children); j++)
+ psci_init_pwr_domain_node((unsigned char)j,
parent_node_index - 1,
- level);
+ (unsigned char)level);
node_index = j;
num_nodes_at_next_lvl += num_children;
@@ -152,12 +153,12 @@ static void populate_power_domain_tree(const unsigned char *topology)
level--;
/* Reset the index for the cpu power domain array */
- if (level == PSCI_CPU_PWR_LVL)
+ if (level == (int) PSCI_CPU_PWR_LVL)
node_index = 0;
}
/* Validate the sanity of the array exported by the platform */
- assert(j == PLATFORM_CORE_COUNT);
+ assert((int) j == PLATFORM_CORE_COUNT);
}
/*******************************************************************************
@@ -213,8 +214,9 @@ int psci_setup(const psci_lib_args_t *lib_args)
*/
psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
- plat_setup_psci_ops((uintptr_t)lib_args->mailbox_ep, &psci_plat_pm_ops);
- assert(psci_plat_pm_ops);
+ (void) plat_setup_psci_ops((uintptr_t)lib_args->mailbox_ep,
+ &psci_plat_pm_ops);
+ assert(psci_plat_pm_ops != NULL);
/*
* Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs
@@ -226,29 +228,29 @@ int psci_setup(const psci_lib_args_t *lib_args)
/* Initialize the psci capability */
psci_caps = PSCI_GENERIC_CAP;
- if (psci_plat_pm_ops->pwr_domain_off)
+ if (psci_plat_pm_ops->pwr_domain_off != NULL)
psci_caps |= define_psci_cap(PSCI_CPU_OFF);
- if (psci_plat_pm_ops->pwr_domain_on &&
- psci_plat_pm_ops->pwr_domain_on_finish)
+ if ((psci_plat_pm_ops->pwr_domain_on != NULL) &&
+ (psci_plat_pm_ops->pwr_domain_on_finish != NULL))
psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
- if (psci_plat_pm_ops->pwr_domain_suspend &&
- psci_plat_pm_ops->pwr_domain_suspend_finish) {
+ if ((psci_plat_pm_ops->pwr_domain_suspend != NULL) &&
+ (psci_plat_pm_ops->pwr_domain_suspend_finish != NULL)) {
psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
- if (psci_plat_pm_ops->get_sys_suspend_power_state)
+ if (psci_plat_pm_ops->get_sys_suspend_power_state != NULL)
psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
}
- if (psci_plat_pm_ops->system_off)
+ if (psci_plat_pm_ops->system_off != NULL)
psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF);
- if (psci_plat_pm_ops->system_reset)
+ if (psci_plat_pm_ops->system_reset != NULL)
psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET);
- if (psci_plat_pm_ops->get_node_hw_state)
+ if (psci_plat_pm_ops->get_node_hw_state != NULL)
psci_caps |= define_psci_cap(PSCI_NODE_HW_STATE_AARCH64);
- if (psci_plat_pm_ops->read_mem_protect &&
- psci_plat_pm_ops->write_mem_protect)
+ if ((psci_plat_pm_ops->read_mem_protect != NULL) &&
+ (psci_plat_pm_ops->write_mem_protect != NULL))
psci_caps |= define_psci_cap(PSCI_MEM_PROTECT);
- if (psci_plat_pm_ops->mem_protect_chk)
+ if (psci_plat_pm_ops->mem_protect_chk != NULL)
psci_caps |= define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64);
- if (psci_plat_pm_ops->system_reset2)
+ if (psci_plat_pm_ops->system_reset2 != NULL)
psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64);
#if ENABLE_PSCI_STAT
@@ -266,7 +268,7 @@ int psci_setup(const psci_lib_args_t *lib_args)
******************************************************************************/
void psci_arch_setup(void)
{
-#if ARM_ARCH_MAJOR > 7 || defined(ARMV7_SUPPORTS_GENERIC_TIMER)
+#if (ARM_ARCH_MAJOR > 7) || defined(ARMV7_SUPPORTS_GENERIC_TIMER)
/* Program the counter frequency */
write_cntfrq_el0(plat_get_syscnt_freq2());
#endif
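populate_power_domain_tree() consumes the platform topology as a breadth-first array of child counts, which the casts above now index with consistent types. As a worked example for a hypothetical platform (not one from this tree) with PLAT_MAX_PWR_LVL = 2, one system domain, and two clusters of two CPUs each:

	static const unsigned char topology[] = {
		1,	/* one power domain at the top (system) level */
		2,	/* the system domain has two cluster children */
		2, 2	/* each cluster has two CPU children */
	};

The while loop then creates one node at level 2, two nodes at level 1 and four CPU nodes at level 0, and the final assert checks that exactly PLATFORM_CORE_COUNT (4 here) CPU nodes were created.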
diff --git a/lib/psci/psci_stat.c b/lib/psci/psci_stat.c
index e925d34ed..421db4430 100644
--- a/lib/psci/psci_stat.c
+++ b/lib/psci/psci_stat.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,7 +11,7 @@
#include "psci_private.h"
#ifndef PLAT_MAX_PWR_LVL_STATES
-#define PLAT_MAX_PWR_LVL_STATES 2
+#define PLAT_MAX_PWR_LVL_STATES 2U
#endif
/* Following structure is used for PSCI STAT */
@@ -25,7 +25,7 @@ typedef struct psci_stat {
* that goes to power down in non cpu power domains.
*/
static int last_cpu_in_non_cpu_pd[PSCI_NUM_NON_CPU_PWR_DOMAINS] = {
- [0 ... PSCI_NUM_NON_CPU_PWR_DOMAINS-1] = -1};
+ [0 ... PSCI_NUM_NON_CPU_PWR_DOMAINS - 1] = -1};
/*
* Following are used to store PSCI STAT values for
@@ -41,21 +41,21 @@ static psci_stat_t psci_non_cpu_stat[PSCI_NUM_NON_CPU_PWR_DOMAINS]
* local power state and power domain level. If the platform implements the
* `get_pwr_lvl_state_idx` pm hook, then that will be used to return the index.
*/
-static int get_stat_idx(plat_local_state_t local_state, int pwr_lvl)
+static int get_stat_idx(plat_local_state_t local_state, unsigned int pwr_lvl)
{
int idx;
if (psci_plat_pm_ops->get_pwr_lvl_state_idx == NULL) {
- assert(PLAT_MAX_PWR_LVL_STATES == 2);
- if (is_local_state_retn(local_state))
+ assert(PLAT_MAX_PWR_LVL_STATES == 2U);
+ if (is_local_state_retn(local_state) != 0)
return 0;
- assert(is_local_state_off(local_state));
+ assert(is_local_state_off(local_state) != 0);
return 1;
}
idx = psci_plat_pm_ops->get_pwr_lvl_state_idx(local_state, pwr_lvl);
- assert((idx >= 0) && (idx < PLAT_MAX_PWR_LVL_STATES));
+ assert((idx >= 0) && (idx < (int) PLAT_MAX_PWR_LVL_STATES));
return idx;
}
@@ -73,17 +73,18 @@ static int get_stat_idx(plat_local_state_t local_state, int pwr_lvl)
void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
const psci_power_state_t *state_info)
{
- unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
+ unsigned int lvl, parent_idx;
+ int cpu_idx = (int) plat_my_core_pos();
assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
- assert(state_info);
+ assert(state_info != NULL);
parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
- for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+ for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
/* Break early if the target power state is RUN */
- if (is_local_state_run(state_info->pwr_domain_state[lvl]))
+ if (is_local_state_run(state_info->pwr_domain_state[lvl]) != 0)
break;
/*
@@ -105,13 +106,14 @@ void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
const psci_power_state_t *state_info)
{
- unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
+ unsigned int lvl, parent_idx;
+ int cpu_idx = (int) plat_my_core_pos();
int stat_idx;
plat_local_state_t local_state;
u_register_t residency;
assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
- assert(state_info);
+ assert(state_info != NULL);
/* Get the index into the stats array */
local_state = state_info->pwr_domain_state[PSCI_CPU_PWR_LVL];
@@ -134,9 +136,9 @@ void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
if (last_cpu_in_non_cpu_pd[parent_idx] == -1)
return;
- for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+ for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
local_state = state_info->pwr_domain_state[lvl];
- if (is_local_state_run(local_state)) {
+ if (is_local_state_run(local_state) != 0) {
/* Break early */
break;
}
@@ -145,7 +147,7 @@ void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
/* Call into platform interface to calculate residency. */
residency = plat_psci_stat_get_residency(lvl, state_info,
- last_cpu_in_non_cpu_pd[parent_idx]);
+ last_cpu_in_non_cpu_pd[parent_idx]);
/* Initialize back to reset value */
last_cpu_in_non_cpu_pd[parent_idx] = -1;
@@ -171,17 +173,18 @@ static int psci_get_stat(u_register_t target_cpu, unsigned int power_state,
psci_stat_t *psci_stat)
{
int rc;
- unsigned int pwrlvl, lvl, parent_idx, stat_idx, target_idx;
+ unsigned int pwrlvl, lvl, parent_idx, target_idx;
+ int stat_idx;
psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
plat_local_state_t local_state;
/* Validate the target_cpu parameter and determine the cpu index */
- target_idx = plat_core_pos_by_mpidr(target_cpu);
- if (target_idx == -1)
+ target_idx = (unsigned int) plat_core_pos_by_mpidr(target_cpu);
+ if (target_idx == (unsigned int) -1)
return PSCI_E_INVALID_PARAMS;
/* Validate the power_state parameter */
- if (!psci_plat_pm_ops->translate_power_state_by_mpidr)
+ if (psci_plat_pm_ops->translate_power_state_by_mpidr == NULL)
rc = psci_validate_power_state(power_state, &state_info);
else
rc = psci_plat_pm_ops->translate_power_state_by_mpidr(
@@ -204,7 +207,7 @@ static int psci_get_stat(u_register_t target_cpu, unsigned int power_state,
if (pwrlvl > PSCI_CPU_PWR_LVL) {
/* Get the power domain index */
parent_idx = psci_cpu_pd_nodes[target_idx].parent_node;
- for (lvl = PSCI_CPU_PWR_LVL + 1; lvl < pwrlvl; lvl++)
+ for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl < pwrlvl; lvl++)
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
/* Get the non cpu power domain stats */
diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c
index a77972d38..e00819de7 100644
--- a/lib/psci/psci_suspend.c
+++ b/lib/psci/psci_suspend.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -23,7 +23,7 @@
* This function does generic and platform specific operations after a wake-up
* from standby/retention states at multiple power levels.
******************************************************************************/
-static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
+static void psci_suspend_to_standby_finisher(int cpu_idx,
unsigned int end_pwrlvl)
{
psci_power_state_t state_info;
@@ -64,8 +64,8 @@ static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
* operations.
******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
- entry_point_info_t *ep,
- psci_power_state_t *state_info)
+ const entry_point_info_t *ep,
+ const psci_power_state_t *state_info)
{
unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);
@@ -85,7 +85,7 @@ static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
* Dispatcher to let it do any bookkeeping. If the handler encounters an
* error, it's expected to assert within
*/
- if (psci_spd_pm && psci_spd_pm->svc_suspend)
+ if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend != NULL))
psci_spd_pm->svc_suspend(max_off_lvl);
#if !HW_ASSISTED_COHERENCY
@@ -95,7 +95,7 @@ static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
* HW_ASSISTED_COHERENCY = 0 platforms that can safely perform these
* actions with data caches enabled.
*/
- if (psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early)
+ if (psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early != NULL)
psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early(state_info);
#endif
@@ -147,20 +147,20 @@ static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
* the state transition has been done, no further error is expected and it is
* not possible to undo any of the actions taken beyond that point.
******************************************************************************/
-void psci_cpu_suspend_start(entry_point_info_t *ep,
+void psci_cpu_suspend_start(const entry_point_info_t *ep,
unsigned int end_pwrlvl,
psci_power_state_t *state_info,
unsigned int is_power_down_state)
{
int skip_wfi = 0;
- unsigned int idx = plat_my_core_pos();
+ int idx = (int) plat_my_core_pos();
/*
* This function must only be called on platforms where the
* CPU_SUSPEND platform hooks have been implemented.
*/
- assert(psci_plat_pm_ops->pwr_domain_suspend &&
- psci_plat_pm_ops->pwr_domain_suspend_finish);
+ assert((psci_plat_pm_ops->pwr_domain_suspend != NULL) &&
+ (psci_plat_pm_ops->pwr_domain_suspend_finish != NULL));
/*
* This function acquires the lock corresponding to each power
@@ -175,7 +175,7 @@ void psci_cpu_suspend_start(entry_point_info_t *ep,
* introduced by lock contention to increase the chances of early
* detection that a wake-up interrupt has fired.
*/
- if (read_isr_el1()) {
+ if (read_isr_el1() != 0U) {
skip_wfi = 1;
goto exit;
}
@@ -192,7 +192,7 @@ void psci_cpu_suspend_start(entry_point_info_t *ep,
psci_stats_update_pwr_down(end_pwrlvl, state_info);
#endif
- if (is_power_down_state)
+ if (is_power_down_state != 0U)
psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);
/*
@@ -214,10 +214,10 @@ exit:
*/
psci_release_pwr_domain_locks(end_pwrlvl,
idx);
- if (skip_wfi)
+ if (skip_wfi == 1)
return;
- if (is_power_down_state) {
+ if (is_power_down_state != 0U) {
#if ENABLE_RUNTIME_INSTRUMENTATION
/*
@@ -232,7 +232,7 @@ exit:
#endif
/* The function calls below must not return */
- if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi)
+ if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi != NULL)
psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
else
psci_power_down_wfi();
@@ -269,15 +269,15 @@ exit:
* are called by the common finisher routine in psci_common.c. The `state_info`
* is the psci_power_state from which this CPU has woken up from.
******************************************************************************/
-void psci_cpu_suspend_finish(unsigned int cpu_idx,
- psci_power_state_t *state_info)
+void psci_cpu_suspend_finish(int cpu_idx, const psci_power_state_t *state_info)
{
unsigned int counter_freq;
unsigned int max_off_lvl;
/* Ensure we have been woken up from a suspended state */
- assert(psci_get_aff_info_state() == AFF_STATE_ON && is_local_state_off(\
- state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]));
+ assert((psci_get_aff_info_state() == AFF_STATE_ON) &&
+ (is_local_state_off(
+ state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]) != 0));
/*
* Plat. management: Perform the platform specific actions
@@ -302,9 +302,9 @@ void psci_cpu_suspend_finish(unsigned int cpu_idx,
* Dispatcher to let it do any bookkeeping. If the handler encounters an
* error, it's expected to assert within
*/
- if (psci_spd_pm && psci_spd_pm->svc_suspend_finish) {
+ if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend_finish != NULL)) {
max_off_lvl = psci_find_max_off_lvl(state_info);
- assert (max_off_lvl != PSCI_INVALID_PWR_LVL);
+ assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
psci_spd_pm->svc_suspend_finish(max_off_lvl);
}
diff --git a/lib/psci/psci_system_off.c b/lib/psci/psci_system_off.c
index 13e9f4aae..7cac4e937 100644
--- a/lib/psci/psci_system_off.c
+++ b/lib/psci/psci_system_off.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -16,14 +16,14 @@ void __dead2 psci_system_off(void)
{
psci_print_power_domain_map();
- assert(psci_plat_pm_ops->system_off);
+ assert(psci_plat_pm_ops->system_off != NULL);
/* Notify the Secure Payload Dispatcher */
- if (psci_spd_pm && psci_spd_pm->svc_system_off) {
+ if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_system_off != NULL)) {
psci_spd_pm->svc_system_off();
}
- console_flush();
+ (void) console_flush();
/* Call the platform specific hook */
psci_plat_pm_ops->system_off();
@@ -35,14 +35,14 @@ void __dead2 psci_system_reset(void)
{
psci_print_power_domain_map();
- assert(psci_plat_pm_ops->system_reset);
+ assert(psci_plat_pm_ops->system_reset != NULL);
/* Notify the Secure Payload Dispatcher */
- if (psci_spd_pm && psci_spd_pm->svc_system_reset) {
+ if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_system_reset != NULL)) {
psci_spd_pm->svc_system_reset();
}
- console_flush();
+ (void) console_flush();
/* Call the platform specific hook */
psci_plat_pm_ops->system_reset();
@@ -50,32 +50,34 @@ void __dead2 psci_system_reset(void)
/* This function does not return. We should never get here */
}
-int psci_system_reset2(uint32_t reset_type, u_register_t cookie)
+u_register_t psci_system_reset2(uint32_t reset_type, u_register_t cookie)
{
- int is_vendor;
+ unsigned int is_vendor;
psci_print_power_domain_map();
- assert(psci_plat_pm_ops->system_reset2);
+ assert(psci_plat_pm_ops->system_reset2 != NULL);
- is_vendor = (reset_type >> PSCI_RESET2_TYPE_VENDOR_SHIFT) & 1;
- if (!is_vendor) {
+ is_vendor = (reset_type >> PSCI_RESET2_TYPE_VENDOR_SHIFT) & 1U;
+ if (is_vendor == 0U) {
/*
* Only WARM_RESET is allowed for architectural type resets.
*/
if (reset_type != PSCI_RESET2_SYSTEM_WARM_RESET)
- return PSCI_E_INVALID_PARAMS;
- if (psci_plat_pm_ops->write_mem_protect &&
- psci_plat_pm_ops->write_mem_protect(0) < 0) {
- return PSCI_E_NOT_SUPPORTED;
+ return (u_register_t) PSCI_E_INVALID_PARAMS;
+ if ((psci_plat_pm_ops->write_mem_protect != NULL) &&
+ (psci_plat_pm_ops->write_mem_protect(0) < 0)) {
+ return (u_register_t) PSCI_E_NOT_SUPPORTED;
}
}
/* Notify the Secure Payload Dispatcher */
- if (psci_spd_pm && psci_spd_pm->svc_system_reset) {
+ if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_system_reset != NULL)) {
psci_spd_pm->svc_system_reset();
}
- console_flush();
+ (void) console_flush();
- return psci_plat_pm_ops->system_reset2(is_vendor, reset_type, cookie);
+ return (u_register_t)
+ psci_plat_pm_ops->system_reset2((int) is_vendor, reset_type,
+ cookie);
}
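The reset_type decoding above splits SYSTEM_RESET2 requests on the vendor bit. Assuming PSCI_RESET2_TYPE_VENDOR_SHIFT is 31 and PSCI_RESET2_SYSTEM_WARM_RESET is 0, as in the PSCI 1.1 encoding, the architectural path reduces to:

	#include <stdint.h>

	/* Assumed values, matching the PSCI 1.1 SYSTEM_RESET2 encoding. */
	#define PSCI_RESET2_TYPE_VENDOR_SHIFT	31U
	#define PSCI_RESET2_SYSTEM_WARM_RESET	0U

	static int is_architectural_warm_reset(uint32_t reset_type)
	{
		unsigned int is_vendor =
			(reset_type >> PSCI_RESET2_TYPE_VENDOR_SHIFT) & 1U;

		/* Architectural resets (bit 31 clear) allow only WARM_RESET. */
		return (is_vendor == 0U) &&
			(reset_type == PSCI_RESET2_SYSTEM_WARM_RESET);
	}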
diff --git a/lib/romlib/Makefile b/lib/romlib/Makefile
new file mode 100644
index 000000000..46b920682
--- /dev/null
+++ b/lib/romlib/Makefile
@@ -0,0 +1,71 @@
+#
+# Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+AS = $(CROSS_COMPILE)as
+LD = $(CROSS_COMPILE)ld
+OC = $(CROSS_COMPILE)objcopy
+CPP = $(CROSS_COMPILE)cpp
+BUILD_DIR = ../../$(BUILD_PLAT)/romlib
+LIB_DIR = ../../$(BUILD_PLAT)/lib
+WRAPPER_DIR = ../../$(BUILD_PLAT)/libwrapper
+LIBS = -lmbedtls -lfdt -lc
+INC = $(INCLUDES:-I%=-I../../%)
+PPFLAGS = $(INC) $(DEFINES) -P -D__ASSEMBLY__ -D__LINKER__ -MD -MP -MT $(BUILD_DIR)/romlib.ld
+OBJS = $(BUILD_DIR)/jmptbl.o $(BUILD_DIR)/init.o
+
+V ?= 0
+ifeq ($(V),0)
+ Q := @
+else
+ Q :=
+endif
+
+ifeq ($(DEBUG),1)
+ CFLAGS := -g
+ LDFLAGS := -g
+endif
+
+
+.PHONY: all clean distclean
+
+all: $(BUILD_DIR)/romlib.bin $(LIB_DIR)/libwrappers.a
+
+%.o: %.s
+ @echo " AS $@"
+ $(Q)$(AS) $(ASFLAGS) -o $@ $<
+
+$(BUILD_DIR)/%.o: %.s
+ @echo " AS $@"
+ $(Q)$(AS) $(ASFLAGS) -o $@ $<
+
+$(BUILD_DIR)/romlib.ld: romlib.ld.S
+ @echo " PP $@"
+ $(Q)$(CPP) $(PPFLAGS) -o $@ romlib.ld.S
+
+$(BUILD_DIR)/romlib.elf: $(OBJS) $(BUILD_DIR)/romlib.ld
+ @echo " LD $@"
+ $(Q)$(LD) -T $(BUILD_DIR)/romlib.ld -L$(LIB_DIR) $(LDFLAGS) -o $@ $(OBJS) $(LIBS)
+
+$(BUILD_DIR)/romlib.bin: $(BUILD_DIR)/romlib.elf
+ @echo " BIN $@"
+ $(Q)$(OC) -O binary $(BUILD_DIR)/romlib.elf $@
+
+$(WRAPPER_DIR)/jmpvar.s: $(BUILD_DIR)/romlib.elf
+ @echo " VAR $@"
+ $(Q)./genvar.sh -o $@ $(BUILD_DIR)/romlib.elf
+
+$(LIB_DIR)/libwrappers.a: jmptbl.i $(WRAPPER_DIR)/jmpvar.o
+ @echo " AR $@"
+ $(Q)./genwrappers.sh -b $(WRAPPER_DIR) -o $@ jmptbl.i
+
+$(BUILD_DIR)/jmptbl.s: jmptbl.i
+ @echo " TBL $@"
+ $(Q)./gentbl.sh -o $@ jmptbl.i
+
+clean:
+ @rm -f $(BUILD_DIR)/*
+
+-include $(BUILD_DIR)/romlib.d
diff --git a/lib/romlib/gentbl.sh b/lib/romlib/gentbl.sh
new file mode 100755
index 000000000..0695f6e4f
--- /dev/null
+++ b/lib/romlib/gentbl.sh
@@ -0,0 +1,40 @@
+#!/bin/sh
+# Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+set -e
+
+output=jmptbl.s
+
+for i
+do
+ case $i in
+ -o)
+ output=$2
+ shift 2
+ ;;
+ --)
+ shift
+ break
+ ;;
+ -*)
+ echo usage: gentbl.sh [-o output] file ... >&2
+ exit 1
+ ;;
+ esac
+done
+
+tmp=`mktemp`
+trap "rm -f $tmp" EXIT INT QUIT
+
+rm -f $output
+
+awk -v OFS="\n" '
+BEGIN {print "\t.text",
+ "\t.globl\tjmptbl",
+ "jmptbl:"}
+ {sub(/[[:blank:]]*#.*/,"")}
+!/^$/ {print "\tb\t" $3}' "$@" > $tmp
+
+mv $tmp $output
diff --git a/lib/romlib/genvar.sh b/lib/romlib/genvar.sh
new file mode 100755
index 000000000..a3e2cdf69
--- /dev/null
+++ b/lib/romlib/genvar.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+set -e
+
+output=jmpvar.s
+for i
+do
+ case $i in
+ -o)
+ output=$2
+ shift 2
+ ;;
+ --)
+ shift
+ break
+ ;;
+ -*)
+ echo usage: genvar.sh [-o output] file ... >&2
+ exit 1
+ ;;
+ esac
+done
+
+tmp=`mktemp`
+trap "rm -f $tmp" EXIT INT QUIT
+
+nm -a "$@" |
+awk -v OFS="\n" '
+$3 == ".text" {print "\t.data",
+ "\t.globl\tjmptbl",
+ "\t.align\t4",
+ "jmptbl:\t.quad\t0x" $1}' > $tmp
+
+mv $tmp $output
diff --git a/lib/romlib/genwrappers.sh b/lib/romlib/genwrappers.sh
new file mode 100755
index 000000000..bcf670b98
--- /dev/null
+++ b/lib/romlib/genwrappers.sh
@@ -0,0 +1,52 @@
+#!/bin/sh
+# Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+set -e
+
+build=.
+out=output.a
+
+for i
+do
+ case $i in
+ -o)
+ out=$2
+ shift 2
+ ;;
+ -b)
+ build=$2
+ shift 2
+ ;;
+ --)
+ shift
+ break
+ ;;
+ -*)
+ echo usage: genwrappers.sh [-o output] [-b dir] file ... >&2
+ exit 1
+ ;;
+ esac
+done
+
+awk '{sub(/[[:blank:]]*#.*/,"")}
+!/^$/ {print $1*4, $2, $3}' "$@" |
+while read idx lib sym
+do
+ file=$build/${lib}_$sym
+
+ cat <<EOF > $file.s
+ .globl $sym
+$sym:
+ ldr x17, =jmptbl
+ ldr x17, [x17]
+ mov x16, $idx
+ add x16, x16, x17
+ br x16
+EOF
+
+ ${CROSS_COMPILE}as -o $file.o $file.s
+done
+
+${CROSS_COMPILE}ar -rc $out $build/*.o
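
A rough C model of the generated stubs above, assuming the jmptbl variable emitted by genvar.sh holds the jump table's load address: each entry built by gentbl.sh is a single 4-byte A64 branch, which is why genwrappers.sh scales the index by 4.

#include <stdint.h>

extern uint64_t jmptbl;	/* RAM word patched with the table address */

/* Address the stub branches to: table base plus idx entries of 4 bytes
 * each (the mov x16, $idx / add / br x16 sequence above). */
static inline uintptr_t wrapper_target(uint32_t idx)
{
	return (uintptr_t)jmptbl + ((uintptr_t)idx * 4U);
}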
diff --git a/lib/romlib/init.s b/lib/romlib/init.s
new file mode 100644
index 000000000..5cf2aca04
--- /dev/null
+++ b/lib/romlib/init.s
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ .globl rom_lib_init
+ .extern __DATA_RAM_START__, __DATA_ROM_START__, __DATA_SIZE__
+ .extern __BSS_START__, __BSS_SIZE__
+ .extern memset, memcpy
+
+rom_lib_init:
+ cmp w0, #1
+ mov w0, #0
+ b.le 1f
+ ret
+
+1: stp x29, x30, [sp, #-16]!
+ adrp x0, __DATA_RAM_START__
+ ldr x1, =__DATA_ROM_START__
+ ldr x2, =__DATA_SIZE__
+ bl memcpy
+
+ ldr x0, =__BSS_START__
+ mov x1, #0
+ ldr x2, =__BSS_SIZE__
+ bl memset
+ ldp x29, x30, [sp], #16
+
+ mov w0, #1
+ ret
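
A C sketch of rom_lib_init above, under the assumption that the linker-defined symbols come from romlib.ld.S: callers requesting an interface version above 1 get 0 back; otherwise .data is copied from its ROM load address, .bss is zeroed and 1 is returned.

#include <stddef.h>
#include <string.h>

extern char __DATA_RAM_START__[], __DATA_ROM_START__[], __DATA_SIZE__[];
extern char __BSS_START__[], __BSS_SIZE__[];

int rom_lib_init(int version)
{
	if (version > 1)
		return 0;	/* unsupported interface version */

	/* Copy initialised data from ROM to RAM, then clear BSS. */
	memcpy(__DATA_RAM_START__, __DATA_ROM_START__, (size_t)__DATA_SIZE__);
	memset(__BSS_START__, 0, (size_t)__BSS_SIZE__);
	return 1;
}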
diff --git a/lib/romlib/jmptbl.i b/lib/romlib/jmptbl.i
new file mode 100644
index 000000000..338cd8a71
--- /dev/null
+++ b/lib/romlib/jmptbl.i
@@ -0,0 +1,35 @@
+#
+# Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+0 rom rom_lib_init
+1 fdt fdt_getprop_namelen
+2 fdt fdt_setprop_inplace
+3 fdt fdt_check_header
+4 fdt fdt_node_offset_by_compatible
+5 mbedtls mbedtls_asn1_get_alg
+6 mbedtls mbedtls_asn1_get_alg_null
+7 mbedtls mbedtls_asn1_get_bitstring_null
+8 mbedtls mbedtls_asn1_get_bool
+9 mbedtls mbedtls_asn1_get_int
+10 mbedtls mbedtls_asn1_get_tag
+11 mbedtls mbedtls_free
+12 mbedtls mbedtls_md
+13 mbedtls mbedtls_md_get_size
+14 mbedtls mbedtls_memory_buffer_alloc_init
+15 mbedtls mbedtls_oid_get_md_alg
+16 mbedtls mbedtls_oid_get_numeric_string
+17 mbedtls mbedtls_oid_get_pk_alg
+18 mbedtls mbedtls_oid_get_sig_alg
+19 mbedtls mbedtls_pk_free
+20 mbedtls mbedtls_pk_init
+21 mbedtls mbedtls_pk_parse_subpubkey
+22 mbedtls mbedtls_pk_verify_ext
+23 mbedtls mbedtls_platform_set_snprintf
+24 mbedtls mbedtls_x509_get_rsassa_pss_params
+25 mbedtls mbedtls_x509_get_sig_alg
+26 mbedtls mbedtls_md_info_from_type
+27 c exit
+28 c atexit
diff --git a/lib/romlib/romlib.ld.S b/lib/romlib/romlib.ld.S
new file mode 100644
index 000000000..8f0bc62bc
--- /dev/null
+++ b/lib/romlib/romlib.ld.S
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+#include <xlat_tables_defs.h>
+
+MEMORY {
+ ROM (rx): ORIGIN = ROMLIB_RO_BASE, LENGTH = ROMLIB_RO_LIMIT - ROMLIB_RO_BASE
+ RAM (rwx): ORIGIN = ROMLIB_RW_BASE, LENGTH = ROMLIB_RW_END - ROMLIB_RW_BASE
+}
+
+OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
+OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
+ENTRY(jmptbl)
+
+SECTIONS
+{
+ . = ROMLIB_RO_BASE;
+ .text : {
+ *jmptbl.o(.text)
+ *(.text*)
+ *(.rodata*)
+ } >ROM
+
+ __DATA_ROM_START__ = LOADADDR(.data);
+
+ .data : {
+ __DATA_RAM_START__ = .;
+ *(.data*)
+ __DATA_RAM_END__ = .;
+ } >RAM AT>ROM
+
+ __DATA_SIZE__ = SIZEOF(.data);
+
+ .bss : {
+ __BSS_START__ = .;
+ *(.bss*)
+ __BSS_END__ = .;
+ } >RAM
+ __BSS_SIZE__ = SIZEOF(.bss);
+}
diff --git a/lib/stdlib/exit.c b/lib/stdlib/exit.c
deleted file mode 100644
index afc3f9343..000000000
--- a/lib/stdlib/exit.c
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <debug.h>
-#include <stdlib.h>
-
-void exit(int v)
-{
- ERROR("EXIT\n");
- panic();
-}
diff --git a/lib/stdlib/mem.c b/lib/stdlib/mem.c
deleted file mode 100644
index 65b62fde6..000000000
--- a/lib/stdlib/mem.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <stddef.h> /* size_t */
-
-/*
- * Fill @count bytes of memory pointed to by @dst with @val
- */
-void *memset(void *dst, int val, size_t count)
-{
- char *ptr = dst;
-
- while (count--)
- *ptr++ = val;
-
- return dst;
-}
-
-/*
- * Compare @len bytes of @s1 and @s2
- */
-int memcmp(const void *s1, const void *s2, size_t len)
-{
- const unsigned char *s = s1;
- const unsigned char *d = s2;
- unsigned char sc;
- unsigned char dc;
-
- while (len--) {
- sc = *s++;
- dc = *d++;
- if (sc - dc)
- return (sc - dc);
- }
-
- return 0;
-}
-
-/*
- * Copy @len bytes from @src to @dst
- */
-void *memcpy(void *dst, const void *src, size_t len)
-{
- const char *s = src;
- char *d = dst;
-
- while (len--)
- *d++ = *s++;
-
- return dst;
-}
-
-/*
- * Move @len bytes from @src to @dst
- */
-void *memmove(void *dst, const void *src, size_t len)
-{
- /*
- * The following test makes use of unsigned arithmetic overflow to
- * more efficiently test the condition !(src <= dst && dst < str+len).
- * It also avoids the situation where the more explicit test would give
- * incorrect results were the calculation str+len to overflow (though
- * that issue is probably moot as such usage is probably undefined
- * behaviour and a bug anyway.
- */
- if ((size_t)dst - (size_t)src >= len) {
- /* destination not in source data, so can safely use memcpy */
- return memcpy(dst, src, len);
- } else {
- /* copy backwards... */
- const char *end = dst;
- const char *s = (const char *)src + len;
- char *d = (char *)dst + len;
- while (d != end)
- *--d = *--s;
- }
- return dst;
-}
-
-/*
- * Scan @len bytes of @src for value @c
- */
-void *memchr(const void *src, int c, size_t len)
-{
- const char *s = src;
-
- while (len--) {
- if (*s == c)
- return (void *) s;
- s++;
- }
-
- return NULL;
-}
diff --git a/lib/stdlib/printf.c b/lib/stdlib/printf.c
deleted file mode 100644
index f61564140..000000000
--- a/lib/stdlib/printf.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <stdio.h>
-#include <stdarg.h>
-
-/* Choose max of 128 chars for now. */
-#define PRINT_BUFFER_SIZE 128
-int printf(const char *fmt, ...)
-{
- va_list args;
- char buf[PRINT_BUFFER_SIZE];
- int count;
-
- va_start(args, fmt);
- vsnprintf(buf, sizeof(buf) - 1, fmt, args);
- va_end(args);
-
- /* Use putchar directly as 'puts()' adds a newline. */
- buf[PRINT_BUFFER_SIZE - 1] = '\0';
- count = 0;
- while (buf[count])
- {
- if (putchar(buf[count]) != EOF) {
- count++;
- } else {
- count = EOF;
- break;
- }
- }
-
- return count;
-}
diff --git a/lib/stdlib/putchar.c b/lib/stdlib/putchar.c
deleted file mode 100644
index 8265667b1..000000000
--- a/lib/stdlib/putchar.c
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <stdio.h>
-#include <console.h>
-
-/* Putchar() should either return the character printed or EOF in case of error.
- * Our current console_putc() function assumes success and returns the
- * character. Write all other printing functions in terms of putchar(), if
- * possible, so they all benefit when this is improved.
- */
-int putchar(int c)
-{
- int res;
- if (console_putc((unsigned char)c) >= 0)
- res = c;
- else
- res = EOF;
-
- return res;
-}
diff --git a/lib/stdlib/sscanf.c b/lib/stdlib/sscanf.c
deleted file mode 100644
index a5876cff3..000000000
--- a/lib/stdlib/sscanf.c
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <stdio.h>
-#include <sys/cdefs.h>
-
-/*
- * TODO: This is not a real implementation of the sscanf() function. It just
- * returns the number of expected arguments based on the number of '%' found
- * in the format string.
- */
-int
-sscanf(const char *__restrict str, char const *__restrict fmt, ...)
-{
- int ret = 0;
-
- while (*fmt != '\0') {
- if (*fmt++ == '%') {
- ret++;
- }
- }
-
- return ret;
-}
diff --git a/lib/stdlib/stdlib.mk b/lib/stdlib/stdlib.mk
deleted file mode 100644
index 821162354..000000000
--- a/lib/stdlib/stdlib.mk
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-# Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-STDLIB_SRCS := $(addprefix lib/stdlib/, \
- abort.c \
- assert.c \
- exit.c \
- mem.c \
- printf.c \
- putchar.c \
- puts.c \
- sscanf.c \
- strchr.c \
- strcmp.c \
- strlen.c \
- strncmp.c \
- strnlen.c \
- subr_prf.c \
- timingsafe_bcmp.c)
-
-INCLUDES += -Iinclude/lib/stdlib \
- -Iinclude/lib/stdlib/sys
diff --git a/lib/stdlib/strlen.c b/lib/stdlib/strlen.c
deleted file mode 100644
index 23c3d3929..000000000
--- a/lib/stdlib/strlen.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*-
- * Copyright (c) 1990, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/*
- * Portions copyright (c) 2009-2014, ARM Limited and Contributors. All rights reserved.
- */
-
-#include <stddef.h>
-
-size_t
-strlen(str)
- const char *str;
-{
- register const char *s;
-
- for (s = str; *s; ++s);
- return(s - str);
-}
diff --git a/lib/stdlib/subr_prf.c b/lib/stdlib/subr_prf.c
deleted file mode 100644
index c1035624e..000000000
--- a/lib/stdlib/subr_prf.c
+++ /dev/null
@@ -1,548 +0,0 @@
-/*-
- * Copyright (c) 1986, 1988, 1991, 1993
- * The Regents of the University of California. All rights reserved.
- * (c) UNIX System Laboratories, Inc.
- * All or some portions of this file are derived from material licensed
- * to the University of California by American Telephone and Telegraph
- * Co. or Unix System Laboratories, Inc. and are reproduced herein with
- * the permission of UNIX System Laboratories, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * @(#)subr_prf.c 8.3 (Berkeley) 1/21/94
- */
-
-/*
- * Portions copyright (c) 2009-2014, ARM Limited and Contributors.
- * All rights reserved.
- */
-
-#include <stdio.h>
-#include <stdint.h>
-#include <stdarg.h>
-#include <stddef.h>
-#include <string.h>
-#include <ctype.h>
-
-typedef unsigned char u_char;
-typedef unsigned int u_int;
-typedef int64_t quad_t;
-typedef uint64_t u_quad_t;
-typedef unsigned long u_long;
-typedef unsigned short u_short;
-
-static inline int imax(int a, int b) { return (a > b ? a : b); }
-
-/*
- * Note that stdarg.h and the ANSI style va_start macro is used for both
- * ANSI and traditional C compilers.
- */
-
-#define TOCONS 0x01
-#define TOTTY 0x02
-#define TOLOG 0x04
-
-/* Max number conversion buffer length: a u_quad_t in base 2, plus NUL byte. */
-#define MAXNBUF (sizeof(intmax_t) * 8 + 1)
-
-struct putchar_arg {
- int flags;
- int pri;
- struct tty *tty;
- char *p_bufr;
- size_t n_bufr;
- char *p_next;
- size_t remain;
-};
-
-struct snprintf_arg {
- char *str;
- size_t remain;
-};
-
-extern int log_open;
-
-static char *ksprintn(char *nbuf, uintmax_t num, int base, int *len, int upper);
-static void snprintf_func(int ch, void *arg);
-static int kvprintf(char const *fmt, void (*func)(int, void*), void *arg, int radix, va_list ap);
-
-int vsnprintf(char *str, size_t size, const char *format, va_list ap);
-
-static char const hex2ascii_data[] = "0123456789abcdefghijklmnopqrstuvwxyz";
-#define hex2ascii(hex) (hex2ascii_data[hex])
-
-/*
- * Scaled down version of sprintf(3).
- */
-int
-sprintf(char *buf, const char *cfmt, ...)
-{
- int retval;
- va_list ap;
-
- va_start(ap, cfmt);
- retval = kvprintf(cfmt, NULL, (void *)buf, 10, ap);
- buf[retval] = '\0';
- va_end(ap);
- return (retval);
-}
-
-/*
- * Scaled down version of vsprintf(3).
- */
-int
-vsprintf(char *buf, const char *cfmt, va_list ap)
-{
- int retval;
-
- retval = kvprintf(cfmt, NULL, (void *)buf, 10, ap);
- buf[retval] = '\0';
- return (retval);
-}
-
-/*
- * Scaled down version of snprintf(3).
- */
-int
-snprintf(char *str, size_t size, const char *format, ...)
-{
- int retval;
- va_list ap;
-
- va_start(ap, format);
- retval = vsnprintf(str, size, format, ap);
- va_end(ap);
- return(retval);
-}
-
-/*
- * Scaled down version of vsnprintf(3).
- */
-int
-vsnprintf(char *str, size_t size, const char *format, va_list ap)
-{
- struct snprintf_arg info;
- int retval;
-
- info.str = str;
- info.remain = size;
- retval = kvprintf(format, snprintf_func, &info, 10, ap);
- if (info.remain >= 1)
- *info.str++ = '\0';
- return (retval);
-}
-
-static void
-snprintf_func(int ch, void *arg)
-{
- struct snprintf_arg *const info = arg;
-
- if (info->remain >= 2) {
- *info->str++ = ch;
- info->remain--;
- }
-}
-
-
-/*
- * Kernel version which takes radix argument vsnprintf(3).
- */
-int
-vsnrprintf(char *str, size_t size, int radix, const char *format, va_list ap)
-{
- struct snprintf_arg info;
- int retval;
-
- info.str = str;
- info.remain = size;
- retval = kvprintf(format, snprintf_func, &info, radix, ap);
- if (info.remain >= 1)
- *info.str++ = '\0';
- return (retval);
-}
-
-
-/*
- * Put a NUL-terminated ASCII number (base <= 36) in a buffer in reverse
- * order; return an optional length and a pointer to the last character
- * written in the buffer (i.e., the first character of the string).
- * The buffer pointed to by `nbuf' must have length >= MAXNBUF.
- */
-static char *
-ksprintn(char *nbuf, uintmax_t num, int base, int *lenp, int upper)
-{
- char *p, c;
-
- p = nbuf;
- *p = '\0';
- do {
- c = hex2ascii(num % base);
- *++p = upper ? toupper(c) : c;
- } while (num /= base);
- if (lenp)
- *lenp = p - nbuf;
- return (p);
-}
-
-/*
- * Scaled down version of printf(3).
- *
- * Two additional formats:
- *
- * The format %b is supported to decode error registers.
- * Its usage is:
- *
- * printf("reg=%b\n", regval, "<base><arg>*");
- *
- * where <base> is the output base expressed as a control character, e.g.
- * \10 gives octal; \20 gives hex. Each arg is a sequence of characters,
- * the first of which gives the bit number to be inspected (origin 1), and
- * the next characters (up to a control character, i.e. a character <= 32),
- * give the name of the register. Thus:
- *
- * kvprintf("reg=%b\n", 3, "\10\2BITTWO\1BITONE\n");
- *
- * would produce output:
- *
- * reg=3<BITTWO,BITONE>
- *
- * XXX: %D -- Hexdump, takes pointer and separator string:
- * ("%6D", ptr, ":") -> XX:XX:XX:XX:XX:XX
- * ("%*D", len, ptr, " " -> XX XX XX XX ...
- */
-int
-kvprintf(char const *fmt, void (*func)(int, void*), void *arg, int radix, va_list ap)
-{
-#define PCHAR(c) {int cc=(c); if (func) (*func)(cc,arg); else *d++ = cc; retval++; }
- char nbuf[MAXNBUF];
- char *d;
- const char *p, *percent, *q;
- u_char *up;
- int ch, n;
- uintmax_t num;
- int base, lflag, qflag, tmp, width, ladjust, sharpflag, neg, sign, dot;
- int cflag, hflag, jflag, tflag, zflag;
- int dwidth, upper;
- char padc;
- int stop = 0, retval = 0;
-
- num = 0;
- if (!func)
- d = (char *) arg;
- else
- d = NULL;
-
- if (fmt == NULL)
- fmt = "(fmt null)\n";
-
- if (radix < 2 || radix > 36)
- radix = 10;
-
- for (;;) {
- padc = ' ';
- width = 0;
- while ((ch = (u_char)*fmt++) != '%' || stop) {
- if (ch == '\0')
- return (retval);
- PCHAR(ch);
- }
- percent = fmt - 1;
- qflag = 0; lflag = 0; ladjust = 0; sharpflag = 0; neg = 0;
- sign = 0; dot = 0; dwidth = 0; upper = 0;
- cflag = 0; hflag = 0; jflag = 0; tflag = 0; zflag = 0;
-reswitch: switch (ch = (u_char)*fmt++) {
- case '.':
- dot = 1;
- goto reswitch;
- case '#':
- sharpflag = 1;
- goto reswitch;
- case '+':
- sign = 1;
- goto reswitch;
- case '-':
- ladjust = 1;
- goto reswitch;
- case '%':
- PCHAR(ch);
- break;
- case '*':
- if (!dot) {
- width = va_arg(ap, int);
- if (width < 0) {
- ladjust = !ladjust;
- width = -width;
- }
- } else {
- dwidth = va_arg(ap, int);
- }
- goto reswitch;
- case '0':
- if (!dot) {
- padc = '0';
- goto reswitch;
- }
- case '1': case '2': case '3': case '4':
- case '5': case '6': case '7': case '8': case '9':
- for (n = 0;; ++fmt) {
- n = n * 10 + ch - '0';
- ch = *fmt;
- if (ch < '0' || ch > '9')
- break;
- }
- if (dot)
- dwidth = n;
- else
- width = n;
- goto reswitch;
- case 'b':
- num = (u_int)va_arg(ap, int);
- p = va_arg(ap, char *);
- for (q = ksprintn(nbuf, num, *p++, NULL, 0); *q;)
- PCHAR(*q--);
-
- if (num == 0)
- break;
-
- for (tmp = 0; *p;) {
- n = *p++;
- if (num & (1 << (n - 1))) {
- PCHAR(tmp ? ',' : '<');
- for (; (n = *p) > ' '; ++p)
- PCHAR(n);
- tmp = 1;
- } else
- for (; *p > ' '; ++p)
- continue;
- }
- if (tmp)
- PCHAR('>');
- break;
- case 'c':
- PCHAR(va_arg(ap, int));
- break;
- case 'D':
- up = va_arg(ap, u_char *);
- p = va_arg(ap, char *);
- if (!width)
- width = 16;
- while(width--) {
- PCHAR(hex2ascii(*up >> 4));
- PCHAR(hex2ascii(*up & 0x0f));
- up++;
- if (width)
- for (q=p;*q;q++)
- PCHAR(*q);
- }
- break;
- case 'd':
- case 'i':
- base = 10;
- sign = 1;
- goto handle_sign;
- case 'h':
- if (hflag) {
- hflag = 0;
- cflag = 1;
- } else
- hflag = 1;
- goto reswitch;
- case 'j':
- jflag = 1;
- goto reswitch;
- case 'l':
- if (lflag) {
- lflag = 0;
- qflag = 1;
- } else
- lflag = 1;
- goto reswitch;
- case 'n':
- if (jflag)
- *(va_arg(ap, intmax_t *)) = retval;
- else if (qflag)
- *(va_arg(ap, quad_t *)) = retval;
- else if (lflag)
- *(va_arg(ap, long *)) = retval;
- else if (zflag)
- *(va_arg(ap, size_t *)) = retval;
- else if (hflag)
- *(va_arg(ap, short *)) = retval;
- else if (cflag)
- *(va_arg(ap, char *)) = retval;
- else
- *(va_arg(ap, int *)) = retval;
- break;
- case 'o':
- base = 8;
- goto handle_nosign;
- case 'p':
- base = 16;
- sharpflag = (width == 0);
- sign = 0;
- num = (uintptr_t)va_arg(ap, void *);
- goto number;
- case 'q':
- qflag = 1;
- goto reswitch;
- case 'r':
- base = radix;
- if (sign)
- goto handle_sign;
- goto handle_nosign;
- case 's':
- p = va_arg(ap, char *);
- if (p == NULL)
- p = "(null)";
- if (!dot)
- n = strlen (p);
- else
- for (n = 0; n < dwidth && p[n]; n++)
- continue;
-
- width -= n;
-
- if (!ladjust && width > 0)
- while (width--)
- PCHAR(padc);
- while (n--)
- PCHAR(*p++);
- if (ladjust && width > 0)
- while (width--)
- PCHAR(padc);
- break;
- case 't':
- tflag = 1;
- goto reswitch;
- case 'u':
- base = 10;
- goto handle_nosign;
- case 'X':
- upper = 1;
- case 'x':
- base = 16;
- goto handle_nosign;
- case 'y':
- base = 16;
- sign = 1;
- goto handle_sign;
- case 'z':
- zflag = 1;
- goto reswitch;
-handle_nosign:
- sign = 0;
- if (jflag)
- num = va_arg(ap, uintmax_t);
- else if (qflag)
- num = va_arg(ap, u_quad_t);
- else if (tflag)
- num = va_arg(ap, ptrdiff_t);
- else if (lflag)
- num = va_arg(ap, u_long);
- else if (zflag)
- num = va_arg(ap, size_t);
- else if (hflag)
- num = (u_short)va_arg(ap, int);
- else if (cflag)
- num = (u_char)va_arg(ap, int);
- else
- num = va_arg(ap, u_int);
- goto number;
-handle_sign:
- if (jflag)
- num = va_arg(ap, intmax_t);
- else if (qflag)
- num = va_arg(ap, quad_t);
- else if (tflag)
- num = va_arg(ap, ptrdiff_t);
- else if (lflag)
- num = va_arg(ap, long);
- else if (zflag)
- num = va_arg(ap, ssize_t);
- else if (hflag)
- num = (short)va_arg(ap, int);
- else if (cflag)
- num = (char)va_arg(ap, int);
- else
- num = va_arg(ap, int);
-number:
- if (sign && (intmax_t)num < 0) {
- neg = 1;
- num = -(intmax_t)num;
- }
- p = ksprintn(nbuf, num, base, &n, upper);
- tmp = 0;
- if (sharpflag && num != 0) {
- if (base == 8)
- tmp++;
- else if (base == 16)
- tmp += 2;
- }
- if (neg)
- tmp++;
-
- if (!ladjust && padc == '0')
- dwidth = width - tmp;
- width -= tmp + imax(dwidth, n);
- dwidth -= n;
- if (!ladjust)
- while (width-- > 0)
- PCHAR(' ');
- if (neg)
- PCHAR('-');
- if (sharpflag && num != 0) {
- if (base == 8) {
- PCHAR('0');
- } else if (base == 16) {
- PCHAR('0');
- PCHAR('x');
- }
- }
- while (dwidth-- > 0)
- PCHAR('0');
-
- while (*p)
- PCHAR(*p--);
-
- if (ladjust)
- while (width-- > 0)
- PCHAR(' ');
-
- break;
- default:
- while (percent < fmt)
- PCHAR(*percent++);
- /*
- * Since we ignore an formatting argument it is no
- * longer safe to obey the remaining formatting
- * arguments as the arguments will no longer match
- * the format specs.
- */
- stop = 1;
- break;
- }
- }
-#undef PCHAR
-}
diff --git a/lib/stdlib/timingsafe_bcmp.c b/lib/stdlib/timingsafe_bcmp.c
deleted file mode 100644
index d09815805..000000000
--- a/lib/stdlib/timingsafe_bcmp.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/* $OpenBSD: timingsafe_bcmp.c,v 1.3 2015/08/31 02:53:57 guenther Exp $ */
-/*
- * Copyright (c) 2010 Damien Miller. All rights reserved.
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <string.h>
-
-int __timingsafe_bcmp(const void *, const void *, size_t);
-
-int
-__timingsafe_bcmp(const void *b1, const void *b2, size_t n)
-{
- const unsigned char *p1 = b1, *p2 = b2;
- int ret = 0;
-
- for (; n > 0; n--)
- ret |= *p1++ ^ *p2++;
- return (ret != 0);
-}
-
-__weak_reference(__timingsafe_bcmp, timingsafe_bcmp);
diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c
index dd639397a..033e2375f 100644
--- a/lib/xlat_tables/aarch32/xlat_tables.c
+++ b/lib/xlat_tables/aarch32/xlat_tables.c
@@ -13,7 +13,7 @@
#include <xlat_tables.h>
#include "../xlat_tables_private.h"
-#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
+#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
#error ARMv7 target does not support LPAE MMU descriptors
#endif
@@ -34,16 +34,16 @@ static unsigned long long get_max_supported_pa(void)
}
#endif /* ENABLE_ASSERTIONS */
-int xlat_arch_current_el(void)
+unsigned int xlat_arch_current_el(void)
{
/*
* If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
* SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
*/
- return 3;
+ return 3U;
}
-uint64_t xlat_arch_get_xn_desc(int el __unused)
+uint64_t xlat_arch_get_xn_desc(unsigned int el __unused)
{
return UPPER_ATTRS(XN);
}
@@ -53,25 +53,37 @@ void init_xlat_tables(void)
unsigned long long max_pa;
uintptr_t max_va;
print_mmap();
- init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
+ init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
&max_va, &max_pa);
- assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
- assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
- assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
+ assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
+ assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
+ assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());
}
/*******************************************************************************
* Function for enabling the MMU in Secure PL1, assuming that the
* page-tables have already been created.
******************************************************************************/
+#if !ERROR_DEPRECATED
void enable_mmu_secure(unsigned int flags)
{
+ enable_mmu_svc_mon(flags);
+}
+
+void enable_mmu_direct(unsigned int flags)
+{
+ enable_mmu_direct_svc_mon(flags);
+}
+#endif
+
+void enable_mmu_svc_mon(unsigned int flags)
+{
unsigned int mair0, ttbcr, sctlr;
uint64_t ttbr0;
assert(IS_IN_SECURE());
- assert((read_sctlr() & SCTLR_M_BIT) == 0);
+ assert((read_sctlr() & SCTLR_M_BIT) == 0U);
/* Set attributes in the right indices of the MAIR */
mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
@@ -87,18 +99,18 @@ void enable_mmu_secure(unsigned int flags)
/*
* Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1.
*/
- if (flags & XLAT_TABLE_NC) {
+ int t0sz = 32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);
+
+ if ((flags & XLAT_TABLE_NC) != 0U) {
/* Inner & outer non-cacheable non-shareable. */
ttbcr = TTBCR_EAE_BIT |
TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
- TTBCR_RGN0_INNER_NC |
- (32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
+ TTBCR_RGN0_INNER_NC | (uint32_t) t0sz;
} else {
/* Inner & outer WBWA & shareable. */
ttbcr = TTBCR_EAE_BIT |
TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
- TTBCR_RGN0_INNER_WBA |
- (32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
+ TTBCR_RGN0_INNER_WBA | (uint32_t) t0sz;
}
ttbcr |= TTBCR_EPD1_BIT;
write_ttbcr(ttbcr);
@@ -106,7 +118,7 @@ void enable_mmu_secure(unsigned int flags)
/* Set TTBR0 bits as well */
ttbr0 = (uintptr_t) base_xlation_table;
write64_ttbr0(ttbr0);
- write64_ttbr1(0);
+ write64_ttbr1(0U);
/*
* Ensure all translation table writes have drained
@@ -120,7 +132,7 @@ void enable_mmu_secure(unsigned int flags)
sctlr = read_sctlr();
sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;
- if (flags & DISABLE_DCACHE)
+ if ((flags & DISABLE_DCACHE) != 0U)
sctlr &= ~SCTLR_C_BIT;
else
sctlr |= SCTLR_C_BIT;
@@ -131,7 +143,7 @@ void enable_mmu_secure(unsigned int flags)
isb();
}
-void enable_mmu_direct(unsigned int flags)
+void enable_mmu_direct_svc_mon(unsigned int flags)
{
- enable_mmu_secure(flags);
+ enable_mmu_svc_mon(flags);
}
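
A host-runnable illustration of the T0SZ arithmetic introduced above: PLAT_VIRT_ADDR_SPACE_SIZE must be a power of two, so __builtin_ctzll() yields its log2 and TTBCR.T0SZ = 32 - log2(size) on AArch32 with LPAE (the 1 GiB value below is illustrative).

#include <assert.h>

int main(void)
{
	unsigned long long va_space_size = 1ULL << 30;	/* 1 GiB */
	int t0sz = 32 - __builtin_ctzll(va_space_size);

	/* T0SZ = 2 leaves a 30-bit input address range. */
	assert(t0sz == 2);
	return 0;
}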
diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c
index 5717516a4..4afdeed06 100644
--- a/lib/xlat_tables/aarch64/xlat_tables.c
+++ b/lib/xlat_tables/aarch64/xlat_tables.c
@@ -10,7 +10,7 @@
#include <bl_common.h>
#include <common_def.h>
#include <platform_def.h>
-#include <sys/types.h>
+#include <stdint.h>
#include <utils.h>
#include <xlat_tables.h>
#include <xlat_tables_arch.h>
@@ -31,26 +31,26 @@ static unsigned long long calc_physical_addr_size_bits(
unsigned long long max_addr)
{
/* Physical address can't exceed 48 bits */
- assert((max_addr & ADDR_MASK_48_TO_63) == 0);
+ assert((max_addr & ADDR_MASK_48_TO_63) == 0U);
/* 48 bits address */
- if (max_addr & ADDR_MASK_44_TO_47)
+ if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
return TCR_PS_BITS_256TB;
/* 44 bits address */
- if (max_addr & ADDR_MASK_42_TO_43)
+ if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
return TCR_PS_BITS_16TB;
/* 42 bits address */
- if (max_addr & ADDR_MASK_40_TO_41)
+ if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
return TCR_PS_BITS_4TB;
/* 40 bits address */
- if (max_addr & ADDR_MASK_36_TO_39)
+ if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
return TCR_PS_BITS_1TB;
/* 36 bits address */
- if (max_addr & ADDR_MASK_32_TO_35)
+ if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
return TCR_PS_BITS_64GB;
return TCR_PS_BITS_4GB;
@@ -78,21 +78,21 @@ static unsigned long long get_max_supported_pa(void)
}
#endif /* ENABLE_ASSERTIONS */
-int xlat_arch_current_el(void)
+unsigned int xlat_arch_current_el(void)
{
- int el = GET_EL(read_CurrentEl());
+ unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
- assert(el > 0);
+ assert(el > 0U);
return el;
}
-uint64_t xlat_arch_get_xn_desc(int el)
+uint64_t xlat_arch_get_xn_desc(unsigned int el)
{
- if (el == 3) {
+ if (el == 3U) {
return UPPER_ATTRS(XN);
} else {
- assert(el == 1);
+ assert(el == 1U);
return UPPER_ATTRS(PXN);
}
}
@@ -102,12 +102,12 @@ void init_xlat_tables(void)
unsigned long long max_pa;
uintptr_t max_va;
print_mmap();
- init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
+ init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
&max_va, &max_pa);
- assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
- assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
- assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
+ assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
+ assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
+ assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());
tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
}
@@ -129,7 +129,7 @@ void init_xlat_tables(void)
uint32_t sctlr; \
\
assert(IS_IN_EL(_el)); \
- assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0); \
+ assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0U); \
\
/* Set attributes in the right indices of the MAIR */ \
mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX); \
@@ -144,16 +144,18 @@ void init_xlat_tables(void)
\
/* Set TCR bits as well. */ \
/* Set T0SZ to (64 - width of virtual address space) */ \
- if (flags & XLAT_TABLE_NC) { \
+ int t0sz = 64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);\
+ \
+ if ((flags & XLAT_TABLE_NC) != 0U) { \
/* Inner & outer non-cacheable non-shareable. */\
tcr = TCR_SH_NON_SHAREABLE | \
TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC | \
- (64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));\
+ (uint64_t) t0sz; \
} else { \
/* Inner & outer WBWA & shareable. */ \
tcr = TCR_SH_INNER_SHAREABLE | \
TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA | \
- (64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));\
+ (uint64_t) t0sz; \
} \
tcr |= _tcr_extra; \
write_tcr_el##_el(tcr); \
@@ -172,7 +174,7 @@ void init_xlat_tables(void)
sctlr = read_sctlr_el##_el(); \
sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT; \
\
- if (flags & DISABLE_DCACHE) \
+ if ((flags & DISABLE_DCACHE) != 0U) \
sctlr &= ~SCTLR_C_BIT; \
else \
sctlr |= SCTLR_C_BIT; \
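
A self-contained sketch of the bucket selection performed by calc_physical_addr_size_bits() earlier in this file: the highest set bit of the maximum physical address picks the TCR.PS encoding. The MASK macro stands in for the ADDR_MASK_* constants defined in the TF-A headers.

#include <stdint.h>

#define MASK(msb, lsb)	(((1ULL << ((msb) - (lsb) + 1)) - 1ULL) << (lsb))

static unsigned int pa_bits(unsigned long long max_addr)
{
	if ((max_addr & MASK(47, 44)) != 0U) return 48; /* TCR_PS_BITS_256TB */
	if ((max_addr & MASK(43, 42)) != 0U) return 44; /* TCR_PS_BITS_16TB */
	if ((max_addr & MASK(41, 40)) != 0U) return 42; /* TCR_PS_BITS_4TB */
	if ((max_addr & MASK(39, 36)) != 0U) return 40; /* TCR_PS_BITS_1TB */
	if ((max_addr & MASK(35, 32)) != 0U) return 36; /* TCR_PS_BITS_64GB */
	return 32;					/* TCR_PS_BITS_4GB */
}

/* e.g. pa_bits((1ULL << 40) - 1ULL) == 40, i.e. TCR_PS_BITS_1TB. */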
diff --git a/lib/xlat_tables/xlat_tables_common.c b/lib/xlat_tables/xlat_tables_common.c
index b42cd6814..ca67f2a09 100644
--- a/lib/xlat_tables/xlat_tables_common.c
+++ b/lib/xlat_tables/xlat_tables_common.c
@@ -11,8 +11,9 @@
#include <common_def.h>
#include <debug.h>
#include <platform_def.h>
+#include <stdbool.h>
+#include <stdint.h>
#include <string.h>
-#include <types.h>
#include <utils.h>
#include <xlat_tables.h>
#include "xlat_tables_private.h"
@@ -26,12 +27,13 @@
(((level) == U(0)) ? LVL0_SPACER : \
(((level) == U(1)) ? LVL1_SPACER : \
(((level) == U(2)) ? LVL2_SPACER : LVL3_SPACER)))
-#define debug_print(...) tf_printf(__VA_ARGS__)
+#define debug_print(...) printf(__VA_ARGS__)
#else
#define debug_print(...) ((void)0)
#endif
#define UNSET_DESC ~0ULL
+#define MT_UNKNOWN ~0U
static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
__aligned(XLAT_TABLE_SIZE) __section("xlat_table");
@@ -55,7 +57,7 @@ void print_mmap(void)
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
debug_print("mmap:\n");
mmap_region_t *mm = mmap;
- while (mm->size) {
+ while (mm->size != 0U) {
debug_print(" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
(void *)mm->base_va, mm->base_pa,
mm->size, mm->attr);
@@ -69,37 +71,37 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
size_t size, unsigned int attr)
{
mmap_region_t *mm = mmap;
- mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1;
- unsigned long long end_pa = base_pa + size - 1;
- uintptr_t end_va = base_va + size - 1;
+ const mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1U;
+ unsigned long long end_pa = base_pa + size - 1U;
+ uintptr_t end_va = base_va + size - 1U;
assert(IS_PAGE_ALIGNED(base_pa));
assert(IS_PAGE_ALIGNED(base_va));
assert(IS_PAGE_ALIGNED(size));
- if (!size)
+ if (size == 0U)
return;
assert(base_pa < end_pa); /* Check for overflows */
assert(base_va < end_va);
assert((base_va + (uintptr_t)size - (uintptr_t)1) <=
- (PLAT_VIRT_ADDR_SPACE_SIZE - 1));
+ (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
assert((base_pa + (unsigned long long)size - 1ULL) <=
- (PLAT_PHY_ADDR_SPACE_SIZE - 1));
+ (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
#if ENABLE_ASSERTIONS
/* Check for PAs and VAs overlaps with all other regions */
for (mm = mmap; mm->size; ++mm) {
- uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+ uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
/*
* Check if one of the regions is completely inside the other
* one.
*/
- int fully_overlapped_va =
+ bool fully_overlapped_va =
((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
((mm->base_va >= base_va) && (mm_end_va <= end_va));
@@ -122,10 +124,10 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
unsigned long long mm_end_pa =
mm->base_pa + mm->size - 1;
- int separated_pa =
- (end_pa < mm->base_pa) || (base_pa > mm_end_pa);
- int separated_va =
- (end_va < mm->base_va) || (base_va > mm_end_va);
+ bool separated_pa = (end_pa < mm->base_pa) ||
+ (base_pa > mm_end_pa);
+ bool separated_va = (end_va < mm->base_va) ||
+ (base_va > mm_end_va);
assert(separated_va && separated_pa);
}
@@ -136,7 +138,7 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
#endif /* ENABLE_ASSERTIONS */
/* Find correct place in mmap to insert new region */
- while (mm->base_va < base_va && mm->size)
+ while ((mm->base_va < base_va) && (mm->size != 0U))
++mm;
/*
@@ -154,10 +156,10 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
++mm;
/* Make room for new region by moving other regions up by one place */
- memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);
+ (void)memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);
/* Check we haven't lost the empty sentinel from the end of the array */
- assert(mm_last->size == 0);
+ assert(mm_last->size == 0U);
mm->base_pa = base_pa;
mm->base_va = base_va;
@@ -172,9 +174,12 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
void mmap_add(const mmap_region_t *mm)
{
- while (mm->size) {
- mmap_add_region(mm->base_pa, mm->base_va, mm->size, mm->attr);
- ++mm;
+ const mmap_region_t *mm_cursor = mm;
+
+ while (mm_cursor->size != 0U) {
+ mmap_add_region(mm_cursor->base_pa, mm_cursor->base_va,
+ mm_cursor->size, mm_cursor->attr);
+ mm_cursor++;
}
}
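
The zero-size sentinel that ends the walk in mmap_add() above is what platform region tables rely on; a usage sketch (assumes <xlat_tables.h> for mmap_region_t and the MAP_REGION_FLAT helper; addresses and sizes are made up):

static const mmap_region_t plat_mmap[] = {
	MAP_REGION_FLAT(0x00000000, 0x00100000,		/* illustrative RAM */
			MT_MEMORY | MT_RW | MT_SECURE),
	MAP_REGION_FLAT(0x1c000000, 0x00010000,		/* illustrative MMIO */
			MT_DEVICE | MT_RW | MT_SECURE),
	{0}	/* zero-size terminator tested by mmap_add() */
};

/* During early platform setup: */
mmap_add(plat_mmap);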
@@ -185,7 +190,7 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
int mem_type;
/* Make sure that the granularity is fine enough to map this address. */
- assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);
+ assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);
desc = addr_pa;
/*
@@ -193,8 +198,12 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
* rest.
*/
desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
- desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
- desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
+ desc |= ((attr & MT_NS) != 0U) ? LOWER_ATTRS(NS) : 0U;
+ desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
+ /*
+ * Always set the access flag, as this library assumes access flag
+ * faults aren't managed.
+ */
desc |= LOWER_ATTRS(ACCESS_FLAG);
desc |= ap1_mask;
@@ -222,9 +231,10 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
} else { /* Normal memory */
/*
* Always map read-write normal memory as execute-never.
- * (Trusted Firmware doesn't self-modify its code, therefore
- * R/W memory is reserved for data storage, which must not be
- * executable.)
+ * This library assumes that it is used by software that does
+ * not self-modify its code; therefore R/W memory is reserved
+ * for data storage, which must not be executable.
+ *
* Note that setting the XN bit here is for consistency only.
* The function that enables the MMU sets the SCTLR_ELx.WXN bit,
* which causes any writable memory region to be treated as
@@ -234,7 +244,7 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
* For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
* attribute to figure out the value of the XN bit.
*/
- if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
+ if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) {
desc |= execute_never_mask;
}
@@ -248,9 +258,9 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
debug_print((mem_type == MT_MEMORY) ? "MEM" :
((mem_type == MT_NON_CACHEABLE) ? "NC" : "DEV"));
- debug_print(attr & MT_RW ? "-RW" : "-RO");
- debug_print(attr & MT_NS ? "-NS" : "-S");
- debug_print(attr & MT_EXECUTE_NEVER ? "-XN" : "-EXEC");
+ debug_print(((attr & MT_RW) != 0U) ? "-RW" : "-RO");
+ debug_print(((attr & MT_NS) != 0U) ? "-NS" : "-S");
+ debug_print(((attr & MT_EXECUTE_NEVER) != 0U) ? "-XN" : "-EXEC");
return desc;
}
@@ -260,14 +270,14 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
*
* On success, this function returns 0.
* If there are partial overlaps (meaning that a smaller size is needed) or if
- * the region can't be found in the given area, it returns -1. In this case the
- * value pointed by attr should be ignored by the caller.
+ * the region can't be found in the given area, it returns MT_UNKNOWN. In this
+ * case the value pointed by attr should be ignored by the caller.
*/
-static int mmap_region_attr(mmap_region_t *mm, uintptr_t base_va,
- size_t size, unsigned int *attr)
+static unsigned int mmap_region_attr(const mmap_region_t *mm, uintptr_t base_va,
+ size_t size, unsigned int *attr)
{
/* Don't assume that the area is contained in the first region */
- int ret = -1;
+ unsigned int ret = MT_UNKNOWN;
/*
* Get attributes from last (innermost) region that contains the
@@ -284,26 +294,26 @@ static int mmap_region_attr(mmap_region_t *mm, uintptr_t base_va,
* in region 2. The loop shouldn't stop at region 2 as inner regions
* have priority over outer regions, it should stop at region 5.
*/
- for (;; ++mm) {
+ for ( ; ; ++mm) {
- if (!mm->size)
+ if (mm->size == 0U)
return ret; /* Reached end of list */
- if (mm->base_va > base_va + size - 1)
+ if (mm->base_va > (base_va + size - 1U))
return ret; /* Next region is after area so end */
- if (mm->base_va + mm->size - 1 < base_va)
+ if ((mm->base_va + mm->size - 1U) < base_va)
continue; /* Next region has already been overtaken */
- if (!ret && mm->attr == *attr)
+ if ((ret == 0U) && (mm->attr == *attr))
continue; /* Region doesn't override attribs so skip */
- if (mm->base_va > base_va ||
- mm->base_va + mm->size - 1 < base_va + size - 1)
- return -1; /* Region doesn't fully cover our area */
+ if ((mm->base_va > base_va) ||
+ ((mm->base_va + mm->size - 1U) < (base_va + size - 1U)))
+ return MT_UNKNOWN; /* Region doesn't fully cover area */
*attr = mm->attr;
- ret = 0;
+ ret = 0U;
}
return ret;
}
@@ -313,7 +323,8 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
uint64_t *table,
unsigned int level)
{
- assert(level >= XLAT_TABLE_LEVEL_MIN && level <= XLAT_TABLE_LEVEL_MAX);
+ assert((level >= XLAT_TABLE_LEVEL_MIN) &&
+ (level <= XLAT_TABLE_LEVEL_MAX));
unsigned int level_size_shift =
L0_XLAT_ADDRESS_SHIFT - level * XLAT_TABLE_ENTRIES_SHIFT;
@@ -326,10 +337,10 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
do {
uint64_t desc = UNSET_DESC;
- if (!mm->size) {
+ if (mm->size == 0U) {
/* Done mapping regions; finish zeroing the table */
desc = INVALID_DESC;
- } else if (mm->base_va + mm->size - 1 < base_va) {
+ } else if ((mm->base_va + mm->size - 1U) < base_va) {
/* This area is after the region so get next region */
++mm;
continue;
@@ -338,7 +349,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
debug_print("%s VA:%p size:0x%llx ", get_level_spacer(level),
(void *)base_va, (unsigned long long)level_size);
- if (mm->base_va > base_va + level_size - 1) {
+ if (mm->base_va > (base_va + level_size - 1U)) {
/* Next region is after this area. Nothing to map yet */
desc = INVALID_DESC;
/* Make sure that the current level allows block descriptors */
@@ -349,9 +360,10 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
* it will return the innermost region's attributes.
*/
unsigned int attr;
- int r = mmap_region_attr(mm, base_va, level_size, &attr);
+ unsigned int r = mmap_region_attr(mm, base_va,
+ level_size, &attr);
- if (!r) {
+ if (r == 0U) {
desc = mmap_desc(attr,
base_va - mm->base_va + mm->base_pa,
level);
@@ -360,13 +372,15 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
if (desc == UNSET_DESC) {
/* Area not covered by a region so need finer table */
- uint64_t *new_table = xlat_tables[next_xlat++];
+ uint64_t *new_table = xlat_tables[next_xlat];
+
+ next_xlat++;
assert(next_xlat <= MAX_XLAT_TABLES);
desc = TABLE_DESC | (uintptr_t)new_table;
/* Recurse to fill in new table */
mm = init_xlation_table_inner(mm, base_va,
- new_table, level+1);
+ new_table, level + 1U);
}
debug_print("\n");
@@ -374,7 +388,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
*table++ = desc;
base_va += level_size;
} while ((base_va & level_index_mask) &&
- (base_va - 1 < PLAT_VIRT_ADDR_SPACE_SIZE - 1));
+ ((base_va - 1U) < (PLAT_VIRT_ADDR_SPACE_SIZE - 1U)));
return mm;
}
@@ -383,15 +397,15 @@ void init_xlation_table(uintptr_t base_va, uint64_t *table,
unsigned int level, uintptr_t *max_va,
unsigned long long *max_pa)
{
- int el = xlat_arch_current_el();
+ unsigned int el = xlat_arch_current_el();
execute_never_mask = xlat_arch_get_xn_desc(el);
- if (el == 3) {
+ if (el == 3U) {
ap1_mask = LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
} else {
- assert(el == 1);
- ap1_mask = 0;
+ assert(el == 1U);
+ ap1_mask = 0ULL;
}
init_xlation_table_inner(mmap, base_va, table, level);
diff --git a/lib/xlat_tables/xlat_tables_private.h b/lib/xlat_tables/xlat_tables_private.h
index 810c48e1a..f882f7efb 100644
--- a/lib/xlat_tables/xlat_tables_private.h
+++ b/lib/xlat_tables/xlat_tables_private.h
@@ -1,11 +1,11 @@
/*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef __XLAT_TABLES_PRIVATE_H__
-#define __XLAT_TABLES_PRIVATE_H__
+#ifndef XLAT_TABLES_PRIVATE_H
+#define XLAT_TABLES_PRIVATE_H
#include <cassert.h>
#include <platform_def.h>
@@ -44,17 +44,17 @@ CASSERT(CHECK_PHY_ADDR_SPACE_SIZE(PLAT_PHY_ADDR_SPACE_SIZE),
void print_mmap(void);
/* Returns the current Exception Level. The returned EL must be 1 or higher. */
-int xlat_arch_current_el(void);
+unsigned int xlat_arch_current_el(void);
/*
* Returns the bit mask that has to be ORed to the rest of a translation table
* descriptor so that execution of code is prohibited at the given Exception
* Level.
*/
-uint64_t xlat_arch_get_xn_desc(int el);
+uint64_t xlat_arch_get_xn_desc(unsigned int el);
void init_xlation_table(uintptr_t base_va, uint64_t *table,
unsigned int level, uintptr_t *max_va,
unsigned long long *max_pa);
-#endif /* __XLAT_TABLES_PRIVATE_H__ */
+#endif /* XLAT_TABLES_PRIVATE_H */
diff --git a/lib/xlat_tables_v2/aarch32/enable_mmu.S b/lib/xlat_tables_v2/aarch32/enable_mmu.S
index 97cdde751..4a4ac30f5 100644
--- a/lib/xlat_tables_v2/aarch32/enable_mmu.S
+++ b/lib/xlat_tables_v2/aarch32/enable_mmu.S
@@ -8,9 +8,11 @@
#include <assert_macros.S>
#include <xlat_tables_v2.h>
- .global enable_mmu_direct
+ .global enable_mmu_direct_svc_mon
+ .global enable_mmu_direct_hyp
-func enable_mmu_direct
+ /* void enable_mmu_direct_svc_mon(unsigned int flags) */
+func enable_mmu_direct_svc_mon
/* Assert that MMU is turned off */
#if ENABLE_ASSERTIONS
ldcopr r1, SCTLR
@@ -24,17 +26,17 @@ func enable_mmu_direct
mov r3, r0
ldr r0, =mmu_cfg_params
- /* MAIR0 */
- ldr r1, [r0, #(MMU_CFG_MAIR0 << 2)]
+ /* MAIR0. Only the lower 32 bits are used. */
+ ldr r1, [r0, #(MMU_CFG_MAIR << 3)]
stcopr r1, MAIR0
- /* TTBCR */
- ldr r2, [r0, #(MMU_CFG_TCR << 2)]
+ /* TTBCR. Only the lower 32 bits are used. */
+ ldr r2, [r0, #(MMU_CFG_TCR << 3)]
stcopr r2, TTBCR
/* TTBR0 */
- ldr r1, [r0, #(MMU_CFG_TTBR0_LO << 2)]
- ldr r2, [r0, #(MMU_CFG_TTBR0_HI << 2)]
+ ldr r1, [r0, #(MMU_CFG_TTBR0 << 3)]
+ ldr r2, [r0, #((MMU_CFG_TTBR0 << 3) + 4)]
stcopr16 r1, r2, TTBR0_64
/* TTBR1 is unused right now; set it to 0. */
@@ -63,4 +65,56 @@ func enable_mmu_direct
isb
bx lr
-endfunc enable_mmu_direct
+endfunc enable_mmu_direct_svc_mon
+
+
+ /* void enable_mmu_direct_hyp(unsigned int flags) */
+func enable_mmu_direct_hyp
+ /* Assert that MMU is turned off */
+#if ENABLE_ASSERTIONS
+ ldcopr r1, HSCTLR
+ tst r1, #HSCTLR_M_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* Invalidate TLB entries */
+ TLB_INVALIDATE(r0, TLBIALL)
+
+ mov r3, r0
+ ldr r0, =mmu_cfg_params
+
+ /* HMAIR0 */
+ ldr r1, [r0, #(MMU_CFG_MAIR << 3)]
+ stcopr r1, HMAIR0
+
+ /* HTCR */
+ ldr r2, [r0, #(MMU_CFG_TCR << 3)]
+ stcopr r2, HTCR
+
+ /* HTTBR */
+ ldr r1, [r0, #(MMU_CFG_TTBR0 << 3)]
+ ldr r2, [r0, #((MMU_CFG_TTBR0 << 3) + 4)]
+ stcopr16 r1, r2, HTTBR_64
+
+ /*
+ * Ensure all translation table writes have drained into memory, the TLB
+ * invalidation is complete, and translation register writes are
+ * committed before enabling the MMU
+ */
+ dsb ish
+ isb
+
+ /* Enable the MMU, honoring the flags */
+ ldcopr r1, HSCTLR
+ ldr r2, =(HSCTLR_WXN_BIT | HSCTLR_C_BIT | HSCTLR_M_BIT)
+ orr r1, r1, r2
+
+ /* Clear C bit if requested */
+ tst r3, #DISABLE_DCACHE
+ bicne r1, r1, #HSCTLR_C_BIT
+
+ stcopr r1, HSCTLR
+ isb
+
+ bx lr
+endfunc enable_mmu_direct_hyp
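
The loads above index mmu_cfg_params as an array of 64-bit slots, hence the '<< 3' (times 8) scaling, with the two 32-bit halves of TTBR0 read at byte offsets 16 and 20. A minimal C view of that layout, assuming the MMU_CFG_* indices below mirror xlat_tables_v2.h:

#include <stdint.h>

enum {
	MMU_CFG_MAIR = 0,	/* byte offset 0  */
	MMU_CFG_TCR,		/* byte offset 8  */
	MMU_CFG_TTBR0,		/* byte offset 16 */
	MMU_CFG_PARAM_MAX
};

uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];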
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index 6e9719258..66938e5f1 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -9,32 +9,30 @@
#include <assert.h>
#include <cassert.h>
#include <platform_def.h>
-#include <utils.h>
+#include <stdbool.h>
#include <utils_def.h>
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
-#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
+#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
#error ARMv7 target does not support LPAE MMU descriptors
#endif
-uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
-
/*
- * Returns 1 if the provided granule size is supported, 0 otherwise.
+ * Returns true if the provided granule size is supported, false otherwise.
*/
-int xlat_arch_is_granule_size_supported(size_t size)
+bool xlat_arch_is_granule_size_supported(size_t size)
{
/*
- * The Trusted Firmware uses long descriptor translation table format,
- * which supports 4 KiB pages only.
+ * The library uses the long descriptor translation table format, which
+ * supports 4 KiB pages only.
*/
- return (size == (4U * 1024U));
+ return size == PAGE_SIZE_4KB;
}
size_t xlat_arch_get_max_supported_granule_size(void)
{
- return 4U * 1024U;
+ return PAGE_SIZE_4KB;
}
#if ENABLE_ASSERTIONS
@@ -45,23 +43,38 @@ unsigned long long xlat_arch_get_max_supported_pa(void)
}
#endif /* ENABLE_ASSERTIONS*/
-int is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
+bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
{
- return (read_sctlr() & SCTLR_M_BIT) != 0;
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ assert(xlat_arch_current_el() == 1U);
+ return (read_sctlr() & SCTLR_M_BIT) != 0U;
+ } else {
+ assert(ctx->xlat_regime == EL2_REGIME);
+ assert(xlat_arch_current_el() == 2U);
+ return (read_hsctlr() & HSCTLR_M_BIT) != 0U;
+ }
}
-void xlat_arch_tlbi_va(uintptr_t va)
+bool is_dcache_enabled(void)
{
- /*
- * Ensure the translation table write has drained into memory before
- * invalidating the TLB entry.
- */
- dsbishst();
+ if (IS_IN_EL2()) {
+ return (read_hsctlr() & HSCTLR_C_BIT) != 0U;
+ } else {
+ return (read_sctlr() & SCTLR_C_BIT) != 0U;
+ }
+}
- tlbimvaais(TLBI_ADDR(va));
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
+{
+ if (xlat_regime == EL1_EL0_REGIME) {
+ return UPPER_ATTRS(XN) | UPPER_ATTRS(PXN);
+ } else {
+ assert(xlat_regime == EL2_REGIME);
+ return UPPER_ATTRS(XN);
+ }
}
-void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime __unused)
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
{
/*
* Ensure the translation table write has drained into memory before
@@ -69,7 +82,12 @@ void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime __unused)
*/
dsbishst();
- tlbimvaais(TLBI_ADDR(va));
+ if (xlat_regime == EL1_EL0_REGIME) {
+ tlbimvaais(TLBI_ADDR(va));
+ } else {
+ assert(xlat_regime == EL2_REGIME);
+ tlbimvahis(TLBI_ADDR(va));
+ }
}
void xlat_arch_tlbi_va_sync(void)
@@ -98,49 +116,70 @@ void xlat_arch_tlbi_va_sync(void)
isb();
}
-int xlat_arch_current_el(void)
+unsigned int xlat_arch_current_el(void)
{
- /*
- * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
- * SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
- */
- return 3;
+ if (IS_IN_HYP()) {
+ return 2U;
+ } else {
+ assert(IS_IN_SVC() || IS_IN_MON());
+ /*
+ * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor,
+ * System, SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
+ *
+ * The PL1&0 translation regime in AArch32 behaves like the
+ * EL1&0 regime in AArch64 except for the XN bits, but we set
+ * and unset them at the same time, so there's no difference in
+ * practice.
+ */
+ return 1U;
+ }
}
/*******************************************************************************
- * Function for enabling the MMU in Secure PL1, assuming that the page tables
+ * Function for enabling the MMU in PL1 or PL2, assuming that the page tables
* have already been created.
******************************************************************************/
-void setup_mmu_cfg(unsigned int flags,
- const uint64_t *base_table,
- unsigned long long max_pa,
- uintptr_t max_va)
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+ const uint64_t *base_table, unsigned long long max_pa,
+ uintptr_t max_va, __unused int xlat_regime)
{
- u_register_t mair0, ttbcr;
- uint64_t ttbr0;
-
- assert(IS_IN_SECURE());
+ uint64_t mair, ttbr0;
+ uint32_t ttbcr;
/* Set attributes in the right indices of the MAIR */
- mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
- mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
+ mair = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+ mair |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
ATTR_IWBWA_OWBWA_NTR_INDEX);
- mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
+ mair |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
ATTR_NON_CACHEABLE_INDEX);
/*
- * Configure the control register for stage 1 of the PL1&0 translation
- * regime.
+ * Configure the control register for stage 1 of the PL1&0 or EL2
+ * translation regimes.
*/
/* Use the Long-descriptor translation table format. */
ttbcr = TTBCR_EAE_BIT;
- /*
- * Disable translation table walk for addresses that are translated
- * using TTBR1. Therefore, only TTBR0 is used.
- */
- ttbcr |= TTBCR_EPD1_BIT;
+ if (xlat_regime == EL1_EL0_REGIME) {
+ assert(IS_IN_SVC() || IS_IN_MON());
+ /*
+ * Disable translation table walk for addresses that are
+ * translated using TTBR1. Therefore, only TTBR0 is used.
+ */
+ ttbcr |= TTBCR_EPD1_BIT;
+ } else {
+ assert(xlat_regime == EL2_REGIME);
+ assert(IS_IN_HYP());
+
+ /*
+ * Set HTCR bits as well. Set HTTBR table properties
+ * as Inner & outer WBWA & shareable.
+ */
+ ttbcr |= HTCR_RES1 |
+ HTCR_SH0_INNER_SHAREABLE | HTCR_RGN0_OUTER_WBA |
+ HTCR_RGN0_INNER_WBA;
+ }
/*
* Limit the input address ranges and memory region sizes translated
@@ -148,20 +187,23 @@ void setup_mmu_cfg(unsigned int flags,
* 32 bits.
*/
if (max_va != UINT32_MAX) {
- uintptr_t virtual_addr_space_size = max_va + 1;
+ uintptr_t virtual_addr_space_size = max_va + 1U;
+
assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
/*
* __builtin_ctzll(0) is undefined but here we are guaranteed
* that virtual_addr_space_size is in the range [1, UINT32_MAX].
*/
- ttbcr |= 32 - __builtin_ctzll(virtual_addr_space_size);
+ int t0sz = 32 - __builtin_ctzll(virtual_addr_space_size);
+
+ ttbcr |= (uint32_t) t0sz;
}
/*
* Set the cacheability and shareability attributes for memory
* associated with translation table walks using TTBR0.
*/
- if (flags & XLAT_TABLE_NC) {
+ if ((flags & XLAT_TABLE_NC) != 0U) {
/* Inner & outer non-cacheable non-shareable. */
ttbcr |= TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
TTBCR_RGN0_INNER_NC;
@@ -173,17 +215,17 @@ void setup_mmu_cfg(unsigned int flags,
/* Set TTBR0 bits as well */
ttbr0 = (uint64_t)(uintptr_t) base_table;
+
#if ARM_ARCH_AT_LEAST(8, 2)
/*
- * Enable CnP bit so as to share page tables with all PEs.
- * Mandatory for ARMv8.2 implementations.
+ * Enable CnP bit so as to share page tables with all PEs. This
+ * is mandatory for ARMv8.2 implementations.
*/
ttbr0 |= TTBR_CNP_BIT;
#endif
/* Now populate MMU configuration */
- mmu_cfg_params[MMU_CFG_MAIR0] = mair0;
- mmu_cfg_params[MMU_CFG_TCR] = ttbcr;
- mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr0;
- mmu_cfg_params[MMU_CFG_TTBR0_HI] = ttbr0 >> 32;
+ params[MMU_CFG_MAIR] = mair;
+ params[MMU_CFG_TCR] = (uint64_t) ttbcr;
+ params[MMU_CFG_TTBR0] = ttbr0;
}
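
The three 64-bit slots written here are consumed by the assembly MMU-enable helpers; conceptually, the consumer side does something like the following sketch (the register-write helpers are illustrative stand-ins for the real assembly):

    static void example_consume_params(void)
    {
    	write_mair0((uint32_t)mmu_cfg_params[MMU_CFG_MAIR]);
    	write_ttbcr((uint32_t)mmu_cfg_params[MMU_CFG_TCR]);
    	write64_ttbr0(mmu_cfg_params[MMU_CFG_TTBR0]);	/* 64-bit LPAE TTBR0 */
    }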
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h b/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
deleted file mode 100644
index 9b41f4df2..000000000
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef __XLAT_TABLES_ARCH_PRIVATE_H__
-#define __XLAT_TABLES_ARCH_PRIVATE_H__
-
-#include <xlat_tables_defs.h>
-#include <xlat_tables_v2.h>
-
-/*
- * Return the execute-never mask that will prevent instruction fetch at the
- * given translation regime.
- */
-static inline uint64_t xlat_arch_regime_get_xn_desc(int regime __unused)
-{
- return UPPER_ATTRS(XN);
-}
-
-#endif /* __XLAT_TABLES_ARCH_PRIVATE_H__ */
diff --git a/lib/xlat_tables_v2/aarch64/enable_mmu.S b/lib/xlat_tables_v2/aarch64/enable_mmu.S
index a72c7fae5..21717d28a 100644
--- a/lib/xlat_tables_v2/aarch64/enable_mmu.S
+++ b/lib/xlat_tables_v2/aarch64/enable_mmu.S
@@ -9,6 +9,7 @@
#include <xlat_tables_v2.h>
.global enable_mmu_direct_el1
+ .global enable_mmu_direct_el2
.global enable_mmu_direct_el3
/* Macros to read and write to system register for a given EL. */
@@ -20,6 +21,19 @@
mrs \gp_reg, \reg_name\()_el\()\el
.endm
+ .macro tlbi_invalidate_all el
+ .if \el == 1
+ TLB_INVALIDATE(vmalle1)
+ .elseif \el == 2
+ TLB_INVALIDATE(alle2)
+ .elseif \el == 3
+ TLB_INVALIDATE(alle3)
+ .else
+ .error "EL must be 1, 2 or 3"
+ .endif
+ .endm
+
+ /* void enable_mmu_direct_el<x>(unsigned int flags) */
.macro define_mmu_enable_func el
func enable_mmu_direct_\()el\el
#if ENABLE_ASSERTIONS
@@ -27,33 +41,22 @@
tst x1, #SCTLR_M_BIT
ASM_ASSERT(eq)
#endif
-
- /* Invalidate TLB entries */
- .if \el == 1
- TLB_INVALIDATE(vmalle1)
- .else
- .if \el == 3
- TLB_INVALIDATE(alle3)
- .else
- .error "EL must be 1 or 3"
- .endif
- .endif
+ /* Invalidate all TLB entries */
+ tlbi_invalidate_all \el
mov x7, x0
ldr x0, =mmu_cfg_params
/* MAIR */
- ldr w1, [x0, #(MMU_CFG_MAIR0 << 2)]
+ ldr x1, [x0, #(MMU_CFG_MAIR << 3)]
_msr mair, \el, x1
/* TCR */
- ldr w2, [x0, #(MMU_CFG_TCR << 2)]
+ ldr x2, [x0, #(MMU_CFG_TCR << 3)]
_msr tcr, \el, x2
/* TTBR */
- ldr w3, [x0, #(MMU_CFG_TTBR0_LO << 2)]
- ldr w4, [x0, #(MMU_CFG_TTBR0_HI << 2)]
- orr x3, x3, x4, lsl #32
+ ldr x3, [x0, #(MMU_CFG_TTBR0 << 3)]
_msr ttbr0, \el, x3
/*
@@ -88,4 +91,5 @@
* enable_mmu_direct_el3
*/
define_mmu_enable_func 1
+ define_mmu_enable_func 2
define_mmu_enable_func 3
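
On the C side, the three generated entry points would be driven as in this sketch (the EL2 variant is the new one; the flag value is illustrative):

    void enable_mmu_direct_el1(unsigned int flags);
    void enable_mmu_direct_el2(unsigned int flags);
    void enable_mmu_direct_el3(unsigned int flags);

    static void example_el2_mmu_on(void)
    {
    	/* setup_mmu_cfg() must have populated mmu_cfg_params first. */
    	enable_mmu_direct_el2(0U);	/* flags = 0: cacheable table walks */
    }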
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 4bbbe5443..269adc7ef 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -8,72 +8,71 @@
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
-#include <sys/types.h>
+#include <stdbool.h>
+#include <stdint.h>
#include <utils_def.h>
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
-uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
-
/*
- * Returns 1 if the provided granule size is supported, 0 otherwise.
+ * Returns true if the provided granule size is supported, false otherwise.
*/
-int xlat_arch_is_granule_size_supported(size_t size)
+bool xlat_arch_is_granule_size_supported(size_t size)
{
u_register_t id_aa64mmfr0_el1 = read_id_aa64mmfr0_el1();
- if (size == (4U * 1024U)) {
+ if (size == PAGE_SIZE_4KB) {
return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
ID_AA64MMFR0_EL1_TGRAN4_MASK) ==
ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED;
- } else if (size == (16U * 1024U)) {
+ } else if (size == PAGE_SIZE_16KB) {
return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
ID_AA64MMFR0_EL1_TGRAN16_MASK) ==
ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED;
- } else if (size == (64U * 1024U)) {
+ } else if (size == PAGE_SIZE_64KB) {
return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
ID_AA64MMFR0_EL1_TGRAN64_MASK) ==
ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED;
+ } else {
+		return false;
}
-
- return 0;
}
size_t xlat_arch_get_max_supported_granule_size(void)
{
- if (xlat_arch_is_granule_size_supported(64U * 1024U)) {
- return 64U * 1024U;
- } else if (xlat_arch_is_granule_size_supported(16U * 1024U)) {
- return 16U * 1024U;
+ if (xlat_arch_is_granule_size_supported(PAGE_SIZE_64KB)) {
+ return PAGE_SIZE_64KB;
+ } else if (xlat_arch_is_granule_size_supported(PAGE_SIZE_16KB)) {
+ return PAGE_SIZE_16KB;
} else {
- assert(xlat_arch_is_granule_size_supported(4U * 1024U));
- return 4U * 1024U;
+ assert(xlat_arch_is_granule_size_supported(PAGE_SIZE_4KB));
+ return PAGE_SIZE_4KB;
}
}
unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
{
/* Physical address can't exceed 48 bits */
- assert((max_addr & ADDR_MASK_48_TO_63) == 0);
+ assert((max_addr & ADDR_MASK_48_TO_63) == 0U);
/* 48 bits address */
- if (max_addr & ADDR_MASK_44_TO_47)
+ if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
return TCR_PS_BITS_256TB;
/* 44 bits address */
- if (max_addr & ADDR_MASK_42_TO_43)
+ if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
return TCR_PS_BITS_16TB;
/* 42 bits address */
- if (max_addr & ADDR_MASK_40_TO_41)
+ if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
return TCR_PS_BITS_4TB;
/* 40 bits address */
- if (max_addr & ADDR_MASK_36_TO_39)
+ if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
return TCR_PS_BITS_1TB;
/* 36 bits address */
- if (max_addr & ADDR_MASK_32_TO_35)
+ if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
return TCR_PS_BITS_64GB;
return TCR_PS_BITS_4GB;
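
A few illustrative inputs and the PS encoding this function returns:

    /* max_addr = 0x00000000FFFFFFFFULL (32-bit PA) -> TCR_PS_BITS_4GB
     * max_addr = 0x0000000FFFFFFFFFULL (36-bit PA) -> TCR_PS_BITS_64GB
     * max_addr = 0x0000FFFFFFFFFFFFULL (48-bit PA) -> TCR_PS_BITS_256TB */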
@@ -101,31 +100,46 @@ unsigned long long xlat_arch_get_max_supported_pa(void)
}
#endif /* ENABLE_ASSERTIONS*/
-int is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
+bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
{
if (ctx->xlat_regime == EL1_EL0_REGIME) {
- assert(xlat_arch_current_el() >= 1);
- return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
+ assert(xlat_arch_current_el() >= 1U);
+ return (read_sctlr_el1() & SCTLR_M_BIT) != 0U;
+ } else if (ctx->xlat_regime == EL2_REGIME) {
+ assert(xlat_arch_current_el() >= 2U);
+ return (read_sctlr_el2() & SCTLR_M_BIT) != 0U;
} else {
assert(ctx->xlat_regime == EL3_REGIME);
- assert(xlat_arch_current_el() >= 3);
- return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
+ assert(xlat_arch_current_el() >= 3U);
+ return (read_sctlr_el3() & SCTLR_M_BIT) != 0U;
}
}
+bool is_dcache_enabled(void)
+{
+ unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
+
+ if (el == 1U) {
+ return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
+ } else if (el == 2U) {
+ return (read_sctlr_el2() & SCTLR_C_BIT) != 0U;
+ } else {
+ return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
+ }
+}
-void xlat_arch_tlbi_va(uintptr_t va)
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
{
-#if IMAGE_EL == 1
- assert(IS_IN_EL(1));
- xlat_arch_tlbi_va_regime(va, EL1_EL0_REGIME);
-#elif IMAGE_EL == 3
- assert(IS_IN_EL(3));
- xlat_arch_tlbi_va_regime(va, EL3_REGIME);
-#endif
+ if (xlat_regime == EL1_EL0_REGIME) {
+ return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
+ } else {
+ assert((xlat_regime == EL2_REGIME) ||
+ (xlat_regime == EL3_REGIME));
+ return UPPER_ATTRS(XN);
+ }
}
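
The mask returned here is OR'ed into block/page descriptors by xlat_desc() in the core code; a hedged sketch of that call site:

    static uint64_t example_make_xn(uint64_t desc, int xlat_regime)
    {
    	/* EL1&0 regime: sets UXN and PXN; EL2/EL3: the single XN bit. */
    	return desc | xlat_arch_regime_get_xn_desc(xlat_regime);
    }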
-void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime)
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
{
/*
* Ensure the translation table write has drained into memory before
@@ -141,11 +155,14 @@ void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime)
* exception level (see section D4.9.2 of the ARM ARM rev B.a).
*/
if (xlat_regime == EL1_EL0_REGIME) {
- assert(xlat_arch_current_el() >= 1);
+ assert(xlat_arch_current_el() >= 1U);
tlbivaae1is(TLBI_ADDR(va));
+ } else if (xlat_regime == EL2_REGIME) {
+ assert(xlat_arch_current_el() >= 2U);
+ tlbivae2is(TLBI_ADDR(va));
} else {
assert(xlat_regime == EL3_REGIME);
- assert(xlat_arch_current_el() >= 3);
+ assert(xlat_arch_current_el() >= 3U);
tlbivae3is(TLBI_ADDR(va));
}
}
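
Together with xlat_arch_tlbi_va_sync(), this gives the maintenance sequence used when a live table entry is edited; a minimal sketch mirroring the unmap path in xlat_tables_core.c:

    static void example_unmap_entry(uint64_t *table, unsigned int idx,
    				uintptr_t va, int xlat_regime)
    {
    	table[idx] = INVALID_DESC;		/* 1. update the descriptor */
    	xlat_arch_tlbi_va(va, xlat_regime);	/* 2. invalidate stale TLBs */
    	xlat_arch_tlbi_va_sync();		/* 3. wait for completion */
    }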
@@ -173,21 +190,20 @@ void xlat_arch_tlbi_va_sync(void)
isb();
}
-int xlat_arch_current_el(void)
+unsigned int xlat_arch_current_el(void)
{
- int el = GET_EL(read_CurrentEl());
+ unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
- assert(el > 0);
+ assert(el > 0U);
return el;
}
-void setup_mmu_cfg(unsigned int flags,
- const uint64_t *base_table,
- unsigned long long max_pa,
- uintptr_t max_va)
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+ const uint64_t *base_table, unsigned long long max_pa,
+ uintptr_t max_va, int xlat_regime)
{
- uint64_t mair, ttbr, tcr;
+ uint64_t mair, ttbr0, tcr;
uintptr_t virtual_addr_space_size;
/* Set attributes in the right indices of the MAIR. */
@@ -195,28 +211,28 @@ void setup_mmu_cfg(unsigned int flags,
mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);
- ttbr = (uint64_t) base_table;
-
/*
* Limit the input address ranges and memory region sizes translated
* using TTBR0 to the given virtual address space size.
*/
- assert(max_va < ((uint64_t) UINTPTR_MAX));
+ assert(max_va < ((uint64_t)UINTPTR_MAX));
- virtual_addr_space_size = max_va + 1;
+ virtual_addr_space_size = (uintptr_t)max_va + 1U;
assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
/*
* __builtin_ctzll(0) is undefined but here we are guaranteed that
* virtual_addr_space_size is in the range [1,UINTPTR_MAX].
*/
- tcr = (uint64_t) 64 - __builtin_ctzll(virtual_addr_space_size);
+ int t0sz = 64 - __builtin_ctzll(virtual_addr_space_size);
+
+ tcr = (uint64_t) t0sz;
/*
* Set the cacheability and shareability attributes for memory
* associated with translation table walks.
*/
- if ((flags & XLAT_TABLE_NC) != 0) {
+ if ((flags & XLAT_TABLE_NC) != 0U) {
/* Inner & outer non-cacheable non-shareable. */
tcr |= TCR_SH_NON_SHAREABLE |
TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
@@ -232,30 +248,31 @@ void setup_mmu_cfg(unsigned int flags,
*/
unsigned long long tcr_ps_bits = tcr_physical_addr_size_bits(max_pa);
-#if IMAGE_EL == 1
- assert(IS_IN_EL(1));
- /*
- * TCR_EL1.EPD1: Disable translation table walk for addresses that are
- * translated using TTBR1_EL1.
- */
- tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
-#elif IMAGE_EL == 3
- assert(IS_IN_EL(3));
- tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
-#endif
-
- mmu_cfg_params[MMU_CFG_MAIR0] = (uint32_t) mair;
- mmu_cfg_params[MMU_CFG_TCR] = (uint32_t) tcr;
-
- /* Set TTBR bits as well */
- if (ARM_ARCH_AT_LEAST(8, 2)) {
+ if (xlat_regime == EL1_EL0_REGIME) {
/*
- * Enable CnP bit so as to share page tables with all PEs. This
- * is mandatory for ARMv8.2 implementations.
+ * TCR_EL1.EPD1: Disable translation table walk for addresses
+ * that are translated using TTBR1_EL1.
*/
- ttbr |= TTBR_CNP_BIT;
+ tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
+ } else if (xlat_regime == EL2_REGIME) {
+ tcr |= TCR_EL2_RES1 | (tcr_ps_bits << TCR_EL2_PS_SHIFT);
+ } else {
+ assert(xlat_regime == EL3_REGIME);
+ tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
}
- mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr;
- mmu_cfg_params[MMU_CFG_TTBR0_HI] = (uint32_t) (ttbr >> 32);
+ /* Set TTBR bits as well */
+ ttbr0 = (uint64_t) base_table;
+
+#if ARM_ARCH_AT_LEAST(8, 2)
+ /*
+ * Enable CnP bit so as to share page tables with all PEs. This
+ * is mandatory for ARMv8.2 implementations.
+ */
+ ttbr0 |= TTBR_CNP_BIT;
+#endif
+
+ params[MMU_CFG_MAIR] = mair;
+ params[MMU_CFG_TCR] = tcr;
+ params[MMU_CFG_TTBR0] = ttbr0;
}
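
Putting the pieces together for a hypothetical EL3 image with a 4 GB virtual address space and a 36-bit physical address space:

    /* virtual_addr_space_size = 1ULL << 32 -> t0sz = 64 - 32 = 32
     * tcr_ps_bits             = TCR_PS_BITS_64GB
     * tcr = 32					(T0SZ)
     *     | <walk cacheability/shareability>	(from flags; hunk elided)
     *     | TCR_EL3_RES1
     *     | (TCR_PS_BITS_64GB << TCR_EL3_PS_SHIFT); */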
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h b/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h
deleted file mode 100644
index 39b0a653a..000000000
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef __XLAT_TABLES_ARCH_PRIVATE_H__
-#define __XLAT_TABLES_ARCH_PRIVATE_H__
-
-#include <assert.h>
-#include <xlat_tables_defs.h>
-#include <xlat_tables_v2.h>
-
-/*
- * Return the execute-never mask that will prevent instruction fetch at all ELs
- * that are part of the given translation regime.
- */
-static inline uint64_t xlat_arch_regime_get_xn_desc(int regime)
-{
- if (regime == EL1_EL0_REGIME) {
- return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
- } else {
- assert(regime == EL3_REGIME);
- return UPPER_ATTRS(XN);
- }
-}
-
-#endif /* __XLAT_TABLES_ARCH_PRIVATE_H__ */
diff --git a/lib/xlat_tables_v2/xlat_tables.mk b/lib/xlat_tables_v2/xlat_tables.mk
index b25c805cf..9507ad715 100644
--- a/lib/xlat_tables_v2/xlat_tables.mk
+++ b/lib/xlat_tables_v2/xlat_tables.mk
@@ -10,5 +10,3 @@ XLAT_TABLES_LIB_SRCS := $(addprefix lib/xlat_tables_v2/, \
xlat_tables_context.c \
xlat_tables_core.c \
xlat_tables_utils.c)
-
-INCLUDES += -Ilib/xlat_tables_v2/${ARCH}
diff --git a/lib/xlat_tables_v2/xlat_tables_context.c b/lib/xlat_tables_v2/xlat_tables_context.c
index 0964b49b2..bf0cc9f5d 100644
--- a/lib/xlat_tables_v2/xlat_tables_context.c
+++ b/lib/xlat_tables_v2/xlat_tables_context.c
@@ -4,6 +4,7 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <assert.h>
#include <debug.h>
#include <platform_def.h>
#include <xlat_tables_defs.h>
@@ -12,6 +13,12 @@
#include "xlat_tables_private.h"
/*
+ * MMU configuration register values for the active translation context. Used
+ * from the MMU assembly helpers.
+ */
+uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
+/*
* Each platform can define the size of its physical and virtual address spaces.
* If the platform hasn't defined one or both of them, default to
* ADDR_SPACE_SIZE. The latter is deprecated, though.
@@ -69,9 +76,32 @@ int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
void init_xlat_tables(void)
{
+ assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);
+
+ unsigned int current_el = xlat_arch_current_el();
+
+ if (current_el == 1U) {
+ tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
+ } else if (current_el == 2U) {
+ tf_xlat_ctx.xlat_regime = EL2_REGIME;
+ } else {
+ assert(current_el == 3U);
+ tf_xlat_ctx.xlat_regime = EL3_REGIME;
+ }
+
init_xlat_tables_ctx(&tf_xlat_ctx);
}
+int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
+{
+ return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
+}
+
+int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
+{
+ return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
+}
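+
A hedged usage sketch of the two new wrappers (MT_RO_DATA is assumed to be the usual read-only, execute-never shorthand from xlat_tables_v2.h):

    static int example_write_protect(uintptr_t page_va)
    {
    	uint32_t attr;
    	int ret;

    	ret = xlat_get_mem_attributes(page_va, &attr);
    	if (ret != 0)
    		return ret;	/* page_va is not mapped */

    	/* Drop write permission on one page of the active context. */
    	return xlat_change_mem_attributes(page_va, PAGE_SIZE,
    					  MT_RO_DATA | MT_SECURE);
    }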
+
/*
* If dynamic allocation of new regions is disabled then by the time we call the
* function enabling the MMU, we'll have registered all the memory regions to
@@ -91,26 +121,57 @@ void init_xlat_tables(void)
#ifdef AARCH32
+#if !ERROR_DEPRECATED
void enable_mmu_secure(unsigned int flags)
{
- setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
- tf_xlat_ctx.va_max_address);
- enable_mmu_direct(flags);
+ enable_mmu_svc_mon(flags);
+}
+
+void enable_mmu_direct(unsigned int flags)
+{
+ enable_mmu_direct_svc_mon(flags);
+}
+#endif
+
+void enable_mmu_svc_mon(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
+ enable_mmu_direct_svc_mon(flags);
+}
+
+void enable_mmu_hyp(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL2_REGIME);
+ enable_mmu_direct_hyp(flags);
}
#else
void enable_mmu_el1(unsigned int flags)
{
- setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
- tf_xlat_ctx.va_max_address);
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
enable_mmu_direct_el1(flags);
}
+void enable_mmu_el2(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL2_REGIME);
+ enable_mmu_direct_el2(flags);
+}
+
void enable_mmu_el3(unsigned int flags)
{
- setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
- tf_xlat_ctx.va_max_address);
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL3_REGIME);
enable_mmu_direct_el3(flags);
}
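
End to end, a platform port would now drive this file roughly as follows (addresses, sizes and exception level are hypothetical):

    static void example_mmu_bring_up(void)
    {
    	/* 1. Register static regions before building the tables. */
    	mmap_add_region(0x80000000ULL, 0x80000000U, 0x00100000U,
    			MT_MEMORY | MT_RW | MT_SECURE);

    	/* 2. Build the tables; the regime is now auto-detected from the
    	 *    current exception level instead of a build-time IMAGE_EL. */
    	init_xlat_tables();

    	/* 3. Program the registers and switch the MMU on. */
    	enable_mmu_el3(0U);
    }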
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index f555524a9..003718e76 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -9,15 +9,22 @@
#include <debug.h>
#include <errno.h>
#include <platform_def.h>
+#include <stdbool.h>
+#include <stdint.h>
#include <string.h>
-#include <types.h>
#include <utils_def.h>
-#include <xlat_tables_arch_private.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_v2.h>
#include "xlat_tables_private.h"
+/* Helper function that cleans the data cache only if it is enabled. */
+static inline void xlat_clean_dcache_range(uintptr_t addr, size_t size)
+{
+ if (is_dcache_enabled())
+ clean_dcache_range(addr, size);
+}
+
#if PLAT_XLAT_TABLES_DYNAMIC
/*
@@ -30,9 +37,9 @@
* Returns the index of the array corresponding to the specified translation
* table.
*/
-static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
+static int xlat_table_get_index(const xlat_ctx_t *ctx, const uint64_t *table)
{
- for (unsigned int i = 0; i < ctx->tables_num; i++)
+ for (int i = 0; i < ctx->tables_num; i++)
if (ctx->tables[i] == table)
return i;
@@ -40,15 +47,15 @@ static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
* Maybe we were asked to get the index of the base level table, which
* should never happen.
*/
- assert(0);
+ assert(false);
return -1;
}
/* Returns a pointer to an empty translation table. */
-static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
+static uint64_t *xlat_table_get_empty(const xlat_ctx_t *ctx)
{
- for (unsigned int i = 0; i < ctx->tables_num; i++)
+ for (int i = 0; i < ctx->tables_num; i++)
if (ctx->tables_mapped_regions[i] == 0)
return ctx->tables[i];
@@ -56,21 +63,27 @@ static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
}
/* Increments region count for a given table. */
-static void xlat_table_inc_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
+static void xlat_table_inc_regions_count(const xlat_ctx_t *ctx,
+ const uint64_t *table)
{
- ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]++;
+ int idx = xlat_table_get_index(ctx, table);
+
+ ctx->tables_mapped_regions[idx]++;
}
/* Decrements region count for a given table. */
-static void xlat_table_dec_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
+static void xlat_table_dec_regions_count(const xlat_ctx_t *ctx,
+ const uint64_t *table)
{
- ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]--;
+ int idx = xlat_table_get_index(ctx, table);
+
+ ctx->tables_mapped_regions[idx]--;
}
/* Returns true if the specified table is empty, false otherwise. */
-static int xlat_table_is_empty(xlat_ctx_t *ctx, const uint64_t *table)
+static bool xlat_table_is_empty(const xlat_ctx_t *ctx, const uint64_t *table)
{
- return !ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)];
+ return ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)] == 0;
}
#else /* PLAT_XLAT_TABLES_DYNAMIC */
@@ -89,13 +102,13 @@ static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
* Returns a block/page table descriptor for the given level and attributes.
*/
uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
- unsigned long long addr_pa, int level)
+ unsigned long long addr_pa, unsigned int level)
{
uint64_t desc;
- int mem_type;
+ uint32_t mem_type;
/* Make sure that the granularity is fine enough to map this address. */
- assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);
+ assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);
desc = addr_pa;
/*
@@ -104,14 +117,16 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
*/
desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
/*
- * Always set the access flag, as TF doesn't manage access flag faults.
+ * Always set the access flag, as this library assumes access flag
+ * faults aren't managed.
+ */
+ desc |= LOWER_ATTRS(ACCESS_FLAG);
+ /*
* Deduce other fields of the descriptor based on the MT_NS and MT_RW
* memory region attributes.
*/
- desc |= LOWER_ATTRS(ACCESS_FLAG);
-
- desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
- desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
+ desc |= ((attr & MT_NS) != 0U) ? LOWER_ATTRS(NS) : 0U;
+ desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
/*
* Do not allow unprivileged access when the mapping is for a privileged
@@ -119,7 +134,7 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
* lower exception levels, set AP[2] to AP_NO_ACCESS_UNPRIVILEGED.
*/
if (ctx->xlat_regime == EL1_EL0_REGIME) {
- if (attr & MT_USER) {
+ if ((attr & MT_USER) != 0U) {
/* EL0 mapping requested, so we give User access */
desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
} else {
@@ -127,7 +142,8 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
}
} else {
- assert(ctx->xlat_regime == EL3_REGIME);
+ assert((ctx->xlat_regime == EL2_REGIME) ||
+ (ctx->xlat_regime == EL3_REGIME));
desc |= LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
}
@@ -155,9 +171,10 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
} else { /* Normal memory */
/*
* Always map read-write normal memory as execute-never.
- * (Trusted Firmware doesn't self-modify its code, therefore
- * R/W memory is reserved for data storage, which must not be
- * executable.)
+ * This library assumes that it is used by software that does
+ * not self-modify its code, therefore R/W memory is reserved
+ * for data storage, which must not be executable.
+ *
* Note that setting the XN bit here is for consistency only.
* The function that enables the MMU sets the SCTLR_ELx.WXN bit,
 * which causes any writable memory region to be treated as
@@ -170,7 +187,7 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
* translation regime and the policy applied in
* xlat_arch_regime_get_xn_desc().
*/
- if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
+ if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) {
desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
}
@@ -221,10 +238,10 @@ typedef enum {
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
const uintptr_t table_base_va,
uint64_t *const table_base,
- const int table_entries,
+ const unsigned int table_entries,
const unsigned int level)
{
- assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);
+ assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
uint64_t *subtable;
uint64_t desc;
@@ -232,16 +249,16 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
uintptr_t table_idx_va;
uintptr_t table_idx_end_va; /* End VA of this entry */
- uintptr_t region_end_va = mm->base_va + mm->size - 1;
+ uintptr_t region_end_va = mm->base_va + mm->size - 1U;
- int table_idx;
+ unsigned int table_idx;
if (mm->base_va > table_base_va) {
/* Find the first index of the table affected by the region. */
table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
- table_idx = (table_idx_va - table_base_va) >>
- XLAT_ADDR_SHIFT(level);
+ table_idx = (unsigned int)((table_idx_va - table_base_va) >>
+ XLAT_ADDR_SHIFT(level));
assert(table_idx < table_entries);
} else {
@@ -252,19 +269,18 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
while (table_idx < table_entries) {
- table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1;
+ table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;
desc = table_base[table_idx];
uint64_t desc_type = desc & DESC_MASK;
- action_t action = ACTION_NONE;
+ action_t action;
if ((mm->base_va <= table_idx_va) &&
(region_end_va >= table_idx_end_va)) {
-
/* Region covers all block */
- if (level == 3) {
+ if (level == 3U) {
/*
* Last level, only page descriptors allowed,
* erase it.
@@ -291,7 +307,6 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
} else if ((mm->base_va <= table_idx_end_va) ||
(region_end_va >= table_idx_va)) {
-
/*
* Region partially covers block.
*
@@ -300,18 +315,19 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
 * There must be a table descriptor here; if not, there
 * was a problem when mapping the region.
*/
-
- assert(level < 3);
-
+ assert(level < 3U);
assert(desc_type == TABLE_DESC);
action = ACTION_RECURSE_INTO_TABLE;
+ } else {
+ /* The region doesn't cover the block at all */
+ action = ACTION_NONE;
}
if (action == ACTION_WRITE_BLOCK_ENTRY) {
table_base[table_idx] = INVALID_DESC;
- xlat_arch_tlbi_va_regime(table_idx_va, ctx->xlat_regime);
+ xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);
} else if (action == ACTION_RECURSE_INTO_TABLE) {
@@ -320,15 +336,18 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
/* Recurse to write into subtable */
xlat_tables_unmap_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
- level + 1);
-
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
/*
* If the subtable is now empty, remove its reference.
*/
if (xlat_table_is_empty(ctx, subtable)) {
table_base[table_idx] = INVALID_DESC;
- xlat_arch_tlbi_va_regime(table_idx_va,
- ctx->xlat_regime);
+ xlat_arch_tlbi_va(table_idx_va,
+ ctx->xlat_regime);
}
} else {
@@ -354,12 +373,12 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
* specified region.
*/
static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
- const int desc_type, const unsigned long long dest_pa,
- const uintptr_t table_entry_base_va, const unsigned int level)
+ unsigned int desc_type, unsigned long long dest_pa,
+ uintptr_t table_entry_base_va, unsigned int level)
{
- uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+ uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
uintptr_t table_entry_end_va =
- table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1;
+ table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1U;
/*
* The descriptor types allowed depend on the current table level.
@@ -376,7 +395,7 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
* translation with this granularity in principle.
*/
- if (level == 3) {
+ if (level == 3U) {
/*
* Last level, only page descriptors are allowed.
*/
@@ -414,8 +433,8 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
* Also, check if the current level allows block
* descriptors. If not, create a table instead.
*/
- if ((dest_pa & XLAT_BLOCK_MASK(level)) ||
- (level < MIN_LVL_BLOCK_DESC) ||
+ if (((dest_pa & XLAT_BLOCK_MASK(level)) != 0U)
+ || (level < MIN_LVL_BLOCK_DESC) ||
(mm->granularity < XLAT_BLOCK_SIZE(level)))
return ACTION_CREATE_NEW_TABLE;
else
@@ -447,7 +466,7 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
* mmap region failed to detect that PA and VA must at least be
* aligned to PAGE_SIZE.
*/
- assert(level < 3);
+ assert(level < 3U);
if (desc_type == INVALID_DESC) {
/*
@@ -470,13 +489,14 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
*/
return ACTION_RECURSE_INTO_TABLE;
}
- }
+ } else {
- /*
- * This table entry is outside of the region specified in the arguments,
- * don't write anything to it.
- */
- return ACTION_NONE;
+ /*
+ * This table entry is outside of the region specified in the
+ * arguments, don't write anything to it.
+ */
+ return ACTION_NONE;
+ }
}
/*
@@ -486,14 +506,14 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
* should have been mapped.
*/
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
- const uintptr_t table_base_va,
+ uintptr_t table_base_va,
uint64_t *const table_base,
- const int table_entries,
- const unsigned int level)
+ unsigned int table_entries,
+ unsigned int level)
{
- assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);
+ assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
- uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+ uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
uintptr_t table_idx_va;
unsigned long long table_idx_pa;
@@ -501,20 +521,20 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
uint64_t *subtable;
uint64_t desc;
- int table_idx;
+ unsigned int table_idx;
if (mm->base_va > table_base_va) {
/* Find the first index of the table affected by the region. */
table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
- table_idx = (table_idx_va - table_base_va) >>
- XLAT_ADDR_SHIFT(level);
+ table_idx = (unsigned int)((table_idx_va - table_base_va) >>
+ XLAT_ADDR_SHIFT(level));
assert(table_idx < table_entries);
} else {
/* Start from the beginning of the table. */
table_idx_va = table_base_va;
- table_idx = 0;
+ table_idx = 0U;
}
#if PLAT_XLAT_TABLES_DYNAMIC
@@ -529,7 +549,8 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
action_t action = xlat_tables_map_region_action(mm,
- desc & DESC_MASK, table_idx_pa, table_idx_va, level);
+ (uint32_t)(desc & DESC_MASK), table_idx_pa,
+ table_idx_va, level);
if (action == ACTION_WRITE_BLOCK_ENTRY) {
@@ -538,6 +559,7 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
level);
} else if (action == ACTION_CREATE_NEW_TABLE) {
+ uintptr_t end_va;
subtable = xlat_table_get_empty(ctx);
if (subtable == NULL) {
@@ -549,20 +571,31 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
/* Recurse to write into subtable */
- uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+ end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
- level + 1);
- if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
+ if (end_va !=
+ (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
return end_va;
} else if (action == ACTION_RECURSE_INTO_TABLE) {
+ uintptr_t end_va;
subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
/* Recurse to write into subtable */
- uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+ end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
- level + 1);
- if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
+ if (end_va !=
+ (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
return end_va;
} else {
@@ -579,7 +612,7 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
break;
}
- return table_idx_va - 1;
+ return table_idx_va - 1U;
}
/*
@@ -591,23 +624,23 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
* ENOMEM: There is not enough memory in the mmap array.
* EPERM: Region overlaps another one in an invalid way.
*/
-static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
+static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm)
{
unsigned long long base_pa = mm->base_pa;
uintptr_t base_va = mm->base_va;
size_t size = mm->size;
size_t granularity = mm->granularity;
- unsigned long long end_pa = base_pa + size - 1;
- uintptr_t end_va = base_va + size - 1;
+ unsigned long long end_pa = base_pa + size - 1U;
+ uintptr_t end_va = base_va + size - 1U;
if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
!IS_PAGE_ALIGNED(size))
return -EINVAL;
- if ((granularity != XLAT_BLOCK_SIZE(1)) &&
- (granularity != XLAT_BLOCK_SIZE(2)) &&
- (granularity != XLAT_BLOCK_SIZE(3))) {
+ if ((granularity != XLAT_BLOCK_SIZE(1U)) &&
+ (granularity != XLAT_BLOCK_SIZE(2U)) &&
+ (granularity != XLAT_BLOCK_SIZE(3U))) {
return -EINVAL;
}
@@ -622,24 +655,23 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
return -ERANGE;
/* Check that there is space in the ctx->mmap array */
- if (ctx->mmap[ctx->mmap_num - 1].size != 0)
+ if (ctx->mmap[ctx->mmap_num - 1].size != 0U)
return -ENOMEM;
/* Check for PAs and VAs overlaps with all other regions */
- for (mmap_region_t *mm_cursor = ctx->mmap;
- mm_cursor->size; ++mm_cursor) {
+ for (const mmap_region_t *mm_cursor = ctx->mmap;
+ mm_cursor->size != 0U; ++mm_cursor) {
uintptr_t mm_cursor_end_va = mm_cursor->base_va
- + mm_cursor->size - 1;
+ + mm_cursor->size - 1U;
/*
* Check if one of the regions is completely inside the other
* one.
*/
- int fully_overlapped_va =
+ bool fully_overlapped_va =
((base_va >= mm_cursor->base_va) &&
(end_va <= mm_cursor_end_va)) ||
-
((mm_cursor->base_va >= base_va) &&
(mm_cursor_end_va <= end_va));
@@ -652,8 +684,8 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
if (fully_overlapped_va) {
#if PLAT_XLAT_TABLES_DYNAMIC
- if ((mm->attr & MT_DYNAMIC) ||
- (mm_cursor->attr & MT_DYNAMIC))
+ if (((mm->attr & MT_DYNAMIC) != 0U) ||
+ ((mm_cursor->attr & MT_DYNAMIC) != 0U))
return -EPERM;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
if ((mm_cursor->base_va - mm_cursor->base_pa) !=
@@ -672,16 +704,14 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
*/
unsigned long long mm_cursor_end_pa =
- mm_cursor->base_pa + mm_cursor->size - 1;
+ mm_cursor->base_pa + mm_cursor->size - 1U;
- int separated_pa =
- (end_pa < mm_cursor->base_pa) ||
+ bool separated_pa = (end_pa < mm_cursor->base_pa) ||
(base_pa > mm_cursor_end_pa);
- int separated_va =
- (end_va < mm_cursor->base_va) ||
+ bool separated_va = (end_va < mm_cursor->base_va) ||
(base_va > mm_cursor_end_va);
- if (!(separated_va && separated_pa))
+ if (!separated_va || !separated_pa)
return -EPERM;
}
}
@@ -693,13 +723,13 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
- mmap_region_t *mm_last;
- unsigned long long end_pa = mm->base_pa + mm->size - 1;
- uintptr_t end_va = mm->base_va + mm->size - 1;
+ const mmap_region_t *mm_last;
+ unsigned long long end_pa = mm->base_pa + mm->size - 1U;
+ uintptr_t end_va = mm->base_va + mm->size - 1U;
int ret;
/* Ignore empty regions */
- if (!mm->size)
+ if (mm->size == 0U)
return;
/* Static regions must be added before initializing the xlat tables. */
@@ -708,7 +738,7 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
ret = mmap_add_region_check(ctx, mm);
if (ret != 0) {
ERROR("mmap_add_region_check() failed. error %d\n", ret);
- assert(0);
+ assert(false);
return;
}
@@ -736,13 +766,15 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
* Overlapping is only allowed for static regions.
*/
- while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
- && mm_cursor->size)
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
+ && (mm_cursor->size != 0U)) {
++mm_cursor;
+ }
- while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va) &&
- (mm_cursor->size != 0U) && (mm_cursor->size < mm->size))
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
+ (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
++mm_cursor;
+ }
/*
* Find the last entry marker in the mmap
@@ -761,7 +793,7 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
/* Make room for new region by moving other regions up by one place */
mm_destination = mm_cursor + 1;
- memmove(mm_destination, mm_cursor,
+ (void)memmove(mm_destination, mm_cursor,
(uintptr_t)mm_last - (uintptr_t)mm_cursor);
/*
@@ -781,9 +813,11 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
- while (mm->size) {
- mmap_add_region_ctx(ctx, mm);
- mm++;
+ const mmap_region_t *mm_cursor = mm;
+
+ while (mm_cursor->size != 0U) {
+ mmap_add_region_ctx(ctx, mm_cursor);
+ mm_cursor++;
}
}
@@ -792,13 +826,13 @@ void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
mmap_region_t *mm_cursor = ctx->mmap;
- mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
- unsigned long long end_pa = mm->base_pa + mm->size - 1;
- uintptr_t end_va = mm->base_va + mm->size - 1;
+ const mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
+ unsigned long long end_pa = mm->base_pa + mm->size - 1U;
+ uintptr_t end_va = mm->base_va + mm->size - 1U;
int ret;
/* Nothing to do */
- if (!mm->size)
+ if (mm->size == 0U)
return 0;
/* Now this region is a dynamic one */
@@ -813,16 +847,18 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
* static regions in mmap_add_region_ctx().
*/
- while ((mm_cursor->base_va + mm_cursor->size - 1)
- < end_va && mm_cursor->size)
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
+ && (mm_cursor->size != 0U)) {
++mm_cursor;
+ }
- while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
- && (mm_cursor->size < mm->size))
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
+ (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
++mm_cursor;
+ }
/* Make room for new region by moving other regions up by one place */
- memmove(mm_cursor + 1, mm_cursor,
+ (void)memmove(mm_cursor + 1U, mm_cursor,
(uintptr_t)mm_last - (uintptr_t)mm_cursor);
/*
@@ -830,7 +866,7 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
* This shouldn't happen as we have checked in mmap_add_region_check
* that there is free space.
*/
- assert(mm_last->size == 0);
+ assert(mm_last->size == 0U);
*mm_cursor = *mm;
@@ -839,13 +875,16 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
* not, this region will be mapped when they are initialized.
*/
if (ctx->initialized) {
- uintptr_t end_va = xlat_tables_map_region(ctx, mm_cursor,
- 0, ctx->base_table, ctx->base_table_entries,
+ end_va = xlat_tables_map_region(ctx, mm_cursor,
+ 0U, ctx->base_table, ctx->base_table_entries,
ctx->base_level);
-
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
/* Failed to map, remove mmap entry, unmap and return error. */
- if (end_va != mm_cursor->base_va + mm_cursor->size - 1) {
- memmove(mm_cursor, mm_cursor + 1,
+ if (end_va != (mm_cursor->base_va + mm_cursor->size - 1U)) {
+ (void)memmove(mm_cursor, mm_cursor + 1U,
(uintptr_t)mm_last - (uintptr_t)mm_cursor);
/*
@@ -860,14 +899,18 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
* entries, undo every change done up to this point.
*/
mmap_region_t unmap_mm = {
- .base_pa = 0,
+ .base_pa = 0U,
.base_va = mm->base_va,
.size = end_va - mm->base_va,
- .attr = 0
+ .attr = 0U
};
- xlat_tables_unmap_region(ctx, &unmap_mm, 0, ctx->base_table,
- ctx->base_table_entries, ctx->base_level);
-
+ xlat_tables_unmap_region(ctx, &unmap_mm, 0U,
+ ctx->base_table, ctx->base_table_entries,
+ ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
return -ENOMEM;
}
@@ -901,61 +944,65 @@ int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
size_t size)
{
mmap_region_t *mm = ctx->mmap;
- mmap_region_t *mm_last = mm + ctx->mmap_num;
+ const mmap_region_t *mm_last = mm + ctx->mmap_num;
int update_max_va_needed = 0;
int update_max_pa_needed = 0;
/* Check sanity of mmap array. */
- assert(mm[ctx->mmap_num].size == 0);
+ assert(mm[ctx->mmap_num].size == 0U);
- while (mm->size) {
+ while (mm->size != 0U) {
if ((mm->base_va == base_va) && (mm->size == size))
break;
++mm;
}
/* Check that the region was found */
- if (mm->size == 0)
+ if (mm->size == 0U)
return -EINVAL;
/* If the region is static it can't be removed */
- if (!(mm->attr & MT_DYNAMIC))
+ if ((mm->attr & MT_DYNAMIC) == 0U)
return -EPERM;
/* Check if this region is using the top VAs or PAs. */
- if ((mm->base_va + mm->size - 1) == ctx->max_va)
+ if ((mm->base_va + mm->size - 1U) == ctx->max_va)
update_max_va_needed = 1;
- if ((mm->base_pa + mm->size - 1) == ctx->max_pa)
+ if ((mm->base_pa + mm->size - 1U) == ctx->max_pa)
update_max_pa_needed = 1;
/* Update the translation tables if needed */
if (ctx->initialized) {
- xlat_tables_unmap_region(ctx, mm, 0, ctx->base_table,
+ xlat_tables_unmap_region(ctx, mm, 0U, ctx->base_table,
ctx->base_table_entries,
ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
xlat_arch_tlbi_va_sync();
}
/* Remove this region by moving the rest down by one place. */
- memmove(mm, mm + 1, (uintptr_t)mm_last - (uintptr_t)mm);
+ (void)memmove(mm, mm + 1U, (uintptr_t)mm_last - (uintptr_t)mm);
/* Check if we need to update the max VAs and PAs */
- if (update_max_va_needed) {
- ctx->max_va = 0;
+ if (update_max_va_needed == 1) {
+ ctx->max_va = 0U;
mm = ctx->mmap;
- while (mm->size) {
- if ((mm->base_va + mm->size - 1) > ctx->max_va)
- ctx->max_va = mm->base_va + mm->size - 1;
+ while (mm->size != 0U) {
+ if ((mm->base_va + mm->size - 1U) > ctx->max_va)
+ ctx->max_va = mm->base_va + mm->size - 1U;
++mm;
}
}
- if (update_max_pa_needed) {
- ctx->max_pa = 0;
+ if (update_max_pa_needed == 1) {
+ ctx->max_pa = 0U;
mm = ctx->mmap;
- while (mm->size) {
- if ((mm->base_pa + mm->size - 1) > ctx->max_pa)
- ctx->max_pa = mm->base_pa + mm->size - 1;
+ while (mm->size != 0U) {
+ if ((mm->base_pa + mm->size - 1U) > ctx->max_pa)
+ ctx->max_pa = mm->base_pa + mm->size - 1U;
++mm;
}
}
@@ -969,7 +1016,9 @@ void init_xlat_tables_ctx(xlat_ctx_t *ctx)
{
assert(ctx != NULL);
assert(!ctx->initialized);
- assert(ctx->xlat_regime == EL3_REGIME || ctx->xlat_regime == EL1_EL0_REGIME);
+ assert((ctx->xlat_regime == EL3_REGIME) ||
+ (ctx->xlat_regime == EL2_REGIME) ||
+ (ctx->xlat_regime == EL1_EL0_REGIME));
assert(!is_mmu_enabled_ctx(ctx));
mmap_region_t *mm = ctx->mmap;
@@ -978,25 +1027,29 @@ void init_xlat_tables_ctx(xlat_ctx_t *ctx)
/* All tables must be zeroed before mapping any region. */
- for (unsigned int i = 0; i < ctx->base_table_entries; i++)
+ for (unsigned int i = 0U; i < ctx->base_table_entries; i++)
ctx->base_table[i] = INVALID_DESC;
- for (unsigned int j = 0; j < ctx->tables_num; j++) {
+ for (int j = 0; j < ctx->tables_num; j++) {
#if PLAT_XLAT_TABLES_DYNAMIC
ctx->tables_mapped_regions[j] = 0;
#endif
- for (unsigned int i = 0; i < XLAT_TABLE_ENTRIES; i++)
+ for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++)
ctx->tables[j][i] = INVALID_DESC;
}
- while (mm->size) {
- uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0, ctx->base_table,
- ctx->base_table_entries, ctx->base_level);
-
- if (end_va != mm->base_va + mm->size - 1) {
+ while (mm->size != 0U) {
+ uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U,
+ ctx->base_table, ctx->base_table_entries,
+ ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
+ if (end_va != (mm->base_va + mm->size - 1U)) {
ERROR("Not enough memory to map region:\n"
- " VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
- (void *)mm->base_va, mm->base_pa, mm->size, mm->attr);
+ " VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x\n",
+ mm->base_va, mm->base_pa, mm->size, mm->attr);
panic();
}
@@ -1007,7 +1060,7 @@ void init_xlat_tables_ctx(xlat_ctx_t *ctx)
assert(ctx->max_va <= ctx->va_max_address);
assert(ctx->max_pa <= ctx->pa_max_address);
- ctx->initialized = 1;
+ ctx->initialized = true;
xlat_tables_print(ctx);
}
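
For the dynamic path exercised above, the public wrappers pair up as in this sketch (requires PLAT_XLAT_TABLES_DYNAMIC=1; the device address is hypothetical):

    static int example_dynamic_window(void)
    {
    	int ret;

    	ret = mmap_add_dynamic_region(0xC0000000ULL, 0xC0000000U, PAGE_SIZE,
    				      MT_DEVICE | MT_RW | MT_SECURE);
    	if (ret != 0)
    		return ret;	/* -ENOMEM, -EPERM, ... as checked above */

    	/* ... use the device window ... */

    	return mmap_remove_dynamic_region(0xC0000000U, PAGE_SIZE);
    }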
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index 4a54ec5d0..528996a29 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -4,10 +4,11 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef __XLAT_TABLES_PRIVATE_H__
-#define __XLAT_TABLES_PRIVATE_H__
+#ifndef XLAT_TABLES_PRIVATE_H
+#define XLAT_TABLES_PRIVATE_H
#include <platform_def.h>
+#include <stdbool.h>
#include <xlat_tables_defs.h>
#if PLAT_XLAT_TABLES_DYNAMIC
@@ -35,22 +36,26 @@
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+extern uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
+/*
+ * Return the execute-never mask that will prevent instruction fetch at the
+ * given translation regime.
+ */
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime);
+
/*
* Invalidate all TLB entries that match the given virtual address. This
* operation applies to all PEs in the same Inner Shareable domain as the PE
 * that executes this function. This function must be called for every
- * translation table entry that is modified.
- *
- * xlat_arch_tlbi_va() applies the invalidation to the exception level of the
- * current translation regime, whereas xlat_arch_tlbi_va_regime() applies it to
- * the given translation regime.
+ * translation table entry that is modified. It only affects the specified
+ * translation regime.
*
* Note, however, that it is architecturally UNDEFINED to invalidate TLB entries
* pertaining to a higher exception level, e.g. invalidating EL3 entries from
* S-EL1.
*/
-void xlat_arch_tlbi_va(uintptr_t va);
-void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime);
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime);
/*
* This function has to be called at the end of any code that uses the function
@@ -59,7 +64,7 @@ void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime);
void xlat_arch_tlbi_va_sync(void);
/* Print VA, PA, size and attributes of all regions in the mmap array. */
-void xlat_mmap_print(mmap_region_t *const mmap);
+void xlat_mmap_print(const mmap_region_t *mmap);
/*
* Print the current state of the translation tables by reading them from
@@ -71,14 +76,14 @@ void xlat_tables_print(xlat_ctx_t *ctx);
* Returns a block/page table descriptor for the given level and attributes.
*/
uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
- unsigned long long addr_pa, int level);
+ unsigned long long addr_pa, unsigned int level);
/*
* Architecture-specific initialization code.
*/
/* Returns the current Exception Level. The returned EL must be 1 or higher. */
-int xlat_arch_current_el(void);
+unsigned int xlat_arch_current_el(void);
/*
* Return the maximum physical address supported by the hardware.
@@ -86,14 +91,13 @@ int xlat_arch_current_el(void);
*/
unsigned long long xlat_arch_get_max_supported_pa(void);
-/* Enable MMU and configure it to use the specified translation tables. */
-void setup_mmu_cfg(unsigned int flags, const uint64_t *base_table,
- unsigned long long max_pa, uintptr_t max_va);
-
/*
- * Return 1 if the MMU of the translation regime managed by the given xlat_ctx_t
- * is enabled, 0 otherwise.
+ * Returns true if the MMU of the translation regime managed by the given
+ * xlat_ctx_t is enabled, false otherwise.
*/
-int is_mmu_enabled_ctx(const xlat_ctx_t *ctx);
+bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx);
+
+/* Returns true if the data cache is enabled at the current EL. */
+bool is_dcache_enabled(void);
-#endif /* __XLAT_TABLES_PRIVATE_H__ */
+#endif /* XLAT_TABLES_PRIVATE_H */
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
index 5a78434ab..41c01aee7 100644
--- a/lib/xlat_tables_v2/xlat_tables_utils.c
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -9,9 +9,10 @@
#include <debug.h>
#include <errno.h>
#include <platform_def.h>
-#include <types.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
#include <utils_def.h>
-#include <xlat_tables_arch_private.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_v2.h>
@@ -19,7 +20,7 @@
#if LOG_LEVEL < LOG_LEVEL_VERBOSE
-void xlat_mmap_print(__unused mmap_region_t *const mmap)
+void xlat_mmap_print(__unused const mmap_region_t *mmap)
{
/* Empty */
}
@@ -31,39 +32,39 @@ void xlat_tables_print(__unused xlat_ctx_t *ctx)
#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */
-void xlat_mmap_print(mmap_region_t *const mmap)
+void xlat_mmap_print(const mmap_region_t *mmap)
{
- tf_printf("mmap:\n");
+ printf("mmap:\n");
const mmap_region_t *mm = mmap;
while (mm->size != 0U) {
- tf_printf(" VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x "
- "granularity:0x%zx\n", mm->base_va, mm->base_pa,
- mm->size, mm->attr, mm->granularity);
+ printf(" VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x granularity:0x%zx\n",
+ mm->base_va, mm->base_pa, mm->size, mm->attr,
+ mm->granularity);
++mm;
};
- tf_printf("\n");
+ printf("\n");
}
/* Print the attributes of the specified block descriptor. */
static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
{
- int mem_type_index = ATTR_INDEX_GET(desc);
+ uint64_t mem_type_index = ATTR_INDEX_GET(desc);
int xlat_regime = ctx->xlat_regime;
if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
- tf_printf("MEM");
+ printf("MEM");
} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
- tf_printf("NC");
+ printf("NC");
} else {
assert(mem_type_index == ATTR_DEVICE_INDEX);
- tf_printf("DEV");
+ printf("DEV");
}
- if (xlat_regime == EL3_REGIME) {
- /* For EL3 only check the AP[2] and XN bits. */
- tf_printf((desc & LOWER_ATTRS(AP_RO)) ? "-RO" : "-RW");
- tf_printf((desc & UPPER_ATTRS(XN)) ? "-XN" : "-EXEC");
+ if ((xlat_regime == EL3_REGIME) || (xlat_regime == EL2_REGIME)) {
+ /* For EL3 and EL2 only check the AP[2] and XN bits. */
+ printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
+ printf(((desc & UPPER_ATTRS(XN)) != 0ULL) ? "-XN" : "-EXEC");
} else {
assert(xlat_regime == EL1_EL0_REGIME);
/*
@@ -81,18 +82,18 @@ static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
assert((xn_perm == xn_mask) || (xn_perm == 0ULL));
#endif
- tf_printf((desc & LOWER_ATTRS(AP_RO)) ? "-RO" : "-RW");
+ printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
/* Only check one of PXN and UXN, the other one is the same. */
- tf_printf((desc & UPPER_ATTRS(PXN)) ? "-XN" : "-EXEC");
+ printf(((desc & UPPER_ATTRS(PXN)) != 0ULL) ? "-XN" : "-EXEC");
/*
 * Privileged regions can only be accessed from EL1; user
 * regions can be accessed from EL1 and EL0.
*/
- tf_printf((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED))
+ printf(((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED)) != 0ULL)
? "-USER" : "-PRIV");
}
- tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
+ printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");
}
static const char * const level_spacers[] = {
@@ -109,17 +110,15 @@ static const char *invalid_descriptors_ommited =
* Recursive function that reads the translation tables passed as an argument
* and prints their status.
*/
-static void xlat_tables_print_internal(xlat_ctx_t *ctx,
- const uintptr_t table_base_va,
- uint64_t *const table_base, const int table_entries,
- const unsigned int level)
+static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
+ const uint64_t *table_base, unsigned int table_entries,
+ unsigned int level)
{
assert(level <= XLAT_TABLE_LEVEL_MAX);
uint64_t desc;
uintptr_t table_idx_va = table_base_va;
- int table_idx = 0;
-
+ unsigned int table_idx = 0U;
size_t level_size = XLAT_BLOCK_SIZE(level);
/*
@@ -137,18 +136,18 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx,
if ((desc & DESC_MASK) == INVALID_DESC) {
if (invalid_row_count == 0) {
- tf_printf("%sVA:%p size:0x%zx\n",
- level_spacers[level],
- (void *)table_idx_va, level_size);
+ printf("%sVA:0x%lx size:0x%zx\n",
+ level_spacers[level],
+ table_idx_va, level_size);
}
invalid_row_count++;
} else {
if (invalid_row_count > 1) {
- tf_printf(invalid_descriptors_ommited,
- level_spacers[level],
- invalid_row_count - 1);
+ printf(invalid_descriptors_ommited,
+ level_spacers[level],
+ invalid_row_count - 1);
}
invalid_row_count = 0;
@@ -165,23 +164,22 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx,
* but instead points to the next translation
* table in the translation table walk.
*/
- tf_printf("%sVA:%p size:0x%zx\n",
- level_spacers[level],
- (void *)table_idx_va, level_size);
+ printf("%sVA:0x%lx size:0x%zx\n",
+ level_spacers[level],
+ table_idx_va, level_size);
uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
xlat_tables_print_internal(ctx, table_idx_va,
(uint64_t *)addr_inner,
- XLAT_TABLE_ENTRIES, level + 1);
+ XLAT_TABLE_ENTRIES, level + 1U);
} else {
- tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
- level_spacers[level],
- (void *)table_idx_va,
- (unsigned long long)(desc & TABLE_ADDR_MASK),
- level_size);
+ printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
+ level_spacers[level], table_idx_va,
+ (uint64_t)(desc & TABLE_ADDR_MASK),
+ level_size);
xlat_desc_print(ctx, desc);
- tf_printf("\n");
+ printf("\n");
}
}
@@ -190,16 +188,20 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx,
}
if (invalid_row_count > 1) {
- tf_printf(invalid_descriptors_ommited,
- level_spacers[level], invalid_row_count - 1);
+ printf(invalid_descriptors_ommited,
+ level_spacers[level], invalid_row_count - 1);
}
}
void xlat_tables_print(xlat_ctx_t *ctx)
{
const char *xlat_regime_str;
+ int used_page_tables;
+
if (ctx->xlat_regime == EL1_EL0_REGIME) {
xlat_regime_str = "1&0";
+ } else if (ctx->xlat_regime == EL2_REGIME) {
+ xlat_regime_str = "2";
} else {
assert(ctx->xlat_regime == EL3_REGIME);
xlat_regime_str = "3";
@@ -207,29 +209,28 @@ void xlat_tables_print(xlat_ctx_t *ctx)
VERBOSE("Translation tables state:\n");
VERBOSE(" Xlat regime: EL%s\n", xlat_regime_str);
VERBOSE(" Max allowed PA: 0x%llx\n", ctx->pa_max_address);
- VERBOSE(" Max allowed VA: %p\n", (void *) ctx->va_max_address);
+ VERBOSE(" Max allowed VA: 0x%lx\n", ctx->va_max_address);
VERBOSE(" Max mapped PA: 0x%llx\n", ctx->max_pa);
- VERBOSE(" Max mapped VA: %p\n", (void *) ctx->max_va);
+ VERBOSE(" Max mapped VA: 0x%lx\n", ctx->max_va);
- VERBOSE(" Initial lookup level: %i\n", ctx->base_level);
- VERBOSE(" Entries @initial lookup level: %i\n",
+ VERBOSE(" Initial lookup level: %u\n", ctx->base_level);
+ VERBOSE(" Entries @initial lookup level: %u\n",
ctx->base_table_entries);
- int used_page_tables;
#if PLAT_XLAT_TABLES_DYNAMIC
used_page_tables = 0;
- for (unsigned int i = 0; i < ctx->tables_num; ++i) {
+ for (int i = 0; i < ctx->tables_num; ++i) {
if (ctx->tables_mapped_regions[i] != 0)
++used_page_tables;
}
#else
used_page_tables = ctx->next_table;
#endif
- VERBOSE(" Used %i sub-tables out of %i (spare: %i)\n",
+ VERBOSE(" Used %d sub-tables out of %d (spare: %d)\n",
used_page_tables, ctx->tables_num,
ctx->tables_num - used_page_tables);
- xlat_tables_print_internal(ctx, 0, ctx->base_table,
+ xlat_tables_print_internal(ctx, 0U, ctx->base_table,
ctx->base_table_entries, ctx->base_level);
}
@@ -252,13 +253,13 @@ void xlat_tables_print(xlat_ctx_t *ctx)
*/
static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
void *xlat_table_base,
- int xlat_table_base_entries,
+ unsigned int xlat_table_base_entries,
unsigned long long virt_addr_space_size,
- int *out_level)
+ unsigned int *out_level)
{
unsigned int start_level;
uint64_t *table;
- int entries;
+ unsigned int entries;
start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
@@ -268,9 +269,7 @@ static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
for (unsigned int level = start_level;
level <= XLAT_TABLE_LEVEL_MAX;
++level) {
- int idx;
- uint64_t desc;
- uint64_t desc_type;
+ uint64_t idx, desc, desc_type;
idx = XLAT_TABLE_IDX(virtual_addr, level);
if (idx >= entries) {
@@ -311,19 +310,19 @@ static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
 * This shouldn't be reached; the translation table walk should end at
* most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
*/
- assert(0);
+ assert(false);
return NULL;
}
-static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
- uint32_t *attributes, uint64_t **table_entry,
- unsigned long long *addr_pa, int *table_level)
+static int xlat_get_mem_attributes_internal(const xlat_ctx_t *ctx,
+ uintptr_t base_va, uint32_t *attributes, uint64_t **table_entry,
+ unsigned long long *addr_pa, unsigned int *table_level)
{
uint64_t *entry;
uint64_t desc;
- int level;
+ unsigned int level;
unsigned long long virt_addr_space_size;
/*
@@ -331,10 +330,12 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
*/
assert(ctx != NULL);
assert(ctx->initialized);
- assert(ctx->xlat_regime == EL1_EL0_REGIME || ctx->xlat_regime == EL3_REGIME);
+ assert((ctx->xlat_regime == EL1_EL0_REGIME) ||
+ (ctx->xlat_regime == EL2_REGIME) ||
+ (ctx->xlat_regime == EL3_REGIME));
- virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1;
- assert(virt_addr_space_size > 0);
+ virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1ULL;
+ assert(virt_addr_space_size > 0U);
entry = find_xlat_table_entry(base_va,
ctx->base_table,
@@ -342,7 +343,7 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
virt_addr_space_size,
&level);
if (entry == NULL) {
- WARN("Address %p is not mapped.\n", (void *)base_va);
+ WARN("Address 0x%lx is not mapped.\n", base_va);
return -EINVAL;
}
@@ -363,13 +364,13 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
VERBOSE("Attributes: ");
xlat_desc_print(ctx, desc);
- tf_printf("\n");
+ printf("\n");
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
assert(attributes != NULL);
- *attributes = 0;
+ *attributes = 0U;
- int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+ uint64_t attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
*attributes |= MT_MEMORY;
@@ -380,20 +381,21 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
*attributes |= MT_DEVICE;
}
- int ap2_bit = (desc >> AP2_SHIFT) & 1;
+ uint64_t ap2_bit = (desc >> AP2_SHIFT) & 1U;
if (ap2_bit == AP2_RW)
*attributes |= MT_RW;
if (ctx->xlat_regime == EL1_EL0_REGIME) {
- int ap1_bit = (desc >> AP1_SHIFT) & 1;
+ uint64_t ap1_bit = (desc >> AP1_SHIFT) & 1U;
+
if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
*attributes |= MT_USER;
}
- int ns_bit = (desc >> NS_SHIFT) & 1;
+ uint64_t ns_bit = (desc >> NS_SHIFT) & 1U;
- if (ns_bit == 1)
+ if (ns_bit == 1U)
*attributes |= MT_NS;
uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
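
The decode above pulls single bits out of the 64-bit descriptor, and the hunks widen each local to uint64_t so the shifted value is never implicitly narrowed. A compact restatement, with the shift/mask names from the hunks assumed in scope:

static uint32_t decode_perms_sketch(uint64_t desc, uint64_t xn_mask,
				    bool is_el1_el0_regime)
{
	uint32_t attrs = 0U;

	if (((desc >> AP2_SHIFT) & 1U) == AP2_RW)
		attrs |= MT_RW;
	/* AP[1] is only meaningful in the EL1&0 translation regime. */
	if (is_el1_el0_regime &&
	    (((desc >> AP1_SHIFT) & 1U) == AP1_ACCESS_UNPRIVILEGED))
		attrs |= MT_USER;
	if (((desc >> NS_SHIFT) & 1U) == 1U)
		attrs |= MT_NS;
	if ((desc & xn_mask) == xn_mask)
		attrs |= MT_EXECUTE_NEVER;

	return attrs;
}
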
@@ -401,25 +403,23 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
if ((desc & xn_mask) == xn_mask) {
*attributes |= MT_EXECUTE_NEVER;
} else {
- assert((desc & xn_mask) == 0);
+ assert((desc & xn_mask) == 0U);
}
return 0;
}
-int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
- uint32_t *attributes)
+int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+ uint32_t *attr)
{
- return get_mem_attributes_internal(ctx, base_va, attributes,
- NULL, NULL, NULL);
+ return xlat_get_mem_attributes_internal(ctx, base_va, attr,
+ NULL, NULL, NULL);
}
-int change_mem_attributes(xlat_ctx_t *ctx,
- uintptr_t base_va,
- size_t size,
- uint32_t attr)
+int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+ size_t size, uint32_t attr)
{
/* Note: This implementation isn't optimized. */
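
The public entry points gain an xlat_ prefix and a _ctx suffix, making room for context-less wrappers elsewhere in the library. A hypothetical caller of the renamed pair; demote_to_ro_xn(), my_ctx and va are placeholders, not symbols from this patch:

static int demote_to_ro_xn(const xlat_ctx_t *my_ctx, uintptr_t va)
{
	uint32_t attr;
	int rc = xlat_get_mem_attributes_ctx(my_ctx, va, &attr);

	/* Clearing MT_RW and setting MT_EXECUTE_NEVER keeps the
	 * request W^X-clean for the checks below. */
	if ((rc == 0) && ((attr & MT_RW) != 0U))
		rc = xlat_change_mem_attributes_ctx(my_ctx, va, PAGE_SIZE,
				(attr & ~MT_RW) | MT_EXECUTE_NEVER);
	return rc;
}
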
@@ -427,46 +427,46 @@ int change_mem_attributes(xlat_ctx_t *ctx,
assert(ctx->initialized);
unsigned long long virt_addr_space_size =
- (unsigned long long)ctx->va_max_address + 1;
- assert(virt_addr_space_size > 0);
+ (unsigned long long)ctx->va_max_address + 1U;
+ assert(virt_addr_space_size > 0U);
if (!IS_PAGE_ALIGNED(base_va)) {
- WARN("%s: Address %p is not aligned on a page boundary.\n",
- __func__, (void *)base_va);
+ WARN("%s: Address 0x%lx is not aligned on a page boundary.\n",
+ __func__, base_va);
return -EINVAL;
}
- if (size == 0) {
+ if (size == 0U) {
WARN("%s: Size is 0.\n", __func__);
return -EINVAL;
}
- if ((size % PAGE_SIZE) != 0) {
+ if ((size % PAGE_SIZE) != 0U) {
WARN("%s: Size 0x%zx is not a multiple of a page size.\n",
__func__, size);
return -EINVAL;
}
- if (((attr & MT_EXECUTE_NEVER) == 0) && ((attr & MT_RW) != 0)) {
+ if (((attr & MT_EXECUTE_NEVER) == 0U) && ((attr & MT_RW) != 0U)) {
WARN("%s: Mapping memory as read-write and executable not allowed.\n",
__func__);
return -EINVAL;
}
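
Before touching any descriptor, the function rejects unaligned or zero-length ranges, sizes that are not a whole number of pages, and attribute sets that request write and execute together. The same rules folded into one predicate; a sketch assuming the IS_PAGE_ALIGNED and PAGE_SIZE macros this file already uses:

static bool request_is_valid(uintptr_t base_va, size_t size, uint32_t attr)
{
	if (!IS_PAGE_ALIGNED(base_va) || (size == 0U) ||
	    ((size % PAGE_SIZE) != 0U))
		return false;

	/* W^X: writable implies execute-never. */
	return !(((attr & MT_EXECUTE_NEVER) == 0U) &&
		 ((attr & MT_RW) != 0U));
}
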
- int pages_count = size / PAGE_SIZE;
+ size_t pages_count = size / PAGE_SIZE;
- VERBOSE("Changing memory attributes of %i pages starting from address %p...\n",
- pages_count, (void *)base_va);
+ VERBOSE("Changing memory attributes of %zu pages starting from address 0x%lx...\n",
+ pages_count, base_va);
uintptr_t base_va_original = base_va;
/*
* Sanity checks.
*/
- for (int i = 0; i < pages_count; ++i) {
- uint64_t *entry;
- uint64_t desc;
- int level;
+ for (size_t i = 0U; i < pages_count; ++i) {
+ const uint64_t *entry;
+ uint64_t desc, attr_index;
+ unsigned int level;
entry = find_xlat_table_entry(base_va,
ctx->base_table,
@@ -474,7 +474,7 @@ int change_mem_attributes(xlat_ctx_t *ctx,
virt_addr_space_size,
&level);
if (entry == NULL) {
- WARN("Address %p is not mapped.\n", (void *)base_va);
+ WARN("Address 0x%lx is not mapped.\n", base_va);
return -EINVAL;
}
@@ -486,8 +486,8 @@ int change_mem_attributes(xlat_ctx_t *ctx,
*/
if (((desc & DESC_MASK) != PAGE_DESC) ||
(level != XLAT_TABLE_LEVEL_MAX)) {
- WARN("Address %p is not mapped at the right granularity.\n",
- (void *)base_va);
+ WARN("Address 0x%lx is not mapped at the right granularity.\n",
+ base_va);
WARN("Granularity is 0x%llx, should be 0x%x.\n",
(unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
return -EINVAL;
@@ -496,11 +496,11 @@ int change_mem_attributes(xlat_ctx_t *ctx,
/*
* If the region type is device, it shouldn't be executable.
*/
- int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+ attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
if (attr_index == ATTR_DEVICE_INDEX) {
- if ((attr & MT_EXECUTE_NEVER) == 0) {
- WARN("Setting device memory as executable at address %p.",
- (void *)base_va);
+ if ((attr & MT_EXECUTE_NEVER) == 0U) {
+ WARN("Setting device memory as executable at address 0x%lx.",
+ base_va);
return -EINVAL;
}
}
@@ -511,14 +511,14 @@ int change_mem_attributes(xlat_ctx_t *ctx,
/* Restore original value. */
base_va = base_va_original;
- for (int i = 0; i < pages_count; ++i) {
+ for (unsigned int i = 0U; i < pages_count; ++i) {
- uint32_t old_attr, new_attr;
- uint64_t *entry;
- int level;
- unsigned long long addr_pa;
+ uint32_t old_attr = 0U, new_attr;
+ uint64_t *entry = NULL;
+ unsigned int level = 0U;
+ unsigned long long addr_pa = 0ULL;
- get_mem_attributes_internal(ctx, base_va, &old_attr,
+ (void) xlat_get_mem_attributes_internal(ctx, base_va, &old_attr,
&entry, &addr_pa, &level);
/*
@@ -542,16 +542,20 @@ int change_mem_attributes(xlat_ctx_t *ctx,
* before writing the new descriptor.
*/
*entry = INVALID_DESC;
-
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ dccvac((uintptr_t)entry);
+#endif
/* Invalidate any cached copy of this mapping in the TLBs. */
- xlat_arch_tlbi_va_regime(base_va, ctx->xlat_regime);
+ xlat_arch_tlbi_va(base_va, ctx->xlat_regime);
/* Ensure completion of the invalidation. */
xlat_arch_tlbi_va_sync();
/* Write new descriptor */
*entry = xlat_desc(ctx, new_attr, addr_pa, level);
-
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ dccvac((uintptr_t)entry);
+#endif
base_va += PAGE_SIZE;
}
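
The cache maintenance added above completes a break-before-make sequence: invalidate the descriptor, clean it to the point of coherency (needed when neither HW_ASSISTED_COHERENCY nor WARMBOOT_ENABLE_DCACHE_EARLY is set, since the tables may then be walked with the data cache off), invalidate the TLB entry, then install and clean the new descriptor. The sequence collected in one sketch, using the dccvac(), xlat_arch_tlbi_va() and xlat_arch_tlbi_va_sync() helpers this patch relies on:

static void remap_page_sketch(uint64_t *entry, uint64_t new_desc,
			      uintptr_t va, int regime)
{
	*entry = INVALID_DESC;			/* 1. break: unmap */
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	dccvac((uintptr_t)entry);		/* visible to cache-off walkers */
#endif
	xlat_arch_tlbi_va(va, regime);		/* 2. drop stale TLB entries */
	xlat_arch_tlbi_va_sync();		/* 3. wait for completion */

	*entry = new_desc;			/* 4. make: install mapping */
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	dccvac((uintptr_t)entry);
#endif
}
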