Diffstat (limited to 'lib')
-rw-r--r--  lib/cpus/aarch64/cortex_a76.S              290
-rw-r--r--  lib/cpus/aarch64/cortex_ares.S             136
-rw-r--r--  lib/cpus/aarch64/cortex_ares_pubsub.c       26
-rw-r--r--  lib/cpus/aarch64/cpuamu.c                    8
-rw-r--r--  lib/cpus/cpu-ops.mk                         10
-rw-r--r--  lib/locks/bakery/bakery_lock_coherent.c      6
-rw-r--r--  lib/locks/bakery/bakery_lock_normal.c       14
-rw-r--r--  lib/optee/optee_utils.c                     48
-rw-r--r--  lib/psci/psci_private.h                     38
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_internal.c    7
10 files changed, 522 insertions(+), 61 deletions(-)
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
new file mode 100644
index 000000000..14705d7b2
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arm_arch_svc.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <context.h>
+#include <cortex_a76.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+#if !DYNAMIC_WORKAROUND_CVE_2018_3639
+#error Cortex A76 requires DYNAMIC_WORKAROUND_CVE_2018_3639=1
+#endif
+
+#define ESR_EL3_A64_SMC0 0x5e000000
+#define ESR_EL3_A32_SMC0 0x4e000000
+
+ /*
+ * This macro applies the mitigation for CVE-2018-3639.
+ * It implements a fast path where `SMCCC_ARCH_WORKAROUND_2`
+ * SMC calls from a lower EL running in AArch32 or AArch64
+ * will go through the fast path and return early.
+ *
+ * The macro saves x2-x3 to the context. In the fast path
+ * x0-x3 registers do not need to be restored as the calling
+ * context will have saved them.
+ */
+ .macro apply_cve_2018_3639_wa _is_sync_exception _esr_el3_val
+ stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+
+ .if \_is_sync_exception
+ /*
+ * Ensure SMC is coming from A64/A32 state on #0
+ * with W0 = SMCCC_ARCH_WORKAROUND_2
+ *
+ * This sequence evaluates as:
+ * (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
+ * allowing use of a single branch operation
+ */
+ orr w2, wzr, #SMCCC_ARCH_WORKAROUND_2
+ cmp x0, x2
+ mrs x3, esr_el3
+ mov_imm w2, \_esr_el3_val
+ ccmp w2, w3, #0, eq
+ /*
+ * Static predictor will predict a fall-through, optimizing
+ * the `SMCCC_ARCH_WORKAROUND_2` fast path.
+ */
+ bne 1f
+
+ /*
+ * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
+ * fast path.
+ */
+ cmp x1, xzr /* enable/disable check */
+
+ /*
+ * When the calling context wants mitigation disabled,
+ * we program the mitigation disable function in the
+ * CPU context, which gets invoked on subsequent exits from
+ * EL3 via the `el3_exit` function. Otherwise NULL is
+ * programmed in the CPU context, which results in the caller
+ * inheriting the EL3 mitigation state (enabled) on subsequent
+ * `el3_exit`.
+ */
+ mov x0, xzr
+ adr x1, cortex_a76_disable_wa_cve_2018_3639
+ csel x1, x1, x0, eq
+ str x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
+
+ mrs x2, CORTEX_A76_CPUACTLR2_EL1
+ orr x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+ bic x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+ csel x3, x3, x1, eq
+ msr CORTEX_A76_CPUACTLR2_EL1, x3
+ eret /* ERET implies ISB */
+ .endif
+1:
+ /*
+ * Always enable v4 mitigation during EL3 execution. This is not
+ * required for the fast path above because it does not perform any
+ * memory loads.
+ */
+ mrs x2, CORTEX_A76_CPUACTLR2_EL1
+ orr x2, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+ msr CORTEX_A76_CPUACTLR2_EL1, x2
+ isb
+
+ /*
+ * The caller may have passed arguments to EL3 via x2-x3.
+ * Restore these registers from the context before jumping to the
+ * main runtime vector table entry.
+ */
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ .endm
+
+vector_base cortex_a76_wa_cve_2018_3639_a76_vbar
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry cortex_a76_sync_exception_sp_el0
+ b sync_exception_sp_el0
+ check_vector_size cortex_a76_sync_exception_sp_el0
+
+vector_entry cortex_a76_irq_sp_el0
+ b irq_sp_el0
+ check_vector_size cortex_a76_irq_sp_el0
+
+vector_entry cortex_a76_fiq_sp_el0
+ b fiq_sp_el0
+ check_vector_size cortex_a76_fiq_sp_el0
+
+vector_entry cortex_a76_serror_sp_el0
+ b serror_sp_el0
+ check_vector_size cortex_a76_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry cortex_a76_sync_exception_sp_elx
+ b sync_exception_sp_elx
+ check_vector_size cortex_a76_sync_exception_sp_elx
+
+vector_entry cortex_a76_irq_sp_elx
+ b irq_sp_elx
+ check_vector_size cortex_a76_irq_sp_elx
+
+vector_entry cortex_a76_fiq_sp_elx
+ b fiq_sp_elx
+ check_vector_size cortex_a76_fiq_sp_elx
+
+vector_entry cortex_a76_serror_sp_elx
+ b serror_sp_elx
+ check_vector_size cortex_a76_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry cortex_a76_sync_exception_aarch64
+ apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
+ b sync_exception_aarch64
+ check_vector_size cortex_a76_sync_exception_aarch64
+
+vector_entry cortex_a76_irq_aarch64
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+ b irq_aarch64
+ check_vector_size cortex_a76_irq_aarch64
+
+vector_entry cortex_a76_fiq_aarch64
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+ b fiq_aarch64
+ check_vector_size cortex_a76_fiq_aarch64
+
+vector_entry cortex_a76_serror_aarch64
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+ b serror_aarch64
+ check_vector_size cortex_a76_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry cortex_a76_sync_exception_aarch32
+ apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
+ b sync_exception_aarch32
+ check_vector_size cortex_a76_sync_exception_aarch32
+
+vector_entry cortex_a76_irq_aarch32
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+ b irq_aarch32
+ check_vector_size cortex_a76_irq_aarch32
+
+vector_entry cortex_a76_fiq_aarch32
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+ b fiq_aarch32
+ check_vector_size cortex_a76_fiq_aarch32
+
+vector_entry cortex_a76_serror_aarch32
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+ b serror_aarch32
+ check_vector_size cortex_a76_serror_aarch32
+
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2018_3639
+
+func cortex_a76_disable_wa_cve_2018_3639
+ mrs x0, CORTEX_A76_CPUACTLR2_EL1
+ bic x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+ msr CORTEX_A76_CPUACTLR2_EL1, x0
+ isb
+ ret
+endfunc cortex_a76_disable_wa_cve_2018_3639
+
+func cortex_a76_reset_func
+#if WORKAROUND_CVE_2018_3639
+ mrs x0, CORTEX_A76_CPUACTLR2_EL1
+ orr x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+ msr CORTEX_A76_CPUACTLR2_EL1, x0
+ isb
+#endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2018_3639
+ /*
+ * The Cortex-A76 generic vectors are overwritten to use the vectors
+ * defined above. This is required in order to apply mitigation
+ * against CVE-2018-3639 on exception entry from lower ELs.
+ */
+ adr x0, cortex_a76_wa_cve_2018_3639_a76_vbar
+ msr vbar_el3, x0
+ isb
+#endif
+ ret
+endfunc cortex_a76_reset_func
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func cortex_a76_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_A76_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A76_CORE_PWRDN_EN_MASK
+ msr CORTEX_A76_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a76_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A76. Must follow AAPCS.
+ */
+func cortex_a76_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a76_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cortex_a76 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a76_regs, "aS"
+cortex_a76_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a76_cpu_reg_dump
+ adr x6, cortex_a76_regs
+ mrs x8, CORTEX_A76_CPUECTLR_EL1
+ ret
+endfunc cortex_a76_cpu_reg_dump
+
+declare_cpu_ops_wa cortex_a76, CORTEX_A76_MIDR, \
+ cortex_a76_reset_func, \
+ CPU_NO_EXTRA1_FUNC, \
+ cortex_a76_disable_wa_cve_2018_3639, \
+ cortex_a76_core_pwr_dwn
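
In C terms, the `SMCCC_ARCH_WORKAROUND_2` fast path installed by the vectors
above reduces to the sketch below. It is a model only: the accessor names,
the bit position and the SMC function ID are placeholders standing in for
the real register accessors and header definitions.

    #include <stdint.h>

    #define SMCCC_ARCH_WORKAROUND_2  0x80007fffU  /* illustrative ID only */
    #define DISABLE_LOAD_PASS_STORE  (1ULL << 16) /* hypothetical bit */

    extern uint64_t read_cpuactlr2(void);   /* assumed register accessors */
    extern void write_cpuactlr2(uint64_t v);

    /* Per-context slot consulted by el3_exit: non-NULL means "disable the
     * mitigation again before returning to this caller". */
    typedef void (*wa_disable_fn)(void);
    static wa_disable_fn ctx_cve_2018_3639_disable;

    static void disable_wa(void)
    {
        write_cpuactlr2(read_cpuactlr2() & ~DISABLE_LOAD_PASS_STORE);
    }

    /* w0 selects the SMC; x1 == 0 requests the mitigation disabled,
     * anything else requests it enabled. */
    void workaround_2_fast_path(uint32_t w0, uint64_t x1)
    {
        if (w0 != SMCCC_ARCH_WORKAROUND_2)
            return; /* slow path: fall through to the runtime vectors */

        ctx_cve_2018_3639_disable = (x1 == 0) ? disable_wa : 0;

        uint64_t v = read_cpuactlr2();
        v = (x1 == 0) ? (v & ~DISABLE_LOAD_PASS_STORE)  /* mitigation off */
                      : (v | DISABLE_LOAD_PASS_STORE);  /* mitigation on  */
        write_cpuactlr2(v);
        /* eret back to the lower EL */
    }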
diff --git a/lib/cpus/aarch64/cortex_ares.S b/lib/cpus/aarch64/cortex_ares.S
new file mode 100644
index 000000000..942b6f705
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_ares.S
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cortex_ares.h>
+#include <cpuamu.h>
+#include <cpu_macros.S>
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex-Ares Erratum 1043202.
+ * This applies to revisions r0p0 and r1p0 of Cortex-Ares.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_ares_1043202_wa
+ /* Compare x0 against revision r1p0 */
+ mov x17, x30
+ bl check_errata_1043202
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
+ ldr x0, =0x0
+ msr CPUPSELR_EL3, x0
+ ldr x0, =0xF3BF8F2F
+ msr CPUPOR_EL3, x0
+ ldr x0, =0xFFFFFFFF
+ msr CPUPMR_EL3, x0
+ ldr x0, =0x800200071
+ msr CPUPCR_EL3, x0
+ isb
+1:
+ ret x17
+endfunc errata_ares_1043202_wa
+
+func check_errata_1043202
+ /* Applies to r0p0 and r1p0 */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1043202
+
+func cortex_ares_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_ARES_1043202
+ mov x0, x18
+ bl errata_ares_1043202_wa
+#endif
+
+#if ENABLE_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ mrs x0, actlr_el3
+ orr x0, x0, #CORTEX_ARES_ACTLR_AMEN_BIT
+ msr actlr_el3, x0
+ isb
+
+ /* Make sure accesses from EL0/EL1 are not trapped to EL2 */
+ mrs x0, actlr_el2
+ orr x0, x0, #CORTEX_ARES_ACTLR_AMEN_BIT
+ msr actlr_el2, x0
+ isb
+
+ /* Enable group0 counters */
+ mov x0, #CORTEX_ARES_AMU_GROUP0_MASK
+ msr CPUAMCNTENSET_EL0, x0
+ isb
+#endif
+ ret x19
+endfunc cortex_ares_reset_func
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func cortex_ares_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_ARES_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_ARES_CORE_PWRDN_EN_MASK
+ msr CORTEX_ARES_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_ares_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-Ares. Must follow AAPCS.
+ */
+func cortex_ares_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_ARES_1043202, cortex_ares, 1043202
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_ares_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cortex_ares specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_ares_regs, "aS"
+cortex_ares_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_ares_cpu_reg_dump
+ adr x6, cortex_ares_regs
+ mrs x8, CORTEX_ARES_CPUECTLR_EL1
+ ret
+endfunc cortex_ares_cpu_reg_dump
+
+declare_cpu_ops cortex_ares, CORTEX_ARES_MIDR, \
+ cortex_ares_reset_func, \
+ cortex_ares_core_pwr_dwn
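
The check function above encodes "applies to r0p0 and r1p0" as a single
less-than-or-equal comparison on the packed variant/revision value returned
by cpu_get_rev_var (variant in bits [7:4], revision in bits [3:0]). A
minimal C model of that comparison, with the constant names taken as
assumptions:

    #include <stdint.h>

    #define ERRATA_APPLIES      1
    #define ERRATA_NOT_APPLIES  0

    /* rev_var packs variant in bits [7:4], revision in bits [3:0]. */
    static int cpu_rev_var_ls(uint32_t rev_var, uint32_t max)
    {
        return (rev_var <= max) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES;
    }

    int check_errata_1043202(uint32_t rev_var)
    {
        /* r1p0 packs to 0x10; r0p0 (0x00) and r1p0 both pass. */
        return cpu_rev_var_ls(rev_var, 0x10);
    }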
diff --git a/lib/cpus/aarch64/cortex_ares_pubsub.c b/lib/cpus/aarch64/cortex_ares_pubsub.c
new file mode 100644
index 000000000..c7d850a00
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_ares_pubsub.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <cortex_ares.h>
+#include <cpuamu.h>
+#include <pubsub_events.h>
+
+static void *cortex_ares_context_save(const void *arg)
+{
+ if (midr_match(CORTEX_ARES_MIDR) != 0)
+ cpuamu_context_save(CORTEX_ARES_AMU_NR_COUNTERS);
+ return 0;
+}
+
+static void *cortex_ares_context_restore(const void *arg)
+{
+ if (midr_match(CORTEX_ARES_MIDR) != 0)
+ cpuamu_context_restore(CORTEX_ARES_AMU_NR_COUNTERS);
+ return 0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, cortex_ares_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, cortex_ares_context_restore);
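
SUBSCRIBE_TO_EVENT registers each handler against a named event; at the
publish site every registered handler is invoked in turn with the event
argument. A simplified runtime model of that mechanism (the real framework
collects subscribers in per-event linker sections, so nothing is registered
at runtime):

    typedef void *(*pubsub_cb)(const void *arg);

    #define MAX_SUBSCRIBERS 8U
    static pubsub_cb suspend_start_subs[MAX_SUBSCRIBERS];
    static unsigned int suspend_start_count;

    void publish_suspend_start(const void *arg)
    {
        /* Call every subscriber; return values are event-specific. */
        for (unsigned int i = 0U; i < suspend_start_count; i++)
            (void)suspend_start_subs[i](arg);
    }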
diff --git a/lib/cpus/aarch64/cpuamu.c b/lib/cpus/aarch64/cpuamu.c
index b9bad8604..5ad5bf839 100644
--- a/lib/cpus/aarch64/cpuamu.c
+++ b/lib/cpus/aarch64/cpuamu.c
@@ -10,12 +10,12 @@
#define CPUAMU_NR_COUNTERS 5U
-struct amu_ctx {
+struct cpuamu_ctx {
uint64_t cnts[CPUAMU_NR_COUNTERS];
unsigned int mask;
};
-static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
+static struct cpuamu_ctx cpuamu_ctxs[PLATFORM_CORE_COUNT];
int midr_match(unsigned int cpu_midr)
{
@@ -29,7 +29,7 @@ int midr_match(unsigned int cpu_midr)
void cpuamu_context_save(unsigned int nr_counters)
{
- struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ struct cpuamu_ctx *ctx = &cpuamu_ctxs[plat_my_core_pos()];
unsigned int i;
assert(nr_counters <= CPUAMU_NR_COUNTERS);
@@ -48,7 +48,7 @@ void cpuamu_context_save(unsigned int nr_counters)
void cpuamu_context_restore(unsigned int nr_counters)
{
- struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ struct cpuamu_ctx *ctx = &cpuamu_ctxs[plat_my_core_pos()];
unsigned int i;
assert(nr_counters <= CPUAMU_NR_COUNTERS);
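
The renamed context type keeps the usual per-core save/restore shape: one
slot per core, indexed by the caller's core position. A condensed,
self-contained sketch of the save half, with cpuamu_cnt_read treated as an
assumed helper:

    #include <assert.h>
    #include <stdint.h>

    #define PLATFORM_CORE_COUNT 8U  /* platform-defined in a real build */
    #define CPUAMU_NR_COUNTERS  5U

    extern unsigned int plat_my_core_pos(void);
    extern uint64_t cpuamu_cnt_read(unsigned int idx); /* assumed helper */

    struct cpuamu_ctx {
        uint64_t cnts[CPUAMU_NR_COUNTERS];
        unsigned int mask;
    };

    static struct cpuamu_ctx cpuamu_ctxs[PLATFORM_CORE_COUNT];

    void cpuamu_context_save(unsigned int nr_counters)
    {
        struct cpuamu_ctx *ctx = &cpuamu_ctxs[plat_my_core_pos()];

        assert(nr_counters <= CPUAMU_NR_COUNTERS);
        for (unsigned int i = 0U; i < nr_counters; i++)
            ctx->cnts[i] = cpuamu_cnt_read(i);
    }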
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 434c13ea0..456e3e524 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -119,6 +119,10 @@ ERRATA_A57_859972 ?=0
# only to revision <= r0p3 of the Cortex A72 cpu.
ERRATA_A72_859971 ?=0
+# Flag to apply T32 CLREX workaround during reset. This erratum applies
+# only to r0p0 and r1p0 of the Ares cpu.
+ERRATA_ARES_1043202 ?=1
+
# Process ERRATA_A53_826319 flag
$(eval $(call assert_boolean,ERRATA_A53_826319))
$(eval $(call add_define,ERRATA_A53_826319))
@@ -179,6 +183,10 @@ $(eval $(call add_define,ERRATA_A57_859972))
$(eval $(call assert_boolean,ERRATA_A72_859971))
$(eval $(call add_define,ERRATA_A72_859971))
+# Process ERRATA_ARES_1043202 flag
+$(eval $(call assert_boolean,ERRATA_ARES_1043202))
+$(eval $(call add_define,ERRATA_ARES_1043202))
+
# Errata build flags
ifneq (${ERRATA_A53_843419},0)
TF_LDFLAGS_aarch64 += --fix-cortex-a53-843419
diff --git a/lib/locks/bakery/bakery_lock_coherent.c b/lib/locks/bakery/bakery_lock_coherent.c
index a857e0355..788ba9818 100644
--- a/lib/locks/bakery/bakery_lock_coherent.c
+++ b/lib/locks/bakery/bakery_lock_coherent.c
@@ -34,9 +34,9 @@
* accesses regardless of status of address translation.
*/
-#define assert_bakery_entry_valid(entry, bakery) do { \
- assert(bakery); \
- assert(entry < BAKERY_LOCK_MAX_CPUS); \
+#define assert_bakery_entry_valid(_entry, _bakery) do { \
+ assert(_bakery); \
+ assert(_entry < BAKERY_LOCK_MAX_CPUS); \
} while (0)
/* Obtain a ticket for a given CPU */
diff --git a/lib/locks/bakery/bakery_lock_normal.c b/lib/locks/bakery/bakery_lock_normal.c
index 37697f521..630226ae2 100644
--- a/lib/locks/bakery/bakery_lock_normal.c
+++ b/lib/locks/bakery/bakery_lock_normal.c
@@ -53,18 +53,18 @@ CASSERT((PLAT_PERCPU_BAKERY_LOCK_SIZE & (CACHE_WRITEBACK_GRANULE - 1)) == 0, \
IMPORT_SYM(uintptr_t, __PERCPU_BAKERY_LOCK_SIZE__, PERCPU_BAKERY_LOCK_SIZE);
#endif
-#define get_bakery_info(cpu_ix, lock) \
- (bakery_info_t *)((uintptr_t)lock + cpu_ix * PERCPU_BAKERY_LOCK_SIZE)
+#define get_bakery_info(_cpu_ix, _lock) \
+ (bakery_info_t *)((uintptr_t)_lock + _cpu_ix * PERCPU_BAKERY_LOCK_SIZE)
-#define write_cache_op(addr, cached) \
+#define write_cache_op(_addr, _cached) \
do { \
- (cached ? dccvac((uintptr_t)addr) :\
- dcivac((uintptr_t)addr));\
+ (_cached ? dccvac((uintptr_t)_addr) :\
+ dcivac((uintptr_t)_addr));\
dsbish();\
} while (0)
-#define read_cache_op(addr, cached) if (cached) \
- dccivac((uintptr_t)addr)
+#define read_cache_op(_addr, _cached) if (_cached) \
+ dccivac((uintptr_t)_addr)
/* Helper function to check if the lock is acquired */
static inline int is_lock_acquired(const bakery_info_t *my_bakery_info,
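
The underscore prefix added to the macro parameters here (and in
psci_private.h below) guards against token capture: a parameter whose name
matches an identifier used inside the macro body is substituted everywhere,
including where the body meant the other identifier. A contrived C
illustration of the failure mode:

    struct pd_node {
        unsigned int lock_index;
    };

    /* Broken: lock_init_bad(n, 3) expands to ((n)->3 = (3)), because the
     * parameter token `lock_index` also replaces the member name after
     * `->`. */
    #define lock_init_bad(node, lock_index) \
        ((node)->lock_index = (lock_index))

    /* Safe: prefixed parameters cannot collide with member names. */
    #define lock_init_ok(_node, _idx) \
        ((_node)->lock_index = (_idx))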
diff --git a/lib/optee/optee_utils.c b/lib/optee/optee_utils.c
index ac51265ed..ecf7cc057 100644
--- a/lib/optee/optee_utils.c
+++ b/lib/optee/optee_utils.c
@@ -43,7 +43,7 @@ typedef struct optee_header {
uint8_t arch;
uint16_t flags;
uint32_t nb_images;
- optee_image_t optee_image[];
+ optee_image_t optee_image_list[];
} optee_header_t;
/*******************************************************************************
@@ -51,11 +51,11 @@ typedef struct optee_header {
* Return 1 if valid
* Return 0 if invalid
******************************************************************************/
-static inline int tee_validate_header(optee_header_t *optee_header)
+static inline int tee_validate_header(optee_header_t *header)
{
- if ((optee_header->magic == TEE_MAGIC_NUM_OPTEE) &&
- (optee_header->version == 2) &&
- (optee_header->nb_images <= OPTEE_MAX_IMAGE_NUM)) {
+ if ((header->magic == TEE_MAGIC_NUM_OPTEE) &&
+ (header->version == 2) &&
+ (header->nb_images <= OPTEE_MAX_IMAGE_NUM)) {
return 1;
}
@@ -68,14 +68,14 @@ static inline int tee_validate_header(optee_header_t *optee_header)
* Return 0 on success or a negative error code otherwise.
******************************************************************************/
static int parse_optee_image(image_info_t *image_info,
- optee_image_t *optee_image)
+ optee_image_t *image)
{
uintptr_t init_load_addr, free_end, requested_end;
size_t init_size;
- init_load_addr = ((uint64_t)optee_image->load_addr_hi << 32) |
- optee_image->load_addr_lo;
- init_size = optee_image->size;
+ init_load_addr = ((uint64_t)image->load_addr_hi << 32) |
+ image->load_addr_lo;
+ init_size = image->size;
/*
* -1 indicates loader decided address; take our pre-mapped area
@@ -133,21 +133,21 @@ int parse_optee_header(entry_point_info_t *header_ep,
image_info_t *paged_image_info)
{
- optee_header_t *optee_header;
+ optee_header_t *header;
int num, ret;
assert(header_ep);
- optee_header = (optee_header_t *)header_ep->pc;
- assert(optee_header);
+ header = (optee_header_t *)header_ep->pc;
+ assert(header);
/* Print the OPTEE header information */
INFO("OPTEE ep=0x%x\n", (unsigned int)header_ep->pc);
INFO("OPTEE header info:\n");
- INFO(" magic=0x%x\n", optee_header->magic);
- INFO(" version=0x%x\n", optee_header->version);
- INFO(" arch=0x%x\n", optee_header->arch);
- INFO(" flags=0x%x\n", optee_header->flags);
- INFO(" nb_images=0x%x\n", optee_header->nb_images);
+ INFO(" magic=0x%x\n", header->magic);
+ INFO(" version=0x%x\n", header->version);
+ INFO(" arch=0x%x\n", header->arch);
+ INFO(" flags=0x%x\n", header->flags);
+ INFO(" nb_images=0x%x\n", header->nb_images);
/*
* OPTEE image has 3 types:
@@ -166,7 +166,7 @@ int parse_optee_header(entry_point_info_t *header_ep,
* pager and pageable. Remove skip attr for BL32_EXTRA1_IMAGE_ID
* and BL32_EXTRA2_IMAGE_ID to load pager and paged bin.
*/
- if (!tee_validate_header(optee_header)) {
+ if (!tee_validate_header(header)) {
INFO("Invalid OPTEE header, set legacy mode.\n");
#ifdef AARCH64
header_ep->args.arg0 = MODE_RW_64;
@@ -177,15 +177,15 @@ int parse_optee_header(entry_point_info_t *header_ep,
}
/* Parse OPTEE image */
- for (num = 0; num < optee_header->nb_images; num++) {
- if (optee_header->optee_image[num].image_id ==
+ for (num = 0; num < header->nb_images; num++) {
+ if (header->optee_image_list[num].image_id ==
OPTEE_PAGER_IMAGE_ID) {
ret = parse_optee_image(pager_image_info,
- &optee_header->optee_image[num]);
- } else if (optee_header->optee_image[num].image_id ==
+ &header->optee_image_list[num]);
+ } else if (header->optee_image_list[num].image_id ==
OPTEE_PAGED_IMAGE_ID) {
ret = parse_optee_image(paged_image_info,
- &optee_header->optee_image[num]);
+ &header->optee_image_list[num]);
} else {
ERROR("Parse optee image failed.\n");
return -1;
@@ -211,7 +211,7 @@ int parse_optee_header(entry_point_info_t *header_ep,
header_ep->args.arg2 = paged_image_info->image_size;
/* Set OPTEE runtime arch - aarch32/aarch64 */
- if (optee_header->arch == 0) {
+ if (header->arch == 0) {
header_ep->args.arg0 = MODE_RW_32;
} else {
#ifdef AARCH64
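
The header stores each image's 64-bit load address split across two 32-bit
fields; parse_optee_image recombines them before range-checking the image.
That recombination in isolation (field layout assumed from the usage above):

    #include <stdint.h>

    typedef struct optee_image {
        uint32_t load_addr_hi;
        uint32_t load_addr_lo;
        uint32_t image_id;
        uint32_t size;
    } optee_image_t;

    uint64_t image_load_addr(const optee_image_t *image)
    {
        /* The cast widens before the shift so the high half is not
         * lost in 32-bit arithmetic. */
        return ((uint64_t)image->load_addr_hi << 32) |
                image->load_addr_lo;
    }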
diff --git a/lib/psci/psci_private.h b/lib/psci/psci_private.h
index c58f32969..d452e2ae0 100644
--- a/lib/psci/psci_private.h
+++ b/lib/psci/psci_private.h
@@ -65,8 +65,8 @@
#endif
-#define psci_lock_init(non_cpu_pd_node, idx) \
- ((non_cpu_pd_node)[(idx)].lock_index = (idx))
+#define psci_lock_init(_non_cpu_pd_node, _idx) \
+ ((_non_cpu_pd_node)[(_idx)].lock_index = (_idx))
/*
* The PSCI capability which are provided by the generic code but does not
@@ -96,35 +96,35 @@
/*
* Helper macros to get/set the fields of PSCI per-cpu data.
*/
-#define psci_set_aff_info_state(aff_state) \
- set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state)
+#define psci_set_aff_info_state(_aff_state) \
+ set_cpu_data(psci_svc_cpu_data.aff_info_state, _aff_state)
#define psci_get_aff_info_state() \
get_cpu_data(psci_svc_cpu_data.aff_info_state)
-#define psci_get_aff_info_state_by_idx(idx) \
- get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state)
-#define psci_set_aff_info_state_by_idx(idx, aff_state) \
- set_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state,\
- aff_state)
+#define psci_get_aff_info_state_by_idx(_idx) \
+ get_cpu_data_by_index(_idx, psci_svc_cpu_data.aff_info_state)
+#define psci_set_aff_info_state_by_idx(_idx, _aff_state) \
+ set_cpu_data_by_index(_idx, psci_svc_cpu_data.aff_info_state,\
+ _aff_state)
#define psci_get_suspend_pwrlvl() \
get_cpu_data(psci_svc_cpu_data.target_pwrlvl)
-#define psci_set_suspend_pwrlvl(target_lvl) \
- set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl)
-#define psci_set_cpu_local_state(state) \
- set_cpu_data(psci_svc_cpu_data.local_state, state)
+#define psci_set_suspend_pwrlvl(_target_lvl) \
+ set_cpu_data(psci_svc_cpu_data.target_pwrlvl, _target_lvl)
+#define psci_set_cpu_local_state(_state) \
+ set_cpu_data(psci_svc_cpu_data.local_state, _state)
#define psci_get_cpu_local_state() \
get_cpu_data(psci_svc_cpu_data.local_state)
-#define psci_get_cpu_local_state_by_idx(idx) \
- get_cpu_data_by_index(idx, psci_svc_cpu_data.local_state)
+#define psci_get_cpu_local_state_by_idx(_idx) \
+ get_cpu_data_by_index(_idx, psci_svc_cpu_data.local_state)
/*
* Helper macros for the CPU level spinlocks
*/
-#define psci_spin_lock_cpu(idx) spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock)
-#define psci_spin_unlock_cpu(idx) spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock)
+#define psci_spin_lock_cpu(_idx) spin_lock(&psci_cpu_pd_nodes[_idx].cpu_lock)
+#define psci_spin_unlock_cpu(_idx) spin_unlock(&psci_cpu_pd_nodes[_idx].cpu_lock)
/* Helper macro to identify a CPU standby request in PSCI Suspend call */
-#define is_cpu_standby_req(is_power_down_state, retn_lvl) \
- (((!(is_power_down_state)) && ((retn_lvl) == 0)) ? 1 : 0)
+#define is_cpu_standby_req(_is_power_down_state, _retn_lvl) \
+ (((!(_is_power_down_state)) && ((_retn_lvl) == 0)) ? 1 : 0)
/*******************************************************************************
* The following two data structures implement the power domain tree. The tree
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
index 8be6d942d..31d3365b9 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -735,7 +735,7 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
- mmap_region_t *mm_cursor = ctx->mmap;
+ mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
mmap_region_t *mm_last;
unsigned long long end_pa = mm->base_pa + mm->size - 1;
@@ -802,9 +802,10 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
* that there is free space.
*/
assert(mm_last->size == 0U);
-
+
/* Make room for new region by moving other regions up by one place */
- memmove(mm_cursor + 1, mm_cursor,
+ mm_destination = mm_cursor + 1;
+ memmove(mm_destination, mm_cursor,
(uintptr_t)mm_last - (uintptr_t)mm_cursor);
/*
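
Hoisting the destination pointer out of the memmove call does not change
behaviour; the operation is still "shift the tail of the regions array up
one slot to open a gap at the cursor". Reduced to its essentials, under the
assumption that mm_last points at the final (empty) slot:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    typedef struct mmap_region {
        unsigned long long base_pa;
        uintptr_t base_va;
        size_t size;
    } mmap_region_t;

    void make_room(mmap_region_t *mm_cursor, mmap_region_t *mm_last)
    {
        mmap_region_t *mm_destination = mm_cursor + 1;

        /* Move the bytes between the cursor and the last (empty) slot
         * up by one region. */
        memmove(mm_destination, mm_cursor,
                (uintptr_t)mm_last - (uintptr_t)mm_cursor);
    }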