Diffstat (limited to 'lib')
-rw-r--r--  lib/aarch32/misc_helpers.S | 11
-rw-r--r--  lib/aarch64/cache_helpers.S | 6
-rw-r--r--  lib/aarch64/misc_helpers.S | 52
-rw-r--r--  lib/coreboot/coreboot_table.c | 36
-rw-r--r--  lib/cpus/aarch64/cortex_a53.S | 17
-rw-r--r--  lib/cpus/aarch64/cortex_a55.S | 17
-rw-r--r--  lib/cpus/aarch64/cortex_a57.S | 29
-rw-r--r--  lib/cpus/aarch64/cortex_a72.S | 17
-rw-r--r--  lib/cpus/aarch64/cortex_a76.S | 188
-rw-r--r--  lib/cpus/aarch64/cortex_a77.S | 133
-rw-r--r--  lib/cpus/aarch64/cortex_a78.S | 237
-rw-r--r--  lib/cpus/aarch64/cortex_a78_ae.S (renamed from lib/cpus/aarch64/cortex_hercules_ae.S) | 60
-rw-r--r--  lib/cpus/aarch64/cortex_hercules.S | 143
-rw-r--r--  lib/cpus/aarch64/cortex_klein.S | 77
-rw-r--r--  lib/cpus/aarch64/cortex_matterhorn.S | 77
-rw-r--r--  lib/cpus/aarch64/cpu_helpers.S | 74
-rw-r--r--  lib/cpus/aarch64/denver.S | 87
-rw-r--r--  lib/cpus/aarch64/dsu_helpers.S | 38
-rw-r--r--  lib/cpus/aarch64/generic.S | 89
-rw-r--r--  lib/cpus/aarch64/neoverse_n1.S | 107
-rw-r--r--  lib/cpus/aarch64/neoverse_n2.S | 109
-rw-r--r--  lib/cpus/aarch64/neoverse_n_common.S | 26
-rw-r--r--  lib/cpus/aarch64/neoverse_v1.S (renamed from lib/cpus/aarch64/neoverse_zeus.S) | 48
-rw-r--r--  lib/cpus/aarch64/rainier.S | 130
-rw-r--r--  lib/cpus/cpu-ops.mk | 150
-rw-r--r--  lib/cpus/errata_report.c | 3
-rw-r--r--  lib/debugfs/dev.c | 6
-rw-r--r--  lib/debugfs/devfip.c | 20
-rw-r--r--  lib/el3_runtime/aarch32/context_mgmt.c | 4
-rw-r--r--  lib/el3_runtime/aarch64/context.S | 481
-rw-r--r--  lib/el3_runtime/aarch64/context_mgmt.c | 135
-rw-r--r--  lib/el3_runtime/aarch64/cpu_data.S | 5
-rw-r--r--  lib/extensions/amu/aarch32/amu.c | 178
-rw-r--r--  lib/extensions/amu/aarch64/amu.c | 167
-rw-r--r--  lib/extensions/mpam/mpam.c | 21
-rw-r--r--  lib/extensions/mtpmu/aarch32/mtpmu.S | 105
-rw-r--r--  lib/extensions/mtpmu/aarch64/mtpmu.S | 96
-rw-r--r--  lib/extensions/ras/ras_common.c | 42
-rw-r--r--  lib/extensions/spe/spe.c | 2
-rw-r--r--  lib/fconf/fconf.c | 78
-rw-r--r--  lib/fconf/fconf.mk | 12
-rw-r--r--  lib/fconf/fconf_cot_getter.c | 497
-rw-r--r--  lib/fconf/fconf_dyn_cfg_getter.c | 130
-rw-r--r--  lib/fconf/fconf_tbbr_getter.c | 97
-rw-r--r--  lib/libc/aarch32/memset.S | 74
-rw-r--r--  lib/libc/aarch64/memset.S | 64
-rw-r--r--  lib/libc/assert.c | 8
-rw-r--r--  lib/libc/libc.mk | 10
-rw-r--r--  lib/libc/libc_asm.mk | 44
-rw-r--r--  lib/libc/memset.c | 34
-rw-r--r--  lib/libc/printf.c | 5
-rw-r--r--  lib/libc/snprintf.c | 196
-rw-r--r--  lib/libc/strlcat.c | 56
-rw-r--r--  lib/libc/strtok.c | 83
-rw-r--r--  lib/libc/strtol.c | 133
-rw-r--r--  lib/libc/strtoll.c | 134
-rw-r--r--  lib/libc/strtoul.c | 112
-rw-r--r--  lib/libc/strtoull.c | 112
-rw-r--r--  lib/libfdt/libfdt.mk | 2
-rw-r--r--  lib/locks/bakery/bakery_lock_normal.c | 24
-rw-r--r--  lib/optee/optee_utils.c | 5
-rw-r--r--  lib/psci/psci_common.c | 57
-rw-r--r--  lib/psci/psci_private.h | 9
-rw-r--r--  lib/psci/psci_setup.c | 14
-rw-r--r--  lib/psci/psci_system_off.c | 8
-rw-r--r--  lib/romlib/Makefile | 8
-rw-r--r--  lib/semihosting/semihosting.c | 77
-rw-r--r--  lib/utils/mem_region.c | 41
-rw-r--r--  lib/xlat_tables/aarch32/nonlpae_tables.c | 193
-rw-r--r--  lib/xlat_tables_v2/aarch64/xlat_tables_arch.c | 4
-rw-r--r--  lib/xlat_tables_v2/ro_xlat_tables.mk | 37
-rw-r--r--  lib/xlat_tables_v2/xlat_tables.mk | 6
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_context.c | 91
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_core.c | 11
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_private.h | 5
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_utils.c | 8
76 files changed, 4956 insertions(+), 746 deletions(-)
diff --git a/lib/aarch32/misc_helpers.S b/lib/aarch32/misc_helpers.S
index 6d2ec1c52..e9734ac2c 100644
--- a/lib/aarch32/misc_helpers.S
+++ b/lib/aarch32/misc_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -149,17 +149,16 @@ m_loop4:
blo m_loop1
ldr r3, [r1], #4
str r3, [r0], #4
- sub r2, r2, #4
- b m_loop4
+ subs r2, r2, #4
+ bne m_loop4
+ bx lr
+
/* copy byte per byte */
m_loop1:
- cmp r2,#0
- beq m_end
ldrb r3, [r1], #1
strb r3, [r0], #1
subs r2, r2, #1
bne m_loop1
-m_end:
bx lr
endfunc memcpy4
diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S
index 9ef8ca79b..de9c8e4f0 100644
--- a/lib/aarch64/cache_helpers.S
+++ b/lib/aarch64/cache_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -30,7 +30,7 @@ loop_\op:
dc \op, x0
add x0, x0, x2
cmp x0, x1
- b.lo loop_\op
+ b.lo loop_\op
dsb sy
exit_loop_\op:
ret
@@ -140,7 +140,7 @@ loop3_\_op:
level_done:
add x10, x10, #2 // increment cache number
cmp x3, x10
- b.hi loop1
+ b.hi loop1
msr csselr_el1, xzr // select cache level 0 in csselr
dsb sy // barrier to complete final cache operation
isb
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
index d298f2b66..b6f6c9d88 100644
--- a/lib/aarch64/misc_helpers.S
+++ b/lib/aarch64/misc_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -486,17 +486,22 @@ endfunc enable_vfp
 * arguments (which is usually the limits of the relocatable BL image).
* x0 - the start of the fixup region
* x1 - the limit of the fixup region
- * These addresses have to be page (4KB aligned).
+ * These addresses have to be 4KB page aligned.
* ---------------------------------------------------------------------------
*/
+
+/* Relocation codes */
+#define R_AARCH64_NONE 0
+#define R_AARCH64_RELATIVE 1027
+
func fixup_gdt_reloc
mov x6, x0
mov x7, x1
- /* Test if the limits are 4K aligned */
#if ENABLE_ASSERTIONS
+ /* Test if the limits are 4KB aligned */
orr x0, x0, x1
- tst x0, #(PAGE_SIZE - 1)
+ tst x0, #(PAGE_SIZE_MASK)
ASM_ASSERT(eq)
#endif
/*
@@ -504,8 +509,9 @@ func fixup_gdt_reloc
* Assume that this function is called within a page at the start of
* fixup region.
*/
- and x2, x30, #~(PAGE_SIZE - 1)
- sub x0, x2, x6 /* Diff(S) = Current Address - Compiled Address */
+ and x2, x30, #~(PAGE_SIZE_MASK)
+ subs x0, x2, x6 /* Diff(S) = Current Address - Compiled Address */
+ b.eq 3f /* Diff(S) = 0. No relocation needed */
adrp x1, __GOT_START__
add x1, x1, :lo12:__GOT_START__
@@ -518,31 +524,32 @@ func fixup_gdt_reloc
* The new_addr is the address currently the binary is executing from
* and old_addr is the address at compile time.
*/
-1:
- ldr x3, [x1]
+1: ldr x3, [x1]
+
/* Skip adding offset if address is < lower limit */
cmp x3, x6
b.lo 2f
+
/* Skip adding offset if address is >= upper limit */
cmp x3, x7
- b.ge 2f
+ b.hs 2f
add x3, x3, x0
str x3, [x1]
-2:
- add x1, x1, #8
+
+2: add x1, x1, #8
cmp x1, x2
b.lo 1b
/* Starting dynamic relocations. Use adrp/adr to get RELA_START and END */
- adrp x1, __RELA_START__
+3: adrp x1, __RELA_START__
add x1, x1, :lo12:__RELA_START__
adrp x2, __RELA_END__
add x2, x2, :lo12:__RELA_END__
+
/*
* According to ELF-64 specification, the RELA data structure is as
* follows:
- * typedef struct
- * {
+ * typedef struct {
* Elf64_Addr r_offset;
* Elf64_Xword r_info;
* Elf64_Sxword r_addend;
@@ -550,16 +557,19 @@ func fixup_gdt_reloc
*
* r_offset is address of reference
* r_info is symbol index and type of relocation (in this case
- * 0x403 which corresponds to R_AARCH64_RELATIVE).
+ * code 1027 which corresponds to R_AARCH64_RELATIVE).
* r_addend is constant part of expression.
*
* Size of Elf64_Rela structure is 24 bytes.
*/
-1:
- /* Assert that the relocation type is R_AARCH64_RELATIVE */
+
+ /* Skip R_AARCH64_NONE entry with code 0 */
+1: ldr x3, [x1, #8]
+ cbz x3, 2f
+
#if ENABLE_ASSERTIONS
- ldr x3, [x1, #8]
- cmp x3, #0x403
+ /* Assert that the relocation type is R_AARCH64_RELATIVE */
+ cmp x3, #R_AARCH64_RELATIVE
ASM_ASSERT(eq)
#endif
ldr x3, [x1] /* r_offset */
@@ -569,9 +579,10 @@ func fixup_gdt_reloc
/* Skip adding offset if r_addend is < lower limit */
cmp x4, x6
b.lo 2f
+
/* Skip adding offset if r_addend entry is >= upper limit */
cmp x4, x7
- b.ge 2f
+ b.hs 2f
add x4, x0, x4 /* Diff(S) + r_addend */
str x4, [x3]
@@ -579,6 +590,5 @@ func fixup_gdt_reloc
2: add x1, x1, #24
cmp x1, x2
b.lo 1b
-
ret
endfunc fixup_gdt_reloc
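
For reference, the relocation pass that fixup_gdt_reloc implements above can be modelled in C roughly as follows. This is an illustrative sketch only: the name fixup_rela_pass and its parameters are invented for the example, and the real code walks the linker-defined __RELA_START__/__RELA_END__ region in assembly.

	#include <stdint.h>

	typedef struct {
		uint64_t r_offset;	/* address of the reference */
		uint64_t r_info;	/* relocation type: 1027 = R_AARCH64_RELATIVE */
		int64_t  r_addend;	/* constant part of the expression */
	} Elf64_Rela;			/* 24 bytes, as the comment above notes */

	/*
	 * Add Diff(S) to every R_AARCH64_RELATIVE entry whose addend falls
	 * inside the fixup region [lower, upper); R_AARCH64_NONE entries
	 * (r_info == 0) are skipped, matching the new cbz check.
	 */
	static void fixup_rela_pass(Elf64_Rela *start, Elf64_Rela *end,
				    uint64_t diff, uint64_t lower, uint64_t upper)
	{
		for (Elf64_Rela *r = start; r < end; r++) {
			if (r->r_info == 0U)	/* R_AARCH64_NONE */
				continue;
			uint64_t addend = (uint64_t)r->r_addend;
			if ((addend >= lower) && (addend < upper))
				*(uint64_t *)(uintptr_t)(r->r_offset + diff) =
					addend + diff;
		}
	}
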
diff --git a/lib/coreboot/coreboot_table.c b/lib/coreboot/coreboot_table.c
index 63bdc6359..fb31ef1e0 100644
--- a/lib/coreboot/coreboot_table.c
+++ b/lib/coreboot/coreboot_table.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -29,6 +29,7 @@ typedef struct {
} cb_header_t;
typedef enum {
+ CB_TAG_MEMORY = 0x1,
CB_TAG_SERIAL = 0xf,
CB_TAG_CBMEM_CONSOLE = 0x17,
} cb_tag_t;
@@ -37,11 +38,13 @@ typedef struct {
uint32_t tag;
uint32_t size;
union {
+ coreboot_memrange_t memranges[COREBOOT_MAX_MEMRANGES];
coreboot_serial_t serial;
uint64_t uint64;
};
} cb_entry_t;
+coreboot_memrange_t coreboot_memranges[COREBOOT_MAX_MEMRANGES];
coreboot_serial_t coreboot_serial;
/*
@@ -75,7 +78,7 @@ static void expand_and_mmap(uintptr_t baseaddr, size_t size)
static void setup_cbmem_console(uintptr_t baseaddr)
{
static console_cbmc_t console;
- assert(!console.base); /* should only have one CBMEM console */
+ assert(!console.console.base); /* should only have one CBMEM console */
/* CBMEM console structure stores its size in first header field. */
uint32_t size = *(uint32_t *)baseaddr;
@@ -86,6 +89,25 @@ static void setup_cbmem_console(uintptr_t baseaddr)
CONSOLE_FLAG_CRASH);
}
+coreboot_memory_t coreboot_get_memory_type(uintptr_t start, size_t size)
+{
+ int i;
+
+ for (i = 0; i < COREBOOT_MAX_MEMRANGES; i++) {
+ coreboot_memrange_t *range = &coreboot_memranges[i];
+
+ if (range->type == CB_MEM_NONE)
+ break; /* end of table reached */
+ if ((start >= range->start) &&
+ (start - range->start < range->size) &&
+ (size <= range->size - (start - range->start))) {
+ return range->type;
+ }
+ }
+
+ return CB_MEM_NONE;
+}
+
void coreboot_table_setup(void *base)
{
cb_header_t *header = base;
@@ -99,6 +121,7 @@ void coreboot_table_setup(void *base)
ptr = base + header->header_bytes;
for (i = 0; i < header->table_entries; i++) {
+ size_t size;
cb_entry_t *entry = ptr;
if (ptr - base >= header->header_bytes + header->table_bytes) {
@@ -107,6 +130,15 @@ void coreboot_table_setup(void *base)
}
switch (read_le32(&entry->tag)) {
+ case CB_TAG_MEMORY:
+ size = read_le32(&entry->size) -
+ offsetof(cb_entry_t, memranges);
+ if (size > sizeof(coreboot_memranges)) {
+ ERROR("Need to truncate coreboot memranges!\n");
+ size = sizeof(coreboot_memranges);
+ }
+ memcpy(&coreboot_memranges, &entry->memranges, size);
+ break;
case CB_TAG_SERIAL:
memcpy(&coreboot_serial, &entry->serial,
sizeof(coreboot_serial));
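
The new coreboot_get_memory_type() helper returns the type of the table entry that fully contains [start, start + size), using comparisons that cannot overflow, or CB_MEM_NONE when no entry covers the range. A hypothetical caller might look like the sketch below; CB_MEM_RAM is assumed here as coreboot's usual enumerator for normal DRAM, since only CB_MEM_NONE appears in the patch itself.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	/*
	 * Sketch: accept a buffer handed over by coreboot only if it lies
	 * entirely in memory that coreboot reported as normal RAM.
	 */
	static bool region_is_coreboot_ram(uintptr_t base, size_t size)
	{
		return coreboot_get_memory_type(base, size) == CB_MEM_RAM;
	}
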
diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S
index b105de26b..df11d8690 100644
--- a/lib/cpus/aarch64/cortex_a53.S
+++ b/lib/cpus/aarch64/cortex_a53.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -239,6 +239,20 @@ exit_check_errata_843419:
ret
endfunc check_errata_843419
+ /* --------------------------------------------------
+ * Errata workaround for Cortex A53 Errata #1530924.
+ * This applies to all revisions of Cortex A53.
+ * --------------------------------------------------
+ */
+func check_errata_1530924
+#if ERRATA_A53_1530924
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_1530924
+
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A53.
* Shall clobber: x0-x19
@@ -359,6 +373,7 @@ func cortex_a53_errata_report
report_errata ERRATA_A53_836870, cortex_a53, disable_non_temporal_hint
report_errata ERRATA_A53_843419, cortex_a53, 843419
report_errata ERRATA_A53_855873, cortex_a53, 855873
+ report_errata ERRATA_A53_1530924, cortex_a53, 1530924
ldp x8, x30, [sp], #16
ret
diff --git a/lib/cpus/aarch64/cortex_a55.S b/lib/cpus/aarch64/cortex_a55.S
index 8e138244b..783830450 100644
--- a/lib/cpus/aarch64/cortex_a55.S
+++ b/lib/cpus/aarch64/cortex_a55.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -222,6 +222,20 @@ func check_errata_1221012
b cpu_rev_var_ls
endfunc check_errata_1221012
+ /* --------------------------------------------------
+ * Errata workaround for Cortex A55 Errata #1530923.
+ * This applies to all revisions of Cortex A55.
+ * --------------------------------------------------
+ */
+func check_errata_1530923
+#if ERRATA_A55_1530923
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_1530923
+
func cortex_a55_reset_func
mov x19, x30
@@ -306,6 +320,7 @@ func cortex_a55_errata_report
report_errata ERRATA_A55_846532, cortex_a55, 846532
report_errata ERRATA_A55_903758, cortex_a55, 903758
report_errata ERRATA_A55_1221012, cortex_a55, 1221012
+ report_errata ERRATA_A55_1530923, cortex_a55, 1530923
ldp x8, x30, [sp], #16
ret
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index dd03c0f02..8ef0f922a 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -395,6 +396,20 @@ func check_errata_cve_2018_3639
ret
endfunc check_errata_cve_2018_3639
+ /* --------------------------------------------------
+ * Errata workaround for Cortex A57 Errata #1319537.
+ * This applies to all revisions of Cortex A57.
+ * --------------------------------------------------
+ */
+func check_errata_1319537
+#if ERRATA_A57_1319537
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_1319537
+
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A57.
* Shall clobber: x0-x19
@@ -469,6 +484,17 @@ func cortex_a57_reset_func
dsb sy
#endif
+#if A57_ENABLE_NONCACHEABLE_LOAD_FWD
+ /* ---------------------------------------------
+ * Enable higher performance non-cacheable load
+ * forwarding
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_A57_CPUACTLR_EL1
+ orr x0, x0, #CORTEX_A57_CPUACTLR_EL1_EN_NC_LOAD_FWD
+ msr CORTEX_A57_CPUACTLR_EL1, x0
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
@@ -601,6 +627,7 @@ func cortex_a57_errata_report
report_errata ERRATA_A57_829520, cortex_a57, 829520
report_errata ERRATA_A57_833471, cortex_a57, 833471
report_errata ERRATA_A57_859972, cortex_a57, 859972
+ report_errata ERRATA_A57_1319537, cortex_a57, 1319537
report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715
report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index 38b76b940..aff6072a0 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -119,6 +119,20 @@ func check_errata_cve_2018_3639
ret
endfunc check_errata_cve_2018_3639
+ /* --------------------------------------------------
+ * Errata workaround for Cortex A72 Errata #1319367.
+ * This applies to all revisions of Cortex A72.
+ * --------------------------------------------------
+ */
+func check_errata_1319367
+#if ERRATA_A72_1319367
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_1319367
+
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A72.
* -------------------------------------------------
@@ -282,6 +296,7 @@ func cortex_a72_errata_report
* checking functions of each errata.
*/
report_errata ERRATA_A72_859971, cortex_a72, 859971
+ report_errata ERRATA_A72_1319367, cortex_a72, 1319367
report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715
report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
index baefa4676..4f7f4bb9a 100644
--- a/lib/cpus/aarch64/cortex_a76.S
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -337,33 +337,83 @@ func check_errata_1262888
b cpu_rev_var_ls
endfunc check_errata_1262888
- /* --------------------------------------------------
- * Errata Workaround for Cortex A76 Errata #1275112
- * and Errata #1262606.
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A76 Errata #1286807.
* This applies only to revision <= r3p0 of Cortex A76.
+ * Due to the nature of the errata it is applied unconditionally
+ * when built in, report it as applicable in this case
+ * ---------------------------------------------------
+ */
+func check_errata_1286807
+#if ERRATA_A76_1286807
+ mov x0, #ERRATA_APPLIES
+ ret
+#else
+ mov x1, #0x30
+ b cpu_rev_var_ls
+#endif
+endfunc check_errata_1286807
+
+ /* --------------------------------------------------
+ * Errata workaround for Cortex A76 Errata #1791580.
+ * This applies to revisions <= r4p0 of Cortex A76.
* Inputs:
* x0: variant[4:7] and revision[0:3] of current cpu.
* Shall clobber: x0-x17
* --------------------------------------------------
*/
-func errata_a76_1275112_1262606_wa
- /*
- * Compare x0 against revision r3p0
- */
+func errata_a76_1791580_wa
+ /* Compare x0 against revision r4p0 */
mov x17, x30
- /*
- * Since both errata #1275112 and #1262606 have the same check, we can
- * invoke any one of them for the check here.
- */
- bl check_errata_1275112
+ bl check_errata_1791580
cbz x0, 1f
+ mrs x1, CORTEX_A76_CPUACTLR2_EL1
+ orr x1, x1, CORTEX_A76_CPUACTLR2_EL1_BIT_2
+ msr CORTEX_A76_CPUACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a76_1791580_wa
+
+func check_errata_1791580
+ /* Applies to everything <=r4p0. */
+ mov x1, #0x40
+ b cpu_rev_var_ls
+endfunc check_errata_1791580
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A76 Errata #1262606,
+ * #1275112, and #1868343. #1262606 and #1275112
+ * apply to revisions <= r3p0 and #1868343 applies to
+ * revisions <= r4p0.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+
+func errata_a76_1262606_1275112_1868343_wa
+ mov x17, x30
+
+/* Check for <= r3p0 cases and branch if check passes. */
+#if ERRATA_A76_1262606 || ERRATA_A76_1275112
+ bl check_errata_1262606
+ cbnz x0, 1f
+#endif
+
+/* Check for <= r4p0 cases and branch if check fails. */
+#if ERRATA_A76_1868343
+ bl check_errata_1868343
+ cbz x0, 2f
+#endif
+1:
mrs x1, CORTEX_A76_CPUACTLR_EL1
- orr x1, x1, CORTEX_A76_CPUACTLR_EL1_BIT_13
+ orr x1, x1, #CORTEX_A76_CPUACTLR_EL1_BIT_13
msr CORTEX_A76_CPUACTLR_EL1, x1
isb
-1:
+2:
ret x17
-endfunc errata_a76_1275112_1262606_wa
+endfunc errata_a76_1262606_1275112_1868343_wa
func check_errata_1262606
mov x1, #0x30
@@ -375,22 +425,65 @@ func check_errata_1275112
b cpu_rev_var_ls
endfunc check_errata_1275112
- /* ---------------------------------------------------
- * Errata Workaround for Cortex A76 Errata #1286807.
- * This applies only to revision <= r3p0 of Cortex A76.
- * Due to the nature of the errata it is applied unconditionally
- * when built in, report it as applicable in this case
- * ---------------------------------------------------
- */
-func check_errata_1286807
-#if ERRATA_A76_1286807
- mov x0, #ERRATA_APPLIES
- ret
-#else
- mov x1, #0x30
+func check_errata_1868343
+ mov x1, #0x40
b cpu_rev_var_ls
-#endif
-endfunc check_errata_1286807
+endfunc check_errata_1868343
+
+/* --------------------------------------------------
+ * Errata Workaround for A76 Erratum 1946160.
+ * This applies to revisions r3p0 - r4p1 of A76.
+ * It also exists in r0p0 - r2p0 but there is no fix
+ * in those revisions.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a76_1946160_wa
+ /* Compare x0 against revisions r3p0 - r4p1 */
+ mov x17, x30
+ bl check_errata_1946160
+ cbz x0, 1f
+
+ mov x0, #3
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3900002
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #4
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #5
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ isb
+1:
+ ret x17
+endfunc errata_a76_1946160_wa
+
+func check_errata_1946160
+ /* Applies to revisions r3p0 - r4p1. */
+ mov x1, #0x30
+ mov x2, #0x41
+ b cpu_rev_var_range
+endfunc check_errata_1946160
func check_errata_cve_2018_3639
#if WORKAROUND_CVE_2018_3639
@@ -409,6 +502,23 @@ func cortex_a76_disable_wa_cve_2018_3639
ret
endfunc cortex_a76_disable_wa_cve_2018_3639
+ /* --------------------------------------------------------------
+ * Errata Workaround for Cortex A76 Errata #1165522.
+ * This applies only to revisions <= r3p0 of Cortex A76.
+ * Due to the nature of the errata it is applied unconditionally
+ * when built in, report it as applicable in this case
+ * --------------------------------------------------------------
+ */
+func check_errata_1165522
+#if ERRATA_A76_1165522
+ mov x0, #ERRATA_APPLIES
+ ret
+#else
+ mov x1, #0x30
+ b cpu_rev_var_ls
+#endif
+endfunc check_errata_1165522
+
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A76.
* Shall clobber: x0-x19
@@ -439,9 +549,9 @@ func cortex_a76_reset_func
bl errata_a76_1257314_wa
#endif
-#if ERRATA_A76_1262606 || ERRATA_A76_1275112
+#if ERRATA_A76_1262606 || ERRATA_A76_1275112 || ERRATA_A76_1868343
mov x0, x18
- bl errata_a76_1275112_1262606_wa
+ bl errata_a76_1262606_1275112_1868343_wa
#endif
#if ERRATA_A76_1262888
@@ -449,6 +559,16 @@ func cortex_a76_reset_func
bl errata_a76_1262888_wa
#endif
+#if ERRATA_A76_1791580
+ mov x0, x18
+ bl errata_a76_1791580_wa
+#endif
+
+#if ERRATA_A76_1946160
+ mov x0, x18
+ bl errata_a76_1946160_wa
+#endif
+
#if WORKAROUND_CVE_2018_3639
/* If the PE implements SSBS, we don't need the dynamic workaround */
mrs x0, id_aa64pfr1_el1
@@ -529,6 +649,10 @@ func cortex_a76_errata_report
report_errata ERRATA_A76_1262888, cortex_a76, 1262888
report_errata ERRATA_A76_1275112, cortex_a76, 1275112
report_errata ERRATA_A76_1286807, cortex_a76, 1286807
+ report_errata ERRATA_A76_1791580, cortex_a76, 1791580
+ report_errata ERRATA_A76_1165522, cortex_a76, 1165522
+ report_errata ERRATA_A76_1868343, cortex_a76, 1868343
+ report_errata ERRATA_A76_1946160, cortex_a76, 1946160
report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
report_errata ERRATA_DSU_798953, cortex_a76, dsu_798953
report_errata ERRATA_DSU_936184, cortex_a76, dsu_936184
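
Each check_errata_* routine above receives the packed value read by cpu_get_rev_var, with variant in bits [4:7] and revision in bits [0:3], so #0x40 means r4p0 and #0x41 means r4p1. A C model of the two comparison helpers, using the ERRATA_APPLIES/ERRATA_NOT_APPLIES values seen elsewhere in this diff (illustrative only; the real cpu_rev_var_ls and cpu_rev_var_range are shared assembly routines):

	/* Illustrative C model of the rev-var convention, not project code. */
	#define REV_VAR(var, rev)	(((var) << 4) | (rev))	/* r4p1 -> 0x41 */

	/* cpu_rev_var_ls: the erratum applies to all revisions <= max */
	static int rev_var_ls(unsigned int rev_var, unsigned int max)
	{
		return (rev_var <= max) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES;
	}

	/* cpu_rev_var_range: the erratum applies to revisions lo..hi inclusive */
	static int rev_var_range(unsigned int rev_var, unsigned int lo,
				 unsigned int hi)
	{
		return ((rev_var >= lo) && (rev_var <= hi)) ? ERRATA_APPLIES
							    : ERRATA_NOT_APPLIES;
	}
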
diff --git a/lib/cpus/aarch64/cortex_a77.S b/lib/cpus/aarch64/cortex_a77.S
index f3fd5e196..e3a6f5fbf 100644
--- a/lib/cpus/aarch64/cortex_a77.S
+++ b/lib/cpus/aarch64/cortex_a77.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -21,6 +21,122 @@
#error "Cortex-A77 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A77 Errata #1508412.
+ * This applies only to revision <= r1p0 of Cortex A77.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a77_1508412_wa
+ /*
+ * Compare x0 against revision r1p0
+ */
+ mov x17, x30
+ bl check_errata_1508412
+ cbz x0, 3f
+ /*
+ * Compare x0 against revision r0p0
+ */
+ bl check_errata_1508412_0
+ cbz x0, 1f
+ ldr x0, =0x0
+ msr CORTEX_A77_CPUPSELR_EL3, x0
+ ldr x0, =0x00E8400000
+ msr CORTEX_A77_CPUPOR_EL3, x0
+ ldr x0, =0x00FFE00000
+ msr CORTEX_A77_CPUPMR_EL3, x0
+ ldr x0, =0x4004003FF
+ msr CORTEX_A77_CPUPCR_EL3, x0
+ ldr x0, =0x1
+ msr CORTEX_A77_CPUPSELR_EL3, x0
+ ldr x0, =0x00E8C00040
+ msr CORTEX_A77_CPUPOR_EL3, x0
+ ldr x0, =0x00FFE00040
+ msr CORTEX_A77_CPUPMR_EL3, x0
+ b 2f
+1:
+ ldr x0, =0x0
+ msr CORTEX_A77_CPUPSELR_EL3, x0
+ ldr x0, =0x00E8400000
+ msr CORTEX_A77_CPUPOR_EL3, x0
+ ldr x0, =0x00FF600000
+ msr CORTEX_A77_CPUPMR_EL3, x0
+ ldr x0, =0x00E8E00080
+ msr CORTEX_A77_CPUPOR2_EL3, x0
+ ldr x0, =0x00FFE000C0
+ msr CORTEX_A77_CPUPMR2_EL3, x0
+2:
+ ldr x0, =0x04004003FF
+ msr CORTEX_A77_CPUPCR_EL3, x0
+ isb
+3:
+ ret x17
+endfunc errata_a77_1508412_wa
+
+func check_errata_1508412
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1508412
+
+func check_errata_1508412_0
+ mov x1, #0x0
+ b cpu_rev_var_ls
+endfunc check_errata_1508412_0
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A77 Errata #1925769.
+ * This applies to revision <= r1p1 of Cortex A77.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a77_1925769_wa
+ /* Compare x0 against revision <= r1p1 */
+ mov x17, x30
+ bl check_errata_1925769
+ cbz x0, 1f
+
+ /* Set bit 8 in ECTLR_EL1 */
+ mrs x1, CORTEX_A77_CPUECTLR_EL1
+ orr x1, x1, #CORTEX_A77_CPUECTLR_EL1_BIT_8
+ msr CORTEX_A77_CPUECTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a77_1925769_wa
+
+func check_errata_1925769
+ /* Applies to everything <= r1p1 */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_1925769
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A77.
+ * Shall clobber: x0-x19
+ * -------------------------------------------------
+ */
+func cortex_a77_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A77_1508412
+ mov x0, x18
+ bl errata_a77_1508412_wa
+#endif
+
+#if ERRATA_A77_1925769
+ mov x0, x18
+ bl errata_a77_1925769_wa
+#endif
+
+ ret x19
+endfunc cortex_a77_reset_func
+
/* ---------------------------------------------
* HW will do the cache maintenance while powering down
* ---------------------------------------------
@@ -42,6 +158,19 @@ endfunc cortex_a77_core_pwr_dwn
* Errata printing function for Cortex-A77. Must follow AAPCS.
*/
func cortex_a77_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_A77_1508412, cortex_a77, 1508412
+ report_errata ERRATA_A77_1925769, cortex_a77, 1925769
+
+ ldp x8, x30, [sp], #16
ret
endfunc cortex_a77_errata_report
#endif
@@ -67,5 +196,5 @@ func cortex_a77_cpu_reg_dump
endfunc cortex_a77_cpu_reg_dump
declare_cpu_ops cortex_a77, CORTEX_A77_MIDR, \
- CPU_NO_RESET_FUNC, \
+ cortex_a77_reset_func, \
cortex_a77_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S
new file mode 100644
index 000000000..f61726b46
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a78.S
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2019-2021, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a78.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "cortex_a78 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+
+/* --------------------------------------------------
+ * Errata Workaround for A78 Erratum 1688305.
+ * This applies to revision r0p0 and r1p0 of A78.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_1688305_wa
+ /* Compare x0 against revision r1p0 */
+ mov x17, x30
+ bl check_errata_1688305
+ cbz x0, 1f
+ mrs x1, CORTEX_A78_ACTLR2_EL1
+ orr x1, x1, #CORTEX_A78_ACTLR2_EL1_BIT_1
+ msr CORTEX_A78_ACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a78_1688305_wa
+
+func check_errata_1688305
+ /* Applies to r0p0 and r1p0 */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1688305
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A78 Errata #1941498.
+ * This applies to revisions r0p0, r1p0, and r1p1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_1941498_wa
+ /* Compare x0 against revision <= r1p1 */
+ mov x17, x30
+ bl check_errata_1941498
+ cbz x0, 1f
+
+ /* Set bit 8 in ECTLR_EL1 */
+ mrs x1, CORTEX_A78_CPUECTLR_EL1
+ orr x1, x1, #CORTEX_A78_CPUECTLR_EL1_BIT_8
+ msr CORTEX_A78_CPUECTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a78_1941498_wa
+
+func check_errata_1941498
+ /* Check for revision <= r1p1, might need to be updated later. */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_1941498
+
+ /* --------------------------------------------------
+ * Errata Workaround for A78 Erratum 1951500.
+ * This applies to revisions r1p0 and r1p1 of A78.
+ * The issue also exists in r0p0 but there is no fix
+ * in that revision.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_1951500_wa
+ /* Compare x0 against revisions r1p0 - r1p1 */
+ mov x17, x30
+ bl check_errata_1951500
+ cbz x0, 1f
+
+ msr S3_6_c15_c8_0, xzr
+ ldr x0, =0x10E3900002
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ mov x0, #1
+ msr S3_6_c15_c8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ mov x0, #2
+ msr S3_6_c15_c8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ isb
+1:
+ ret x17
+endfunc errata_a78_1951500_wa
+
+func check_errata_1951500
+ /* Applies to revisions r1p0 and r1p1. */
+ mov x1, #CPU_REV(1, 0)
+ mov x2, #CPU_REV(1, 1)
+ b cpu_rev_var_range
+endfunc check_errata_1951500
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A78
+ * -------------------------------------------------
+ */
+func cortex_a78_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A78_1688305
+ mov x0, x18
+ bl errata_a78_1688305_wa
+#endif
+
+#if ERRATA_A78_1941498
+ mov x0, x18
+ bl errata_a78_1941498_wa
+#endif
+
+#if ERRATA_A78_1951500
+ mov x0, x18
+ bl errata_a78_1951500_wa
+#endif
+
+#if ENABLE_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ mrs x0, actlr_el3
+ bic x0, x0, #CORTEX_A78_ACTLR_TAM_BIT
+ msr actlr_el3, x0
+
+ /* Make sure accesses from non-secure EL0/EL1 are not trapped to EL2 */
+ mrs x0, actlr_el2
+ bic x0, x0, #CORTEX_A78_ACTLR_TAM_BIT
+ msr actlr_el2, x0
+
+ /* Enable group0 counters */
+ mov x0, #CORTEX_A78_AMU_GROUP0_MASK
+ msr CPUAMCNTENSET0_EL0, x0
+
+ /* Enable group1 counters */
+ mov x0, #CORTEX_A78_AMU_GROUP1_MASK
+ msr CPUAMCNTENSET1_EL0, x0
+#endif
+
+ isb
+ ret x19
+endfunc cortex_a78_reset_func
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func cortex_a78_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_A78_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A78_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
+ msr CORTEX_A78_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a78_core_pwr_dwn
+
+ /*
+ * Errata printing function for cortex_a78. Must follow AAPCS.
+ */
+#if REPORT_ERRATA
+func cortex_a78_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_A78_1688305, cortex_a78, 1688305
+ report_errata ERRATA_A78_1941498, cortex_a78, 1941498
+ report_errata ERRATA_A78_1951500, cortex_a78, 1951500
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a78_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cortex_a78 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a78_regs, "aS"
+cortex_a78_regs:  /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a78_cpu_reg_dump
+ adr x6, cortex_a78_regs
+ mrs x8, CORTEX_A78_CPUECTLR_EL1
+ ret
+endfunc cortex_a78_cpu_reg_dump
+
+declare_cpu_ops cortex_a78, CORTEX_A78_MIDR, \
+ cortex_a78_reset_func, \
+ cortex_a78_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_hercules_ae.S b/lib/cpus/aarch64/cortex_a78_ae.S
index c4a216353..9aff9ac85 100644
--- a/lib/cpus/aarch64/cortex_hercules_ae.S
+++ b/lib/cpus/aarch64/cortex_a78_ae.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2020, ARM Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,71 +7,71 @@
#include <arch.h>
#include <asm_macros.S>
#include <common/bl_common.h>
-#include <cortex_hercules_ae.h>
+#include <cortex_a78_ae.h>
#include <cpu_macros.S>
#include <plat_macros.S>
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
-#error "cortex_hercules_ae must be compiled with HW_ASSISTED_COHERENCY enabled"
+#error "cortex_a78_ae must be compiled with HW_ASSISTED_COHERENCY enabled"
#endif
/* -------------------------------------------------
- * The CPU Ops reset function for Cortex-Hercules-AE
+ * The CPU Ops reset function for Cortex-A78-AE
* -------------------------------------------------
*/
#if ENABLE_AMU
-func cortex_hercules_ae_reset_func
+func cortex_a78_ae_reset_func
/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
mrs x0, actlr_el3
- bic x0, x0, #CORTEX_HERCULES_ACTLR_TAM_BIT
+ bic x0, x0, #CORTEX_A78_ACTLR_TAM_BIT
msr actlr_el3, x0
/* Make sure accesses from non-secure EL0/EL1 are not trapped to EL2 */
mrs x0, actlr_el2
- bic x0, x0, #CORTEX_HERCULES_ACTLR_TAM_BIT
+ bic x0, x0, #CORTEX_A78_ACTLR_TAM_BIT
msr actlr_el2, x0
/* Enable group0 counters */
- mov x0, #CORTEX_HERCULES_AMU_GROUP0_MASK
+ mov x0, #CORTEX_A78_AMU_GROUP0_MASK
msr CPUAMCNTENSET0_EL0, x0
/* Enable group1 counters */
- mov x0, #CORTEX_HERCULES_AMU_GROUP1_MASK
+ mov x0, #CORTEX_A78_AMU_GROUP1_MASK
msr CPUAMCNTENSET1_EL0, x0
isb
ret
-endfunc cortex_hercules_ae_reset_func
+endfunc cortex_a78_ae_reset_func
#endif
/* -------------------------------------------------------
* HW will do the cache maintenance while powering down
* -------------------------------------------------------
*/
-func cortex_hercules_ae_core_pwr_dwn
+func cortex_a78_ae_core_pwr_dwn
/* -------------------------------------------------------
* Enable CPU power down bit in power control register
* -------------------------------------------------------
*/
- mrs x0, CORTEX_HERCULES_CPUPWRCTLR_EL1
- orr x0, x0, #CORTEX_HERCULES_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
- msr CORTEX_HERCULES_CPUPWRCTLR_EL1, x0
+ mrs x0, CORTEX_A78_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A78_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
+ msr CORTEX_A78_CPUPWRCTLR_EL1, x0
isb
ret
-endfunc cortex_hercules_ae_core_pwr_dwn
+endfunc cortex_a78_ae_core_pwr_dwn
/*
- * Errata printing function for cortex_hercules_ae. Must follow AAPCS.
+ * Errata printing function for cortex_a78_ae. Must follow AAPCS.
*/
#if REPORT_ERRATA
-func cortex_hercules_ae_errata_report
+func cortex_a78_ae_errata_report
ret
-endfunc cortex_hercules_ae_errata_report
+endfunc cortex_a78_ae_errata_report
#endif
/* -------------------------------------------------------
- * This function provides cortex_hercules_ae specific
+ * This function provides cortex_a78_ae specific
* register information for crash reporting.
* It needs to return with x6 pointing to
* a list of register names in ascii and
@@ -79,22 +79,22 @@ endfunc cortex_hercules_ae_errata_report
* reported.
* -------------------------------------------------------
*/
-.section .rodata.cortex_hercules_ae_regs, "aS"
-cortex_hercules_ae_regs: /* The ascii list of register names to be reported */
+.section .rodata.cortex_a78_ae_regs, "aS"
+cortex_a78_ae_regs: /* The ascii list of register names to be reported */
.asciz "cpuectlr_el1", ""
-func cortex_hercules_ae_cpu_reg_dump
- adr x6, cortex_hercules_ae_regs
- mrs x8, CORTEX_HERCULES_CPUECTLR_EL1
+func cortex_a78_ae_cpu_reg_dump
+ adr x6, cortex_a78_ae_regs
+ mrs x8, CORTEX_A78_CPUECTLR_EL1
ret
-endfunc cortex_hercules_ae_cpu_reg_dump
+endfunc cortex_a78_ae_cpu_reg_dump
#if ENABLE_AMU
-#define HERCULES_AE_RESET_FUNC cortex_hercules_ae_reset_func
+#define A78_AE_RESET_FUNC cortex_a78_ae_reset_func
#else
-#define HERCULES_AE_RESET_FUNC CPU_NO_RESET_FUNC
+#define A78_AE_RESET_FUNC CPU_NO_RESET_FUNC
#endif
-declare_cpu_ops cortex_hercules_ae, CORTEX_HERCULES_AE_MIDR, \
- HERCULES_AE_RESET_FUNC, \
- cortex_hercules_ae_core_pwr_dwn
+declare_cpu_ops cortex_a78_ae, CORTEX_A78_AE_MIDR, \
+ A78_AE_RESET_FUNC, \
+ cortex_a78_ae_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_hercules.S b/lib/cpus/aarch64/cortex_hercules.S
deleted file mode 100644
index a23919626..000000000
--- a/lib/cpus/aarch64/cortex_hercules.S
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (c) 2019, ARM Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <common/bl_common.h>
-#include <cortex_hercules.h>
-#include <cpu_macros.S>
-#include <plat_macros.S>
-
-/* Hardware handled coherency */
-#if HW_ASSISTED_COHERENCY == 0
-#error "cortex_hercules must be compiled with HW_ASSISTED_COHERENCY enabled"
-#endif
-
-
-/* --------------------------------------------------
- * Errata Workaround for Hercules Erratum 1688305.
- * This applies to revision r0p0 and r1p0 of Hercules.
- * Inputs:
- * x0: variant[4:7] and revision[0:3] of current cpu.
- * Shall clobber: x0-x17
- * --------------------------------------------------
- */
-func errata_hercules_1688305_wa
- /* Compare x0 against revision r1p0 */
- mov x17, x30
- bl check_errata_1688305
- cbz x0, 1f
- mrs x1, CORTEX_HERCULES_ACTLR2_EL1
- orr x1, x1, CORTEX_HERCULES_ACTLR2_EL1_BIT_1
- msr CORTEX_HERCULES_ACTLR2_EL1, x1
- isb
-1:
- ret x17
-endfunc errata_hercules_1688305_wa
-
-func check_errata_1688305
- /* Applies to r0p0 and r1p0 */
- mov x1, #0x10
- b cpu_rev_var_ls
-endfunc check_errata_1688305
-
- /* -------------------------------------------------
- * The CPU Ops reset function for Cortex-Hercules
- * -------------------------------------------------
- */
-func cortex_hercules_reset_func
- mov x19, x30
- bl cpu_get_rev_var
- mov x18, x0
-
-#if ERRATA_HERCULES_1688305
- mov x0, x18
- bl errata_hercules_1688305_wa
-#endif
-
-#if ENABLE_AMU
- /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
- mrs x0, actlr_el3
- bic x0, x0, #CORTEX_HERCULES_ACTLR_TAM_BIT
- msr actlr_el3, x0
-
- /* Make sure accesses from non-secure EL0/EL1 are not trapped to EL2 */
- mrs x0, actlr_el2
- bic x0, x0, #CORTEX_HERCULES_ACTLR_TAM_BIT
- msr actlr_el2, x0
-
- /* Enable group0 counters */
- mov x0, #CORTEX_HERCULES_AMU_GROUP0_MASK
- msr CPUAMCNTENSET0_EL0, x0
-
- /* Enable group1 counters */
- mov x0, #CORTEX_HERCULES_AMU_GROUP1_MASK
- msr CPUAMCNTENSET1_EL0, x0
-#endif
-
- isb
- ret x19
-endfunc cortex_hercules_reset_func
-
- /* ---------------------------------------------
- * HW will do the cache maintenance while powering down
- * ---------------------------------------------
- */
-func cortex_hercules_core_pwr_dwn
- /* ---------------------------------------------
- * Enable CPU power down bit in power control register
- * ---------------------------------------------
- */
- mrs x0, CORTEX_HERCULES_CPUPWRCTLR_EL1
- orr x0, x0, #CORTEX_HERCULES_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
- msr CORTEX_HERCULES_CPUPWRCTLR_EL1, x0
- isb
- ret
-endfunc cortex_hercules_core_pwr_dwn
-
- /*
- * Errata printing function for cortex_hercules. Must follow AAPCS.
- */
-#if REPORT_ERRATA
-func cortex_hercules_errata_report
- stp x8, x30, [sp, #-16]!
-
- bl cpu_get_rev_var
- mov x8, x0
-
- /*
- * Report all errata. The revision-variant information is passed to
- * checking functions of each errata.
- */
- report_errata ERRATA_HERCULES_1688305, cortex_hercules, 1688305
-
- ldp x8, x30, [sp], #16
- ret
-endfunc cortex_hercules_errata_report
-#endif
-
- /* ---------------------------------------------
- * This function provides cortex_hercules specific
- * register information for crash reporting.
- * It needs to return with x6 pointing to
- * a list of register names in ascii and
- * x8 - x15 having values of registers to be
- * reported.
- * ---------------------------------------------
- */
-.section .rodata.cortex_hercules_regs, "aS"
-cortex_hercules_regs: /* The ascii list of register names to be reported */
- .asciz "cpuectlr_el1", ""
-
-func cortex_hercules_cpu_reg_dump
- adr x6, cortex_hercules_regs
- mrs x8, CORTEX_HERCULES_CPUECTLR_EL1
- ret
-endfunc cortex_hercules_cpu_reg_dump
-
-declare_cpu_ops cortex_hercules, CORTEX_HERCULES_MIDR, \
- cortex_hercules_reset_func, \
- cortex_hercules_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_klein.S b/lib/cpus/aarch64/cortex_klein.S
new file mode 100644
index 000000000..d3a8ab481
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_klein.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2020, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_klein.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex Klein must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex Klein supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_klein_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_KLEIN_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_KLEIN_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_KLEIN_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_klein_core_pwr_dwn
+
+ /*
+ * Errata printing function for Cortex Klein. Must follow AAPCS.
+ */
+#if REPORT_ERRATA
+func cortex_klein_errata_report
+ ret
+endfunc cortex_klein_errata_report
+#endif
+
+func cortex_klein_reset_func
+ /* Disable speculative loads */
+ msr SSBS, xzr
+ isb
+ ret
+endfunc cortex_klein_reset_func
+
+ /* ---------------------------------------------
+ * This function provides Cortex-Klein specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_klein_regs, "aS"
+cortex_klein_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_klein_cpu_reg_dump
+ adr x6, cortex_klein_regs
+ mrs x8, CORTEX_KLEIN_CPUECTLR_EL1
+ ret
+endfunc cortex_klein_cpu_reg_dump
+
+declare_cpu_ops cortex_klein, CORTEX_KLEIN_MIDR, \
+ cortex_klein_reset_func, \
+ cortex_klein_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_matterhorn.S b/lib/cpus/aarch64/cortex_matterhorn.S
new file mode 100644
index 000000000..4156f3cf8
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_matterhorn.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2020, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_matterhorn.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex Matterhorn must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex Matterhorn supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_matterhorn_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_MATTERHORN_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_MATTERHORN_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_MATTERHORN_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_matterhorn_core_pwr_dwn
+
+ /*
+ * Errata printing function for Cortex Matterhorn. Must follow AAPCS.
+ */
+#if REPORT_ERRATA
+func cortex_matterhorn_errata_report
+ ret
+endfunc cortex_matterhorn_errata_report
+#endif
+
+func cortex_matterhorn_reset_func
+ /* Disable speculative loads */
+ msr SSBS, xzr
+ isb
+ ret
+endfunc cortex_matterhorn_reset_func
+
+ /* ---------------------------------------------
+ * This function provides Cortex-Matterhorn specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_matterhorn_regs, "aS"
+cortex_matterhorn_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_matterhorn_cpu_reg_dump
+ adr x6, cortex_matterhorn_regs
+ mrs x8, CORTEX_MATTERHORN_CPUECTLR_EL1
+ ret
+endfunc cortex_matterhorn_cpu_reg_dump
+
+declare_cpu_ops cortex_matterhorn, CORTEX_MATTERHORN_MIDR, \
+ cortex_matterhorn_reset_func, \
+ cortex_matterhorn_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index 808c7f807..730b09beb 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2020, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -78,6 +78,10 @@ func prepare_cpu_pwr_dwn
mov x1, #CPU_PWR_DWN_OPS
add x1, x1, x2, lsl #3
ldr x1, [x0, x1]
+#if ENABLE_ASSERTIONS
+ cmp x1, #0
+ ASM_ASSERT(ne)
+#endif
br x1
endfunc prepare_cpu_pwr_dwn
@@ -136,6 +140,13 @@ endfunc do_cpu_reg_dump
* midr of the core. It reads the MIDR_EL1 and finds the matching
* entry in cpu_ops entries. Only the implementation and part number
* are used to match the entries.
+ *
+ * If cpu_ops for the MIDR_EL1 cannot be found and
+ * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
+ * default cpu_ops with an MIDR value of 0.
+ * (Implementation number 0x0 should be reserved for software use
+ * and therefore no clashes should happen with that default value).
+ *
* Return :
* x0 - The matching cpu_ops pointer on Success
* x0 - 0 on failure.
@@ -143,23 +154,26 @@ endfunc do_cpu_reg_dump
*/
.globl get_cpu_ops_ptr
func get_cpu_ops_ptr
- /* Get the cpu_ops start and end locations */
- adr x4, (__CPU_OPS_START__ + CPU_MIDR)
- adr x5, (__CPU_OPS_END__ + CPU_MIDR)
-
- /* Initialize the return parameter */
- mov x0, #0
-
/* Read the MIDR_EL1 */
mrs x2, midr_el1
mov_imm x3, CPU_IMPL_PN_MASK
/* Retain only the implementation and part number using mask */
and w2, w2, w3
+
+ /* Get the cpu_ops end location */
+ adr x5, (__CPU_OPS_END__ + CPU_MIDR)
+
+ /* Initialize the return parameter */
+ mov x0, #0
1:
+ /* Get the cpu_ops start location */
+ adr x4, (__CPU_OPS_START__ + CPU_MIDR)
+
+2:
/* Check if we have reached end of list */
cmp x4, x5
- b.eq error_exit
+ b.eq search_def_ptr
/* load the midr from the cpu_ops */
ldr x1, [x4], #CPU_OPS_SIZE
@@ -167,11 +181,35 @@ func get_cpu_ops_ptr
/* Check if midr matches to midr of this core */
cmp w1, w2
- b.ne 1b
+ b.ne 2b
/* Subtract the increment and offset to get the cpu-ops pointer */
sub x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+#ifdef SUPPORT_UNKNOWN_MPID
+ cbnz x2, exit_mpid_found
+ /* Mark the unsupported MPID flag */
+ adrp x1, unsupported_mpid_flag
+ add x1, x1, :lo12:unsupported_mpid_flag
+ str w2, [x1]
+exit_mpid_found:
+#endif
+ ret
+
+ /*
+ * Search again for a default pointer (MIDR = 0x0)
+ * or return error if already searched.
+ */
+search_def_ptr:
+#ifdef SUPPORT_UNKNOWN_MPID
+ cbz x2, error_exit
+ mov x2, #0
+ b 1b
error_exit:
+#endif
ret
endfunc get_cpu_ops_ptr
@@ -276,7 +314,15 @@ func print_errata_status
* turn.
*/
mrs x0, tpidr_el3
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
ldr x1, [x0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp x1, #0
+ ASM_ASSERT(ne)
+#endif
ldr x0, [x1, #CPU_ERRATA_FUNC]
cbz x0, .Lnoprint
@@ -326,6 +372,10 @@ func check_wa_cve_2017_5715
ASM_ASSERT(ne)
#endif
ldr x0, [x0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
ldr x0, [x0, #CPU_EXTRA1_FUNC]
/*
* If the reserved function pointer is NULL, this CPU
@@ -359,6 +409,10 @@ func wa_cve_2018_3639_get_disable_ptr
ASM_ASSERT(ne)
#endif
ldr x0, [x0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
ldr x0, [x0, #CPU_EXTRA2_FUNC]
ret
endfunc wa_cve_2018_3639_get_disable_ptr
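
In C terms, the reworked get_cpu_ops_ptr above is a two-pass search. The sketch below is illustrative only: the name find_cpu_ops and the cpu_ops_t fields are invented for the example, and the real code walks the linker-defined __CPU_OPS_START__/__CPU_OPS_END__ array in assembly.

	/*
	 * Sketch: match on implementer + part number first, then fall back
	 * to a default entry with MIDR 0 when SUPPORT_UNKNOWN_MPID is set.
	 */
	static cpu_ops_t *find_cpu_ops(cpu_ops_t *start, cpu_ops_t *end,
				       unsigned int midr)
	{
		unsigned int key = midr & CPU_IMPL_PN_MASK;

		for (;;) {
			for (cpu_ops_t *ops = start; ops < end; ops++) {
				if (ops->midr == key)
					return ops; /* key == 0 hits the default */
			}
	#ifdef SUPPORT_UNKNOWN_MPID
			if (key != 0U) {
				key = 0U;	/* retry, looking for the default */
				continue;
			}
	#endif
			return NULL;		/* no match and no default entry */
		}
	}
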
diff --git a/lib/cpus/aarch64/denver.S b/lib/cpus/aarch64/denver.S
index c377b28b4..224ee2676 100644
--- a/lib/cpus/aarch64/denver.S
+++ b/lib/cpus/aarch64/denver.S
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -26,13 +27,17 @@
* table.
* -------------------------------------------------
*/
- .globl workaround_bpflush_runtime_exceptions
-
vector_base workaround_bpflush_runtime_exceptions
.macro apply_workaround
stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ /* Disable cycle counter when event counting is prohibited */
+ mrs x1, pmcr_el0
+ orr x0, x1, #PMCR_EL0_DP_BIT
+ msr pmcr_el0, x0
+ isb
+
/* -------------------------------------------------
* A new write-only system register where a write of
* 1 to bit 0 will cause the indirect branch predictor
@@ -156,13 +161,19 @@ endfunc denver_disable_ext_debug
* ----------------------------------------------------
*/
func denver_enable_dco
- mov x3, x30
+ /* DCO is not supported on PN5 and later */
+ mrs x1, midr_el1
+ mov_imm x2, DENVER_MIDR_PN4
+ cmp x1, x2
+ b.hi 1f
+
+ mov x18, x30
bl plat_my_core_pos
mov x1, #1
lsl x1, x1, x0
msr s3_0_c15_c0_2, x1
- mov x30, x3
- ret
+ mov x30, x18
+1: ret
endfunc denver_enable_dco
/* ----------------------------------------------------
@@ -170,10 +181,14 @@ endfunc denver_enable_dco
* ----------------------------------------------------
*/
func denver_disable_dco
-
- mov x3, x30
+ /* DCO is not supported on PN5 and later */
+ mrs x1, midr_el1
+ mov_imm x2, DENVER_MIDR_PN4
+ cmp x1, x2
+ b.hi 2f
/* turn off background work */
+ mov x18, x30
bl plat_my_core_pos
mov x1, #1
lsl x1, x1, x0
@@ -188,8 +203,8 @@ func denver_disable_dco
and x2, x2, x1
cbnz x2, 1b
- mov x30, x3
- ret
+ mov x30, x18
+2: ret
endfunc denver_disable_dco
func check_errata_cve_2017_5715
@@ -348,37 +363,23 @@ func denver_cpu_reg_dump
ret
endfunc denver_cpu_reg_dump
-declare_cpu_ops_wa denver, DENVER_MIDR_PN0, \
- denver_reset_func, \
- check_errata_cve_2017_5715, \
- CPU_NO_EXTRA2_FUNC, \
- denver_core_pwr_dwn, \
- denver_cluster_pwr_dwn
-
-declare_cpu_ops_wa denver, DENVER_MIDR_PN1, \
- denver_reset_func, \
- check_errata_cve_2017_5715, \
- CPU_NO_EXTRA2_FUNC, \
- denver_core_pwr_dwn, \
- denver_cluster_pwr_dwn
-
-declare_cpu_ops_wa denver, DENVER_MIDR_PN2, \
- denver_reset_func, \
- check_errata_cve_2017_5715, \
- CPU_NO_EXTRA2_FUNC, \
- denver_core_pwr_dwn, \
- denver_cluster_pwr_dwn
-
-declare_cpu_ops_wa denver, DENVER_MIDR_PN3, \
- denver_reset_func, \
- check_errata_cve_2017_5715, \
- CPU_NO_EXTRA2_FUNC, \
- denver_core_pwr_dwn, \
- denver_cluster_pwr_dwn
-
-declare_cpu_ops_wa denver, DENVER_MIDR_PN4, \
- denver_reset_func, \
- check_errata_cve_2017_5715, \
- CPU_NO_EXTRA2_FUNC, \
- denver_core_pwr_dwn, \
- denver_cluster_pwr_dwn
+/* macro to declare cpu_ops for Denver SKUs */
+.macro denver_cpu_ops_wa midr
+ declare_cpu_ops_wa denver, \midr, \
+ denver_reset_func, \
+ check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
+ denver_core_pwr_dwn, \
+ denver_cluster_pwr_dwn
+.endm
+
+denver_cpu_ops_wa DENVER_MIDR_PN0
+denver_cpu_ops_wa DENVER_MIDR_PN1
+denver_cpu_ops_wa DENVER_MIDR_PN2
+denver_cpu_ops_wa DENVER_MIDR_PN3
+denver_cpu_ops_wa DENVER_MIDR_PN4
+denver_cpu_ops_wa DENVER_MIDR_PN5
+denver_cpu_ops_wa DENVER_MIDR_PN6
+denver_cpu_ops_wa DENVER_MIDR_PN7
+denver_cpu_ops_wa DENVER_MIDR_PN8
+denver_cpu_ops_wa DENVER_MIDR_PN9
diff --git a/lib/cpus/aarch64/dsu_helpers.S b/lib/cpus/aarch64/dsu_helpers.S
index 100ffaa97..da052d5c9 100644
--- a/lib/cpus/aarch64/dsu_helpers.S
+++ b/lib/cpus/aarch64/dsu_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -72,18 +72,36 @@ endfunc errata_dsu_798953_wa
* This function is called from both assembly and C environment. So it
* follows AAPCS.
*
- * Clobbers: x0-x3
+ * Clobbers: x0-x15
* -----------------------------------------------------------------------
*/
.globl check_errata_dsu_936184
.globl errata_dsu_936184_wa
+ .weak is_scu_present_in_dsu
+
+ /* --------------------------------------------------------------------
+ * Default behaviour assumes the SCU is always present with the DSU.
+ * CPUs can override this definition if required.
+ *
+ * Can clobber only: x0-x14
+ * --------------------------------------------------------------------
+ */
+func is_scu_present_in_dsu
+ mov x0, #1
+ ret
+endfunc is_scu_present_in_dsu
func check_errata_dsu_936184
- mov x2, #ERRATA_NOT_APPLIES
- mov x3, #ERRATA_APPLIES
+ mov x15, x30
+ bl is_scu_present_in_dsu
+ cmp x0, xzr
+ /* Default error status */
+ mov x0, #ERRATA_NOT_APPLIES
+
+ /* If SCU is not present, return without applying patch */
+ b.eq 1f
/* Erratum applies only if DSU has the ACP interface */
- mov x0, x2
mrs x1, CLUSTERCFR_EL1
ubfx x1, x1, #CLUSTERCFR_ACP_SHIFT, #1
cbz x1, 1f
@@ -92,13 +110,13 @@ func check_errata_dsu_936184
mrs x1, CLUSTERIDR_EL1
/* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
- ubfx x0, x1, #CLUSTERIDR_REV_SHIFT,\
+ ubfx x2, x1, #CLUSTERIDR_REV_SHIFT,\
#(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
- mov x1, #(0x2 << CLUSTERIDR_VAR_SHIFT)
- cmp x0, x1
- csel x0, x2, x3, hs
+ cmp x2, #(0x2 << CLUSTERIDR_VAR_SHIFT)
+ b.hs 1f
+ mov x0, #ERRATA_APPLIES
1:
- ret
+ ret x15
endfunc check_errata_dsu_936184
/* --------------------------------------------------
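
The rework above turns check_errata_dsu_936184 into, roughly, the following C logic. Sketch only: is_scu_present_in_dsu is the new weak hook added by this patch, while dsu_has_acp_interface() and dsu_variant_rev() are invented stand-ins for the CLUSTERCFR_EL1 and CLUSTERIDR_EL1 reads.

	/*
	 * Sketch: erratum 936184 applies only when an SCU is present, the
	 * DSU has the ACP interface, and the DSU is older than variant 2.
	 */
	static int dsu_936184_applies(void)
	{
		if (is_scu_present_in_dsu() == 0)
			return ERRATA_NOT_APPLIES;
		if (!dsu_has_acp_interface())	/* CLUSTERCFR_EL1 ACP bit */
			return ERRATA_NOT_APPLIES;
		if (dsu_variant_rev() >= (0x2 << CLUSTERIDR_VAR_SHIFT))
			return ERRATA_NOT_APPLIES; /* fixed in v2r0 and later */
		return ERRATA_APPLIES;
	}
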
diff --git a/lib/cpus/aarch64/generic.S b/lib/cpus/aarch64/generic.S
new file mode 100644
index 000000000..ef1f048a1
--- /dev/null
+++ b/lib/cpus/aarch64/generic.S
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <generic.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+ /* ---------------------------------------------
+ * Disable L1 data cache and unified L2 cache
+ * ---------------------------------------------
+ */
+func generic_disable_dcache
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+ ret
+endfunc generic_disable_dcache
+
+func generic_core_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl generic_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ ret x18
+endfunc generic_core_pwr_dwn
+
+func generic_cluster_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl generic_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level2
+
+ ret x18
+
+endfunc generic_cluster_pwr_dwn
+
+/* ---------------------------------------------
+ * Unimplemented functions.
+ * ---------------------------------------------
+ */
+.equ generic_errata_report, 0
+.equ generic_cpu_reg_dump, 0
+.equ generic_reset_func, 0
+
+declare_cpu_ops generic, AARCH64_GENERIC_MIDR, \
+ generic_reset_func, \
+ generic_core_pwr_dwn, \
+ generic_cluster_pwr_dwn
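Both hooks follow the canonical TF-A power-down sequence: disable the data cache, then clean and invalidate by set/way. For reference, a sketch of the core-level sequence rendered in C with the existing arch helpers (illustrative only; the real hooks must stay in assembly):

```c
#include <arch.h>
#include <arch_helpers.h>

/* Illustrative C rendering of generic_core_pwr_dwn's cache sequence */
static void generic_core_cache_pwr_dwn_sketch(void)
{
	/* Turn off the L1 data cache and unified L2 cache */
	write_sctlr_el3(read_sctlr_el3() & ~SCTLR_C_BIT);
	isb();

	/* Clean and invalidate the L1 cache by set/way */
	dcsw_op_level1(DCCISW);
}
```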
diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S
index d537ed6a8..9c97cf60a 100644
--- a/lib/cpus/aarch64/neoverse_n1.S
+++ b/lib/cpus/aarch64/neoverse_n1.S
@@ -1,15 +1,15 @@
/*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
-#include <neoverse_n1.h>
#include <cpuamu.h>
#include <cpu_macros.S>
#include <context.h>
+#include <neoverse_n1.h>
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
@@ -375,6 +375,95 @@ func check_errata_1542419
b cpu_rev_var_range
endfunc check_errata_1542419
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Errata #1868343.
+ * This applies to revision <= r4p0 of Neoverse N1.
+ * This workaround is the same as the workaround for
+ * errata 1262606 and 1275112 but applies to a wider
+ * revision range.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1868343_wa
+ /*
+ * Compare x0 against revision r4p0
+ */
+ mov x17, x30
+ bl check_errata_1868343
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N1_CPUACTLR_EL1
+ orr x1, x1, NEOVERSE_N1_CPUACTLR_EL1_BIT_13
+ msr NEOVERSE_N1_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_n1_1868343_wa
+
+func check_errata_1868343
+ /* Applies to everything <= r4p0 */
+ mov x1, #0x40
+ b cpu_rev_var_ls
+endfunc check_errata_1868343
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Errata #1946160.
+ * This applies to revisions r3p0, r3p1, r4p0, and
+ * r4p1 of Neoverse N1. It also exists in r0p0, r1p0,
+ * and r2p0 but there is no fix in these revisions.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1946160_wa
+ /*
+ * Compare x0 against r3p0 - r4p1
+ */
+ mov x17, x30
+ bl check_errata_1946160
+ cbz x0, 1f
+
+ mov x0, #3
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3900002
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #4
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #5
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ isb
+1:
+ ret x17
+endfunc errata_n1_1946160_wa
+
+func check_errata_1946160
+ /* Applies to r3p0 - r4p1. */
+ mov x1, #0x30
+ mov x2, #0x41
+ b cpu_rev_var_range
+endfunc check_errata_1946160
+
func neoverse_n1_reset_func
mov x19, x30
@@ -449,6 +538,16 @@ func neoverse_n1_reset_func
bl errata_n1_1542419_wa
#endif
+#if ERRATA_N1_1868343
+ mov x0, x18
+ bl errata_n1_1868343_wa
+#endif
+
+#if ERRATA_N1_1946160
+ mov x0, x18
+ bl errata_n1_1946160_wa
+#endif
+
#if ENABLE_AMU
/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
mrs x0, actlr_el3
@@ -465,7 +564,7 @@ func neoverse_n1_reset_func
msr CPUAMCNTENSET_EL0, x0
#endif
-#if NEOVERSE_N1_EXTERNAL_LLC
+#if NEOVERSE_Nx_EXTERNAL_LLC
	/* Some systems may have an external LLC; the core needs to be made aware */
mrs x0, NEOVERSE_N1_CPUECTLR_EL1
orr x0, x0, NEOVERSE_N1_CPUECTLR_EL1_EXTLLC_BIT
@@ -522,6 +621,8 @@ func neoverse_n1_errata_report
report_errata ERRATA_N1_1275112, neoverse_n1, 1275112
report_errata ERRATA_N1_1315703, neoverse_n1, 1315703
report_errata ERRATA_N1_1542419, neoverse_n1, 1542419
+ report_errata ERRATA_N1_1868343, neoverse_n1, 1868343
+ report_errata ERRATA_N1_1946160, neoverse_n1, 1946160
report_errata ERRATA_DSU_936184, neoverse_n1, dsu_936184
ldp x8, x30, [sp], #16
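The bounds passed to cpu_rev_var_ls and cpu_rev_var_range use the packed encoding described in the comments above: variant in bits [7:4], revision in bits [3:0]. A small sketch of that encoding (the macro and constant names are illustrative):

```c
/* Illustrative packing of the rev-var value compared by the checkers */
#define CPU_VAR_REV(var, rev)	(((var) << 4) | (rev))

/* Erratum 1946160 bounds: r3p0 (0x30) through r4p1 (0x41) */
#define N1_1946160_LO_REV	CPU_VAR_REV(3, 0)
#define N1_1946160_HI_REV	CPU_VAR_REV(4, 1)
```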
diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S
new file mode 100644
index 000000000..8d646cba5
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_n2.S
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpu_macros.S>
+#include <neoverse_n2.h>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Neoverse N2 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Neoverse-N2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Neoverse N2.
+ * -------------------------------------------------
+ */
+func neoverse_n2_reset_func
+ /* Check if the PE implements SSBS */
+ mrs x0, id_aa64pfr1_el1
+ tst x0, #(ID_AA64PFR1_EL1_SSBS_MASK << ID_AA64PFR1_EL1_SSBS_SHIFT)
+ b.eq 1f
+
+ /* Disable speculative loads */
+ msr SSBS, xzr
+1:
+ /* Force all cacheable atomic instructions to be near */
+ mrs x0, NEOVERSE_N2_CPUACTLR2_EL1
+ orr x0, x0, #NEOVERSE_N2_CPUACTLR2_EL1_BIT_2
+ msr NEOVERSE_N2_CPUACTLR2_EL1, x0
+
+#if ENABLE_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ mrs x0, cptr_el3
+ orr x0, x0, #TAM_BIT
+ msr cptr_el3, x0
+
+ /* Make sure accesses from EL0/EL1 are not trapped to EL2 */
+ mrs x0, cptr_el2
+ orr x0, x0, #TAM_BIT
+ msr cptr_el2, x0
+
+ /* No need to enable the counters as this would be done at el3 exit */
+#endif
+
+#if NEOVERSE_Nx_EXTERNAL_LLC
+	/* Some systems may have an external LLC; the core needs to be made aware */
+ mrs x0, NEOVERSE_N2_CPUECTLR_EL1
+ orr x0, x0, NEOVERSE_N2_CPUECTLR_EL1_EXTLLC_BIT
+ msr NEOVERSE_N2_CPUECTLR_EL1, x0
+#endif
+
+ isb
+ ret
+endfunc neoverse_n2_reset_func
+
+func neoverse_n2_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * No need to do cache maintenance here.
+ * ---------------------------------------------
+ */
+ mrs x0, NEOVERSE_N2_CPUPWRCTLR_EL1
+ orr x0, x0, #NEOVERSE_N2_CORE_PWRDN_EN_BIT
+ msr NEOVERSE_N2_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc neoverse_n2_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Neoverse N2 cores. Must follow AAPCS.
+ */
+func neoverse_n2_errata_report
+ /* No errata reported for Neoverse N2 cores */
+ ret
+endfunc neoverse_n2_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides Neoverse N2 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ASCII and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.neoverse_n2_regs, "aS"
+neoverse_n2_regs: /* The ASCII list of register names to be reported */
+ .asciz "cpupwrctlr_el1", ""
+
+func neoverse_n2_cpu_reg_dump
+ adr x6, neoverse_n2_regs
+ mrs x8, NEOVERSE_N2_CPUPWRCTLR_EL1
+ ret
+endfunc neoverse_n2_cpu_reg_dump
+
+declare_cpu_ops neoverse_n2, NEOVERSE_N2_MIDR, \
+ neoverse_n2_reset_func, \
+ neoverse_n2_core_pwr_dwn
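The reset function opens with an SSBS probe of ID_AA64PFR1_EL1. The same check expressed in C with the existing accessor, for clarity (a sketch; the reset path itself must remain in assembly):

```c
#include <stdbool.h>

#include <arch.h>
#include <arch_helpers.h>

/* Illustrative C version of the SSBS presence check */
static bool pe_implements_ssbs(void)
{
	u_register_t pfr1 = read_id_aa64pfr1_el1();

	return ((pfr1 >> ID_AA64PFR1_EL1_SSBS_SHIFT) &
		ID_AA64PFR1_EL1_SSBS_MASK) != 0U;
}
```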
diff --git a/lib/cpus/aarch64/neoverse_n_common.S b/lib/cpus/aarch64/neoverse_n_common.S
new file mode 100644
index 000000000..b816342ba
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_n_common.S
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <neoverse_n_common.h>
+
+ .global is_scu_present_in_dsu
+
+/*
+ * Check if the SCU L3 Unit is present on the DSU
+ * 1-> SCU present
+ * 0-> SCU not present
+ *
+ * This function has a weak default implementation in dsu_helpers.S and
+ * must be overridden for Neoverse Nx cores.
+ */
+
+func is_scu_present_in_dsu
+ mrs x0, CPUCFR_EL1
+ ubfx x0, x0, #SCU_SHIFT, #1
+ eor x0, x0, #1
+ ret
+endfunc is_scu_present_in_dsu
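The eor with #1 inverts the probed bit: CPUCFR_EL1.SCU reads as 0 when an SCU is present, while the function's contract is to return 1 in that case. The decode, sketched in C over a raw register value (illustrative; SCU_SHIFT comes from neoverse_n_common.h):

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative decode: CPUCFR_EL1.SCU == 0 means an SCU is present */
static bool scu_present(uint64_t cpucfr_el1)
{
	return ((cpucfr_el1 >> SCU_SHIFT) & 1ULL) == 0ULL;
}
```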
diff --git a/lib/cpus/aarch64/neoverse_zeus.S b/lib/cpus/aarch64/neoverse_v1.S
index 44882b459..733629425 100644
--- a/lib/cpus/aarch64/neoverse_zeus.S
+++ b/lib/cpus/aarch64/neoverse_v1.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2020, ARM Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,46 +7,46 @@
#include <arch.h>
#include <asm_macros.S>
#include <common/bl_common.h>
-#include <neoverse_zeus.h>
+#include <neoverse_v1.h>
#include <cpu_macros.S>
#include <plat_macros.S>
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
-#error "Neoverse Zeus must be compiled with HW_ASSISTED_COHERENCY enabled"
+#error "Neoverse V1 must be compiled with HW_ASSISTED_COHERENCY enabled"
#endif
/* 64-bit only core */
#if CTX_INCLUDE_AARCH32_REGS == 1
-#error "Neoverse-Zeus supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#error "Neoverse-V1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
/* ---------------------------------------------
* HW will do the cache maintenance while powering down
* ---------------------------------------------
*/
-func neoverse_zeus_core_pwr_dwn
+func neoverse_v1_core_pwr_dwn
/* ---------------------------------------------
* Enable CPU power down bit in power control register
* ---------------------------------------------
*/
- mrs x0, NEOVERSE_ZEUS_CPUPWRCTLR_EL1
- orr x0, x0, #NEOVERSE_ZEUS_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
- msr NEOVERSE_ZEUS_CPUPWRCTLR_EL1, x0
+ mrs x0, NEOVERSE_V1_CPUPWRCTLR_EL1
+ orr x0, x0, #NEOVERSE_V1_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr NEOVERSE_V1_CPUPWRCTLR_EL1, x0
isb
ret
-endfunc neoverse_zeus_core_pwr_dwn
+endfunc neoverse_v1_core_pwr_dwn
/*
- * Errata printing function for Neoverse Zeus. Must follow AAPCS.
+ * Errata printing function for Neoverse V1. Must follow AAPCS.
*/
#if REPORT_ERRATA
-func neoverse_zeus_errata_report
+func neoverse_v1_errata_report
ret
-endfunc neoverse_zeus_errata_report
+endfunc neoverse_v1_errata_report
#endif
-func neoverse_zeus_reset_func
+func neoverse_v1_reset_func
mov x19, x30
/* Disable speculative loads */
@@ -54,10 +54,10 @@ func neoverse_zeus_reset_func
isb
ret x19
-endfunc neoverse_zeus_reset_func
+endfunc neoverse_v1_reset_func
/* ---------------------------------------------
- * This function provides Neoverse-Zeus specific
+ * This function provides Neoverse-V1 specific
* register information for crash reporting.
* It needs to return with x6 pointing to
* a list of register names in ascii and
@@ -65,16 +65,16 @@ endfunc neoverse_zeus_reset_func
* reported.
* ---------------------------------------------
*/
-.section .rodata.neoverse_zeus_regs, "aS"
-neoverse_zeus_regs: /* The ascii list of register names to be reported */
+.section .rodata.neoverse_v1_regs, "aS"
+neoverse_v1_regs: /* The ascii list of register names to be reported */
.asciz "cpuectlr_el1", ""
-func neoverse_zeus_cpu_reg_dump
- adr x6, neoverse_zeus_regs
- mrs x8, NEOVERSE_ZEUS_CPUECTLR_EL1
+func neoverse_v1_cpu_reg_dump
+ adr x6, neoverse_v1_regs
+ mrs x8, NEOVERSE_V1_CPUECTLR_EL1
ret
-endfunc neoverse_zeus_cpu_reg_dump
+endfunc neoverse_v1_cpu_reg_dump
-declare_cpu_ops neoverse_zeus, NEOVERSE_ZEUS_MIDR, \
- neoverse_zeus_reset_func, \
- neoverse_zeus_core_pwr_dwn
+declare_cpu_ops neoverse_v1, NEOVERSE_V1_MIDR, \
+ neoverse_v1_reset_func, \
+ neoverse_v1_core_pwr_dwn
diff --git a/lib/cpus/aarch64/rainier.S b/lib/cpus/aarch64/rainier.S
new file mode 100644
index 000000000..3017a5012
--- /dev/null
+++ b/lib/cpus/aarch64/rainier.S
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+#include <cpu_macros.S>
+#include <cpuamu.h>
+#include <rainier.h>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Rainier CPU must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Rainier CPU supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+/* --------------------------------------------------
+ * Disable speculative loads if Rainier supports
+ * SSBS.
+ *
+ * Shall clobber: x0.
+ * --------------------------------------------------
+ */
+func rainier_disable_speculative_loads
+ /* Check if the PE implements SSBS */
+ mrs x0, id_aa64pfr1_el1
+ tst x0, #(ID_AA64PFR1_EL1_SSBS_MASK << ID_AA64PFR1_EL1_SSBS_SHIFT)
+ b.eq 1f
+
+ /* Disable speculative loads */
+ msr SSBS, xzr
+
+1:
+ ret
+endfunc rainier_disable_speculative_loads
+
+func rainier_reset_func
+ mov x19, x30
+
+ bl rainier_disable_speculative_loads
+
+ /* Forces all cacheable atomic instructions to be near */
+ mrs x0, RAINIER_CPUACTLR2_EL1
+ orr x0, x0, #RAINIER_CPUACTLR2_EL1_BIT_2
+ msr RAINIER_CPUACTLR2_EL1, x0
+ isb
+
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ENABLE_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ mrs x0, actlr_el3
+ orr x0, x0, #RAINIER_ACTLR_AMEN_BIT
+ msr actlr_el3, x0
+
+ /* Make sure accesses from EL0/EL1 are not trapped to EL2 */
+ mrs x0, actlr_el2
+ orr x0, x0, #RAINIER_ACTLR_AMEN_BIT
+ msr actlr_el2, x0
+
+ /* Enable group0 counters */
+ mov x0, #RAINIER_AMU_GROUP0_MASK
+ msr CPUAMCNTENSET_EL0, x0
+#endif
+
+ isb
+ ret x19
+endfunc rainier_reset_func
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func rainier_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ mrs x0, RAINIER_CPUPWRCTLR_EL1
+ orr x0, x0, #RAINIER_CORE_PWRDN_EN_MASK
+ msr RAINIER_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc rainier_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Rainier. Must follow AAPCS.
+ */
+func rainier_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc rainier_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides Rainier specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.rainier_regs, "aS"
+rainier_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func rainier_cpu_reg_dump
+ adr x6, rainier_regs
+ mrs x8, RAINIER_CPUECTLR_EL1
+ ret
+endfunc rainier_cpu_reg_dump
+
+declare_cpu_ops rainier, RAINIER_MIDR, \
+ rainier_reset_func, \
+ rainier_core_pwr_dwn
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index e3bfc2f2e..64a4b4d47 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -1,5 +1,6 @@
#
-# Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2014-2021, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -16,13 +17,21 @@ A53_DISABLE_NON_TEMPORAL_HINT ?=1
# It is enabled by default.
A57_DISABLE_NON_TEMPORAL_HINT ?=1
+# Flag to enable higher performance non-cacheable load forwarding.
+# It is disabled by default.
+A57_ENABLE_NONCACHEABLE_LOAD_FWD ?= 0
+
WORKAROUND_CVE_2017_5715 ?=1
WORKAROUND_CVE_2018_3639 ?=1
DYNAMIC_WORKAROUND_CVE_2018_3639 ?=0
-# Flag to indicate internal or external Last level cache
+# Flags to indicate internal or external Last level cache
# By default internal
-NEOVERSE_N1_EXTERNAL_LLC ?=0
+NEOVERSE_Nx_EXTERNAL_LLC ?=0
+
+# Process A57_ENABLE_NONCACHEABLE_LOAD_FWD flag
+$(eval $(call assert_boolean,A57_ENABLE_NONCACHEABLE_LOAD_FWD))
+$(eval $(call add_define,A57_ENABLE_NONCACHEABLE_LOAD_FWD))
# Process SKIP_A57_L1_FLUSH_PWR_DWN flag
$(eval $(call assert_boolean,SKIP_A57_L1_FLUSH_PWR_DWN))
@@ -47,8 +56,8 @@ $(eval $(call add_define,WORKAROUND_CVE_2018_3639))
$(eval $(call assert_boolean,DYNAMIC_WORKAROUND_CVE_2018_3639))
$(eval $(call add_define,DYNAMIC_WORKAROUND_CVE_2018_3639))
-$(eval $(call assert_boolean,NEOVERSE_N1_EXTERNAL_LLC))
-$(eval $(call add_define,NEOVERSE_N1_EXTERNAL_LLC))
+$(eval $(call assert_boolean,NEOVERSE_Nx_EXTERNAL_LLC))
+$(eval $(call add_define,NEOVERSE_Nx_EXTERNAL_LLC))
ifneq (${DYNAMIC_WORKAROUND_CVE_2018_3639},0)
ifeq (${WORKAROUND_CVE_2018_3639},0)
@@ -121,6 +130,10 @@ ERRATA_A53_843419 ?=0
# of by the rich OS.
ERRATA_A53_855873 ?=0
+# Flag to apply erratum 1530924 workaround during reset. This erratum applies
+# to all revisions of the Cortex A53 cpu.
+ERRATA_A53_1530924 ?=0
+
# Flag to apply erratum 768277 workaround during reset. This erratum applies
# only to revision r0p0 of the Cortex A55 cpu.
ERRATA_A55_768277 ?=0
@@ -145,6 +158,10 @@ ERRATA_A55_903758 ?=0
# only to revision <= r1p0 of the Cortex A55 cpu.
ERRATA_A55_1221012 ?=0
+# Flag to apply erratum 1530923 workaround during reset. This erratum applies
+# to all revisions of the Cortex A55 cpu.
+ERRATA_A55_1530923 ?=0
+
# Flag to apply erratum 806969 workaround during reset. This erratum applies
# only to revision r0p0 of the Cortex A57 cpu.
ERRATA_A57_806969 ?=0
@@ -189,10 +206,18 @@ ERRATA_A57_833471 ?=0
# only to revision <= r1p3 of the Cortex A57 cpu.
ERRATA_A57_859972 ?=0
+# Flag to apply erratum 1319537 workaround during reset. This erratum applies
+# to all revisions of the Cortex A57 cpu.
+ERRATA_A57_1319537 ?=0
+
# Flag to apply erratum 855971 workaround during reset. This erratum applies
# only to revision <= r0p3 of the Cortex A72 cpu.
ERRATA_A72_859971 ?=0
+# Flag to apply erratum 1319367 workaround during reset. This erratum applies
+# to all revisions of the Cortex A72 cpu.
+ERRATA_A72_1319367 ?=0
+
# Flag to apply erratum 852427 workaround during reset. This erratum applies
# only to revision r0p0 of the Cortex A73 cpu.
ERRATA_A73_852427 ?=0
@@ -241,9 +266,42 @@ ERRATA_A76_1275112 ?=0
# only to revision <= r3p0 of the Cortex A76 cpu.
ERRATA_A76_1286807 ?=0
+# Flag to apply erratum 1791580 workaround during reset. This erratum applies
+# only to revision <= r4p0 of the Cortex A76 cpu.
+ERRATA_A76_1791580 ?=0
+
+# Flag to apply erratum 1165522 workaround during reset. This erratum applies
+# to all revisions of the Cortex A76 cpu.
+ERRATA_A76_1165522 ?=0
+
+# Flag to apply erratum 1868343 workaround during reset. This erratum applies
+# only to revision <= r4p0 of the Cortex A76 cpu.
+ERRATA_A76_1868343 ?=0
+
+# Flag to apply erratum 1946160 workaround during reset. This erratum applies
+# only to revisions r3p0 - r4p1 of the Cortex A76 cpu.
+ERRATA_A76_1946160 ?=0
+
+# Flag to apply erratum 1508412 workaround during reset. This erratum applies
+# only to revision <= r1p0 of the Cortex A77 cpu.
+ERRATA_A77_1508412 ?=0
+
+# Flag to apply erratum 1925769 workaround during reset. This erratum applies
+# only to revision <= r1p1 of the Cortex A77 cpu.
+ERRATA_A77_1925769 ?=0
+
# Flag to apply erratum 1688305 workaround during reset. This erratum applies
-# to revisions r0p0 - r1p0 of the Hercules cpu.
-ERRATA_HERCULES_1688305 ?=0
+# to revisions r0p0 - r1p0 of the A78 cpu.
+ERRATA_A78_1688305 ?=0
+
+# Flag to apply erratum 1941498 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r1p1 of the A78 cpu.
+ERRATA_A78_1941498 ?=0
+
+# Flag to apply erratum 1951500 workaround during reset. This erratum applies
+# to revisions r1p0 and r1p1 of the A78 cpu. The issue is present in r0p0 as
+# well but there is no workaround for that revision.
+ERRATA_A78_1951500 ?=0
# Flag to apply T32 CLREX workaround during reset. This erratum applies
# only to r0p0 and r1p0 of the Neoverse N1 cpu.
@@ -293,6 +351,15 @@ ERRATA_N1_1315703 ?=0
# to revisions r3p0 - r4p0 of the Neoverse N1 cpu.
ERRATA_N1_1542419 ?=0
+# Flag to apply erratum 1868343 workaround during reset. This erratum applies
+# to revision <= r4p0 of the Neoverse N1 cpu.
+ERRATA_N1_1868343 ?=0
+
+# Flag to apply erratum 1946160 workaround during reset. This erratum applies
+# to revisions r3p0, r3p1, r4p0, and r4p1 of the Neoverse N1 cpu. The issue
+# exists in revisions r0p0, r1p0, and r2p0 as well but there is no workaround.
+ERRATA_N1_1946160 ?=0
+
# Flag to apply DSU erratum 798953. This erratum applies to DSUs revision r0p0.
# Applying the workaround results in higher DSU power consumption on idle.
ERRATA_DSU_798953 ?=0
@@ -358,6 +425,10 @@ $(eval $(call add_define,ERRATA_A53_843419))
$(eval $(call assert_boolean,ERRATA_A53_855873))
$(eval $(call add_define,ERRATA_A53_855873))
+# Process ERRATA_A53_1530924 flag
+$(eval $(call assert_boolean,ERRATA_A53_1530924))
+$(eval $(call add_define,ERRATA_A53_1530924))
+
# Process ERRATA_A55_768277 flag
$(eval $(call assert_boolean,ERRATA_A55_768277))
$(eval $(call add_define,ERRATA_A55_768277))
@@ -382,6 +453,10 @@ $(eval $(call add_define,ERRATA_A55_903758))
$(eval $(call assert_boolean,ERRATA_A55_1221012))
$(eval $(call add_define,ERRATA_A55_1221012))
+# Process ERRATA_A55_1530923 flag
+$(eval $(call assert_boolean,ERRATA_A55_1530923))
+$(eval $(call add_define,ERRATA_A55_1530923))
+
# Process ERRATA_A57_806969 flag
$(eval $(call assert_boolean,ERRATA_A57_806969))
$(eval $(call add_define,ERRATA_A57_806969))
@@ -426,10 +501,18 @@ $(eval $(call add_define,ERRATA_A57_833471))
$(eval $(call assert_boolean,ERRATA_A57_859972))
$(eval $(call add_define,ERRATA_A57_859972))
+# Process ERRATA_A57_1319537 flag
+$(eval $(call assert_boolean,ERRATA_A57_1319537))
+$(eval $(call add_define,ERRATA_A57_1319537))
+
# Process ERRATA_A72_859971 flag
$(eval $(call assert_boolean,ERRATA_A72_859971))
$(eval $(call add_define,ERRATA_A72_859971))
+# Process ERRATA_A72_1319367 flag
+$(eval $(call assert_boolean,ERRATA_A72_1319367))
+$(eval $(call add_define,ERRATA_A72_1319367))
+
# Process ERRATA_A73_852427 flag
$(eval $(call assert_boolean,ERRATA_A73_852427))
$(eval $(call add_define,ERRATA_A73_852427))
@@ -478,9 +561,41 @@ $(eval $(call add_define,ERRATA_A76_1275112))
$(eval $(call assert_boolean,ERRATA_A76_1286807))
$(eval $(call add_define,ERRATA_A76_1286807))
-# Process ERRATA_HERCULES_1688305 flag
-$(eval $(call assert_boolean,ERRATA_HERCULES_1688305))
-$(eval $(call add_define,ERRATA_HERCULES_1688305))
+# Process ERRATA_A76_1791580 flag
+$(eval $(call assert_boolean,ERRATA_A76_1791580))
+$(eval $(call add_define,ERRATA_A76_1791580))
+
+# Process ERRATA_A76_1165522 flag
+$(eval $(call assert_boolean,ERRATA_A76_1165522))
+$(eval $(call add_define,ERRATA_A76_1165522))
+
+# Process ERRATA_A76_1868343 flag
+$(eval $(call assert_boolean,ERRATA_A76_1868343))
+$(eval $(call add_define,ERRATA_A76_1868343))
+
+# Process ERRATA_A76_1946160 flag
+$(eval $(call assert_boolean,ERRATA_A76_1946160))
+$(eval $(call add_define,ERRATA_A76_1946160))
+
+# Process ERRATA_A77_1508412 flag
+$(eval $(call assert_boolean,ERRATA_A77_1508412))
+$(eval $(call add_define,ERRATA_A77_1508412))
+
+# Process ERRATA_A77_1925769 flag
+$(eval $(call assert_boolean,ERRATA_A77_1925769))
+$(eval $(call add_define,ERRATA_A77_1925769))
+
+# Process ERRATA_A78_1688305 flag
+$(eval $(call assert_boolean,ERRATA_A78_1688305))
+$(eval $(call add_define,ERRATA_A78_1688305))
+
+# Process ERRATA_A78_1941498 flag
+$(eval $(call assert_boolean,ERRATA_A78_1941498))
+$(eval $(call add_define,ERRATA_A78_1941498))
+
+# Process ERRATA_A78_1951500 flag
+$(eval $(call assert_boolean,ERRATA_A78_1951500))
+$(eval $(call add_define,ERRATA_A78_1951500))
# Process ERRATA_N1_1043202 flag
$(eval $(call assert_boolean,ERRATA_N1_1043202))
@@ -530,6 +645,14 @@ $(eval $(call add_define,ERRATA_N1_1315703))
$(eval $(call assert_boolean,ERRATA_N1_1542419))
$(eval $(call add_define,ERRATA_N1_1542419))
+# Process ERRATA_N1_1868343 flag
+$(eval $(call assert_boolean,ERRATA_N1_1868343))
+$(eval $(call add_define,ERRATA_N1_1868343))
+
+# Process ERRATA_N1_1946160 flag
+$(eval $(call assert_boolean,ERRATA_N1_1946160))
+$(eval $(call add_define,ERRATA_N1_1946160))
+
# Process ERRATA_DSU_798953 flag
$(eval $(call assert_boolean,ERRATA_DSU_798953))
$(eval $(call add_define,ERRATA_DSU_798953))
@@ -547,3 +670,10 @@ ifneq (${ERRATA_A53_835769},0)
TF_CFLAGS_aarch64 += -mfix-cortex-a53-835769
TF_LDFLAGS_aarch64 += --fix-cortex-a53-835769
endif
+
+ifneq ($(filter 1,${ERRATA_A53_1530924} ${ERRATA_A55_1530923} \
+ ${ERRATA_A57_1319537} ${ERRATA_A72_1319367} ${ERRATA_A76_1165522}),)
+ERRATA_SPECULATIVE_AT := 1
+else
+ERRATA_SPECULATIVE_AT := 0
+endif
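Each flag is validated with assert_boolean and exported as a C define via add_define, so building with, say, ERRATA_A76_1165522=1 now also sets ERRATA_SPECULATIVE_AT=1 automatically. A sketch of how guarded C code consumes the derived define (the function is illustrative):

```c
#include <stdbool.h>

/* Illustrative consumer of the derived ERRATA_SPECULATIVE_AT define */
static bool speculative_at_errata_enabled(void)
{
#if ERRATA_SPECULATIVE_AT
	return true;
#else
	return false;
#endif
}
```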
diff --git a/lib/cpus/errata_report.c b/lib/cpus/errata_report.c
index f43b2176d..5d1e3c5cc 100644
--- a/lib/cpus/errata_report.c
+++ b/lib/cpus/errata_report.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -14,7 +14,6 @@
#include <lib/cpus/errata_report.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/spinlock.h>
-#include <lib/utils.h>
#ifdef IMAGE_BL1
# define BL_STRING "BL1"
diff --git a/lib/debugfs/dev.c b/lib/debugfs/dev.c
index 0361437b8..2fc1d4062 100644
--- a/lib/debugfs/dev.c
+++ b/lib/debugfs/dev.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -333,6 +333,10 @@ noent:
******************************************************************************/
chan_t *clone(chan_t *c, chan_t *nc)
{
+ if (c->index == NODEV) {
+ return NULL;
+ }
+
return devtab[c->index]->clone(c, nc);
}
diff --git a/lib/debugfs/devfip.c b/lib/debugfs/devfip.c
index 5581b219f..85e6403c7 100644
--- a/lib/debugfs/devfip.c
+++ b/lib/debugfs/devfip.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -73,8 +73,11 @@ static const struct uuidnames uuidnames[] = {
{"soc-fw.cfg", UUID_SOC_FW_CONFIG},
{"tos-fw.cfg", UUID_TOS_FW_CONFIG},
{"nt-fw.cfg", UUID_NT_FW_CONFIG},
+ {"fw.cfg", UUID_FW_CONFIG},
{"rot-k.crt", UUID_ROT_KEY_CERT},
- {"nt-k.crt", UUID_NON_TRUSTED_WORLD_KEY_CERT}
+ {"nt-k.crt", UUID_NON_TRUSTED_WORLD_KEY_CERT},
+ {"sip-sp.crt", UUID_SIP_SECURE_PARTITION_CONTENT_CERT},
+ {"plat-sp.crt", UUID_PLAT_SECURE_PARTITION_CONTENT_CERT}
};
/*******************************************************************************
@@ -103,10 +106,6 @@ static int get_entry(chan_t *c, struct fip_entry *entry)
return -1;
}
- if ((entry->size > LONG_MAX) || (entry->offset_address > LONG_MAX)) {
- return -1;
- }
-
if (entry->size == 0) {
return 0;
}
@@ -130,7 +129,10 @@ static int fipgen(chan_t *c, const dirtab_t *tab, int ntab, int n, dir_t *dir)
panic();
}
- clone(archives[c->dev].c, &nc);
+ if (clone(archives[c->dev].c, &nc) == NULL) {
+ panic();
+ }
+
fip = &archives[nc.dev];
off = STOC_HEADER;
@@ -203,7 +205,9 @@ static int fipread(chan_t *c, void *buf, int n)
panic();
}
- clone(fip->c, &cs);
+ if (clone(fip->c, &cs) == NULL) {
+ panic();
+ }
size = fip->size[c->qid];
if (c->offset >= size) {
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
index 73d1e354d..2443001b8 100644
--- a/lib/el3_runtime/aarch32/context_mgmt.c
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -17,8 +17,6 @@
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/extensions/amu.h>
#include <lib/utils.h>
-#include <plat/common/platform.h>
-#include <smccc_helpers.h>
/*******************************************************************************
* Context management library initialisation routine. This library is used by
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 9bd25bac9..75e214d9c 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,6 +8,12 @@
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
+#include <el3_common_macros.S>
+
+#if CTX_INCLUDE_EL2_REGS
+ .global el2_sysregs_context_save
+ .global el2_sysregs_context_restore
+#endif
.global el1_sysregs_context_save
.global el1_sysregs_context_restore
@@ -17,8 +23,416 @@
#endif
.global save_gp_pmcr_pauth_regs
.global restore_gp_pmcr_pauth_regs
+ .global save_and_update_ptw_el1_sys_regs
.global el3_exit
+#if CTX_INCLUDE_EL2_REGS
+
+/* -----------------------------------------------------
+ * The following function strictly follows the AArch64
+ * PCS to use x9-x17 (temporary caller-saved registers)
+ * to save EL2 system register context. It assumes that
+ * 'x0' is pointing to a 'el2_sys_regs' structure where
+ * the register context will be saved.
+ *
+ * The following registers are not added.
+ * AMEVCNTVOFF0<n>_EL2
+ * AMEVCNTVOFF1<n>_EL2
+ * ICH_AP0R<n>_EL2
+ * ICH_AP1R<n>_EL2
+ * ICH_LR<n>_EL2
+ * -----------------------------------------------------
+ */
+
+func el2_sysregs_context_save
+ mrs x9, actlr_el2
+ mrs x10, afsr0_el2
+ stp x9, x10, [x0, #CTX_ACTLR_EL2]
+
+ mrs x11, afsr1_el2
+ mrs x12, amair_el2
+ stp x11, x12, [x0, #CTX_AFSR1_EL2]
+
+ mrs x13, cnthctl_el2
+ mrs x14, cnthp_ctl_el2
+ stp x13, x14, [x0, #CTX_CNTHCTL_EL2]
+
+ mrs x15, cnthp_cval_el2
+ mrs x16, cnthp_tval_el2
+ stp x15, x16, [x0, #CTX_CNTHP_CVAL_EL2]
+
+ mrs x17, cntvoff_el2
+ mrs x9, cptr_el2
+ stp x17, x9, [x0, #CTX_CNTVOFF_EL2]
+
+ mrs x11, elr_el2
+#if CTX_INCLUDE_AARCH32_REGS
+ mrs x10, dbgvcr32_el2
+ stp x10, x11, [x0, #CTX_DBGVCR32_EL2]
+#else
+ str x11, [x0, #CTX_ELR_EL2]
+#endif
+
+ mrs x14, esr_el2
+ mrs x15, far_el2
+ stp x14, x15, [x0, #CTX_ESR_EL2]
+
+ mrs x16, hacr_el2
+ mrs x17, hcr_el2
+ stp x16, x17, [x0, #CTX_HACR_EL2]
+
+ mrs x9, hpfar_el2
+ mrs x10, hstr_el2
+ stp x9, x10, [x0, #CTX_HPFAR_EL2]
+
+ mrs x11, ICC_SRE_EL2
+ mrs x12, ICH_HCR_EL2
+ stp x11, x12, [x0, #CTX_ICC_SRE_EL2]
+
+ mrs x13, ICH_VMCR_EL2
+ mrs x14, mair_el2
+ stp x13, x14, [x0, #CTX_ICH_VMCR_EL2]
+
+ mrs x15, mdcr_el2
+#if ENABLE_SPE_FOR_LOWER_ELS
+ mrs x16, PMSCR_EL2
+ stp x15, x16, [x0, #CTX_MDCR_EL2]
+#else
+ str x15, [x0, #CTX_MDCR_EL2]
+#endif
+
+ mrs x17, sctlr_el2
+ mrs x9, spsr_el2
+ stp x17, x9, [x0, #CTX_SCTLR_EL2]
+
+ mrs x10, sp_el2
+ mrs x11, tcr_el2
+ stp x10, x11, [x0, #CTX_SP_EL2]
+
+ mrs x12, tpidr_el2
+ mrs x13, ttbr0_el2
+ stp x12, x13, [x0, #CTX_TPIDR_EL2]
+
+ mrs x14, vbar_el2
+ mrs x15, vmpidr_el2
+ stp x14, x15, [x0, #CTX_VBAR_EL2]
+
+ mrs x16, vpidr_el2
+ mrs x17, vtcr_el2
+ stp x16, x17, [x0, #CTX_VPIDR_EL2]
+
+ mrs x9, vttbr_el2
+ str x9, [x0, #CTX_VTTBR_EL2]
+
+#if CTX_INCLUDE_MTE_REGS
+ mrs x10, TFSR_EL2
+ str x10, [x0, #CTX_TFSR_EL2]
+#endif
+
+#if ENABLE_MPAM_FOR_LOWER_ELS
+ mrs x9, MPAM2_EL2
+ mrs x10, MPAMHCR_EL2
+ stp x9, x10, [x0, #CTX_MPAM2_EL2]
+
+ mrs x11, MPAMVPM0_EL2
+ mrs x12, MPAMVPM1_EL2
+ stp x11, x12, [x0, #CTX_MPAMVPM0_EL2]
+
+ mrs x13, MPAMVPM2_EL2
+ mrs x14, MPAMVPM3_EL2
+ stp x13, x14, [x0, #CTX_MPAMVPM2_EL2]
+
+ mrs x15, MPAMVPM4_EL2
+ mrs x16, MPAMVPM5_EL2
+ stp x15, x16, [x0, #CTX_MPAMVPM4_EL2]
+
+ mrs x17, MPAMVPM6_EL2
+ mrs x9, MPAMVPM7_EL2
+ stp x17, x9, [x0, #CTX_MPAMVPM6_EL2]
+
+ mrs x10, MPAMVPMV_EL2
+ str x10, [x0, #CTX_MPAMVPMV_EL2]
+#endif
+
+#if ARM_ARCH_AT_LEAST(8, 6)
+ mrs x11, HAFGRTR_EL2
+ mrs x12, HDFGRTR_EL2
+ stp x11, x12, [x0, #CTX_HAFGRTR_EL2]
+
+ mrs x13, HDFGWTR_EL2
+ mrs x14, HFGITR_EL2
+ stp x13, x14, [x0, #CTX_HDFGWTR_EL2]
+
+ mrs x15, HFGRTR_EL2
+ mrs x16, HFGWTR_EL2
+ stp x15, x16, [x0, #CTX_HFGRTR_EL2]
+
+ mrs x17, CNTPOFF_EL2
+ str x17, [x0, #CTX_CNTPOFF_EL2]
+#endif
+
+#if ARM_ARCH_AT_LEAST(8, 4)
+ mrs x9, cnthps_ctl_el2
+ mrs x10, cnthps_cval_el2
+ stp x9, x10, [x0, #CTX_CNTHPS_CTL_EL2]
+
+ mrs x11, cnthps_tval_el2
+ mrs x12, cnthvs_ctl_el2
+ stp x11, x12, [x0, #CTX_CNTHPS_TVAL_EL2]
+
+ mrs x13, cnthvs_cval_el2
+ mrs x14, cnthvs_tval_el2
+ stp x13, x14, [x0, #CTX_CNTHVS_CVAL_EL2]
+
+ mrs x15, cnthv_ctl_el2
+ mrs x16, cnthv_cval_el2
+ stp x15, x16, [x0, #CTX_CNTHV_CTL_EL2]
+
+ mrs x17, cnthv_tval_el2
+ mrs x9, contextidr_el2
+ stp x17, x9, [x0, #CTX_CNTHV_TVAL_EL2]
+
+#if CTX_INCLUDE_AARCH32_REGS
+ mrs x10, sder32_el2
+ str x10, [x0, #CTX_SDER32_EL2]
+#endif
+
+ mrs x11, ttbr1_el2
+ str x11, [x0, #CTX_TTBR1_EL2]
+
+ mrs x12, vdisr_el2
+ str x12, [x0, #CTX_VDISR_EL2]
+
+#if CTX_INCLUDE_NEVE_REGS
+ mrs x13, vncr_el2
+ str x13, [x0, #CTX_VNCR_EL2]
+#endif
+
+ mrs x14, vsesr_el2
+ str x14, [x0, #CTX_VSESR_EL2]
+
+ mrs x15, vstcr_el2
+ str x15, [x0, #CTX_VSTCR_EL2]
+
+ mrs x16, vsttbr_el2
+ str x16, [x0, #CTX_VSTTBR_EL2]
+
+ mrs x17, TRFCR_EL2
+ str x17, [x0, #CTX_TRFCR_EL2]
+#endif
+
+#if ARM_ARCH_AT_LEAST(8, 5)
+ mrs x9, scxtnum_el2
+ str x9, [x0, #CTX_SCXTNUM_EL2]
+#endif
+
+ ret
+endfunc el2_sysregs_context_save
+
+/* -----------------------------------------------------
+ * The following function strictly follows the AArch64
+ * PCS to use x9-x17 (temporary caller-saved registers)
+ * to restore EL2 system register context. It assumes
+ * that 'x0' is pointing to an 'el2_sys_regs' structure
+ * from which the register context will be restored.
+ *
+ * The following registers are not restored:
+ * AMEVCNTVOFF0<n>_EL2
+ * AMEVCNTVOFF1<n>_EL2
+ * ICH_AP0R<n>_EL2
+ * ICH_AP1R<n>_EL2
+ * ICH_LR<n>_EL2
+ * -----------------------------------------------------
+ */
+func el2_sysregs_context_restore
+
+ ldp x9, x10, [x0, #CTX_ACTLR_EL2]
+ msr actlr_el2, x9
+ msr afsr0_el2, x10
+
+ ldp x11, x12, [x0, #CTX_AFSR1_EL2]
+ msr afsr1_el2, x11
+ msr amair_el2, x12
+
+ ldp x13, x14, [x0, #CTX_CNTHCTL_EL2]
+ msr cnthctl_el2, x13
+ msr cnthp_ctl_el2, x14
+
+ ldp x15, x16, [x0, #CTX_CNTHP_CVAL_EL2]
+ msr cnthp_cval_el2, x15
+ msr cnthp_tval_el2, x16
+
+ ldp x17, x9, [x0, #CTX_CNTVOFF_EL2]
+ msr cntvoff_el2, x17
+ msr cptr_el2, x9
+
+#if CTX_INCLUDE_AARCH32_REGS
+ ldp x10, x11, [x0, #CTX_DBGVCR32_EL2]
+ msr dbgvcr32_el2, x10
+#else
+ ldr x11, [x0, #CTX_ELR_EL2]
+#endif
+ msr elr_el2, x11
+
+ ldp x14, x15, [x0, #CTX_ESR_EL2]
+ msr esr_el2, x14
+ msr far_el2, x15
+
+ ldp x16, x17, [x0, #CTX_HACR_EL2]
+ msr hacr_el2, x16
+ msr hcr_el2, x17
+
+ ldp x9, x10, [x0, #CTX_HPFAR_EL2]
+ msr hpfar_el2, x9
+ msr hstr_el2, x10
+
+ ldp x11, x12, [x0, #CTX_ICC_SRE_EL2]
+ msr ICC_SRE_EL2, x11
+ msr ICH_HCR_EL2, x12
+
+ ldp x13, x14, [x0, #CTX_ICH_VMCR_EL2]
+ msr ICH_VMCR_EL2, x13
+ msr mair_el2, x14
+
+#if ENABLE_SPE_FOR_LOWER_ELS
+ ldp x15, x16, [x0, #CTX_MDCR_EL2]
+ msr PMSCR_EL2, x16
+#else
+ ldr x15, [x0, #CTX_MDCR_EL2]
+#endif
+ msr mdcr_el2, x15
+
+ ldp x17, x9, [x0, #CTX_SCTLR_EL2]
+ msr sctlr_el2, x17
+ msr spsr_el2, x9
+
+ ldp x10, x11, [x0, #CTX_SP_EL2]
+ msr sp_el2, x10
+ msr tcr_el2, x11
+
+ ldp x12, x13, [x0, #CTX_TPIDR_EL2]
+ msr tpidr_el2, x12
+ msr ttbr0_el2, x13
+
+ ldp x13, x14, [x0, #CTX_VBAR_EL2]
+ msr vbar_el2, x13
+ msr vmpidr_el2, x14
+
+ ldp x15, x16, [x0, #CTX_VPIDR_EL2]
+ msr vpidr_el2, x15
+ msr vtcr_el2, x16
+
+ ldr x17, [x0, #CTX_VTTBR_EL2]
+ msr vttbr_el2, x17
+
+#if CTX_INCLUDE_MTE_REGS
+ ldr x9, [x0, #CTX_TFSR_EL2]
+ msr TFSR_EL2, x9
+#endif
+
+#if ENABLE_MPAM_FOR_LOWER_ELS
+ ldp x10, x11, [x0, #CTX_MPAM2_EL2]
+ msr MPAM2_EL2, x10
+ msr MPAMHCR_EL2, x11
+
+ ldp x12, x13, [x0, #CTX_MPAMVPM0_EL2]
+ msr MPAMVPM0_EL2, x12
+ msr MPAMVPM1_EL2, x13
+
+ ldp x14, x15, [x0, #CTX_MPAMVPM2_EL2]
+ msr MPAMVPM2_EL2, x14
+ msr MPAMVPM3_EL2, x15
+
+ ldp x16, x17, [x0, #CTX_MPAMVPM4_EL2]
+ msr MPAMVPM4_EL2, x16
+ msr MPAMVPM5_EL2, x17
+
+ ldp x9, x10, [x0, #CTX_MPAMVPM6_EL2]
+ msr MPAMVPM6_EL2, x9
+ msr MPAMVPM7_EL2, x10
+
+ ldr x11, [x0, #CTX_MPAMVPMV_EL2]
+ msr MPAMVPMV_EL2, x11
+#endif
+
+#if ARM_ARCH_AT_LEAST(8, 6)
+ ldp x12, x13, [x0, #CTX_HAFGRTR_EL2]
+ msr HAFGRTR_EL2, x12
+ msr HDFGRTR_EL2, x13
+
+ ldp x14, x15, [x0, #CTX_HDFGWTR_EL2]
+ msr HDFGWTR_EL2, x14
+ msr HFGITR_EL2, x15
+
+ ldp x16, x17, [x0, #CTX_HFGRTR_EL2]
+ msr HFGRTR_EL2, x16
+ msr HFGWTR_EL2, x17
+
+ ldr x9, [x0, #CTX_CNTPOFF_EL2]
+ msr CNTPOFF_EL2, x9
+#endif
+
+#if ARM_ARCH_AT_LEAST(8, 4)
+ ldp x10, x11, [x0, #CTX_CNTHPS_CTL_EL2]
+ msr cnthps_ctl_el2, x10
+ msr cnthps_cval_el2, x11
+
+ ldp x12, x13, [x0, #CTX_CNTHPS_TVAL_EL2]
+ msr cnthps_tval_el2, x12
+ msr cnthvs_ctl_el2, x13
+
+ ldp x14, x15, [x0, #CTX_CNTHVS_CVAL_EL2]
+ msr cnthvs_cval_el2, x14
+ msr cnthvs_tval_el2, x15
+
+ ldp x16, x17, [x0, #CTX_CNTHV_CTL_EL2]
+ msr cnthv_ctl_el2, x16
+ msr cnthv_cval_el2, x17
+
+ ldp x9, x10, [x0, #CTX_CNTHV_TVAL_EL2]
+ msr cnthv_tval_el2, x9
+ msr contextidr_el2, x10
+
+#if CTX_INCLUDE_AARCH32_REGS
+ ldr x11, [x0, #CTX_SDER32_EL2]
+ msr sder32_el2, x11
+#endif
+
+ ldr x12, [x0, #CTX_TTBR1_EL2]
+ msr ttbr1_el2, x12
+
+ ldr x13, [x0, #CTX_VDISR_EL2]
+ msr vdisr_el2, x13
+
+#if CTX_INCLUDE_NEVE_REGS
+ ldr x14, [x0, #CTX_VNCR_EL2]
+ msr vncr_el2, x14
+#endif
+
+ ldr x15, [x0, #CTX_VSESR_EL2]
+ msr vsesr_el2, x15
+
+ ldr x16, [x0, #CTX_VSTCR_EL2]
+ msr vstcr_el2, x16
+
+ ldr x17, [x0, #CTX_VSTTBR_EL2]
+ msr vsttbr_el2, x17
+
+ ldr x9, [x0, #CTX_TRFCR_EL2]
+ msr TRFCR_EL2, x9
+#endif
+
+#if ARM_ARCH_AT_LEAST(8, 5)
+ ldr x10, [x0, #CTX_SCXTNUM_EL2]
+ msr scxtnum_el2, x10
+#endif
+
+ ret
+endfunc el2_sysregs_context_restore
+
+#endif /* CTX_INCLUDE_EL2_REGS */
+
/* ------------------------------------------------------------------
* The following function strictly follows the AArch64 PCS to use
* x9-x17 (temporary caller-saved registers) to save EL1 system
@@ -32,9 +446,11 @@ func el1_sysregs_context_save
mrs x10, elr_el1
stp x9, x10, [x0, #CTX_SPSR_EL1]
+#if !ERRATA_SPECULATIVE_AT
mrs x15, sctlr_el1
- mrs x16, actlr_el1
+ mrs x16, tcr_el1
stp x15, x16, [x0, #CTX_SCTLR_EL1]
+#endif
mrs x17, cpacr_el1
mrs x9, csselr_el1
@@ -52,9 +468,9 @@ func el1_sysregs_context_save
mrs x15, amair_el1
stp x14, x15, [x0, #CTX_MAIR_EL1]
- mrs x16, tcr_el1
+ mrs x16, actlr_el1
mrs x17, tpidr_el1
- stp x16, x17, [x0, #CTX_TCR_EL1]
+ stp x16, x17, [x0, #CTX_ACTLR_EL1]
mrs x9, tpidr_el0
mrs x10, tpidrro_el0
@@ -129,9 +545,11 @@ func el1_sysregs_context_restore
msr spsr_el1, x9
msr elr_el1, x10
+#if !ERRATA_SPECULATIVE_AT
ldp x15, x16, [x0, #CTX_SCTLR_EL1]
msr sctlr_el1, x15
- msr actlr_el1, x16
+ msr tcr_el1, x16
+#endif
ldp x17, x9, [x0, #CTX_CPACR_EL1]
msr cpacr_el1, x17
@@ -149,8 +567,8 @@ func el1_sysregs_context_restore
msr mair_el1, x14
msr amair_el1, x15
- ldp x16, x17, [x0, #CTX_TCR_EL1]
- msr tcr_el1, x16
+ ldp x16, x17, [x0, #CTX_ACTLR_EL1]
+ msr actlr_el1, x16
msr tpidr_el1, x17
ldp x9, x10, [x0, #CTX_TPIDR_EL0]
@@ -471,6 +889,48 @@ func restore_gp_pmcr_pauth_regs
ret
endfunc restore_gp_pmcr_pauth_regs
+/*
+ * When ERRATA_SPECULATIVE_AT is enabled, save the SCTLR_EL1 and TCR_EL1
+ * registers and update them to disable the stage 1 and stage 2 page
+ * table walks for lower ELs.
+ */
+func save_and_update_ptw_el1_sys_regs
+ /* ----------------------------------------------------------
+ * Save only sctlr_el1 and tcr_el1 registers
+ * ----------------------------------------------------------
+ */
+ mrs x29, sctlr_el1
+ str x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1)]
+ mrs x29, tcr_el1
+ str x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_TCR_EL1)]
+
+ /* ------------------------------------------------------------
+	 * The following steps must be performed in this order to
+	 * disable the page table walk for lower ELs (EL1 and EL0).
+	 * The first step disables the stage 1 page table walk and
+	 * the second step forces the page table walker to use the
+	 * TCR_EL1.EPDx bits for address translation. The ISB ensures
+	 * that the CPU performs these two steps in order.
+ *
+ * 1. Update TCR_EL1.EPDx bits to disable page table walk by
+ * stage1.
+ * 2. Enable MMU bit to avoid identity mapping via stage2
+ * and force TCR_EL1.EPDx to be used by the page table
+ * walker.
+ * ------------------------------------------------------------
+ */
+ orr x29, x29, #(TCR_EPD0_BIT)
+ orr x29, x29, #(TCR_EPD1_BIT)
+ msr tcr_el1, x29
+ isb
+ mrs x29, sctlr_el1
+ orr x29, x29, #SCTLR_M_BIT
+ msr sctlr_el1, x29
+ isb
+
+ ret
+endfunc save_and_update_ptw_el1_sys_regs
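For readers more comfortable with C, the two ordered steps map onto the existing register accessors as follows (a sketch only; on the EL3 exit path this must run in assembly, as above):

```c
#include <arch.h>
#include <arch_helpers.h>

/* Illustrative C rendering of the two ordered steps above */
static void disable_lower_el_ptw_sketch(void)
{
	/* Step 1: TCR_EL1.EPD0/EPD1 disable the stage 1 walks */
	write_tcr_el1(read_tcr_el1() | TCR_EPD0_BIT | TCR_EPD1_BIT);
	isb();

	/* Step 2: SCTLR_EL1.M avoids identity mapping via stage 2 */
	write_sctlr_el1(read_sctlr_el1() | SCTLR_M_BIT);
	isb();
}
```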
+
/* ------------------------------------------------------------------
* This routine assumes that the SP_EL3 is pointing to a valid
* context structure from where the gp regs and other special
@@ -515,6 +975,8 @@ func el3_exit
blr x17
1:
#endif
+ restore_ptw_el1_sys_regs
+
/* ----------------------------------------------------------
* Restore general purpose (including x30), PMCR_EL0 and
* ARMv8.3-PAuth registers.
@@ -533,6 +995,11 @@ func el3_exit
* ----------------------------------------------------------
*/
esb
+#else
+ dsb sy
+#endif
+#ifdef IMAGE_BL31
+ str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
#endif
exception_return
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index dc4717abe..72d463b71 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -22,9 +22,8 @@
#include <lib/extensions/mpam.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/sve.h>
+#include <lib/extensions/twed.h>
#include <lib/utils.h>
-#include <plat/common/platform.h>
-#include <smccc_helpers.h>
/*******************************************************************************
@@ -109,6 +108,14 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
if (EP_GET_ST(ep->h.attr) != 0U)
scr_el3 |= SCR_ST_BIT;
+#if RAS_TRAP_LOWER_EL_ERR_ACCESS
+ /*
+ * SCR_EL3.TERR: Trap Error record accesses. Accesses to the RAS ERR
+ * and RAS ERX registers from EL1 and EL2 are trapped to EL3.
+ */
+ scr_el3 |= SCR_TERR_BIT;
+#endif
+
#if !HANDLE_EA_EL3_FIRST
/*
* SCR_EL3.EA: Do not route External Abort and SError Interrupt External
@@ -137,30 +144,33 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
#endif /* !CTX_INCLUDE_PAUTH_REGS */
+#if !CTX_INCLUDE_MTE_REGS || ENABLE_ASSERTIONS
+ /* Get Memory Tagging Extension support level */
+ unsigned int mte = get_armv8_5_mte_support();
+#endif
/*
* Enable MTE support. Support is enabled unilaterally for the normal
* world, and only for the secure world when CTX_INCLUDE_MTE_REGS is
* set.
*/
#if CTX_INCLUDE_MTE_REGS
- assert(get_armv8_5_mte_support() == MTE_IMPLEMENTED_ELX);
+ assert((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY));
scr_el3 |= SCR_ATA_BIT;
#else
- unsigned int mte = get_armv8_5_mte_support();
- if (mte == MTE_IMPLEMENTED_EL0) {
- /*
- * Can enable MTE across both worlds as no MTE registers are
- * used
- */
- scr_el3 |= SCR_ATA_BIT;
- } else if (mte == MTE_IMPLEMENTED_ELX && security_state == NON_SECURE) {
- /*
- * Can only enable MTE in Non-Secure world without register
- * saving
- */
+ /*
+ * When MTE is only implemented at EL0, it can be enabled
+ * across both worlds as no MTE registers are used.
+ */
+ if ((mte == MTE_IMPLEMENTED_EL0) ||
+ /*
+ * When MTE is implemented at all ELs, it can be only enabled
+ * in Non-Secure world without register saving.
+ */
+ (((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY)) &&
+ (security_state == NON_SECURE))) {
scr_el3 |= SCR_ATA_BIT;
}
-#endif
+#endif /* CTX_INCLUDE_MTE_REGS */
#ifdef IMAGE_BL31
/*
@@ -174,11 +184,26 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
* SCR_EL3.HCE: Enable HVC instructions if next execution state is
* AArch64 and next EL is EL2, or if next execution state is AArch32 and
* next mode is Hyp.
+ * SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps under the
+ * same conditions as HVC instructions and when the processor supports
+ * ARMv8.6-FGT.
+ * SCR_EL3.ECVEn: Enable Enhanced Counter Virtualization (ECV)
+ * CNTPOFF_EL2 register under the same conditions as HVC instructions
+ * and when the processor supports ECV.
*/
if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2))
|| ((GET_RW(ep->spsr) != MODE_RW_64)
&& (GET_M32(ep->spsr) == MODE32_hyp))) {
scr_el3 |= SCR_HCE_BIT;
+
+ if (is_armv8_6_fgt_present()) {
+ scr_el3 |= SCR_FGTEN_BIT;
+ }
+
+ if (get_armv8_6_ecv_support()
+ == ID_AA64MMFR0_EL1_ECV_SELF_SYNCH) {
+ scr_el3 |= SCR_ECVEN_BIT;
+ }
}
/* Enable S-EL2 if the next EL is EL2 and security state is secure */
@@ -231,12 +256,30 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
sctlr_elx |= SCTLR_IESB_BIT;
#endif
+ /* Enable WFE trap delay in SCR_EL3 if supported and configured */
+ if (is_armv8_6_twed_present()) {
+ uint32_t delay = plat_arm_set_twedel_scr_el3();
+
+ if (delay != TWED_DISABLED) {
+ /* Make sure delay value fits */
+ assert((delay & ~SCR_TWEDEL_MASK) == 0U);
+
+ /* Set delay in SCR_EL3 */
+ scr_el3 &= ~(SCR_TWEDEL_MASK << SCR_TWEDEL_SHIFT);
+ scr_el3 |= ((delay & SCR_TWEDEL_MASK)
+ << SCR_TWEDEL_SHIFT);
+
+ /* Enable WFE delay */
+ scr_el3 |= SCR_TWEDEn_BIT;
+ }
+ }
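plat_arm_set_twedel_scr_el3() is the platform hook queried here; returning TWED_DISABLED opts out of the feature. A minimal sketch of a platform providing a fixed delay (the value is illustrative and must fit in SCR_TWEDEL_MASK):

```c
#include <stdint.h>

/* Illustrative platform hook: fixed WFE trap delay for SCR_EL3.TWEDEL */
uint32_t plat_arm_set_twedel_scr_el3(void)
{
	return 0x4U;
}
```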
+
/*
* Store the initialised SCTLR_EL1 value in the cpu_context - SCTLR_EL2
* and other EL2 registers are set up by cm_prepare_ns_entry() as they
* are not part of the stored cpu_context.
*/
- write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
/*
* Base the context ACTLR_EL1 on the current value, as it is
@@ -246,7 +289,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
* be zero.
*/
actlr_elx = read_actlr_el1();
- write_ctx_reg((get_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
+ write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
/*
* Populate EL3 state so that we've the right context
@@ -338,7 +381,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
CTX_SCR_EL3);
if ((scr_el3 & SCR_HCE_BIT) != 0U) {
/* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
- sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx),
+ sctlr_elx = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_SCTLR_EL1);
sctlr_elx &= SCTLR_EE_BIT;
sctlr_elx |= SCTLR_EL2_RES1;
@@ -532,6 +575,52 @@ void cm_prepare_el3_exit(uint32_t security_state)
cm_set_next_eret_context(security_state);
}
+#if CTX_INCLUDE_EL2_REGS
+/*******************************************************************************
+ * Save EL2 sysreg context
+ ******************************************************************************/
+void cm_el2_sysregs_context_save(uint32_t security_state)
+{
+ u_register_t scr_el3 = read_scr();
+
+ /*
+ * Always save the non-secure EL2 context, only save the
+ * S-EL2 context if S-EL2 is enabled.
+ */
+ if ((security_state == NON_SECURE) ||
+ ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
+ cpu_context_t *ctx;
+
+ ctx = cm_get_context(security_state);
+ assert(ctx != NULL);
+
+ el2_sysregs_context_save(get_el2_sysregs_ctx(ctx));
+ }
+}
+
+/*******************************************************************************
+ * Restore EL2 sysreg context
+ ******************************************************************************/
+void cm_el2_sysregs_context_restore(uint32_t security_state)
+{
+ u_register_t scr_el3 = read_scr();
+
+ /*
+ * Always restore the non-secure EL2 context, only restore the
+ * S-EL2 context if S-EL2 is enabled.
+ */
+ if ((security_state == NON_SECURE) ||
+ ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
+ cpu_context_t *ctx;
+
+ ctx = cm_get_context(security_state);
+ assert(ctx != NULL);
+
+ el2_sysregs_context_restore(get_el2_sysregs_ctx(ctx));
+ }
+}
+#endif /* CTX_INCLUDE_EL2_REGS */
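A dispatcher built with CTX_INCLUDE_EL2_REGS is expected to bracket a world switch with this save/restore pair. A minimal sketch of that flow (the function is illustrative, not an existing TF-A API):

```c
#include <stdint.h>

#include <lib/el3_runtime/context_mgmt.h>

/* Illustrative world switch using the new EL2 context pair */
static void switch_world(uint32_t from_state, uint32_t to_state)
{
	cm_el2_sysregs_context_save(from_state);
	cm_el2_sysregs_context_restore(to_state);
	cm_set_next_eret_context(to_state);
}
```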
+
/*******************************************************************************
* The next four functions are used by runtime services to save and restore
* EL1 context on the 'cpu_context' structure for the specified security
@@ -544,7 +633,7 @@ void cm_el1_sysregs_context_save(uint32_t security_state)
ctx = cm_get_context(security_state);
assert(ctx != NULL);
- el1_sysregs_context_save(get_sysregs_ctx(ctx));
+ el1_sysregs_context_save(get_el1_sysregs_ctx(ctx));
#if IMAGE_BL31
if (security_state == SECURE)
@@ -561,7 +650,7 @@ void cm_el1_sysregs_context_restore(uint32_t security_state)
ctx = cm_get_context(security_state);
assert(ctx != NULL);
- el1_sysregs_context_restore(get_sysregs_ctx(ctx));
+ el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx));
#if IMAGE_BL31
if (security_state == SECURE)
@@ -624,7 +713,7 @@ void cm_write_scr_el3_bit(uint32_t security_state,
assert(ctx != NULL);
/* Ensure that the bit position is a valid one */
- assert(((1U << bit_pos) & SCR_VALID_BIT_MASK) != 0U);
+ assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U);
/* Ensure that the 'value' is only a bit wide */
assert(value <= 1U);
@@ -635,7 +724,7 @@ void cm_write_scr_el3_bit(uint32_t security_state,
*/
state = get_el3state_ctx(ctx);
scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
- scr_el3 &= ~(1U << bit_pos);
+ scr_el3 &= ~(1UL << bit_pos);
scr_el3 |= (u_register_t)value << bit_pos;
write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}
diff --git a/lib/el3_runtime/aarch64/cpu_data.S b/lib/el3_runtime/aarch64/cpu_data.S
index 2edf22559..2392d6b90 100644
--- a/lib/el3_runtime/aarch64/cpu_data.S
+++ b/lib/el3_runtime/aarch64/cpu_data.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -41,7 +41,8 @@ endfunc init_cpu_data_ptr
func _cpu_data_by_index
mov_imm x1, CPU_DATA_SIZE
mul x0, x0, x1
- adr x1, percpu_data
+ adrp x1, percpu_data
+ add x1, x1, :lo12:percpu_data
add x0, x0, x1
ret
endfunc _cpu_data_by_index
diff --git a/lib/extensions/amu/aarch32/amu.c b/lib/extensions/amu/aarch32/amu.c
index 82d2e1864..0f75f0791 100644
--- a/lib/extensions/amu/aarch32/amu.c
+++ b/lib/extensions/amu/aarch32/amu.c
@@ -1,39 +1,73 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <assert.h>
#include <stdbool.h>
#include <arch.h>
#include <arch_helpers.h>
+
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>
-#include <plat/common/platform.h>
-
-#define AMU_GROUP0_NR_COUNTERS 4
-struct amu_ctx {
- uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
- uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
-};
+#include <plat/common/platform.h>
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
+/* Check if AMUv1 for Armv8.4 or 8.6 is implemented */
bool amu_supported(void)
{
- uint64_t features;
+ uint32_t features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
- features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
- return (features & ID_PFR0_AMU_MASK) == 1U;
+ features &= ID_PFR0_AMU_MASK;
+ return ((features == 1U) || (features == 2U));
}
+#if AMU_GROUP1_NR_COUNTERS
+/* Check if group 1 counters are implemented */
+bool amu_group1_supported(void)
+{
+ uint32_t features = read_amcfgr() >> AMCFGR_NCG_SHIFT;
+
+ return (features & AMCFGR_NCG_MASK) == 1U;
+}
+#endif
+
+/*
+ * Enable counters. This function is meant to be invoked
+ * by the context management library before exiting from EL3.
+ */
void amu_enable(bool el2_unused)
{
- if (!amu_supported())
+ if (!amu_supported()) {
return;
+ }
+
+#if AMU_GROUP1_NR_COUNTERS
+ /* Check and set presence of group 1 counters */
+ if (!amu_group1_supported()) {
+ ERROR("AMU Counter Group 1 is not implemented\n");
+ panic();
+ }
+
+ /* Check number of group 1 counters */
+ uint32_t cnt_num = (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
+ AMCGCR_CG1NC_MASK;
+ VERBOSE("%s%u. %s%u\n",
+ "Number of AMU Group 1 Counters ", cnt_num,
+ "Requested number ", AMU_GROUP1_NR_COUNTERS);
+
+ if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
+ ERROR("%s%u is less than %s%u\n",
+ "Number of AMU Group 1 Counters ", cnt_num,
+ "Requested number ", AMU_GROUP1_NR_COUNTERS);
+ panic();
+ }
+#endif
if (el2_unused) {
uint64_t v;
@@ -49,112 +83,156 @@ void amu_enable(bool el2_unused)
/* Enable group 0 counters */
write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
+#if AMU_GROUP1_NR_COUNTERS
/* Enable group 1 counters */
write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
+#endif
}
/* Read the group 0 counter identified by the given `idx`. */
-uint64_t amu_group0_cnt_read(int idx)
+uint64_t amu_group0_cnt_read(unsigned int idx)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
+ assert(idx < AMU_GROUP0_NR_COUNTERS);
return amu_group0_cnt_read_internal(idx);
}
-/* Write the group 0 counter identified by the given `idx` with `val`. */
-void amu_group0_cnt_write(int idx, uint64_t val)
+/* Write the group 0 counter identified by the given `idx` with `val` */
+void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
+ assert(idx < AMU_GROUP0_NR_COUNTERS);
amu_group0_cnt_write_internal(idx, val);
isb();
}
-/* Read the group 1 counter identified by the given `idx`. */
-uint64_t amu_group1_cnt_read(int idx)
+#if AMU_GROUP1_NR_COUNTERS
+/* Read the group 1 counter identified by the given `idx` */
+uint64_t amu_group1_cnt_read(unsigned int idx)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+ assert(amu_group1_supported());
+ assert(idx < AMU_GROUP1_NR_COUNTERS);
return amu_group1_cnt_read_internal(idx);
}
-/* Write the group 1 counter identified by the given `idx` with `val`. */
-void amu_group1_cnt_write(int idx, uint64_t val)
+/* Write the group 1 counter identified by the given `idx` with `val` */
+void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+ assert(amu_group1_supported());
+ assert(idx < AMU_GROUP1_NR_COUNTERS);
amu_group1_cnt_write_internal(idx, val);
isb();
}
-void amu_group1_set_evtype(int idx, unsigned int val)
+/*
+ * Program the event type register for the given `idx` with
+ * the event number `val`
+ */
+void amu_group1_set_evtype(unsigned int idx, unsigned int val)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+ assert(amu_group1_supported());
+ assert(idx < AMU_GROUP1_NR_COUNTERS);
amu_group1_set_evtype_internal(idx, val);
isb();
}
+#endif /* AMU_GROUP1_NR_COUNTERS */
static void *amu_context_save(const void *arg)
{
- struct amu_ctx *ctx;
- int i;
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ unsigned int i;
- if (!amu_supported())
+ if (!amu_supported()) {
return (void *)-1;
+ }
- ctx = &amu_ctxs[plat_my_core_pos()];
-
- /* Assert that group 0 counter configuration is what we expect */
- assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK &&
- read_amcntenset1() == AMU_GROUP1_COUNTERS_MASK);
+#if AMU_GROUP1_NR_COUNTERS
+ if (!amu_group1_supported()) {
+ return (void *)-1;
+ }
+#endif
+ /* Assert that group 0/1 counter configuration is what we expect */
+ assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);
+#if AMU_GROUP1_NR_COUNTERS
+ assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
+#endif
/*
- * Disable group 0 counters to avoid other observers like SCP sampling
+ * Disable group 0/1 counters to avoid other observers like SCP sampling
* counter values from the future via the memory mapped view.
*/
write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);
+
+#if AMU_GROUP1_NR_COUNTERS
write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK);
+#endif
isb();
- for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+ /* Save all group 0 counters */
+ for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
ctx->group0_cnts[i] = amu_group0_cnt_read(i);
+ }
- for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
- ctx->group1_cnts[i] = amu_group1_cnt_read(i);
-
+#if AMU_GROUP1_NR_COUNTERS
+ /* Save group 1 counters */
+ for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
+ if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
+ ctx->group1_cnts[i] = amu_group1_cnt_read(i);
+ }
+ }
+#endif
return (void *)0;
}
static void *amu_context_restore(const void *arg)
{
- struct amu_ctx *ctx;
- int i;
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ unsigned int i;
- if (!amu_supported())
+ if (!amu_supported()) {
return (void *)-1;
+ }
- ctx = &amu_ctxs[plat_my_core_pos()];
-
+#if AMU_GROUP1_NR_COUNTERS
+ if (!amu_group1_supported()) {
+ return (void *)-1;
+ }
+#endif
/* Counters were disabled in `amu_context_save()` */
- assert((read_amcntenset0() == 0U) && (read_amcntenset1() == 0U));
+ assert(read_amcntenset0_el0() == 0U);
+
+#if AMU_GROUP1_NR_COUNTERS
+ assert(read_amcntenset1_el0() == 0U);
+#endif
- /* Restore group 0 counters */
- for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+ /* Restore all group 0 counters */
+ for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
amu_group0_cnt_write(i, ctx->group0_cnts[i]);
- for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
- amu_group1_cnt_write(i, ctx->group1_cnts[i]);
+ }
- /* Enable group 0 counters */
+ /* Restore group 0 counter configuration */
write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
- /* Enable group 1 counters */
+#if AMU_GROUP1_NR_COUNTERS
+ /* Restore group 1 counters */
+ for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
+ if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
+ amu_group1_cnt_write(i, ctx->group1_cnts[i]);
+ }
+ }
+
+ /* Restore group 1 counter configuration */
write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
+#endif
+
return (void *)0;
}
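
With group 1 support now compile-time gated, a platform that sets AMU_GROUP1_NR_COUNTERS uses the reworked API along these lines — a minimal sketch; the event number and the workload helper are placeholders, not values from this patch:

    /* Program group 1 counter 0, then measure its delta across a workload. */
    amu_group1_set_evtype(0U, 0x11U);           /* 0x11 is illustrative only */
    uint64_t before = amu_group1_cnt_read(0U);
    run_workload();                             /* hypothetical helper */
    uint64_t delta = amu_group1_cnt_read(0U) - before;
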
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index 85f7007aa..499736345 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,38 +9,67 @@
#include <arch.h>
#include <arch_helpers.h>
+
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>
-#include <plat/common/platform.h>
-#define AMU_GROUP0_NR_COUNTERS 4
-
-struct amu_ctx {
- uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
- uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
-};
+#include <plat/common/platform.h>
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
+/* Check if AMUv1 (Armv8.4) or AMUv1p1 (Armv8.6) is implemented */
bool amu_supported(void)
{
- uint64_t features;
+ uint64_t features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
+
+ features &= ID_AA64PFR0_AMU_MASK;
+ return ((features == 1U) || (features == 2U));
+}
+
+#if AMU_GROUP1_NR_COUNTERS
+/* Check if group 1 counters are implemented */
+bool amu_group1_supported(void)
+{
+ uint64_t features = read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT;
- features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
- return (features & ID_AA64PFR0_AMU_MASK) == 1U;
+ return (features & AMCFGR_EL0_NCG_MASK) == 1U;
}
+#endif
/*
- * Enable counters. This function is meant to be invoked
+ * Enable counters. This function is meant to be invoked
* by the context management library before exiting from EL3.
*/
void amu_enable(bool el2_unused)
{
uint64_t v;
- if (!amu_supported())
+ if (!amu_supported()) {
return;
+ }
+
+#if AMU_GROUP1_NR_COUNTERS
+ /* Check and set presence of group 1 counters */
+ if (!amu_group1_supported()) {
+ ERROR("AMU Counter Group 1 is not implemented\n");
+ panic();
+ }
+
+ /* Check number of group 1 counters */
+ uint64_t cnt_num = (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
+ AMCGCR_EL0_CG1NC_MASK;
+ VERBOSE("%s%llu. %s%u\n",
+ "Number of AMU Group 1 Counters ", cnt_num,
+ "Requested number ", AMU_GROUP1_NR_COUNTERS);
+
+ if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
+ ERROR("%s%llu is less than %s%u\n",
+ "Number of AMU Group 1 Counters ", cnt_num,
+ "Requested number ", AMU_GROUP1_NR_COUNTERS);
+ panic();
+ }
+#endif
if (el2_unused) {
/*
@@ -62,43 +91,49 @@ void amu_enable(bool el2_unused)
/* Enable group 0 counters */
write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+
+#if AMU_GROUP1_NR_COUNTERS
/* Enable group 1 counters */
write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
+#endif
}
/* Read the group 0 counter identified by the given `idx`. */
-uint64_t amu_group0_cnt_read(int idx)
+uint64_t amu_group0_cnt_read(unsigned int idx)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
+ assert(idx < AMU_GROUP0_NR_COUNTERS);
return amu_group0_cnt_read_internal(idx);
}
-/* Write the group 0 counter identified by the given `idx` with `val`. */
-void amu_group0_cnt_write(int idx, uint64_t val)
+/* Write the group 0 counter identified by the given `idx` with `val` */
+void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
+ assert(idx < AMU_GROUP0_NR_COUNTERS);
amu_group0_cnt_write_internal(idx, val);
isb();
}
-/* Read the group 1 counter identified by the given `idx`. */
-uint64_t amu_group1_cnt_read(int idx)
+#if AMU_GROUP1_NR_COUNTERS
+/* Read the group 1 counter identified by the given `idx` */
+uint64_t amu_group1_cnt_read(unsigned int idx)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+ assert(amu_group1_supported());
+ assert(idx < AMU_GROUP1_NR_COUNTERS);
return amu_group1_cnt_read_internal(idx);
}
-/* Write the group 1 counter identified by the given `idx` with `val`. */
-void amu_group1_cnt_write(int idx, uint64_t val)
+/* Write the group 1 counter identified by the given `idx` with `val` */
+void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+ assert(amu_group1_supported());
+ assert(idx < AMU_GROUP1_NR_COUNTERS);
amu_group1_cnt_write_internal(idx, val);
isb();
@@ -106,78 +141,106 @@ void amu_group1_cnt_write(int idx, uint64_t val)
/*
* Program the event type register for the given `idx` with
- * the event number `val`.
+ * the event number `val`
*/
-void amu_group1_set_evtype(int idx, unsigned int val)
+void amu_group1_set_evtype(unsigned int idx, unsigned int val)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+ assert(amu_group1_supported());
+ assert(idx < AMU_GROUP1_NR_COUNTERS);
amu_group1_set_evtype_internal(idx, val);
isb();
}
+#endif /* AMU_GROUP1_NR_COUNTERS */
static void *amu_context_save(const void *arg)
{
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
- int i;
+ unsigned int i;
- if (!amu_supported())
+ if (!amu_supported()) {
return (void *)-1;
+ }
+#if AMU_GROUP1_NR_COUNTERS
+ if (!amu_group1_supported()) {
+ return (void *)-1;
+ }
+#endif
/* Assert that group 0/1 counter configuration is what we expect */
- assert((read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK) &&
- (read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK));
-
- assert(((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
- <= AMU_GROUP1_NR_COUNTERS);
+ assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);
+#if AMU_GROUP1_NR_COUNTERS
+ assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
+#endif
/*
* Disable group 0/1 counters to avoid other observers like SCP sampling
* counter values from the future via the memory mapped view.
*/
write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);
+
+#if AMU_GROUP1_NR_COUNTERS
write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
+#endif
isb();
- /* Save group 0 counters */
- for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+ /* Save all group 0 counters */
+ for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
ctx->group0_cnts[i] = amu_group0_cnt_read(i);
+ }
+#if AMU_GROUP1_NR_COUNTERS
/* Save group 1 counters */
- for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
- ctx->group1_cnts[i] = amu_group1_cnt_read(i);
-
+ for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
+ if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
+ ctx->group1_cnts[i] = amu_group1_cnt_read(i);
+ }
+ }
+#endif
return (void *)0;
}
static void *amu_context_restore(const void *arg)
{
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
- int i;
+ unsigned int i;
- if (!amu_supported())
+ if (!amu_supported()) {
return (void *)-1;
+ }
+#if AMU_GROUP1_NR_COUNTERS
+ if (!amu_group1_supported()) {
+ return (void *)-1;
+ }
+#endif
/* Counters were disabled in `amu_context_save()` */
- assert((read_amcntenset0_el0() == 0U) && (read_amcntenset1_el0() == 0U));
+ assert(read_amcntenset0_el0() == 0U);
- assert(((sizeof(int) * 8U) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
- <= AMU_GROUP1_NR_COUNTERS);
+#if AMU_GROUP1_NR_COUNTERS
+ assert(read_amcntenset1_el0() == 0U);
+#endif
- /* Restore group 0 counters */
- for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
- if ((AMU_GROUP0_COUNTERS_MASK & (1U << i)) != 0U)
- amu_group0_cnt_write(i, ctx->group0_cnts[i]);
+ /* Restore all group 0 counters */
+ for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
+ amu_group0_cnt_write(i, ctx->group0_cnts[i]);
+ }
+ /* Restore group 0 counter configuration */
+ write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+
+#if AMU_GROUP1_NR_COUNTERS
/* Restore group 1 counters */
- for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
- if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U)
+ for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
+ if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
amu_group1_cnt_write(i, ctx->group1_cnts[i]);
+ }
+ }
- /* Restore group 0/1 counter configuration */
- write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+ /* Restore group 1 counter configuration */
write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
+#endif
return (void *)0;
}
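
The removed __builtin_clz() runtime asserts checked that the highest bit set in AMU_GROUP1_COUNTERS_MASK stays below AMU_GROUP1_NR_COUNTERS. The same guarantee can be had at build time; a sketch, assuming C11 _Static_assert is acceptable (TF-A's own CASSERT macro would serve equally):

    /* Every bit set in the group 1 mask must name an in-range counter. */
    _Static_assert((AMU_GROUP1_COUNTERS_MASK >> AMU_GROUP1_NR_COUNTERS) == 0U,
                   "AMU_GROUP1_COUNTERS_MASK selects an out-of-range counter");
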
diff --git a/lib/extensions/mpam/mpam.c b/lib/extensions/mpam/mpam.c
index e794f013b..65601ddec 100644
--- a/lib/extensions/mpam/mpam.c
+++ b/lib/extensions/mpam/mpam.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,20 +7,16 @@
#include <stdbool.h>
#include <arch.h>
+#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/extensions/mpam.h>
-bool mpam_supported(void)
-{
- uint64_t features = read_id_aa64dfr0_el1() >> ID_AA64PFR0_MPAM_SHIFT;
-
- return ((features & ID_AA64PFR0_MPAM_MASK) != 0U);
-}
-
void mpam_enable(bool el2_unused)
{
- if (!mpam_supported())
+ /* Check if MPAM is implemented */
+ if (get_mpam_version() == 0U) {
return;
+ }
/*
* Enable MPAM, and disable trapping to EL3 when lower ELs access their
@@ -34,10 +30,11 @@ void mpam_enable(bool el2_unused)
* If EL2 is implemented and used, enable trapping to EL2.
*/
if (el2_unused) {
- write_mpam2_el2(0);
+ write_mpam2_el2(0ULL);
- if ((read_mpamidr_el1() & MPAMIDR_HAS_HCR_BIT) != 0U)
- write_mpamhcr_el2(0);
+ if ((read_mpamidr_el1() & MPAMIDR_HAS_HCR_BIT) != 0U) {
+ write_mpamhcr_el2(0ULL);
+ }
} else {
write_mpam2_el2(MPAM2_EL2_TRAPMPAM0EL1 |
MPAM2_EL2_TRAPMPAM1EL1);
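
mpam_supported() gives way to get_mpam_version(), which also folds in the minor-version field. A sketch of what such a helper looks like, assuming the ID_AA64PFR0_EL1.MPAM and ID_AA64PFR1_EL1.MPAM_frac fields (the real definition lives in arch_features.h):

    /* 0 means MPAM is absent; non-zero packs the major and minor version. */
    static inline unsigned int get_mpam_version(void)
    {
    	return (unsigned int)((((read_id_aa64pfr0_el1() >>
    		ID_AA64PFR0_MPAM_SHIFT) & ID_AA64PFR0_MPAM_MASK) << 4) |
    		((read_id_aa64pfr1_el1() >>
    		ID_AA64PFR1_MPAM_FRAC_SHIFT) & ID_AA64PFR1_MPAM_FRAC_MASK));
    }
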
diff --git a/lib/extensions/mtpmu/aarch32/mtpmu.S b/lib/extensions/mtpmu/aarch32/mtpmu.S
new file mode 100644
index 000000000..834cee3ea
--- /dev/null
+++ b/lib/extensions/mtpmu/aarch32/mtpmu.S
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .global mtpmu_disable
+
+/* -------------------------------------------------------------
+ * The functions in this file are called at entrypoint, before
+ * the CPU has decided whether this is a cold or a warm boot.
+ * Therefore there is no stack yet to rely on for a C function
+ * call.
+ * -------------------------------------------------------------
+ */
+
+/*
+ * bool mtpmu_supported(void)
+ *
+ * Return a boolean indicating whether FEAT_MTPMU is supported or not.
+ *
+ * Trash registers: r0.
+ */
+func mtpmu_supported
+ ldcopr r0, ID_DFR1
+ and r0, r0, #(ID_DFR1_MTPMU_MASK >> ID_DFR1_MTPMU_SHIFT)
+ cmp r0, #ID_DFR1_MTPMU_SUPPORTED
+ mov r0, #0
+ addeq r0, r0, #1
+ bx lr
+endfunc mtpmu_supported
+
+/*
+ * bool el_implemented(unsigned int el)
+ *
+ * Return a boolean indicating if the specified EL (2 or 3) is implemented.
+ *
+ * Trash registers: r0
+ */
+func el_implemented
+ cmp r0, #3
+ ldcopr r0, ID_PFR1
+ lsreq r0, r0, #ID_PFR1_SEC_SHIFT
+ lsrne r0, r0, #ID_PFR1_VIRTEXT_SHIFT
+ /*
+ * ID_PFR1_VIRTEXT_MASK is the same as ID_PFR1_SEC_MASK
+ * so either may be used
+ */
+ and r0, r0, #ID_PFR1_VIRTEXT_MASK
+ cmp r0, #ID_PFR1_ELx_ENABLED
+ mov r0, #0
+ addeq r0, r0, #1
+ bx lr
+endfunc el_implemented
+
+/*
+ * void mtpmu_disable(void)
+ *
+ * Disable the MTPMU feature if supported.
+ *
+ * Trash registers: r0, r1, r2
+ */
+func mtpmu_disable
+ mov r2, lr
+ bl mtpmu_supported
+ cmp r0, #0
+ bxeq r2 /* FEAT_MTPMU not supported */
+
+ /* FEAT_MTPMU supported */
+ mov r0, #3
+ bl el_implemented
+ cmp r0, #0
+ beq 1f
+
+ /* EL3 implemented */
+ ldcopr r0, SDCR
+ ldr r1, =SDCR_MTPME_BIT
+ bic r0, r0, r1
+ stcopr r0, SDCR
+
+ /*
+ * If EL3 is implemented, HDCR.MTPME is implemented as Res0 and
+ * FEAT_MTPMU is controlled only from EL3, so no need to perform
+ * any operations for EL2.
+ */
+ isb
+ bx r2
+1:
+ /* EL3 not implemented */
+ mov r0, #2
+ bl el_implemented
+ cmp r0, #0
+ bxeq r2 /* No EL2 or EL3 implemented */
+
+ /* EL2 implemented */
+ ldcopr r0, HDCR
+ ldr r1, =HDCR_MTPME_BIT
+ orr r0, r0, r1
+ stcopr r0, HDCR
+ isb
+ bx r2
+endfunc mtpmu_disable
diff --git a/lib/extensions/mtpmu/aarch64/mtpmu.S b/lib/extensions/mtpmu/aarch64/mtpmu.S
new file mode 100644
index 000000000..0a1d57b9d
--- /dev/null
+++ b/lib/extensions/mtpmu/aarch64/mtpmu.S
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .global mtpmu_disable
+
+/* -------------------------------------------------------------
+ * The functions in this file are called at entrypoint, before
+ * the CPU has decided whether this is a cold or a warm boot.
+ * Therefore there is no stack yet to rely on for a C function
+ * call.
+ * -------------------------------------------------------------
+ */
+
+/*
+ * bool mtpmu_supported(void)
+ *
+ * Return a boolean indicating whether FEAT_MTPMU is supported or not.
+ *
+ * Trash registers: x0, x1
+ */
+func mtpmu_supported
+ mrs x0, id_aa64dfr0_el1
+ mov_imm x1, ID_AA64DFR0_MTPMU_MASK
+ and x0, x1, x0, LSR #ID_AA64DFR0_MTPMU_SHIFT
+ cmp x0, ID_AA64DFR0_MTPMU_SUPPORTED
+ cset x0, eq
+ ret
+endfunc mtpmu_supported
+
+/*
+ * bool el_implemented(unsigned int el_shift)
+ *
+ * Return a boolean indicating if the specified EL is implemented.
+ * The EL is represented as the bitmask shift on id_aa64pfr0_el1 register.
+ *
+ * Trash registers: x0, x1
+ */
+func el_implemented
+ mrs x1, id_aa64pfr0_el1
+ lsr x1, x1, x0
+ cmp x1, #ID_AA64PFR0_ELX_MASK
+ cset x0, eq
+ ret
+endfunc el_implemented
+
+/*
+ * void mtpmu_disable(void)
+ *
+ * Disable the MTPMU feature if supported.
+ *
+ * Trash registers: x0, x1, x10, x30
+ */
+func mtpmu_disable
+ mov x10, x30
+ bl mtpmu_supported
+ cbz x0, exit_disable
+
+ /* FEAT_MTPMU supported */
+ mov_imm x0, ID_AA64PFR0_EL3_SHIFT
+ bl el_implemented
+ cbz x0, 1f
+
+ /* EL3 implemented */
+ mrs x0, mdcr_el3
+ mov_imm x1, MDCR_MTPME_BIT
+ bic x0, x0, x1
+ msr mdcr_el3, x0
+
+ /*
+ * If EL3 is implemented, MDCR_EL2.MTPME is implemented as Res0 and
+ * FEAT_MTPMU is controlled only from EL3, so no need to perform
+ * any operations for EL2.
+ */
+ isb
+exit_disable:
+ ret x10
+1:
+ /* EL3 not implemented */
+ mov_imm x0, ID_AA64PFR0_EL2_SHIFT
+ bl el_implemented
+ cbz x0, exit_disable
+
+ /* EL2 implemented */
+ mrs x0, mdcr_el2
+ mov_imm x1, MDCR_EL2_MTPME
+ bic x0, x0, x1
+ msr mdcr_el2, x0
+ isb
+ ret x10
+endfunc mtpmu_disable
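
Both files encode the same decision tree; rendered in C for readability (the real code must stay in assembly since, as the header comment notes, there is no stack yet), with el3_implemented()/el2_implemented() as hypothetical stand-ins for the el_implemented checks:

    void mtpmu_disable_sketch(void)
    {
    	if (!mtpmu_supported())
    		return;

    	if (el3_implemented()) {
    		/* Clearing MDCR_EL3.MTPME disables FEAT_MTPMU; with EL3
    		 * present, MDCR_EL2.MTPME is Res0 so EL2 needs nothing. */
    		write_mdcr_el3(read_mdcr_el3() & ~MDCR_MTPME_BIT);
    	} else if (el2_implemented()) {
    		write_mdcr_el2(read_mdcr_el2() & ~MDCR_EL2_MTPME);
    	}
    	isb();
    }
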
diff --git a/lib/extensions/ras/ras_common.c b/lib/extensions/ras/ras_common.c
index 64a48524b..36f9a95b6 100644
--- a/lib/extensions/ras/ras_common.c
+++ b/lib/extensions/ras/ras_common.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -18,6 +19,47 @@
# error Platform must define RAS priority value
#endif
+/*
+ * Function to convert architecturally-defined primary error code SERR,
+ * bits[7:0] from ERR<n>STATUS to its corresponding error string.
+ */
+const char *ras_serr_to_str(unsigned int serr)
+{
+ const char *str[ERROR_STATUS_NUM_SERR] = {
+ "No error",
+ "IMPLEMENTATION DEFINED error",
+ "Data value from (non-associative) internal memory",
+ "IMPLEMENTATION DEFINED pin",
+ "Assertion failure",
+ "Error detected on internal data path",
+ "Data value from associative memory",
+ "Address/control value from associative memory",
+ "Data value from a TLB",
+ "Address/control value from a TLB",
+ "Data value from producer",
+ "Address/control value from producer",
+ "Data value from (non-associative) external memory",
+ "Illegal address (software fault)",
+ "Illegal access (software fault)",
+ "Illegal state (software fault)",
+ "Internal data register",
+ "Internal control register",
+ "Error response from slave",
+ "External timeout",
+ "Internal timeout",
+ "Deferred error from slave not supported at master"
+ };
+
+ /*
+ * All other values are reserved. Reserved values might be defined
+ * in a future version of the architecture.
+ */
+ if (serr >= ERROR_STATUS_NUM_SERR)
+ return "unknown SERR";
+
+ return str[serr];
+}
+
/* Handler that receives External Aborts on RAS-capable systems */
int ras_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
void *handle, uint64_t flags)
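
On a RAS-capable platform, the new helper turns the architected primary error code from an error record straight into text; a minimal sketch, reading the record currently selected via ERRSELR_EL1:

    uint64_t status = read_erxstatus_el1();
    unsigned int serr = (unsigned int)(status & 0xffU); /* SERR = bits[7:0] */
    ERROR("RAS error reported: %s\n", ras_serr_to_str(serr));
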
diff --git a/lib/extensions/spe/spe.c b/lib/extensions/spe/spe.c
index 78876c66b..f0d734223 100644
--- a/lib/extensions/spe/spe.c
+++ b/lib/extensions/spe/spe.c
@@ -25,7 +25,7 @@ bool spe_supported(void)
uint64_t features;
features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
- return (features & ID_AA64DFR0_PMS_MASK) == 1U;
+ return (features & ID_AA64DFR0_PMS_MASK) > 0ULL;
}
void spe_enable(bool el2_unused)
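
The relaxed comparison matters because ID_AA64DFR0_EL1.PMSVer is a version field rather than a flag: 1 denotes FEAT_SPE and 2 denotes FEAT_SPEv1p1, so testing for equality with 1 would wrongly reject newer implementations. The decode, spelled out:

    unsigned long long pmsver = (read_id_aa64dfr0_el1() >>
    			ID_AA64DFR0_PMS_SHIFT) & ID_AA64DFR0_PMS_MASK;
    bool spe_present = (pmsver > 0ULL);	/* any non-zero version will do */
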
diff --git a/lib/fconf/fconf.c b/lib/fconf/fconf.c
new file mode 100644
index 000000000..24b6bcc55
--- /dev/null
+++ b/lib/fconf/fconf.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <common/debug.h>
+#include <common/fdt_wrappers.h>
+#include <lib/fconf/fconf.h>
+#include <lib/fconf/fconf_dyn_cfg_getter.h>
+#include <libfdt.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+int fconf_load_config(unsigned int image_id)
+{
+ int err;
+ const struct dyn_cfg_dtb_info_t *config_info;
+
+ assert((image_id == FW_CONFIG_ID) || (image_id == TB_FW_CONFIG_ID));
+
+ image_info_t config_image_info = {
+ .h.type = (uint8_t)PARAM_IMAGE_BINARY,
+ .h.version = (uint8_t)VERSION_2,
+ .h.size = (uint16_t)sizeof(image_info_t),
+ .h.attr = 0
+ };
+
+ config_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, image_id);
+ assert(config_info != NULL);
+
+ config_image_info.image_base = config_info->config_addr;
+ config_image_info.image_max_size = config_info->config_max_size;
+
+ VERBOSE("FCONF: Loading config with image ID: %d\n", image_id);
+ err = load_auth_image(image_id, &config_image_info);
+ if (err != 0) {
+ VERBOSE("Failed to load config %d\n", image_id);
+ return err;
+ }
+
+ INFO("FCONF: Config file with image ID:%d loaded at address = 0x%lx\n",
+ image_id, config_image_info.image_base);
+
+ return 0;
+}
+
+void fconf_populate(const char *config_type, uintptr_t config)
+{
+ assert(config != 0UL);
+
+ /* Check if the pointer to DTB is correct */
+ if (fdt_check_header((void *)config) != 0) {
+ ERROR("FCONF: Invalid DTB file passed for %s\n", config_type);
+ panic();
+ }
+
+ INFO("FCONF: Reading %s firmware configuration file from: 0x%lx\n", config_type, config);
+
+ /* Go through all registered populate functions */
+ IMPORT_SYM(struct fconf_populator *, __FCONF_POPULATOR_START__, start);
+ IMPORT_SYM(struct fconf_populator *, __FCONF_POPULATOR_END__, end);
+ const struct fconf_populator *populator;
+
+ for (populator = start; populator != end; populator++) {
+ assert((populator->info != NULL) && (populator->populate != NULL));
+
+ if (strcmp(populator->config_type, config_type) == 0) {
+ INFO("FCONF: Reading firmware configuration information for: %s\n", populator->info);
+ if (populator->populate(config) != 0) {
+ /* TODO: handle property miss */
+ panic();
+ }
+ }
+ }
+}
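
A typical boot-stage sequence built on these two entry points — a sketch, assuming the dyn_cfg populator has already registered TB_FW_CONFIG_ID:

    /* Load the TB_FW config image, then run the populators matching it. */
    if (fconf_load_config(TB_FW_CONFIG_ID) == 0) {
    	const struct dyn_cfg_dtb_info_t *info =
    		FCONF_GET_PROPERTY(dyn_cfg, dtb, TB_FW_CONFIG_ID);
    	fconf_populate("TB_FW", info->config_addr);
    }
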
diff --git a/lib/fconf/fconf.mk b/lib/fconf/fconf.mk
new file mode 100644
index 000000000..b01dc6fea
--- /dev/null
+++ b/lib/fconf/fconf.mk
@@ -0,0 +1,12 @@
+#
+# Copyright (c) 2019-2020, ARM Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Add Firmware Configuration files
+FCONF_SOURCES := lib/fconf/fconf.c
+FCONF_DYN_SOURCES := lib/fconf/fconf_dyn_cfg_getter.c
+
+BL1_SOURCES += ${FCONF_SOURCES} ${FCONF_DYN_SOURCES}
+BL2_SOURCES += ${FCONF_SOURCES} ${FCONF_DYN_SOURCES}
diff --git a/lib/fconf/fconf_cot_getter.c b/lib/fconf/fconf_cot_getter.c
new file mode 100644
index 000000000..adfa5341c
--- /dev/null
+++ b/lib/fconf/fconf_cot_getter.c
@@ -0,0 +1,497 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include <common/fdt_wrappers.h>
+#include <drivers/auth/mbedtls/mbedtls_config.h>
+#include <drivers/auth/auth_mod.h>
+#include <lib/fconf/fconf.h>
+#include <lib/object_pool.h>
+#include <libfdt.h>
+
+#include <tools_share/tbbr_oid.h>
+
+/* static structures used during authentication process */
+static auth_param_type_desc_t sig = AUTH_PARAM_TYPE_DESC(
+ AUTH_PARAM_SIG, 0);
+static auth_param_type_desc_t sig_alg = AUTH_PARAM_TYPE_DESC(
+ AUTH_PARAM_SIG_ALG, 0);
+static auth_param_type_desc_t raw_data = AUTH_PARAM_TYPE_DESC(
+ AUTH_PARAM_RAW_DATA, 0);
+
+/* pointers to an array of CoT descriptors */
+static const auth_img_desc_t *cot_desc[MAX_NUMBER_IDS];
+/* array of CoT descriptors */
+static auth_img_desc_t auth_img_descs[MAX_NUMBER_IDS];
+
+/* array of authentication methods structures */
+static auth_method_desc_t auth_methods[MAX_NUMBER_IDS * AUTH_METHOD_NUM];
+static OBJECT_POOL_ARRAY(auth_methods_pool, auth_methods);
+
+/* array of authentication params structures */
+static auth_param_desc_t auth_params[MAX_NUMBER_IDS * COT_MAX_VERIFIED_PARAMS];
+static OBJECT_POOL_ARRAY(auth_params_pool, auth_params);
+
+/* array of authentication param type structures */
+static auth_param_type_desc_t auth_param_type_descs[MAX_NUMBER_IDS];
+static OBJECT_POOL_ARRAY(auth_param_type_descs_pool, auth_param_type_descs);
+
+/*
+ * Array of OIDs.
+ * Object IDs are used to look up hash, public key and nv-counter
+ * values in a certificate. As per the binding there are two cases:
+ * 1. Certificates are validated using an nv-counter and a public key
+ * 2. Raw images are authenticated using a hash
+ * Hence, in the worst case, there are at most 2 OIDs per image/certificate.
+ */
+static unsigned char oids[(MAX_NUMBER_IDS * 2)][MAX_OID_NAME_LEN];
+static OBJECT_POOL_ARRAY(oid_pool, oids);
+
+/* Arrays of authentication buffers which hold hashes and public keys.
+ * ToDo: sizes are based on the number of images and certificates
+ * currently present in the CoT; they are bound to increase in the
+ * future as images/certificates are added.
+ */
+static unsigned char hash_auth_bufs[20][HASH_DER_LEN];
+static OBJECT_POOL_ARRAY(hash_auth_buf_pool, hash_auth_bufs);
+static unsigned char pk_auth_bufs[12][PK_DER_LEN];
+static OBJECT_POOL_ARRAY(pk_auth_buf_pool, pk_auth_bufs);
+
+/*******************************************************************************
+ * update_parent_auth_data() - Update authentication data structure
+ * @auth_desc[in]: Pointer to the auth image descriptor
+ * @type_desc[in]: Pointer to authentication parameter
+ * @auth_buf_size[in]: Buffer size to hold pk or hash
+ *
+ * Return 0 on success or an error value otherwise.
+ ******************************************************************************/
+static int update_parent_auth_data(const auth_img_desc_t *auth_desc,
+ auth_param_type_desc_t *type_desc,
+ unsigned int auth_buf_size)
+{
+ unsigned int i;
+ auth_param_desc_t *auth_data = &auth_desc->authenticated_data[0];
+ unsigned char *auth_buf;
+
+ for (i = 0U; i < COT_MAX_VERIFIED_PARAMS; i++) {
+ if (auth_data[i].type_desc == type_desc) {
+ return 0;
+ }
+ if (auth_data[i].type_desc == NULL) {
+ break;
+ }
+ }
+
+ if (auth_buf_size == HASH_DER_LEN) {
+ auth_buf = pool_alloc(&hash_auth_buf_pool);
+ } else if (auth_buf_size == PK_DER_LEN) {
+ auth_buf = pool_alloc(&pk_auth_buf_pool);
+ } else {
+ return -1;
+ }
+
+ if (i < COT_MAX_VERIFIED_PARAMS) {
+ auth_data[i].type_desc = type_desc;
+ auth_data[i].data.ptr = auth_buf;
+ auth_data[i].data.len = auth_buf_size;
+ } else {
+ ERROR("Out of authentication data array\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ * get_auth_param_type_desc() - Get pointer of authentication parameter
+ * @img_id[in]: Image Id
+ * @type_desc[out]: Pointer to authentication parameter
+ * @buf_size[out]: Buffer size which hold hash/pk
+ *
+ * Return 0 on success or an error value otherwise.
+ ******************************************************************************/
+static int get_auth_param_type_desc(unsigned int img_id,
+ auth_param_type_desc_t **type_desc,
+ unsigned int *buf_size)
+{
+ auth_method_desc_t *img_auth_method = NULL;
+ img_type_t type = auth_img_descs[img_id].img_type;
+
+ if (type == IMG_CERT) {
+ img_auth_method =
+ &auth_img_descs[img_id].img_auth_methods[AUTH_METHOD_SIG];
+ *type_desc = img_auth_method->param.sig.pk;
+ *buf_size = PK_DER_LEN;
+ } else if (type == IMG_RAW) {
+ img_auth_method =
+ &auth_img_descs[img_id].img_auth_methods[AUTH_METHOD_HASH];
+ *type_desc = img_auth_method->param.hash.hash;
+ *buf_size = HASH_DER_LEN;
+ } else {
+ return -1;
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ * set_auth_method() - Update global auth image descriptors with authentication
+ * method data
+ * @auth_method_type[in]: Type of authentication method
+ * @oid[in]: Object Identifier for pk/hash search
+ * @auth_method[in]: Pointer to authentication method to set
+ ******************************************************************************/
+static void set_auth_method(auth_method_type_t auth_method_type, char *oid,
+ auth_method_desc_t *auth_method)
+{
+ auth_param_type_t auth_param_type = AUTH_PARAM_NONE;
+ auth_param_type_desc_t *auth_param_type_desc;
+
+ assert(auth_method != NULL);
+
+ auth_param_type_desc = pool_alloc(&auth_param_type_descs_pool);
+ auth_method->type = auth_method_type;
+
+ if (auth_method_type == AUTH_METHOD_SIG) {
+ auth_param_type = AUTH_PARAM_PUB_KEY;
+ auth_method->param.sig.sig = &sig;
+ auth_method->param.sig.alg = &sig_alg;
+ auth_method->param.sig.data = &raw_data;
+ auth_method->param.sig.pk = auth_param_type_desc;
+ } else if (auth_method_type == AUTH_METHOD_HASH) {
+ auth_param_type = AUTH_PARAM_HASH;
+ auth_method->param.hash.data = &raw_data;
+ auth_method->param.hash.hash = auth_param_type_desc;
+ } else if (auth_method_type == AUTH_METHOD_NV_CTR) {
+ auth_param_type = AUTH_PARAM_NV_CTR;
+ auth_method->param.nv_ctr.cert_nv_ctr = auth_param_type_desc;
+ auth_method->param.nv_ctr.plat_nv_ctr = auth_param_type_desc;
+ }
+
+ auth_param_type_desc->type = auth_param_type;
+ auth_param_type_desc->cookie = (void *)oid;
+}
+
+/*******************************************************************************
+ * get_oid() - get object identifier from device tree
+ * @dtb[in]: Pointer to the device tree blob in memory
+ * @node[in]: Offset of the node
+ * @prop[in]: Property to read from the given node
+ * @oid[out]: Object Identifier of key/hash/nv-counter in certificate
+ *
+ * Return 0 on success or an error value otherwise.
+ ******************************************************************************/
+static int get_oid(const void *dtb, int node, const char *prop, char **oid)
+{
+ uint32_t phandle;
+ int rc;
+
+ rc = fdt_read_uint32(dtb, node, prop, &phandle);
+ if (rc < 0) {
+ return rc;
+ }
+
+ node = fdt_node_offset_by_phandle(dtb, phandle);
+ if (node < 0) {
+ return node;
+ }
+
+ *oid = pool_alloc(&oid_pool);
+ rc = fdtw_read_string(dtb, node, "oid", *oid, MAX_OID_NAME_LEN);
+
+ return rc;
+}
+
+/*******************************************************************************
+ * populate_and_set_auth_methods() - Populate auth method parameters from
+ * device tree and set authentication method
+ * structure.
+ * @dtb[in]: Pointer to the device tree blob in memory
+ * @node[in]: Offset of the node
+ * @img_id[in]: Image identifier
+ * @type[in]: Type of image
+ * @root_certificate[in]:Root certificate (authenticated by ROTPK)
+ *
+ * Return 0 on success or an error value otherwise.
+ ******************************************************************************/
+static int populate_and_set_auth_methods(const void *dtb, int node,
+ unsigned int img_id, img_type_t type,
+ bool root_certificate)
+{
+ auth_method_type_t auth_method_type = AUTH_METHOD_NONE;
+ int rc;
+ char *oid = NULL;
+
+ auth_method_desc_t *auth_method = pool_alloc_n(&auth_methods_pool,
+ AUTH_METHOD_NUM);
+
+ /*
+ * This is as per binding document where certificates are
+ * verified by signature and images are verified by hash.
+ */
+ if (type == IMG_CERT) {
+ if (root_certificate) {
+ oid = NULL;
+ } else {
+ rc = get_oid(dtb, node, "signing-key", &oid);
+ if (rc < 0) {
+ ERROR("FCONF: Can't read %s property\n",
+ "signing-key");
+ return rc;
+ }
+ }
+ auth_method_type = AUTH_METHOD_SIG;
+ } else if (type == IMG_RAW) {
+ rc = get_oid(dtb, node, "hash", &oid);
+ if (rc < 0) {
+ ERROR("FCONF: Can't read %s property\n",
+ "hash");
+ return rc;
+ }
+ auth_method_type = AUTH_METHOD_HASH;
+ } else {
+ return -1;
+ }
+
+ set_auth_method(auth_method_type, oid,
+ &auth_method[auth_method_type]);
+
+ /* Retrieve the optional property */
+ rc = get_oid(dtb, node, "antirollback-counter", &oid);
+ if (rc == 0) {
+ auth_method_type = AUTH_METHOD_NV_CTR;
+ set_auth_method(auth_method_type, oid,
+ &auth_method[auth_method_type]);
+ }
+
+ auth_img_descs[img_id].img_auth_methods = &auth_method[0];
+
+ return 0;
+}
+
+/*******************************************************************************
+ * get_parent_img_id() - Get parent image id for given child node
+ * @dtb[in]: Pointer to the device tree blob in memory
+ * @node[in]: Offset of the child node
+ * @parent_img_id[out]: Image id of parent
+ *
+ * Return 0 on success or an error value otherwise.
+ ******************************************************************************/
+static int get_parent_img_id(const void *dtb, int node,
+ unsigned int *parent_img_id)
+{
+ uint32_t phandle;
+ int err;
+
+ err = fdt_read_uint32(dtb, node, "parent", &phandle);
+ if (err < 0) {
+ ERROR("FCONF: Could not read %s property in node\n",
+ "parent");
+ return err;
+ }
+
+ node = fdt_node_offset_by_phandle(dtb, phandle);
+ if (node < 0) {
+ ERROR("FCONF: Failed to locate node using its phandle\n");
+ return node;
+ }
+
+ err = fdt_read_uint32(dtb, node, "image-id", parent_img_id);
+ if (err < 0) {
+ ERROR("FCONF: Could not read %s property in node\n",
+ "image-id");
+ }
+
+ return err;
+}
+
+/*******************************************************************************
+ * set_desc_data() - Update data in descriptor's structure
+ * @dtb[in]: Pointer to the device tree blob in memory
+ * @node[in]: Offset of the node
+ * @type[in]: Type of image (RAW/CERT)
+ *
+ * Return 0 on success or an error value otherwise.
+ ******************************************************************************/
+static int set_desc_data(const void *dtb, int node, img_type_t type)
+{
+ int rc;
+ bool root_certificate = false;
+ unsigned int img_id, parent_img_id;
+
+ rc = fdt_read_uint32(dtb, node, "image-id", &img_id);
+ if (rc < 0) {
+ ERROR("FCONF: Can't find property %s in node\n",
+ "image-id");
+ return rc;
+ }
+
+ if (fdt_getprop(dtb, node, "root-certificate",
+ NULL) != NULL) {
+ root_certificate = true;
+ }
+
+ if (!root_certificate) {
+ rc = get_parent_img_id(dtb, node, &parent_img_id);
+ if (rc < 0) {
+ return rc;
+ }
+ auth_img_descs[img_id].parent = &auth_img_descs[parent_img_id];
+ }
+
+ auth_img_descs[img_id].img_id = img_id;
+ auth_img_descs[img_id].img_type = type;
+
+ rc = populate_and_set_auth_methods(dtb, node, img_id, type,
+ root_certificate);
+ if (rc < 0) {
+ return rc;
+ }
+
+ if (type == IMG_CERT) {
+ auth_param_desc_t *auth_param =
+ pool_alloc_n(&auth_params_pool,
+ COT_MAX_VERIFIED_PARAMS);
+ auth_img_descs[img_id].authenticated_data = &auth_param[0];
+ }
+
+ cot_desc[img_id] = &auth_img_descs[img_id];
+
+ return rc;
+}
+
+/*******************************************************************************
+ * populate_manifest_descs() - Populate CoT descriptors and update global
+ * certificate structures
+ * @dtb[in]: Pointer to the device tree blob in memory
+ *
+ * Return 0 on success or an error value otherwise.
+ ******************************************************************************/
+static int populate_manifest_descs(const void *dtb)
+{
+ int node, child;
+ int rc;
+
+ /*
+ * Assert the node offset points to "arm, cert-descs"
+ * compatible property
+ */
+ const char *compatible_str = "arm, cert-descs";
+
+ node = fdt_node_offset_by_compatible(dtb, -1, compatible_str);
+ if (node < 0) {
+ ERROR("FCONF: Can't find %s compatible in node\n",
+ compatible_str);
+ return node;
+ }
+
+ fdt_for_each_subnode(child, dtb, node) {
+ rc = set_desc_data(dtb, child, IMG_CERT);
+ if (rc < 0) {
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ * populate_image_descs() - Populate CoT descriptors and update global
+ * image descriptor structures.
+ * @dtb[in]: Pointer to the device tree blob in memory
+ *
+ * Return 0 on success or an error value otherwise.
+ ******************************************************************************/
+static int populate_image_descs(const void *dtb)
+{
+ int node, child;
+ int rc;
+
+ /*
+ * Assert the node offset points to "arm, img-descs"
+ * compatible property
+ */
+ const char *compatible_str = "arm, img-descs";
+
+ node = fdt_node_offset_by_compatible(dtb, -1, compatible_str);
+ if (node < 0) {
+ ERROR("FCONF: Can't find %s compatible in node\n",
+ compatible_str);
+ return node;
+ }
+
+ fdt_for_each_subnode(child, dtb, node) {
+ rc = set_desc_data(dtb, child, IMG_RAW);
+ if (rc < 0) {
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ * fconf_populate_cot_descs() - Populate CoT descriptors and update global
+ * structures
+ * @config[in]: Pointer to the device tree blob in memory
+ *
+ * Return 0 on success or an error value otherwise.
+ ******************************************************************************/
+static int fconf_populate_cot_descs(uintptr_t config)
+{
+ auth_param_type_desc_t *type_desc = NULL;
+ unsigned int auth_buf_size = 0U;
+ int rc;
+
+ /* As libfdt uses void *, we can't avoid this cast */
+ const void *dtb = (void *)config;
+
+ /* populate manifest descs information */
+ rc = populate_manifest_descs(dtb);
+ if (rc < 0) {
+ ERROR("FCONF: population of %s descs failed %d\n",
+ "manifest", rc);
+ return rc;
+ }
+
+ /* populate image descs information */
+ rc = populate_image_descs(dtb);
+ if (rc < 0) {
+ ERROR("FCONF: population of %s descs failed %d\n",
+ "images", rc);
+ return rc;
+ }
+
+ /* update parent's authentication data */
+ for (unsigned int i = 0U; i < MAX_NUMBER_IDS; i++) {
+ if (auth_img_descs[i].parent != NULL) {
+ rc = get_auth_param_type_desc(i,
+ &type_desc,
+ &auth_buf_size);
+ if (rc < 0) {
+ ERROR("FCONF: failed to get auth data %d\n",
+ rc);
+ return rc;
+ }
+
+ rc = update_parent_auth_data(auth_img_descs[i].parent,
+ type_desc,
+ auth_buf_size);
+ if (rc < 0) {
+ ERROR("FCONF: auth data update failed %d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ return rc;
+}
+
+FCONF_REGISTER_POPULATOR(TB_FW, cot_desc, fconf_populate_cot_descs);
+REGISTER_COT(cot_desc);
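
What the populate pass builds is a parent-linked descriptor chain per image. A simplified sketch of the traversal this enables (the real verification lives in the auth module and does considerably more):

    /* An image is trusted only once every ancestor certificate has been
     * verified, up to a root certificate checked against the ROTPK. */
    static int verify_chain(const auth_img_desc_t *desc)
    {
    	if ((desc->parent != NULL) && (verify_chain(desc->parent) != 0)) {
    		return -1;
    	}
    	/* ... apply desc->img_auth_methods[] using the parent's
    	 * authenticated_data (hash or public key) ... */
    	return 0;
    }
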
diff --git a/lib/fconf/fconf_dyn_cfg_getter.c b/lib/fconf/fconf_dyn_cfg_getter.c
new file mode 100644
index 000000000..25dd7f9ed
--- /dev/null
+++ b/lib/fconf/fconf_dyn_cfg_getter.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <common/debug.h>
+#include <common/fdt_wrappers.h>
+#include <lib/fconf/fconf_dyn_cfg_getter.h>
+#include <lib/object_pool.h>
+#include <libfdt.h>
+
+/* We currently use FW, TB_FW, SOC_FW, TOS_FW, NT_FW and HW configs */
+#define MAX_DTB_INFO U(6)
+/*
+ * Compile-time assert that FW_CONFIG_ID is not 0. This is unlikely,
+ * as 0 is a valid FIP image ID in the current code, but the check
+ * guards against breakage in the unlikely event that image IDs are
+ * ever reassigned.
+ */
+CASSERT(FW_CONFIG_ID != U(0), assert_invalid_fw_config_id);
+
+static struct dyn_cfg_dtb_info_t dtb_infos[MAX_DTB_INFO];
+static OBJECT_POOL_ARRAY(dtb_info_pool, dtb_infos);
+
+/*
+ * This function is used to alloc memory for config information from
+ * global pool and set the configuration information.
+ */
+void set_config_info(uintptr_t config_addr, uint32_t config_max_size,
+ unsigned int config_id)
+{
+ struct dyn_cfg_dtb_info_t *dtb_info;
+
+ dtb_info = pool_alloc(&dtb_info_pool);
+ dtb_info->config_addr = config_addr;
+ dtb_info->config_max_size = config_max_size;
+ dtb_info->config_id = config_id;
+}
+
+struct dyn_cfg_dtb_info_t *dyn_cfg_dtb_info_getter(unsigned int config_id)
+{
+ unsigned int index;
+
+ /* Positions index to the proper config-id */
+ for (index = 0U; index < MAX_DTB_INFO; index++) {
+ if (dtb_infos[index].config_id == config_id) {
+ return &dtb_infos[index];
+ }
+ }
+
+ WARN("FCONF: Invalid config id %u\n", config_id);
+
+ return NULL;
+}
+
+int fconf_populate_dtb_registry(uintptr_t config)
+{
+ int rc;
+ int node, child;
+
+ /* As libfdt uses void *, we can't avoid this cast */
+ const void *dtb = (void *)config;
+
+ /*
+ * In BL1, the fw_config dtb information has already been
+ * populated in the global dtb_infos array by the
+ * 'set_fw_config_info' function; the check below avoids
+ * re-populating it.
+ *
+ * All other BLs satisfy the check and populate the fw_config
+ * information in the global dtb_infos array here.
+ */
+ if (dtb_infos[0].config_id == 0U) {
+ uint32_t config_max_size = fdt_totalsize(dtb);
+ set_config_info(config, config_max_size, FW_CONFIG_ID);
+ }
+
+ /* Find the node offset pointing to the "fconf,dyn_cfg-dtb_registry" compatible property */
+ const char *compatible_str = "fconf,dyn_cfg-dtb_registry";
+ node = fdt_node_offset_by_compatible(dtb, -1, compatible_str);
+ if (node < 0) {
+ ERROR("FCONF: Can't find %s compatible in dtb\n", compatible_str);
+ return node;
+ }
+
+ fdt_for_each_subnode(child, dtb, node) {
+ uint32_t config_max_size, config_id;
+ uintptr_t config_addr;
+ uint64_t val64;
+
+ /* Read configuration dtb information */
+ rc = fdt_read_uint64(dtb, child, "load-address", &val64);
+ if (rc < 0) {
+ ERROR("FCONF: Incomplete configuration property in dtb-registry.\n");
+ return rc;
+ }
+ config_addr = (uintptr_t)val64;
+
+ rc = fdt_read_uint32(dtb, child, "max-size", &config_max_size);
+ if (rc < 0) {
+ ERROR("FCONF: Incomplete configuration property in dtb-registry.\n");
+ return rc;
+ }
+
+ rc = fdt_read_uint32(dtb, child, "id", &config_id);
+ if (rc < 0) {
+ ERROR("FCONF: Incomplete configuration property in dtb-registry.\n");
+ return rc;
+ }
+
+ VERBOSE("FCONF: dyn_cfg.dtb_registry cell found with:\n");
+ VERBOSE("\tload-address = %lx\n", config_addr);
+ VERBOSE("\tmax-size = 0x%x\n", config_max_size);
+ VERBOSE("\tconfig-id = %u\n", config_id);
+
+ set_config_info(config_addr, config_max_size, config_id);
+ }
+
+ if ((child < 0) && (child != -FDT_ERR_NOTFOUND)) {
+ ERROR("%d: fdt_for_each_subnode(): %d\n", __LINE__, node);
+ return child;
+ }
+
+ return 0;
+}
+
+FCONF_REGISTER_POPULATOR(FW_CONFIG, dyn_cfg, fconf_populate_dtb_registry);
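
Consumers look a registered DTB up by ID through the getter; a minimal sketch, using the standard HW_CONFIG_ID as an example:

    const struct dyn_cfg_dtb_info_t *hw = dyn_cfg_dtb_info_getter(HW_CONFIG_ID);
    if (hw != NULL) {
    	INFO("HW config at 0x%lx, max size 0x%x\n",
    	     hw->config_addr, hw->config_max_size);
    }
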
diff --git a/lib/fconf/fconf_tbbr_getter.c b/lib/fconf/fconf_tbbr_getter.c
new file mode 100644
index 000000000..9a20ced4e
--- /dev/null
+++ b/lib/fconf/fconf_tbbr_getter.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/fdt_wrappers.h>
+#include <lib/fconf/fconf_tbbr_getter.h>
+#include <libfdt.h>
+
+struct tbbr_dyn_config_t tbbr_dyn_config;
+
+int fconf_populate_tbbr_dyn_config(uintptr_t config)
+{
+ int err;
+ int node;
+ uint64_t val64;
+ uint32_t val32;
+
+ /* As libfdt uses void *, we can't avoid this cast */
+ const void *dtb = (void *)config;
+
+ /* Assert the node offset points to the "arm,tb_fw" compatible property */
+ const char *compatible_str = "arm,tb_fw";
+ node = fdt_node_offset_by_compatible(dtb, -1, compatible_str);
+ if (node < 0) {
+ ERROR("FCONF: Can't find `%s` compatible in dtb\n",
+ compatible_str);
+ return node;
+ }
+
+ /* Locate the disable_auth cell and read the value */
+ err = fdt_read_uint32(dtb, node, "disable_auth",
+ &tbbr_dyn_config.disable_auth);
+ if (err < 0) {
+ WARN("FCONF: Read %s failed for `%s`\n",
+ "cell", "disable_auth");
+ return err;
+ }
+
+ /* Check if the value is boolean */
+ if ((tbbr_dyn_config.disable_auth != 0U) &&
+ (tbbr_dyn_config.disable_auth != 1U)) {
+ WARN("Invalid value for `%s` cell %d\n",
+ "disable_auth", tbbr_dyn_config.disable_auth);
+ return -1;
+ }
+
+#if defined(DYN_DISABLE_AUTH)
+ if (tbbr_dyn_config.disable_auth == 1)
+ dyn_disable_auth();
+#endif
+
+ /* Retrieve the Mbed TLS heap details from the DTB */
+ err = fdt_read_uint64(dtb, node, "mbedtls_heap_addr", &val64);
+ if (err < 0) {
+ ERROR("FCONF: Read %s failed for `%s`\n",
+ "cell", "mbedtls_heap_addr");
+ return err;
+ }
+ tbbr_dyn_config.mbedtls_heap_addr = (void *)(uintptr_t)val64;
+
+ err = fdt_read_uint32(dtb, node, "mbedtls_heap_size", &val32);
+ if (err < 0) {
+ ERROR("FCONF: Read %s failed for `%s`\n",
+ "cell", "mbedtls_heap_size");
+ return err;
+ }
+ tbbr_dyn_config.mbedtls_heap_size = val32;
+
+#if MEASURED_BOOT
+ /* Retrieve BL2 hash data details from the DTB */
+ err = fdtw_read_bytes(dtb, node, "bl2_hash_data", TCG_DIGEST_SIZE,
+ &tbbr_dyn_config.bl2_hash_data);
+ if (err < 0) {
+ ERROR("FCONF: Read %s failed for '%s'\n",
+ "bytes", "bl2_hash_data");
+ return err;
+ }
+#endif
+ VERBOSE("%s%s%s %d\n", "FCONF: `tbbr.", "disable_auth",
+ "` cell found with value =", tbbr_dyn_config.disable_auth);
+ VERBOSE("%s%s%s %p\n", "FCONF: `tbbr.", "mbedtls_heap_addr",
+ "` cell found with value =", tbbr_dyn_config.mbedtls_heap_addr);
+ VERBOSE("%s%s%s %zu\n", "FCONF: `tbbr.", "mbedtls_heap_size",
+ "` cell found with value =", tbbr_dyn_config.mbedtls_heap_size);
+#if MEASURED_BOOT
+ VERBOSE("%s%s%s %p\n", "FCONF: `tbbr.", "bl2_hash_data",
+ "` array found at address =", tbbr_dyn_config.bl2_hash_data);
+#endif
+ return 0;
+}
+
+FCONF_REGISTER_POPULATOR(TB_FW, tbbr, fconf_populate_tbbr_dyn_config);
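
Once populated, the values are read back through the tbbr getter — a sketch, with the accessor shape assumed from fconf_tbbr_getter.h:

    /* Hand the Mbed TLS heap described in the DTB to the crypto library. */
    void *heap = FCONF_GET_PROPERTY(tbbr, dyn_config, mbedtls_heap_addr);
    size_t heap_size = FCONF_GET_PROPERTY(tbbr, dyn_config, mbedtls_heap_size);
    mbedtls_memory_buffer_alloc_init(heap, heap_size);
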
diff --git a/lib/libc/aarch32/memset.S b/lib/libc/aarch32/memset.S
new file mode 100644
index 000000000..880ba8382
--- /dev/null
+++ b/lib/libc/aarch32/memset.S
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .syntax unified
+ .global memset
+
+/* -----------------------------------------------------------------------
+ * void *memset(void *dst, int val, size_t count)
+ *
+ * Copy the value of 'val' (converted to an unsigned char) into
+ * each of the first 'count' characters of the object pointed to by 'dst'.
+ *
+ * Returns the value of 'dst'.
+ * -----------------------------------------------------------------------
+ */
+func memset
+ mov r12, r0 /* keep r0 */
+ tst r0, #3
+ beq aligned /* 4-bytes aligned */
+
+ /* Unaligned 'dst' */
+unaligned:
+ subs r2, r2, #1
+ strbhs r1, [r12], #1
+ bxls lr /* return if 0 */
+ tst r12, #3
+ bne unaligned /* continue while unaligned */
+
+ /* 4-bytes aligned */
+aligned:bfi r1, r1, #8, #8 /* propagate 'val' */
+ bfi r1, r1, #16, #16
+
+ mov r3, r1
+
+ cmp r2, #16
+ blo less_16 /* < 16 */
+
+ push {r4, lr}
+ mov r4, r1
+ mov lr, r1
+
+write_32:
+ subs r2, r2, #32
+ stmiahs r12!, {r1, r3, r4, lr}
+ stmiahs r12!, {r1, r3, r4, lr}
+ bhi write_32 /* write 32 bytes in a loop */
+ popeq {r4, pc} /* return if 0 */
+ lsls r2, r2, #28 /* C = r2[4]; N = r2[3]; Z = r2[3:0] */
+ stmiacs r12!, {r1, r3, r4, lr} /* write 16 bytes */
+ popeq {r4, pc} /* return if 16 */
+ stmiami r12!, {r1, r3} /* write 8 bytes */
+ lsls r2, r2, #2 /* C = r2[2]; N = r2[1]; Z = r2[1:0] */
+ strcs r1, [r12], #4 /* write 4 bytes */
+ popeq {r4, pc} /* return if 8 or 4 */
+ strhmi r1, [r12], #2 /* write 2 bytes */
+ lsls r2, r2, #1 /* N = Z = r2[0] */
+ strbmi r1, [r12] /* write 1 byte */
+ pop {r4, pc}
+
+less_16:lsls r2, r2, #29 /* C = r2[3]; N = r2[2]; Z = r2[2:0] */
+ stmiacs r12!, {r1, r3} /* write 8 bytes */
+ bxeq lr /* return if 8 */
+ strmi r1, [r12], #4 /* write 4 bytes */
+ lsls r2, r2, #2 /* C = r2[1]; N = Z = r2[0] */
+ strhcs r1, [r12], #2 /* write 2 bytes */
+ strbmi r1, [r12] /* write 1 byte */
+ bx lr
+
+endfunc memset
diff --git a/lib/libc/aarch64/memset.S b/lib/libc/aarch64/memset.S
new file mode 100644
index 000000000..05437041d
--- /dev/null
+++ b/lib/libc/aarch64/memset.S
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .global memset
+
+/* -----------------------------------------------------------------------
+ * void *memset(void *dst, int val, size_t count)
+ *
+ * Copy the value of 'val' (converted to an unsigned char) into
+ * each of the first 'count' characters of the object pointed to by 'dst'.
+ *
+ * Returns the value of 'dst'.
+ * -----------------------------------------------------------------------
+ */
+func memset
+ cbz x2, exit /* exit if 'count' = 0 */
+ mov x3, x0 /* keep x0 */
+ tst x0, #7
+ b.eq aligned /* 8-bytes aligned */
+
+ /* Unaligned 'dst' */
+unaligned:
+ strb w1, [x3], #1
+ subs x2, x2, #1
+ b.eq exit /* exit if 0 */
+ tst x3, #7
+ b.ne unaligned /* continue while unaligned */
+
+ /* 8-bytes aligned */
+aligned:cbz x1, x1_zero
+ bfi w1, w1, #8, #8 /* propagate 'val' */
+ bfi w1, w1, #16, #16
+ bfi x1, x1, #32, #32
+
+x1_zero:ands x4, x2, #~0x3f
+ b.eq less_64
+
+write_64:
+ .rept 4
+ stp x1, x1, [x3], #16 /* write 64 bytes in a loop */
+ .endr
+ subs x4, x4, #64
+ b.ne write_64
+less_64:tbz w2, #5, less_32 /* < 32 bytes */
+ stp x1, x1, [x3], #16 /* write 32 bytes */
+ stp x1, x1, [x3], #16
+less_32:tbz w2, #4, less_16 /* < 16 bytes */
+ stp x1, x1, [x3], #16 /* write 16 bytes */
+less_16:tbz w2, #3, less_8 /* < 8 bytes */
+ str x1, [x3], #8 /* write 8 bytes */
+less_8: tbz w2, #2, less_4 /* < 4 bytes */
+ str w1, [x3], #4 /* write 4 bytes */
+less_4: tbz w2, #1, less_2 /* < 2 bytes */
+ strh w1, [x3], #2 /* write 2 bytes */
+less_2: tbz w2, #0, exit
+ strb w1, [x3] /* write 1 byte */
+exit: ret
+
+endfunc memset
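
Both assembly versions must keep the C semantics stated in their headers (fill 'count' bytes with the low byte of 'val', return 'dst'). A small host-side property check, as a sketch:

    #include <assert.h>
    #include <string.h>

    static void memset_smoke_test(void)
    {
    	char buf[37];	/* odd size exercises the unaligned head and tails */

    	assert(memset(buf, 0xAB, sizeof(buf)) == buf);
    	for (size_t i = 0U; i < sizeof(buf); i++) {
    		assert((unsigned char)buf[i] == 0xABU);
    	}
    }
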
diff --git a/lib/libc/assert.c b/lib/libc/assert.c
index 49f59db16..ff987b3be 100644
--- a/lib/libc/assert.c
+++ b/lib/libc/assert.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -23,7 +23,7 @@ void __dead2 __assert(const char *file, unsigned int line,
{
printf("ASSERT: %s:%d:%s\n", file, line, assertion);
backtrace("assert");
- (void)console_flush();
+ console_flush();
plat_panic_handler();
}
#elif PLAT_LOG_LEVEL_ASSERT >= LOG_LEVEL_INFO
@@ -31,14 +31,14 @@ void __dead2 __assert(const char *file, unsigned int line)
{
printf("ASSERT: %s:%d\n", file, line);
backtrace("assert");
- (void)console_flush();
+ console_flush();
plat_panic_handler();
}
#else
void __dead2 __assert(void)
{
backtrace("assert");
- (void)console_flush();
+ console_flush();
plat_panic_handler();
}
#endif
diff --git a/lib/libc/libc.mk b/lib/libc/libc.mk
index 93d30d035..b75d09c28 100644
--- a/lib/libc/libc.mk
+++ b/lib/libc/libc.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -20,11 +20,17 @@ LIBC_SRCS := $(addprefix lib/libc/, \
snprintf.c \
strchr.c \
strcmp.c \
+ strlcat.c \
strlcpy.c \
strlen.c \
strncmp.c \
strnlen.c \
- strrchr.c)
+ strrchr.c \
+ strtok.c \
+ strtoul.c \
+ strtoll.c \
+ strtoull.c \
+ strtol.c)
ifeq (${ARCH},aarch64)
LIBC_SRCS += $(addprefix lib/libc/aarch64/, \
diff --git a/lib/libc/libc_asm.mk b/lib/libc/libc_asm.mk
new file mode 100644
index 000000000..2f272651b
--- /dev/null
+++ b/lib/libc/libc_asm.mk
@@ -0,0 +1,44 @@
+#
+# Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+LIBC_SRCS := $(addprefix lib/libc/, \
+ abort.c \
+ assert.c \
+ exit.c \
+ memchr.c \
+ memcmp.c \
+ memcpy.c \
+ memmove.c \
+ memrchr.c \
+ printf.c \
+ putchar.c \
+ puts.c \
+ snprintf.c \
+ strchr.c \
+ strcmp.c \
+ strlcat.c \
+ strlcpy.c \
+ strlen.c \
+ strncmp.c \
+ strnlen.c \
+ strrchr.c \
+ strtok.c \
+ strtoul.c \
+ strtoll.c \
+ strtoull.c \
+ strtol.c)
+
+ifeq (${ARCH},aarch64)
+LIBC_SRCS += $(addprefix lib/libc/aarch64/, \
+ memset.S \
+ setjmp.S)
+else
+LIBC_SRCS += $(addprefix lib/libc/aarch32/, \
+ memset.S)
+endif
+
+INCLUDES += -Iinclude/lib/libc \
+ -Iinclude/lib/libc/$(ARCH) \
diff --git a/lib/libc/memset.c b/lib/libc/memset.c
index d8007d8e9..f9dd4c5db 100644
--- a/lib/libc/memset.c
+++ b/lib/libc/memset.c
@@ -1,18 +1,48 @@
/*
- * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stddef.h>
#include <string.h>
+#include <stdint.h>
void *memset(void *dst, int val, size_t count)
{
char *ptr = dst;
+ uint64_t *ptr64;
+ uint64_t fill = (unsigned char)val;
- while (count--)
+ /* Simplify code below by making sure we write at least one byte. */
+ if (count == 0) {
+ return dst;
+ }
+
+ /* Handle the first part, until the pointer becomes 64-bit aligned. */
+ while (((uintptr_t)ptr & 7)) {
+ *ptr++ = val;
+ if (--count == 0) {
+ return dst;
+ }
+ }
+
+ /* Duplicate the fill byte to the rest of the 64-bit word. */
+ fill |= fill << 8;
+ fill |= fill << 16;
+ fill |= fill << 32;
+
+ /* Use 64-bit writes for as long as possible. */
+ ptr64 = (void *)ptr;
+ for (; count >= 8; count -= 8) {
+ *ptr64++ = fill;
+ }
+
+ /* Handle the remaining part byte-per-byte. */
+ ptr = (void *)ptr64;
+ while (count--) {
*ptr++ = val;
+ }
return dst;
}
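
The three shifts build the fill pattern by doubling the replicated width each time; traced for val = 0xAB:

    uint64_t fill = 0xABU;
    fill |= fill << 8;	/* 0x000000000000ABAB */
    fill |= fill << 16;	/* 0x00000000ABABABAB */
    fill |= fill << 32;	/* 0xABABABABABABABAB */
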
diff --git a/lib/libc/printf.c b/lib/libc/printf.c
index 2715a72d4..45e153ec7 100644
--- a/lib/libc/printf.c
+++ b/lib/libc/printf.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -108,6 +108,9 @@ int vprintf(const char *fmt, va_list args)
/* Check the format specifier */
loop:
switch (*fmt) {
+ case '%':
+ (void)putchar('%');
+ break;
case 'i': /* Fall through to next one */
case 'd':
num = get_num_va_args(args, l_count);
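
With the new '%' case, a literal percent sign can be emitted at last:

    printf("battery at 100%%\n");	/* prints: battery at 100% */
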
diff --git a/lib/libc/snprintf.c b/lib/libc/snprintf.c
index 38ad1c71a..3b175ed6a 100644
--- a/lib/libc/snprintf.c
+++ b/lib/libc/snprintf.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,51 +10,90 @@
#include <common/debug.h>
#include <plat/common/platform.h>
+#define CHECK_AND_PUT_CHAR(buf, size, chars_printed, ch) \
+ do { \
+ if ((chars_printed) < (size)) { \
+ *(buf) = (ch); \
+ (buf)++; \
+ } \
+ (chars_printed)++; \
+ } while (false)
+
static void string_print(char **s, size_t n, size_t *chars_printed,
const char *str)
{
while (*str != '\0') {
- if (*chars_printed < n) {
- *(*s) = *str;
- (*s)++;
- }
-
- (*chars_printed)++;
+ CHECK_AND_PUT_CHAR(*s, n, *chars_printed, *str);
str++;
}
}
-static void unsigned_dec_print(char **s, size_t n, size_t *chars_printed,
- unsigned int unum)
+static void unsigned_num_print(char **s, size_t n, size_t *chars_printed,
+ unsigned long long int unum,
+ unsigned int radix, char padc, int padn,
+ bool capitalise)
{
- /* Enough for a 32-bit unsigned decimal integer (4294967295). */
- char num_buf[10];
+ /* Just need enough space to store a 64-bit decimal integer */
+ char num_buf[20];
int i = 0;
+ int width;
unsigned int rem;
+ char ascii_a = capitalise ? 'A' : 'a';
do {
- rem = unum % 10U;
- num_buf[i++] = '0' + rem;
- unum /= 10U;
+ rem = unum % radix;
+ if (rem < 10U) {
+ num_buf[i] = '0' + rem;
+ } else {
+ num_buf[i] = ascii_a + (rem - 10U);
+ }
+ i++;
+ unum /= radix;
} while (unum > 0U);
- while (--i >= 0) {
- if (*chars_printed < n) {
- *(*s) = num_buf[i];
- (*s)++;
+ width = i;
+ if (padn > width) {
+ (*chars_printed) += (size_t)padn;
+ } else {
+ (*chars_printed) += (size_t)width;
+ }
+
+ if (*chars_printed < n) {
+
+ if (padn > 0) {
+ while (width < padn) {
+ *(*s)++ = padc;
+ padn--;
+ }
+ }
+
+ while (--i >= 0) {
+ *(*s)++ = num_buf[i];
}
- (*chars_printed)++;
+ if (padn < 0) {
+ while (width < -padn) {
+ *(*s)++ = padc;
+ padn++;
+ }
+ }
}
}
/*******************************************************************
- * Reduced snprintf to be used for Trusted firmware.
+ * Reduced vsnprintf to be used for Trusted firmware.
* The following type specifiers are supported:
*
+ * %x (or %X) - hexadecimal format
* %d or %i - signed decimal format
* %s - string format
* %u - unsigned decimal format
+ * %p - pointer format
+ *
+ * The following padding specifiers are supported by this implementation:
+ * %0NN - Left-pad the number with 0s (NN is a decimal number)
+ * %NN - Left-pad the number or string with spaces (NN is a decimal number)
+ * %-NN - Right-pad the number or string with spaces (NN is a decimal number)
*
* The function panics on all other formats specifiers.
*
@@ -62,12 +101,15 @@ static void unsigned_dec_print(char **s, size_t n, size_t *chars_printed,
* buffer was big enough. If it returns a value lower than n, the
* whole string has been written.
*******************************************************************/
-int snprintf(char *s, size_t n, const char *fmt, ...)
+int vsnprintf(char *s, size_t n, const char *fmt, va_list args)
{
- va_list args;
int num;
- unsigned int unum;
+ unsigned long long int unum;
char *str;
+ char padc; /* Padding character */
+ int padn; /* Number of characters to pad */
+ bool left;
+ bool capitalise;
size_t chars_printed = 0U;
if (n == 0U) {
@@ -81,30 +123,57 @@ int snprintf(char *s, size_t n, const char *fmt, ...)
n--;
}
- va_start(args, fmt);
while (*fmt != '\0') {
+ left = false;
+ padc = '\0';
+ padn = 0;
+ capitalise = false;
if (*fmt == '%') {
fmt++;
/* Check the format specifier. */
+loop:
switch (*fmt) {
+ case '%':
+ CHECK_AND_PUT_CHAR(s, n, chars_printed, '%');
+ break;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ padc = (*fmt == '0') ? '0' : ' ';
+ for (padn = 0; *fmt >= '0' && *fmt <= '9'; fmt++) {
+ padn = (padn * 10) + (*fmt - '0');
+ }
+ if (left) {
+ padn = -padn;
+ }
+ goto loop;
+ case '-':
+ left = true;
+ fmt++;
+ goto loop;
+
case 'i':
case 'd':
num = va_arg(args, int);
if (num < 0) {
- if (chars_printed < n) {
- *s = '-';
- s++;
- }
- chars_printed++;
-
+ CHECK_AND_PUT_CHAR(s, n, chars_printed,
+ '-');
unum = (unsigned int)-num;
} else {
unum = (unsigned int)num;
}
- unsigned_dec_print(&s, n, &chars_printed, unum);
+ unsigned_num_print(&s, n, &chars_printed,
+ unum, 10, padc, padn, false);
break;
case 's':
str = va_arg(args, char *);
@@ -112,8 +181,27 @@ int snprintf(char *s, size_t n, const char *fmt, ...)
break;
case 'u':
unum = va_arg(args, unsigned int);
- unsigned_dec_print(&s, n, &chars_printed, unum);
+ unsigned_num_print(&s, n, &chars_printed,
+ unum, 10, padc, padn, false);
+ break;
+ case 'p':
+ unum = (uintptr_t)va_arg(args, void *);
+ if (unum > 0U) {
+ string_print(&s, n, &chars_printed, "0x");
+ padn -= 2;
+ }
+ unsigned_num_print(&s, n, &chars_printed,
+ unum, 16, padc, padn, false);
+ break;
+ case 'X':
+ capitalise = true;
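+ /* Fallthrough */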
+ case 'x':
+ unum = va_arg(args, unsigned int);
+ unsigned_num_print(&s, n, &chars_printed,
+ unum, 16, padc, padn,
+ capitalise);
break;
+
default:
/* Panic on any other format specifier. */
ERROR("snprintf: specifier with ASCII code '%d' not supported.",
@@ -125,19 +213,47 @@ int snprintf(char *s, size_t n, const char *fmt, ...)
continue;
}
- if (chars_printed < n) {
- *s = *fmt;
- s++;
- }
+ CHECK_AND_PUT_CHAR(s, n, chars_printed, *fmt);
fmt++;
- chars_printed++;
}
- va_end(args);
-
- if (n > 0U)
+ if (n > 0U) {
*s = '\0';
+ }
return (int)chars_printed;
}
+
+/*******************************************************************
+ * Reduced snprintf to be used by Trusted Firmware.
+ * The following type specifiers are supported:
+ *
+ * %x (or %X) - hexadecimal format
+ * %d or %i - signed decimal format
+ * %s - string format
+ * %u - unsigned decimal format
+ * %p - pointer format
+ *
+ * The following padding specifiers are supported by this implementation:
+ * %0NN - Left-pad the number with 0s (NN is a decimal number)
+ * %NN - Left-pad the number or string with spaces (NN is a decimal number)
+ * %-NN - Right-pad the number or string with spaces (NN is a decimal number)
+ *
+ * The function panics on all other format specifiers.
+ *
+ * It returns the number of characters that would be written if the
+ * buffer was big enough. If it returns a value lower than n, the
+ * whole string has been written.
+ *******************************************************************/
+int snprintf(char *s, size_t n, const char *fmt, ...)
+{
+ int count;
+ va_list all_args;
+
+ va_start(all_args, fmt);
+ count = vsnprintf(s, n, fmt, all_args);
+ va_end(all_args);
+
+ return count;
+}
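For reference, the consolidated vsnprintf()/snprintf() above behaves as sketched below. Illustrative only, not part of the patch:

    char buf[32];
    int len;

    len = snprintf(buf, sizeof(buf), "%08x", 0xbeefU);
    /* buf = "0000beef", len = 8: zero-padded to the requested width */

    len = snprintf(buf, sizeof(buf), "addr=%p", (void *)0x1000);
    /* buf = "addr=0x1000", len = 11: "0x" is prefixed for non-NULL pointers */

    len = snprintf(buf, 4, "%s", "truncated");
    /* buf = "tru", len = 9: the return value is the length the full output
       would need, so len >= n signals truncation */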
diff --git a/lib/libc/strlcat.c b/lib/libc/strlcat.c
new file mode 100644
index 000000000..e60c863a5
--- /dev/null
+++ b/lib/libc/strlcat.c
@@ -0,0 +1,56 @@
+/* $OpenBSD: strlcat.c,v 1.15 2015/03/02 21:41:08 millert Exp $ */
+
+/*
+ * SPDX-License-Identifier: ISC
+ *
+ * Copyright (c) 1998, 2015 Todd C. Miller <Todd.Miller@courtesan.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <string.h>
+
+/*
+ * Appends src to string dst of size dsize (unlike strncat, dsize is the
+ * full size of dst, not space left). At most dsize-1 characters
+ * will be copied. Always NUL terminates (unless dsize <= strlen(dst)).
+ * Returns strlen(src) + MIN(dsize, strlen(initial dst)).
+ * If retval >= dsize, truncation occurred.
+ */
+size_t
+strlcat(char * dst, const char * src, size_t dsize)
+{
+ const char *odst = dst;
+ const char *osrc = src;
+ size_t n = dsize;
+ size_t dlen;
+
+ /* Find the end of dst and adjust bytes left but don't go past end. */
+ while (n-- != 0 && *dst != '\0')
+ dst++;
+ dlen = dst - odst;
+ n = dsize - dlen;
+
+ if (n-- == 0)
+ return(dlen + strlen(src));
+ while (*src != '\0') {
+ if (n != 0) {
+ *dst++ = *src;
+ n--;
+ }
+ src++;
+ }
+ *dst = '\0';
+
+ return(dlen + (src - osrc)); /* count does not include NUL */
+}
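A minimal sketch of the truncation contract described in the header comment above (illustrative, not part of the patch):

    char path[8] = "/usr";

    if (strlcat(path, "/local/bin", sizeof(path)) >= sizeof(path)) {
        /* Truncated: path holds "/usr/lo" and is still NUL-terminated */
    }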
diff --git a/lib/libc/strtok.c b/lib/libc/strtok.c
new file mode 100644
index 000000000..7e1a4d2f7
--- /dev/null
+++ b/lib/libc/strtok.c
@@ -0,0 +1,83 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1998 Softweyr LLC. All rights reserved.
+ *
+ * strtok_r, from Berkeley strtok
+ * Oct 13, 1998 by Wes Peters <wes@softweyr.com>
+ *
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notices, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notices, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SOFTWEYR LLC, THE REGENTS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SOFTWEYR LLC, THE
+ * REGENTS, OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+char *
+strtok_r(char *s, const char *delim, char **last)
+{
+ char *spanp, *tok;
+ int c, sc;
+
+ if (s == NULL && (s = *last) == NULL)
+ return (NULL);
+
+ /*
+ * Skip (span) leading delimiters (s += strspn(s, delim), sort of).
+ */
+cont:
+ c = *s++;
+ for (spanp = (char *)delim; (sc = *spanp++) != 0;) {
+ if (c == sc)
+ goto cont;
+ }
+
+ if (c == 0) { /* no non-delimiter characters */
+ *last = NULL;
+ return (NULL);
+ }
+ tok = s - 1;
+
+ /*
+ * Scan token (scan for delimiters: s += strcspn(s, delim), sort of).
+ * Note that delim must have one NUL; we stop if we see that, too.
+ */
+ for (;;) {
+ c = *s++;
+ spanp = (char *)delim;
+ do {
+ if ((sc = *spanp++) == c) {
+ if (c == 0)
+ s = NULL;
+ else
+ s[-1] = '\0';
+ *last = s;
+ return (tok);
+ }
+ } while (sc != 0);
+ }
+ /* NOTREACHED */
+}
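Unlike strtok(), all parser state lives in the caller-provided 'last' pointer, so strtok_r() is safe to call from concurrent contexts. A short sketch (values illustrative):

    char line[] = "boot,console console=uart";
    char *save;

    for (char *tok = strtok_r(line, " ,", &save); tok != NULL;
         tok = strtok_r(NULL, " ,", &save)) {
        /* visits "boot", "console" and "console=uart" in turn */
    }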
diff --git a/lib/libc/strtol.c b/lib/libc/strtol.c
new file mode 100644
index 000000000..deb862cc0
--- /dev/null
+++ b/lib/libc/strtol.c
@@ -0,0 +1,133 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Copyright (c) 2011 The FreeBSD Foundation
+ * All rights reserved.
+ * Portions of this software were developed by David Chisnall
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+/*
+ * Convert a string to a long integer.
+ *
+ * Assumes that the upper and lower case
+ * alphabets and digits are each contiguous.
+ */
+long strtol(const char *nptr, char **endptr, int base)
+{
+ const char *s;
+ unsigned long acc;
+ char c;
+ unsigned long cutoff;
+ int neg, any, cutlim;
+
+ /*
+ * Skip white space and pick up leading +/- sign if any.
+ * If base is 0, allow 0x for hex and 0 for octal, else
+ * assume decimal; if base is already 16, allow 0x.
+ */
+ s = nptr;
+ do {
+ c = *s++;
+ } while (isspace((unsigned char)c));
+ if (c == '-') {
+ neg = 1;
+ c = *s++;
+ } else {
+ neg = 0;
+ if (c == '+')
+ c = *s++;
+ }
+ if ((base == 0 || base == 16) &&
+ c == '0' && (*s == 'x' || *s == 'X') &&
+ ((s[1] >= '0' && s[1] <= '9') ||
+ (s[1] >= 'A' && s[1] <= 'F') ||
+ (s[1] >= 'a' && s[1] <= 'f'))) {
+ c = s[1];
+ s += 2;
+ base = 16;
+ }
+ if (base == 0)
+ base = c == '0' ? 8 : 10;
+ acc = any = 0;
+
+ /*
+ * Compute the cutoff value between legal numbers and illegal
+ * numbers. That is the largest legal value, divided by the
+ * base. An input number that is greater than this value, if
+ * followed by a legal input character, is too big. One that
+ * is equal to this value may be valid or not; the limit
+ * between valid and invalid numbers is then based on the last
+ * digit. For instance, if the range for longs is
+ * [-2147483648..2147483647] and the input base is 10,
+ * cutoff will be set to 214748364 and cutlim to either
+ * 7 (neg==0) or 8 (neg==1), meaning that if we have accumulated
+ * a value > 214748364, or equal but the next digit is > 7 (or 8),
+ * the number is too big, and we will return a range error.
+ *
+ * Set 'any' if any `digits' consumed; make it negative to indicate
+ * overflow.
+ */
+ cutoff = neg ? (unsigned long)-(LONG_MIN + LONG_MAX) + LONG_MAX
+ : LONG_MAX;
+ cutlim = cutoff % base;
+ cutoff /= base;
+ for ( ; ; c = *s++) {
+ if (c >= '0' && c <= '9')
+ c -= '0';
+ else if (c >= 'A' && c <= 'Z')
+ c -= 'A' - 10;
+ else if (c >= 'a' && c <= 'z')
+ c -= 'a' - 10;
+ else
+ break;
+ if (c >= base)
+ break;
+ if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim))
+ any = -1;
+ else {
+ any = 1;
+ acc *= base;
+ acc += c;
+ }
+ }
+ if (any < 0) {
+ acc = neg ? LONG_MIN : LONG_MAX;
+ } else if (neg)
+ acc = -acc;
+ if (endptr != NULL)
+ *endptr = (char *)(any ? s - 1 : nptr);
+ return (acc);
+}
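Note that this copy clamps on overflow but never touches errno, unlike ISO C strtol(). A quick illustration (not part of the patch):

    char *end;
    long v;

    v = strtol("0x1A rest", &end, 0);  /* v = 26, end points at " rest" */
    v = strtol("99999999999999999999", NULL, 10);  /* clamps to LONG_MAX */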
diff --git a/lib/libc/strtoll.c b/lib/libc/strtoll.c
new file mode 100644
index 000000000..4e101e80e
--- /dev/null
+++ b/lib/libc/strtoll.c
@@ -0,0 +1,134 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Copyright (c) 2011 The FreeBSD Foundation
+ * All rights reserved.
+ * Portions of this software were developed by David Chisnall
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+/*
+ * Convert a string to a long long integer.
+ *
+ * Assumes that the upper and lower case
+ * alphabets and digits are each contiguous.
+ */
+long long strtoll(const char *nptr, char **endptr, int base)
+{
+ const char *s;
+ unsigned long long acc;
+ char c;
+ unsigned long long cutoff;
+ int neg, any, cutlim;
+
+ /*
+ * Skip white space and pick up leading +/- sign if any.
+ * If base is 0, allow 0x for hex and 0 for octal, else
+ * assume decimal; if base is already 16, allow 0x.
+ */
+ s = nptr;
+ do {
+ c = *s++;
+ } while (isspace((unsigned char)c));
+ if (c == '-') {
+ neg = 1;
+ c = *s++;
+ } else {
+ neg = 0;
+ if (c == '+')
+ c = *s++;
+ }
+ if ((base == 0 || base == 16) &&
+ c == '0' && (*s == 'x' || *s == 'X') &&
+ ((s[1] >= '0' && s[1] <= '9') ||
+ (s[1] >= 'A' && s[1] <= 'F') ||
+ (s[1] >= 'a' && s[1] <= 'f'))) {
+ c = s[1];
+ s += 2;
+ base = 16;
+ }
+ if (base == 0)
+ base = c == '0' ? 8 : 10;
+ acc = any = 0;
+
+ /*
+ * Compute the cutoff value between legal numbers and illegal
+ * numbers. That is the largest legal value, divided by the
+ * base. An input number that is greater than this value, if
+ * followed by a legal input character, is too big. One that
+ * is equal to this value may be valid or not; the limit
+ * between valid and invalid numbers is then based on the last
+ * digit. For instance, if the range for quads is
+ * [-9223372036854775808..9223372036854775807] and the input base
+ * is 10, cutoff will be set to 922337203685477580 and cutlim to
+ * either 7 (neg==0) or 8 (neg==1), meaning that if we have
+ * accumulated a value > 922337203685477580, or equal but the
+ * next digit is > 7 (or 8), the number is too big, and we will
+ * return a range error.
+ *
+ * Set 'any' if any `digits' consumed; make it negative to indicate
+ * overflow.
+ */
+ cutoff = neg ? (unsigned long long)-(LLONG_MIN + LLONG_MAX) + LLONG_MAX
+ : LLONG_MAX;
+ cutlim = cutoff % base;
+ cutoff /= base;
+ for ( ; ; c = *s++) {
+ if (c >= '0' && c <= '9')
+ c -= '0';
+ else if (c >= 'A' && c <= 'Z')
+ c -= 'A' - 10;
+ else if (c >= 'a' && c <= 'z')
+ c -= 'a' - 10;
+ else
+ break;
+ if (c >= base)
+ break;
+ if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim))
+ any = -1;
+ else {
+ any = 1;
+ acc *= base;
+ acc += c;
+ }
+ }
+ if (any < 0) {
+ acc = neg ? LLONG_MIN : LLONG_MAX;
+ } else if (neg)
+ acc = -acc;
+ if (endptr != NULL)
+ *endptr = (char *)(any ? s - 1 : nptr);
+ return (acc);
+}
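The long long variant matters on AArch32 builds, where long is only 32 bits wide; strtoll() still returns a full 64-bit result there. For example (illustrative):

    /* 0xFFFFFFFFF = 68719476735, which does not fit in a 32-bit long */
    long long ticks = strtoll("0xFFFFFFFFF", NULL, 16);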
diff --git a/lib/libc/strtoul.c b/lib/libc/strtoul.c
new file mode 100644
index 000000000..b42fb141e
--- /dev/null
+++ b/lib/libc/strtoul.c
@@ -0,0 +1,112 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Copyright (c) 2011 The FreeBSD Foundation
+ * All rights reserved.
+ * Portions of this software were developed by David Chisnall
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+/*
+ * Convert a string to an unsigned long integer.
+ *
+ * Assumes that the upper and lower case
+ * alphabets and digits are each contiguous.
+ */
+unsigned long strtoul(const char *nptr, char **endptr, int base)
+{
+ const char *s;
+ unsigned long acc;
+ char c;
+ unsigned long cutoff;
+ int neg, any, cutlim;
+
+ /*
+ * See strtol for comments as to the logic used.
+ */
+ s = nptr;
+ do {
+ c = *s++;
+ } while (isspace((unsigned char)c));
+ if (c == '-') {
+ neg = 1;
+ c = *s++;
+ } else {
+ neg = 0;
+ if (c == '+')
+ c = *s++;
+ }
+ if ((base == 0 || base == 16) &&
+ c == '0' && (*s == 'x' || *s == 'X') &&
+ ((s[1] >= '0' && s[1] <= '9') ||
+ (s[1] >= 'A' && s[1] <= 'F') ||
+ (s[1] >= 'a' && s[1] <= 'f'))) {
+ c = s[1];
+ s += 2;
+ base = 16;
+ }
+ if (base == 0)
+ base = c == '0' ? 8 : 10;
+ acc = any = 0;
+
+ cutoff = ULONG_MAX / base;
+ cutlim = ULONG_MAX % base;
+ for ( ; ; c = *s++) {
+ if (c >= '0' && c <= '9')
+ c -= '0';
+ else if (c >= 'A' && c <= 'Z')
+ c -= 'A' - 10;
+ else if (c >= 'a' && c <= 'z')
+ c -= 'a' - 10;
+ else
+ break;
+ if (c >= base)
+ break;
+ if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim))
+ any = -1;
+ else {
+ any = 1;
+ acc *= base;
+ acc += c;
+ }
+ }
+ if (any < 0) {
+ acc = ULONG_MAX;
+ } else if (neg)
+ acc = -acc;
+ if (endptr != NULL)
+ *endptr = (char *)(any ? s - 1 : nptr);
+ return (acc);
+}
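One BSD behaviour worth knowing, shared with ISO C strtoul(): a leading '-' is accepted and negates the result modulo the type. Illustrative only:

    unsigned long u = strtoul("-1", NULL, 10);
    /* u == ULONG_MAX: the '-' wraps the value rather than failing */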
diff --git a/lib/libc/strtoull.c b/lib/libc/strtoull.c
new file mode 100644
index 000000000..2e65a433d
--- /dev/null
+++ b/lib/libc/strtoull.c
@@ -0,0 +1,112 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Copyright (c) 2011 The FreeBSD Foundation
+ * All rights reserved.
+ * Portions of this software were developed by David Chisnall
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+/*
+ * Convert a string to an unsigned long long integer.
+ *
+ * Assumes that the upper and lower case
+ * alphabets and digits are each contiguous.
+ */
+unsigned long long strtoull(const char *nptr, char **endptr, int base)
+{
+ const char *s;
+ unsigned long long acc;
+ char c;
+ unsigned long long cutoff;
+ int neg, any, cutlim;
+
+ /*
+ * See strtoll (formerly strtoq) for comments as to the logic used.
+ */
+ s = nptr;
+ do {
+ c = *s++;
+ } while (isspace((unsigned char)c));
+ if (c == '-') {
+ neg = 1;
+ c = *s++;
+ } else {
+ neg = 0;
+ if (c == '+')
+ c = *s++;
+ }
+ if ((base == 0 || base == 16) &&
+ c == '0' && (*s == 'x' || *s == 'X') &&
+ ((s[1] >= '0' && s[1] <= '9') ||
+ (s[1] >= 'A' && s[1] <= 'F') ||
+ (s[1] >= 'a' && s[1] <= 'f'))) {
+ c = s[1];
+ s += 2;
+ base = 16;
+ }
+ if (base == 0)
+ base = c == '0' ? 8 : 10;
+ acc = any = 0;
+
+ cutoff = ULLONG_MAX / base;
+ cutlim = ULLONG_MAX % base;
+ for ( ; ; c = *s++) {
+ if (c >= '0' && c <= '9')
+ c -= '0';
+ else if (c >= 'A' && c <= 'Z')
+ c -= 'A' - 10;
+ else if (c >= 'a' && c <= 'z')
+ c -= 'a' - 10;
+ else
+ break;
+ if (c >= base)
+ break;
+ if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim))
+ any = -1;
+ else {
+ any = 1;
+ acc *= base;
+ acc += c;
+ }
+ }
+ if (any < 0) {
+ acc = ULLONG_MAX;
+ } else if (neg)
+ acc = -acc;
+ if (endptr != NULL)
+ *endptr = (char *)(any ? s - 1 : nptr);
+ return (acc);
+}
diff --git a/lib/libfdt/libfdt.mk b/lib/libfdt/libfdt.mk
index ff49b6663..2c6664889 100644
--- a/lib/libfdt/libfdt.mk
+++ b/lib/libfdt/libfdt.mk
@@ -14,6 +14,6 @@ LIBFDT_SRCS := $(addprefix lib/libfdt/libfdt/, \
fdt_sw.c \
fdt_wip.c) \
-INCLUDES += -Ilib/libfdt/libfdt
+INCLUDES += -Iinclude/lib/libfdt/
$(eval $(call MAKE_LIB,fdt))
diff --git a/lib/locks/bakery/bakery_lock_normal.c b/lib/locks/bakery/bakery_lock_normal.c
index caced8f46..7d35dea66 100644
--- a/lib/locks/bakery/bakery_lock_normal.c
+++ b/lib/locks/bakery/bakery_lock_normal.c
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -77,11 +78,13 @@ static inline void read_cache_op(uintptr_t addr, bool cached)
{
if (cached)
dccivac(addr);
+
+ dmbish();
}
/* Helper function to check if the lock is acquired */
static inline bool is_lock_acquired(const bakery_info_t *my_bakery_info,
- int is_cached)
+ bool is_cached)
{
/*
* Even though lock data is updated only by the owning cpu and
@@ -96,7 +99,7 @@ static inline bool is_lock_acquired(const bakery_info_t *my_bakery_info,
}
static unsigned int bakery_get_ticket(bakery_lock_t *lock,
- unsigned int me, int is_cached)
+ unsigned int me, bool is_cached)
{
unsigned int my_ticket, their_ticket;
unsigned int they;
@@ -161,17 +164,14 @@ static unsigned int bakery_get_ticket(bakery_lock_t *lock,
void bakery_lock_get(bakery_lock_t *lock)
{
- unsigned int they, me, is_cached;
+ unsigned int they, me;
unsigned int my_ticket, my_prio, their_ticket;
bakery_info_t *their_bakery_info;
unsigned int their_bakery_data;
+ bool is_cached;
me = plat_my_core_pos();
-#ifdef __aarch64__
- is_cached = read_sctlr_el3() & SCTLR_C_BIT;
-#else
- is_cached = read_sctlr() & SCTLR_C_BIT;
-#endif
+ is_cached = is_dcache_enabled();
/* Get a ticket */
my_ticket = bakery_get_ticket(lock, me, is_cached);
@@ -229,11 +229,7 @@ void bakery_lock_get(bakery_lock_t *lock)
void bakery_lock_release(bakery_lock_t *lock)
{
bakery_info_t *my_bakery_info;
-#ifdef __aarch64__
- unsigned int is_cached = read_sctlr_el3() & SCTLR_C_BIT;
-#else
- unsigned int is_cached = read_sctlr() & SCTLR_C_BIT;
-#endif
+ bool is_cached = is_dcache_enabled();
my_bakery_info = get_bakery_info(plat_my_core_pos(), lock);
diff --git a/lib/optee/optee_utils.c b/lib/optee/optee_utils.c
index 2a407939b..0ad108242 100644
--- a/lib/optee/optee_utils.c
+++ b/lib/optee/optee_utils.c
@@ -1,15 +1,12 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
-#include <errno.h>
-#include <arch_helpers.h>
#include <common/debug.h>
-#include <common/desc_image_load.h>
#include <lib/optee_utils.h>
/*
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
index 5ab15c6ee..9f8a08abb 100644
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -12,6 +12,7 @@
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
+#include <drivers/delay_timer.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
@@ -601,7 +602,7 @@ void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
unsigned int level;
/* Unlock top down. No unlocking required for level 0. */
- for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1U; level--) {
+ for (level = end_pwrlvl; level >= (PSCI_CPU_PWR_LVL + 1U); level--) {
parent_idx = parent_nodes[level - 1U];
psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
}
@@ -662,7 +663,8 @@ static int psci_get_ns_ep_info(entry_point_info_t *ep,
mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ? MODE_EL2 : MODE_EL1;
- ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+ ep->spsr = SPSR_64((uint64_t)mode, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS);
} else {
mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
@@ -674,7 +676,8 @@ static int psci_get_ns_ep_info(entry_point_info_t *ep,
*/
daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
- ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
+ ep->spsr = SPSR_MODE32((uint64_t)mode, entrypoint & 0x1, ee,
+ daif);
}
return PSCI_E_SUCCESS;
@@ -973,3 +976,49 @@ void psci_do_pwrdown_sequence(unsigned int power_level)
psci_do_pwrdown_cache_maintenance(power_level);
#endif
}
+
+/*******************************************************************************
+ * This function invokes the callback 'stop_func()' with the 'mpidr' of each
+ * online PE. Caller can pass suitable method to stop a remote core.
+ *
+ * 'wait_ms' is the timeout value in milliseconds for the other cores to
+ * transition to power down state. Passing '0' makes it non-blocking.
+ *
+ * The function returns 'PSCI_E_DENIED' if some cores failed to stop within the
+ * given timeout.
+ ******************************************************************************/
+int psci_stop_other_cores(unsigned int wait_ms,
+ void (*stop_func)(u_register_t mpidr))
+{
+ unsigned int idx, this_cpu_idx;
+
+ this_cpu_idx = plat_my_core_pos();
+
+ /* Invoke stop_func for each core */
+ for (idx = 0U; idx < psci_plat_core_count; idx++) {
+ /* skip current CPU */
+ if (idx == this_cpu_idx) {
+ continue;
+ }
+
+ /* Check if the CPU is ON */
+ if (psci_get_aff_info_state_by_idx(idx) == AFF_STATE_ON) {
+ (*stop_func)(psci_cpu_pd_nodes[idx].mpidr);
+ }
+ }
+
+ /* Wait for the other cores to shut down */
+ if (wait_ms != 0U) {
+ while ((wait_ms-- != 0U) && (psci_is_last_on_cpu() != 0U)) {
+ mdelay(1U);
+ }
+
+ if (psci_is_last_on_cpu() != 0U) {
+ WARN("Failed to stop all cores!\n");
+ psci_print_power_domain_map();
+ return PSCI_E_DENIED;
+ }
+ }
+
+ return PSCI_E_SUCCESS;
+}
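A sketch of how a platform might use the new helper, assuming a hypothetical plat_stop_core() hook that asks one remote core to power down:

    static void plat_stop_core(u_register_t mpidr)
    {
        /* Hypothetical: signal the core identified by mpidr to power down */
    }

    /* Give the other cores up to 100 ms to reach the OFF state */
    if (psci_stop_other_cores(100U, plat_stop_core) != PSCI_E_SUCCESS) {
        ERROR("Some cores failed to stop\n");
    }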
diff --git a/lib/psci/psci_private.h b/lib/psci/psci_private.h
index e2dcfa8b1..72bd6bd11 100644
--- a/lib/psci/psci_private.h
+++ b/lib/psci/psci_private.h
@@ -42,6 +42,11 @@
define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64) | \
define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64))
+/*
+ * PSCI internally uses a uint16_t for various CPU indexes, so
+ * define a limit on the number of CPUs that can be initialised.
+ */
+#define PSCI_MAX_CPUS_INDEX 0xFFFFU
+
/*
* Helper functions to get/set the fields of PSCI per-cpu data.
*/
@@ -134,7 +139,7 @@ typedef struct non_cpu_pwr_domain_node {
unsigned char level;
/* For indexing the psci_lock array*/
- unsigned char lock_index;
+ uint16_t lock_index;
} non_cpu_pd_node_t;
typedef struct cpu_pwr_domain_node {
@@ -239,7 +244,7 @@ static inline void psci_lock_release(non_cpu_pd_node_t *non_cpu_pd_node)
#endif /* HW_ASSISTED_COHERENCY */
static inline void psci_lock_init(non_cpu_pd_node_t *non_cpu_pd_node,
- unsigned char idx)
+ uint16_t idx)
{
non_cpu_pd_node[idx].lock_index = idx;
}
diff --git a/lib/psci/psci_setup.c b/lib/psci/psci_setup.c
index d1ec99808..9c37d63f2 100644
--- a/lib/psci/psci_setup.c
+++ b/lib/psci/psci_setup.c
@@ -17,6 +17,12 @@
#include "psci_private.h"
+/*
+ * Check that PLATFORM_CORE_COUNT fits into the number of cores
+ * that can be represented by PSCI_MAX_CPUS_INDEX.
+ */
+CASSERT(PLATFORM_CORE_COUNT <= (PSCI_MAX_CPUS_INDEX + 1U), assert_psci_cores_overflow);
+
/*******************************************************************************
* Per cpu non-secure contexts used to program the architectural state prior
* return to the normal world.
@@ -34,11 +40,13 @@ unsigned int psci_caps;
* Function which initializes the 'psci_non_cpu_pd_nodes' or the
* 'psci_cpu_pd_nodes' corresponding to the power level.
******************************************************************************/
-static void __init psci_init_pwr_domain_node(unsigned char node_idx,
+static void __init psci_init_pwr_domain_node(uint16_t node_idx,
unsigned int parent_idx,
unsigned char level)
{
if (level > PSCI_CPU_PWR_LVL) {
+ assert(node_idx < PSCI_NUM_NON_CPU_PWR_DOMAINS);
+
psci_non_cpu_pd_nodes[node_idx].level = level;
psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
@@ -47,6 +55,8 @@ static void __init psci_init_pwr_domain_node(unsigned char node_idx,
} else {
psci_cpu_data_t *svc_cpu_data;
+ assert(node_idx < PLATFORM_CORE_COUNT);
+
psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;
/* Initialize with an invalid mpidr */
@@ -144,7 +154,7 @@ static unsigned int __init populate_power_domain_tree(const unsigned char
for (j = node_index;
j < (node_index + num_children); j++)
- psci_init_pwr_domain_node((unsigned char)j,
+ psci_init_pwr_domain_node((uint16_t)j,
parent_node_index - 1U,
(unsigned char)level);
diff --git a/lib/psci/psci_system_off.c b/lib/psci/psci_system_off.c
index 141d69ef2..002392cad 100644
--- a/lib/psci/psci_system_off.c
+++ b/lib/psci/psci_system_off.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -25,7 +25,7 @@ void __dead2 psci_system_off(void)
psci_spd_pm->svc_system_off();
}
- (void) console_flush();
+ console_flush();
/* Call the platform specific hook */
psci_plat_pm_ops->system_off();
@@ -44,7 +44,7 @@ void __dead2 psci_system_reset(void)
psci_spd_pm->svc_system_reset();
}
- (void) console_flush();
+ console_flush();
/* Call the platform specific hook */
psci_plat_pm_ops->system_reset();
@@ -77,7 +77,7 @@ u_register_t psci_system_reset2(uint32_t reset_type, u_register_t cookie)
if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_system_reset != NULL)) {
psci_spd_pm->svc_system_reset();
}
- (void) console_flush();
+ console_flush();
return (u_register_t)
psci_plat_pm_ops->system_reset2((int) is_vendor, reset_type,
diff --git a/lib/romlib/Makefile b/lib/romlib/Makefile
index cec94043d..2ff480bd4 100644
--- a/lib/romlib/Makefile
+++ b/lib/romlib/Makefile
@@ -10,14 +10,14 @@ LD = $(CROSS_COMPILE)ld
OC = $(CROSS_COMPILE)objcopy
CPP = $(CROSS_COMPILE)cpp
ROMLIB_GEN = ./romlib_generator.py
-BUILD_DIR = ../../$(BUILD_PLAT)/romlib
-LIB_DIR = ../../$(BUILD_PLAT)/lib
-WRAPPER_DIR = ../../$(BUILD_PLAT)/libwrapper
+BUILD_DIR = $(BUILD_PLAT)/romlib
+LIB_DIR = $(BUILD_PLAT)/lib
+WRAPPER_DIR = $(BUILD_PLAT)/libwrapper
LIBS = -lmbedtls -lfdt -lc
INC = $(INCLUDES:-I%=-I../../%)
PPFLAGS = $(INC) $(DEFINES) -P -x assembler-with-cpp -D__LINKER__ -MD -MP -MT $(BUILD_DIR)/romlib.ld
OBJS = $(BUILD_DIR)/jmptbl.o $(BUILD_DIR)/init.o
-MAPFILE = ../../$(BUILD_PLAT)/romlib/romlib.map
+MAPFILE = $(BUILD_PLAT)/romlib/romlib.map
ifneq ($(PLAT_DIR),)
WRAPPER_SOURCES = $(shell $(ROMLIB_GEN) genwrappers -b $(WRAPPER_DIR) --list ../../$(PLAT_DIR)/jmptbl.i)
diff --git a/lib/semihosting/semihosting.c b/lib/semihosting/semihosting.c
index 60fc52a00..e0845c1a5 100644
--- a/lib/semihosting/semihosting.c
+++ b/lib/semihosting/semihosting.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -14,8 +14,7 @@
#define SEMIHOSTING_SUPPORTED 1
#endif
-long semihosting_call(unsigned long operation,
- uintptr_t system_block_address);
+long semihosting_call(unsigned long operation, uintptr_t system_block_address);
typedef struct {
const char *file_name;
@@ -52,8 +51,7 @@ long semihosting_file_open(const char *file_name, size_t mode)
open_block.mode = mode;
open_block.name_length = strlen(file_name);
- return semihosting_call(SEMIHOSTING_SYS_OPEN,
- (uintptr_t) &open_block);
+ return semihosting_call(SEMIHOSTING_SYS_OPEN, (uintptr_t)&open_block);
}
long semihosting_file_seek(long file_handle, ssize_t offset)
@@ -64,11 +62,11 @@ long semihosting_file_seek(long file_handle, ssize_t offset)
seek_block.handle = file_handle;
seek_block.location = offset;
- result = semihosting_call(SEMIHOSTING_SYS_SEEK,
- (uintptr_t) &seek_block);
+ result = semihosting_call(SEMIHOSTING_SYS_SEEK, (uintptr_t)&seek_block);
- if (result)
+ if (result != 0) {
result = semihosting_call(SEMIHOSTING_SYS_ERRNO, 0);
+ }
return result;
}
@@ -78,41 +76,42 @@ long semihosting_file_read(long file_handle, size_t *length, uintptr_t buffer)
smh_file_read_write_block_t read_block;
long result = -EINVAL;
- if ((length == NULL) || (buffer == (uintptr_t)NULL))
+ if ((length == NULL) || (buffer == (uintptr_t)NULL)) {
return result;
+ }
read_block.handle = file_handle;
read_block.buffer = buffer;
read_block.length = *length;
- result = semihosting_call(SEMIHOSTING_SYS_READ,
- (uintptr_t) &read_block);
+ result = semihosting_call(SEMIHOSTING_SYS_READ, (uintptr_t)&read_block);
if (result == *length) {
return -EINVAL;
} else if (result < *length) {
*length -= result;
return 0;
- } else
+ } else {
return result;
+ }
}
-long semihosting_file_write(long file_handle,
- size_t *length,
- const uintptr_t buffer)
+long semihosting_file_write(long file_handle, size_t *length,
+ const uintptr_t buffer)
{
smh_file_read_write_block_t write_block;
long result = -EINVAL;
- if ((length == NULL) || (buffer == (uintptr_t)NULL))
+ if ((length == NULL) || (buffer == (uintptr_t)NULL)) {
return -EINVAL;
+ }
write_block.handle = file_handle;
write_block.buffer = (uintptr_t)buffer; /* cast away const */
write_block.length = *length;
result = semihosting_call(SEMIHOSTING_SYS_WRITE,
- (uintptr_t) &write_block);
+ (uintptr_t)&write_block);
*length = result;
@@ -121,14 +120,12 @@ long semihosting_file_write(long file_handle,
long semihosting_file_close(long file_handle)
{
- return semihosting_call(SEMIHOSTING_SYS_CLOSE,
- (uintptr_t) &file_handle);
+ return semihosting_call(SEMIHOSTING_SYS_CLOSE, (uintptr_t)&file_handle);
}
long semihosting_file_length(long file_handle)
{
- return semihosting_call(SEMIHOSTING_SYS_FLEN,
- (uintptr_t) &file_handle);
+ return semihosting_call(SEMIHOSTING_SYS_FLEN, (uintptr_t)&file_handle);
}
char semihosting_read_char(void)
@@ -138,12 +135,12 @@ char semihosting_read_char(void)
void semihosting_write_char(char character)
{
- semihosting_call(SEMIHOSTING_SYS_WRITEC, (uintptr_t) &character);
+ semihosting_call(SEMIHOSTING_SYS_WRITEC, (uintptr_t)&character);
}
void semihosting_write_string(char *string)
{
- semihosting_call(SEMIHOSTING_SYS_WRITE0, (uintptr_t) string);
+ semihosting_call(SEMIHOSTING_SYS_WRITE0, (uintptr_t)string);
}
long semihosting_system(char *command_line)
@@ -154,7 +151,7 @@ long semihosting_system(char *command_line)
system_block.command_length = strlen(command_line);
return semihosting_call(SEMIHOSTING_SYS_SYSTEM,
- (uintptr_t) &system_block);
+ (uintptr_t)&system_block);
}
long semihosting_get_flen(const char *file_name)
@@ -162,16 +159,17 @@ long semihosting_get_flen(const char *file_name)
long file_handle;
long length;
- assert(semihosting_connection_supported());
+ assert(semihosting_connection_supported() != 0);
file_handle = semihosting_file_open(file_name, FOPEN_MODE_RB);
- if (file_handle == -1)
+ if (file_handle == -1) {
return file_handle;
+ }
/* Find the length of the file */
length = semihosting_file_length(file_handle);
- return semihosting_file_close(file_handle) ? -1 : length;
+ return (semihosting_file_close(file_handle) != 0) ? -1 : length;
}
long semihosting_download_file(const char *file_name,
@@ -183,23 +181,27 @@ long semihosting_download_file(const char *file_name,
long file_handle;
/* Null pointer check */
- if (!buf)
+ if (buf == 0U) {
return ret;
+ }
- assert(semihosting_connection_supported());
+ assert(semihosting_connection_supported() != 0);
file_handle = semihosting_file_open(file_name, FOPEN_MODE_RB);
- if (file_handle == -1)
+ if (file_handle == -1) {
return ret;
+ }
/* Find the actual length of the file */
length = semihosting_file_length(file_handle);
- if (length == -1)
+ if (length == (size_t)(-1)) {
goto semihosting_fail;
+ }
/* Signal error if we do not have enough space for the file */
- if (length > buf_size)
+ if (length > buf_size) {
goto semihosting_fail;
+ }
/*
* A successful read will return 0 in which case we pass back
@@ -207,10 +209,11 @@ long semihosting_download_file(const char *file_name,
* value indicating an error.
*/
ret = semihosting_file_read(file_handle, &length, buf);
- if (ret)
+ if (ret != 0) {
goto semihosting_fail;
- else
- ret = length;
+ } else {
+ ret = (long)length;
+ }
semihosting_fail:
semihosting_file_close(file_handle);
@@ -222,9 +225,9 @@ void semihosting_exit(uint32_t reason, uint32_t subcode)
#ifdef __aarch64__
uint64_t parameters[] = {reason, subcode};
- (void) semihosting_call(SEMIHOSTING_SYS_EXIT, (uintptr_t) &parameters);
+ (void)semihosting_call(SEMIHOSTING_SYS_EXIT, (uintptr_t)&parameters);
#else
/* The subcode is not supported on AArch32. */
- (void) semihosting_call(SEMIHOSTING_SYS_EXIT, reason);
+ (void)semihosting_call(SEMIHOSTING_SYS_EXIT, reason);
#endif
}
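Typical use of the download helper, assuming an attached debugger with semihosting enabled (file name and buffer size are illustrative):

    static char buf[0x100000];  /* 1 MiB staging buffer */

    long ret = semihosting_download_file("bl33.bin", sizeof(buf),
                                         (uintptr_t)buf);
    if (ret > 0) {
        /* ret is the number of bytes loaded into buf */
    }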
diff --git a/lib/utils/mem_region.c b/lib/utils/mem_region.c
index 08bccf64b..fec086b8c 100644
--- a/lib/utils/mem_region.c
+++ b/lib/utils/mem_region.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -32,8 +32,8 @@ void clear_mem_regions(mem_region_t *tbl, size_t nregions)
{
size_t i;
- assert(tbl);
- assert(nregions > 0);
+ assert(tbl != NULL);
+ assert(nregions > 0U);
for (i = 0; i < nregions; i++) {
assert(tbl->nbytes > 0);
@@ -64,28 +64,32 @@ void clear_map_dyn_mem_regions(struct mem_region *regions,
const unsigned int attr = MT_MEMORY | MT_RW | MT_NS;
assert(regions != NULL);
- assert(nregions > 0 && chunk > 0);
-
- for ( ; nregions--; regions++) {
- begin = regions->base;
- size = regions->nbytes;
- if ((begin & (chunk-1)) != 0 || (size & (chunk-1)) != 0) {
+ assert(nregions != 0U);
+ assert(chunk != 0U);
+
+ for (unsigned int i = 0U; i < nregions; i++) {
+ begin = regions[i].base;
+ size = regions[i].nbytes;
+ if (((begin & (chunk-1U)) != 0U) ||
+ ((size & (chunk-1U)) != 0U)) {
INFO("PSCI: Not correctly aligned region\n");
panic();
}
- while (size > 0) {
+ while (size > 0U) {
r = mmap_add_dynamic_region(begin, va, chunk, attr);
if (r != 0) {
- INFO("PSCI: mmap_add_dynamic_region failed with %d\n", r);
+ INFO("PSCI: %s failed with %d\n",
+ "mmap_add_dynamic_region", r);
panic();
}
- zero_normalmem((void *) va, chunk);
+ zero_normalmem((void *)va, chunk);
r = mmap_remove_dynamic_region(va, chunk);
if (r != 0) {
- INFO("PSCI: mmap_remove_dynamic_region failed with %d\n", r);
+ INFO("PSCI: %s failed with %d\n",
+ "mmap_remove_dynamic_region", r);
panic();
}
@@ -114,19 +118,20 @@ int mem_region_in_array_chk(mem_region_t *tbl, size_t nregions,
uintptr_t region_start, region_end, start, end;
size_t i;
- assert(tbl);
- assert(nbytes > 0);
+ assert(tbl != NULL);
+ assert(nbytes != 0U);
assert(!check_uptr_overflow(addr, nbytes-1));
region_start = addr;
- region_end = addr + (nbytes - 1);
- for (i = 0; i < nregions; i++) {
+ region_end = addr + (nbytes - 1U);
+ for (i = 0U; i < nregions; i++) {
assert(tbl->nbytes > 0);
assert(!check_uptr_overflow(tbl->base, tbl->nbytes-1));
start = tbl->base;
end = start + (tbl->nbytes - 1);
- if (region_start >= start && region_end <= end)
+ if ((region_start >= start) && (region_end <= end)) {
return 0;
+ }
tbl++;
}
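A sketch of the containment check, using a hypothetical single-bank DRAM layout:

    static mem_region_t dram[] = {
        { .base = 0x80000000UL, .nbytes = 0x40000000UL },  /* hypothetical */
    };

    /* Returns 0 when [addr, addr + nbytes) lies inside a listed region */
    if (mem_region_in_array_chk(dram, sizeof(dram) / sizeof(dram[0]),
                                0x80100000UL, 0x1000UL) != 0) {
        panic();
    }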
diff --git a/lib/xlat_tables/aarch32/nonlpae_tables.c b/lib/xlat_tables/aarch32/nonlpae_tables.c
index b8c268665..7cd509d56 100644
--- a/lib/xlat_tables/aarch32/nonlpae_tables.c
+++ b/lib/xlat_tables/aarch32/nonlpae_tables.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016-2017, Linaro Limited. All rights reserved.
- * Copyright (c) 2014-2019, Arm Limited. All rights reserved.
+ * Copyright (c) 2014-2020, Arm Limited. All rights reserved.
* Copyright (c) 2014, STMicroelectronics International N.V.
* All rights reserved.
*
@@ -30,8 +30,8 @@ This module is to be used when LPAE is not supported"
CASSERT(PLAT_VIRT_ADDR_SPACE_SIZE == (1ULL << 32), invalid_vaddr_space_size);
CASSERT(PLAT_PHY_ADDR_SPACE_SIZE == (1ULL << 32), invalid_paddr_space_size);
-#define MMU32B_UNSET_DESC ~0ul
-#define MMU32B_INVALID_DESC 0ul
+#define MMU32B_UNSET_DESC ~0UL
+#define MMU32B_INVALID_DESC 0UL
#define MT_UNKNOWN ~0U
@@ -40,38 +40,38 @@ CASSERT(PLAT_PHY_ADDR_SPACE_SIZE == (1ULL << 32), invalid_paddr_space_size);
*/
/* Sharable */
-#define MMU32B_TTB_S (1 << 1)
+#define MMU32B_TTB_S (1U << 1)
/* Not Outer Sharable */
-#define MMU32B_TTB_NOS (1 << 5)
+#define MMU32B_TTB_NOS (1U << 5)
/* Normal memory, Inner Non-cacheable */
-#define MMU32B_TTB_IRGN_NC 0
+#define MMU32B_TTB_IRGN_NC 0U
/* Normal memory, Inner Write-Back Write-Allocate Cacheable */
-#define MMU32B_TTB_IRGN_WBWA (1 << 6)
+#define MMU32B_TTB_IRGN_WBWA (1U << 6)
/* Normal memory, Inner Write-Through Cacheable */
-#define MMU32B_TTB_IRGN_WT 1
+#define MMU32B_TTB_IRGN_WT 1U
/* Normal memory, Inner Write-Back no Write-Allocate Cacheable */
-#define MMU32B_TTB_IRGN_WB (1 | (1 << 6))
+#define MMU32B_TTB_IRGN_WB (1U | (1U << 6))
/* Normal memory, Outer Write-Back Write-Allocate Cacheable */
-#define MMU32B_TTB_RNG_WBWA (1 << 3)
+#define MMU32B_TTB_RNG_WBWA (1U << 3)
#define MMU32B_DEFAULT_ATTRS \
(MMU32B_TTB_S | MMU32B_TTB_NOS | \
MMU32B_TTB_IRGN_WBWA | MMU32B_TTB_RNG_WBWA)
/* armv7 memory mapping attributes: section mapping */
-#define SECTION_SECURE (0 << 19)
-#define SECTION_NOTSECURE (1 << 19)
-#define SECTION_SHARED (1 << 16)
-#define SECTION_NOTGLOBAL (1 << 17)
-#define SECTION_ACCESS_FLAG (1 << 10)
-#define SECTION_UNPRIV (1 << 11)
-#define SECTION_RO (1 << 15)
+#define SECTION_SECURE (0U << 19)
+#define SECTION_NOTSECURE (1U << 19)
+#define SECTION_SHARED (1U << 16)
+#define SECTION_NOTGLOBAL (1U << 17)
+#define SECTION_ACCESS_FLAG (1U << 10)
+#define SECTION_UNPRIV (1U << 11)
+#define SECTION_RO (1U << 15)
#define SECTION_TEX(tex) ((((tex) >> 2) << 12) | \
((((tex) >> 1) & 0x1) << 3) | \
(((tex) & 0x1) << 2))
@@ -80,16 +80,16 @@ CASSERT(PLAT_PHY_ADDR_SPACE_SIZE == (1ULL << 32), invalid_paddr_space_size);
#define SECTION_NORMAL_CACHED \
SECTION_TEX(MMU32B_ATTR_IWBWA_OWBWA_INDEX)
-#define SECTION_XN (1 << 4)
-#define SECTION_PXN (1 << 0)
-#define SECTION_SECTION (2 << 0)
+#define SECTION_XN (1U << 4)
+#define SECTION_PXN (1U << 0)
+#define SECTION_SECTION (2U << 0)
-#define SECTION_PT_NOTSECURE (1 << 3)
-#define SECTION_PT_PT (1 << 0)
+#define SECTION_PT_NOTSECURE (1U << 3)
+#define SECTION_PT_PT (1U << 0)
-#define SMALL_PAGE_SMALL_PAGE (1 << 1)
-#define SMALL_PAGE_SHARED (1 << 10)
-#define SMALL_PAGE_NOTGLOBAL (1 << 11)
+#define SMALL_PAGE_SMALL_PAGE (1U << 1)
+#define SMALL_PAGE_SHARED (1U << 10)
+#define SMALL_PAGE_NOTGLOBAL (1U << 11)
#define SMALL_PAGE_TEX(tex) ((((tex) >> 2) << 6) | \
((((tex) >> 1) & 0x1) << 3) | \
(((tex) & 0x1) << 2))
@@ -99,39 +99,39 @@ CASSERT(PLAT_PHY_ADDR_SPACE_SIZE == (1ULL << 32), invalid_paddr_space_size);
SMALL_PAGE_TEX(MMU32B_ATTR_DEVICE_INDEX)
#define SMALL_PAGE_NORMAL_CACHED \
SMALL_PAGE_TEX(MMU32B_ATTR_IWBWA_OWBWA_INDEX)
-#define SMALL_PAGE_ACCESS_FLAG (1 << 4)
-#define SMALL_PAGE_UNPRIV (1 << 5)
-#define SMALL_PAGE_RO (1 << 9)
-#define SMALL_PAGE_XN (1 << 0)
+#define SMALL_PAGE_ACCESS_FLAG (1U << 4)
+#define SMALL_PAGE_UNPRIV (1U << 5)
+#define SMALL_PAGE_RO (1U << 9)
+#define SMALL_PAGE_XN (1U << 0)
/* The TEX, C and B bits concatenated */
-#define MMU32B_ATTR_DEVICE_INDEX 0x0
-#define MMU32B_ATTR_IWBWA_OWBWA_INDEX 0x1
+#define MMU32B_ATTR_DEVICE_INDEX 0U
+#define MMU32B_ATTR_IWBWA_OWBWA_INDEX 1U
#define MMU32B_PRRR_IDX(idx, tr, nos) (((tr) << (2 * (idx))) | \
((uint32_t)(nos) << ((idx) + 24)))
#define MMU32B_NMRR_IDX(idx, ir, or) (((ir) << (2 * (idx))) | \
((uint32_t)(or) << (2 * (idx) + 16)))
-#define MMU32B_PRRR_DS0 (1 << 16)
-#define MMU32B_PRRR_DS1 (1 << 17)
-#define MMU32B_PRRR_NS0 (1 << 18)
-#define MMU32B_PRRR_NS1 (1 << 19)
+#define MMU32B_PRRR_DS0 (1U << 16)
+#define MMU32B_PRRR_DS1 (1U << 17)
+#define MMU32B_PRRR_NS0 (1U << 18)
+#define MMU32B_PRRR_NS1 (1U << 19)
#define DACR_DOMAIN(num, perm) ((perm) << ((num) * 2))
-#define DACR_DOMAIN_PERM_NO_ACCESS 0x0
-#define DACR_DOMAIN_PERM_CLIENT 0x1
-#define DACR_DOMAIN_PERM_MANAGER 0x3
+#define DACR_DOMAIN_PERM_NO_ACCESS 0U
+#define DACR_DOMAIN_PERM_CLIENT 1U
+#define DACR_DOMAIN_PERM_MANAGER 3U
-#define NUM_1MB_IN_4GB (1U << 12)
-#define NUM_4K_IN_1MB (1U << 8)
+#define NUM_1MB_IN_4GB (1UL << 12)
+#define NUM_4K_IN_1MB (1UL << 8)
#define ONE_MB_SHIFT 20
/* mmu 32b integration */
#define MMU32B_L1_TABLE_SIZE (NUM_1MB_IN_4GB * 4)
#define MMU32B_L2_TABLE_SIZE (NUM_4K_IN_1MB * 4)
-#define MMU32B_L1_TABLE_ALIGN (1 << 14)
-#define MMU32B_L2_TABLE_ALIGN (1 << 10)
+#define MMU32B_L1_TABLE_ALIGN (1U << 14)
+#define MMU32B_L2_TABLE_ALIGN (1U << 10)
static unsigned int next_xlat;
static unsigned long long xlat_max_pa;
@@ -190,8 +190,9 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
assert(IS_PAGE_ALIGNED(base_va));
assert(IS_PAGE_ALIGNED(size));
- if (size == 0U)
+ if (size == 0U) {
return;
+ }
assert(base_pa < end_pa); /* Check for overflows */
assert(base_va < end_va);
@@ -249,8 +250,9 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
#endif /* ENABLE_ASSERTIONS */
/* Find correct place in mmap to insert new region */
- while ((mm->base_va < base_va) && (mm->size != 0U))
+ while ((mm->base_va < base_va) && (mm->size != 0U)) {
++mm;
+ }
/*
* If a section is contained inside another one with the same base
@@ -263,8 +265,9 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
* This is required for mmap_region_attr() to get the attributes of the
* small region correctly.
*/
- while ((mm->base_va == base_va) && (mm->size > size))
+ while ((mm->base_va == base_va) && (mm->size > size)) {
++mm;
+ }
/* Make room for new region by moving other regions up by one place */
(void)memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);
@@ -277,10 +280,12 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
mm->size = size;
mm->attr = attr;
- if (end_pa > xlat_max_pa)
+ if (end_pa > xlat_max_pa) {
xlat_max_pa = end_pa;
- if (end_va > xlat_max_va)
+ }
+ if (end_va > xlat_max_va) {
xlat_max_va = end_va;
+ }
}
/* map all memory as shared/global/domain0/no-usr access */
@@ -290,42 +295,44 @@ static uint32_t mmap_desc(unsigned attr, unsigned int addr_pa,
uint32_t desc;
switch (level) {
- case 1:
- assert(!(addr_pa & (MMU32B_L1_TABLE_ALIGN - 1)));
+ case 1U:
+ assert((addr_pa & (MMU32B_L1_TABLE_ALIGN - 1)) == 0U);
desc = SECTION_SECTION | SECTION_SHARED;
- desc |= attr & MT_NS ? SECTION_NOTSECURE : 0;
+ desc |= (attr & MT_NS) != 0U ? SECTION_NOTSECURE : 0U;
desc |= SECTION_ACCESS_FLAG;
- desc |= attr & MT_RW ? 0 : SECTION_RO;
+ desc |= (attr & MT_RW) != 0U ? 0U : SECTION_RO;
- desc |= attr & MT_MEMORY ?
+ desc |= (attr & MT_MEMORY) != 0U ?
SECTION_NORMAL_CACHED : SECTION_DEVICE;
- if ((attr & MT_RW) || !(attr & MT_MEMORY))
+ if (((attr & MT_RW) != 0U) || ((attr & MT_MEMORY) == 0U)) {
desc |= SECTION_XN;
+ }
break;
- case 2:
- assert(!(addr_pa & (MMU32B_L2_TABLE_ALIGN - 1)));
+ case 2U:
+ assert((addr_pa & (MMU32B_L2_TABLE_ALIGN - 1)) == 0U);
desc = SMALL_PAGE_SMALL_PAGE | SMALL_PAGE_SHARED;
desc |= SMALL_PAGE_ACCESS_FLAG;
- desc |= attr & MT_RW ? 0 : SMALL_PAGE_RO;
+ desc |= (attr & MT_RW) != 0U ? 0U : SMALL_PAGE_RO;
- desc |= attr & MT_MEMORY ?
+ desc |= (attr & MT_MEMORY) != 0U ?
SMALL_PAGE_NORMAL_CACHED : SMALL_PAGE_DEVICE;
- if ((attr & MT_RW) || !(attr & MT_MEMORY))
+ if (((attr & MT_RW) != 0U) || ((attr & MT_MEMORY) == 0U)) {
desc |= SMALL_PAGE_XN;
+ }
break;
default:
panic();
}
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
/* dump only the non-lpae level 2 tables */
- if (level == 2) {
+ if (level == 2U) {
printf(attr & MT_MEMORY ? "MEM" : "dev");
printf(attr & MT_RW ? "-rw" : "-RO");
printf(attr & MT_NS ? "-NS" : "-S");
@@ -357,26 +364,31 @@ static unsigned int mmap_region_attr(const mmap_region_t *mm, uintptr_t base_va,
*/
for ( ; ; ++mm) {
- if (mm->size == 0U)
+ if (mm->size == 0U) {
return ret; /* Reached end of list */
+ }
- if (mm->base_va > (base_va + size - 1U))
+ if (mm->base_va > (base_va + size - 1U)) {
return ret; /* Next region is after area so end */
+ }
- if ((mm->base_va + mm->size - 1U) < base_va)
+ if ((mm->base_va + mm->size - 1U) < base_va) {
continue; /* Next region has already been overtaken */
+ }
- if ((ret == 0U) && (mm->attr == *attr))
+ if ((ret == 0U) && (mm->attr == *attr)) {
continue; /* Region doesn't override attribs so skip */
+ }
if ((mm->base_va > base_va) ||
- ((mm->base_va + mm->size - 1U) < (base_va + size - 1U)))
+ ((mm->base_va + mm->size - 1U) <
+ (base_va + size - 1U))) {
return MT_UNKNOWN; /* Region doesn't fully cover area */
+ }
*attr = mm->attr;
ret = 0U;
}
- return ret;
}
static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
@@ -384,16 +396,16 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
uint32_t *table,
unsigned int level)
{
- unsigned int level_size_shift = (level == 1) ?
+ unsigned int level_size_shift = (level == 1U) ?
ONE_MB_SHIFT : FOUR_KB_SHIFT;
- unsigned int level_size = 1 << level_size_shift;
- unsigned int level_index_mask = (level == 1) ?
+ unsigned int level_size = 1U << level_size_shift;
+ unsigned int level_index_mask = (level == 1U) ?
(NUM_1MB_IN_4GB - 1) << ONE_MB_SHIFT :
(NUM_4K_IN_1MB - 1) << FOUR_KB_SHIFT;
- assert(level == 1 || level == 2);
+ assert((level == 1U) || (level == 2U));
- VERBOSE("init xlat table at %p (level%1d)\n", (void *)table, level);
+ VERBOSE("init xlat table at %p (level%1u)\n", (void *)table, level);
do {
uint32_t desc = MMU32B_UNSET_DESC;
@@ -405,15 +417,17 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
}
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
/* dump only non-lpae level 2 tables content */
- if (level == 2)
+ if (level == 2U) {
printf(" 0x%lx %x " + 6 - 2 * level,
base_va, level_size);
+ }
#endif
if (mm->base_va >= base_va + level_size) {
/* Next region is after area so nothing to map yet */
desc = MMU32B_INVALID_DESC;
- } else if (mm->base_va <= base_va && mm->base_va + mm->size >=
- base_va + level_size) {
+ } else if ((mm->base_va <= base_va) &&
+ (mm->base_va + mm->size) >=
+ (base_va + level_size)) {
/* Next region covers all of area */
unsigned int attr = mm->attr;
unsigned int r = mmap_region_attr(mm, base_va,
@@ -436,8 +450,8 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
*/
if (*table) {
assert((*table & 3) == SECTION_PT_PT);
- assert(!(*table & SECTION_PT_NOTSECURE) ==
- !(mm->attr & MT_NS));
+ assert(((*table & SECTION_PT_NOTSECURE) == 0U)
+ == ((mm->attr & MT_NS) == 0U));
xlat_table = (*table) &
~(MMU32B_L1_TABLE_ALIGN - 1);
@@ -447,11 +461,11 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
next_xlat * MMU32B_L2_TABLE_SIZE;
next_xlat++;
assert(next_xlat <= MAX_XLAT_TABLES);
- memset((char *)xlat_table, 0,
+ (void)memset((char *)xlat_table, 0,
MMU32B_L2_TABLE_SIZE);
desc = xlat_table | SECTION_PT_PT;
- desc |= mm->attr & MT_NS ?
+ desc |= (mm->attr & MT_NS) != 0U ?
SECTION_PT_NOTSECURE : 0;
}
/* Recurse to fill in new table */
@@ -461,12 +475,13 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
}
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
/* dump only non-lpae level 2 tables content */
- if (level == 2)
+ if (level == 2U) {
printf("\n");
+ }
#endif
*table++ = desc;
base_va += level_size;
- } while (mm->size && (base_va & level_index_mask));
+ } while ((mm->size != 0U) && ((base_va & level_index_mask) != 0U));
return mm;
}
@@ -475,17 +490,16 @@ void init_xlat_tables(void)
{
print_mmap();
- assert(!((unsigned int)mmu_l1_base & (MMU32B_L1_TABLE_ALIGN - 1)));
- assert(!((unsigned int)mmu_l2_base & (MMU32B_L2_TABLE_ALIGN - 1)));
+ assert(((unsigned int)mmu_l1_base & (MMU32B_L1_TABLE_ALIGN - 1)) == 0U);
+ assert(((unsigned int)mmu_l2_base & (MMU32B_L2_TABLE_ALIGN - 1)) == 0U);
- memset(mmu_l1_base, 0, MMU32B_L1_TABLE_SIZE);
+ (void)memset(mmu_l1_base, 0, MMU32B_L1_TABLE_SIZE);
init_xlation_table_inner(mmap, 0, (uint32_t *)mmu_l1_base, 1);
VERBOSE("init xlat - max_va=%p, max_pa=%llx\n",
(void *)xlat_max_va, xlat_max_pa);
- assert(xlat_max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
- assert(xlat_max_pa <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
+ assert(xlat_max_pa <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1));
}
/*******************************************************************************
@@ -499,7 +513,7 @@ void enable_mmu_svc_mon(unsigned int flags)
unsigned int sctlr;
assert(IS_IN_SECURE());
- assert((read_sctlr() & SCTLR_M_BIT) == 0);
+ assert((read_sctlr() & SCTLR_M_BIT) == 0U);
/* Enable Access flag (simplified access permissions) and TEX remap */
write_sctlr(read_sctlr() | SCTLR_AFE_BIT | SCTLR_TRE_BIT);
@@ -522,7 +536,7 @@ void enable_mmu_svc_mon(unsigned int flags)
/* set MMU base xlat table entry (use only TTBR0) */
write_ttbr0((uint32_t)mmu_l1_base | MMU32B_DEFAULT_ATTRS);
- write_ttbr1(0);
+ write_ttbr1(0U);
/*
* Ensure all translation table writes have drained
@@ -535,14 +549,15 @@ void enable_mmu_svc_mon(unsigned int flags)
sctlr = read_sctlr();
sctlr |= SCTLR_M_BIT;
-#if ARMV7_SUPPORTS_VIRTUALIZATION
+#ifdef ARMV7_SUPPORTS_VIRTUALIZATION
sctlr |= SCTLR_WXN_BIT;
#endif
- if (flags & DISABLE_DCACHE)
+ if ((flags & DISABLE_DCACHE) != 0U) {
sctlr &= ~SCTLR_C_BIT;
- else
+ } else {
sctlr |= SCTLR_C_BIT;
+ }
write_sctlr(sctlr);
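
The hunks above are dominated by MISRA C:2012 conformance idioms that recur
throughout this patch: explicit != 0U / == 0U comparisons instead of implicit
boolean conversion, braces around single-statement branches, #ifdef rather
than #if for feature macros, and void-casting the ignored return value of
memset(). A minimal sketch of the pattern, with hypothetical names (not code
from this patch):

	/* Sketch only: ENABLE_BIT and configure() are hypothetical. */
	#include <string.h>

	#define ENABLE_BIT	0x1U

	static void configure(unsigned int flags, char *buf, size_t len)
	{
		/* MISRA essential type: compare masked flags against 0U. */
		if ((flags & ENABLE_BIT) != 0U) {
			/* Return value deliberately discarded. */
			(void)memset(buf, 0, len);
		}
	}
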
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 8eeeea1dd..3832b0703 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -135,7 +135,7 @@ bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
bool is_dcache_enabled(void)
{
- unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
+ unsigned int el = get_current_el_maybe_constant();
if (el == 1U) {
return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
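
The direct CurrentEL read is replaced with get_current_el_maybe_constant() so
that images built for a single, known exception level can resolve the EL at
compile time and let the compiler discard the dead branches. A plausible shape
for the helper, assuming per-image IMAGE_AT_ELx defines and the
read_CurrentEl()/GET_EL() helpers used elsewhere in this codebase (sketch, not
the actual definition):

	static inline unsigned int get_current_el_maybe_constant(void)
	{
	#if defined(IMAGE_AT_EL1)
		return 1U;	/* folded to a constant at build time */
	#elif defined(IMAGE_AT_EL3)
		return 3U;
	#else
		/* Unknown at build time: fall back to reading CurrentEL. */
		return (unsigned int)GET_EL(read_CurrentEl());
	#endif
	}
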
diff --git a/lib/xlat_tables_v2/ro_xlat_tables.mk b/lib/xlat_tables_v2/ro_xlat_tables.mk
new file mode 100644
index 000000000..7991e1afd
--- /dev/null
+++ b/lib/xlat_tables_v2/ro_xlat_tables.mk
@@ -0,0 +1,37 @@
+#
+# Copyright (c) 2020, ARM Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${USE_DEBUGFS}, 1)
+ $(error "Debugfs requires functionality from the dynamic translation \
+ library and is incompatible with ALLOW_RO_XLAT_TABLES.")
+endif
+
+ifeq (${ARCH},aarch32)
+ ifeq (${RESET_TO_SP_MIN},1)
+ $(error "RESET_TO_SP_MIN requires functionality from the dynamic \
+ translation library and is incompatible with \
+ ALLOW_RO_XLAT_TABLES.")
+ endif
+else # if AArch64
+ ifeq (${PLAT},tegra)
+ $(error "Tegra requires functionality from the dynamic translation \
+ library and is incompatible with ALLOW_RO_XLAT_TABLES.")
+ endif
+ ifeq (${RESET_TO_BL31},1)
+ $(error "RESET_TO_BL31 requires functionality from the dynamic \
+ translation library and is incompatible with \
+ ALLOW_RO_XLAT_TABLES.")
+ endif
+ ifeq (${SPD},trusty)
+ $(error "Trusty requires functionality from the dynamic translation \
+ library and is incompatible with ALLOW_RO_XLAT_TABLES.")
+ endif
+ ifeq (${SPM_MM},1)
+ $(error "SPM_MM requires functionality to change memory region \
+ attributes, which is not possible once the translation tables \
+ have been made read-only.")
+ endif
+endif
diff --git a/lib/xlat_tables_v2/xlat_tables.mk b/lib/xlat_tables_v2/xlat_tables.mk
index c946315bf..bcc3e68d8 100644
--- a/lib/xlat_tables_v2/xlat_tables.mk
+++ b/lib/xlat_tables_v2/xlat_tables.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -13,3 +13,7 @@ XLAT_TABLES_LIB_SRCS := $(addprefix lib/xlat_tables_v2/, \
XLAT_TABLES_LIB_V2 := 1
$(eval $(call add_define,XLAT_TABLES_LIB_V2))
+
+ifeq (${ALLOW_RO_XLAT_TABLES}, 1)
+ include lib/xlat_tables_v2/ro_xlat_tables.mk
+endif
diff --git a/lib/xlat_tables_v2/xlat_tables_context.c b/lib/xlat_tables_v2/xlat_tables_context.c
index f4b64b33f..95dae88eb 100644
--- a/lib/xlat_tables_v2/xlat_tables_context.c
+++ b/lib/xlat_tables_v2/xlat_tables_context.c
@@ -1,9 +1,10 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <arch_helpers.h>
#include <assert.h>
#include <platform_def.h>
@@ -25,7 +26,7 @@ uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
* currently executing.
*/
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
- PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
+ PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
unsigned int attr)
@@ -119,6 +120,75 @@ int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
}
+#if PLAT_RO_XLAT_TABLES
+/* Change the memory attributes of the descriptors which resolve the address
+ * range that belongs to the translation tables themselves, which are by default
+ * mapped as part of read-write data in the BL image's memory.
+ *
+ * Since the translation tables map themselves via these level 3 (page)
+ * descriptors, any change applied to them with the MMU on would introduce a
+ * chicken and egg problem because of the break-before-make sequence.
+ * Eventually, it would reach the descriptor that resolves the very table it
+ * belongs to, and the invalidation (break step) would cause the subsequent
+ * write (make step) to generate an MMU fault. Therefore, the MMU is disabled
+ * before making the change.
+ *
+ * No assumption is made about what data this function needs; therefore, all
+ * the caches are flushed in order to ensure coherency. A future optimization
+ * would be to only flush the required data to main memory.
+ */
+int xlat_make_tables_readonly(void)
+{
+ assert(tf_xlat_ctx.initialized == true);
+#ifdef __aarch64__
+ if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
+ disable_mmu_el1();
+ } else if (tf_xlat_ctx.xlat_regime == EL3_REGIME) {
+ disable_mmu_el3();
+ } else {
+ assert(tf_xlat_ctx.xlat_regime == EL2_REGIME);
+ return -1;
+ }
+
+ /* Flush all caches. */
+ dcsw_op_all(DCCISW);
+#else /* !__aarch64__ */
+ assert(tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME);
+ /* On AArch32, we flush the caches before disabling the MMU. The reason
+ * for this is that the dcsw_op_all AArch32 function pushes some
+ * registers onto the stack under the assumption that it is writing to
+ * cache, which is not true with the MMU off. This would result in the
+ * stack becoming corrupted and a wrong/junk value for the LR being
+ * restored at the end of the routine.
+ */
+ dcsw_op_all(DC_OP_CISW);
+ disable_mmu_secure();
+#endif
+
+ int rc = xlat_change_mem_attributes_ctx(&tf_xlat_ctx,
+ (uintptr_t)tf_xlat_ctx.tables,
+ tf_xlat_ctx.tables_num * XLAT_TABLE_SIZE,
+ MT_RO_DATA | MT_SECURE);
+
+#ifdef __aarch64__
+ if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
+ enable_mmu_el1(0U);
+ } else {
+ assert(tf_xlat_ctx.xlat_regime == EL3_REGIME);
+ enable_mmu_el3(0U);
+ }
+#else /* !__aarch64__ */
+ enable_mmu_svc_mon(0U);
+#endif
+
+ if (rc == 0) {
+ tf_xlat_ctx.readonly_tables = true;
+ }
+
+ return rc;
+}
+#endif /* PLAT_RO_XLAT_TABLES */
+
/*
* If dynamic allocation of new regions is disabled then by the time we call the
* function enabling the MMU, we'll have registered all the memory regions to
@@ -162,6 +232,23 @@ void enable_mmu_el3(unsigned int flags)
enable_mmu_direct_el3(flags);
}
+void enable_mmu(unsigned int flags)
+{
+ switch (get_current_el_maybe_constant()) {
+ case 1:
+ enable_mmu_el1(flags);
+ break;
+ case 2:
+ enable_mmu_el2(flags);
+ break;
+ case 3:
+ enable_mmu_el3(flags);
+ break;
+ default:
+ panic();
+ }
+}
+
#else /* !__aarch64__ */
void enable_mmu_svc_mon(unsigned int flags)
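
Together, the new enable_mmu() dispatcher and xlat_make_tables_readonly()
allow a BL image to lock its own translation tables once the memory map is
final. A hedged usage sketch (the hook name and error handling are
hypothetical; only the two functions come from this patch, and
PLAT_RO_XLAT_TABLES is assumed to be set by the build):

	/* Hypothetical late-setup hook, run once the memory map is final. */
	static void lock_down_xlat_tables(void)
	{
		/* Dispatches to enable_mmu_el1/el2/el3 based on CurrentEL. */
		enable_mmu(0U);

	#if PLAT_RO_XLAT_TABLES
		/* Briefly turns the MMU off, remaps the tables as MT_RO_DATA
		 * and turns the MMU back on; returns 0 on success. */
		if (xlat_make_tables_readonly() != 0) {
			panic();
		}
	#endif
	}
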
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index b2259e5f3..bb6d18459 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -109,6 +109,7 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
{
uint64_t desc;
uint32_t mem_type;
+ uint32_t shareability_type;
/* Make sure that the granularity is fine enough to map this address. */
assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);
@@ -194,8 +195,16 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
}
+ shareability_type = MT_SHAREABILITY(attr);
if (mem_type == MT_MEMORY) {
- desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
+ desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX);
+ if (shareability_type == MT_SHAREABILITY_NSH) {
+ desc |= LOWER_ATTRS(NSH);
+ } else if (shareability_type == MT_SHAREABILITY_OSH) {
+ desc |= LOWER_ATTRS(OSH);
+ } else {
+ desc |= LOWER_ATTRS(ISH);
+ }
/* Check if Branch Target Identification is enabled */
#if ENABLE_BTI
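
This hunk lets a region override the default Inner Shareable attribute for
normal memory; regions that do not request a shareability keep ISH via the
final else branch. Assuming the companion header exposes MT_SHAREABILITY_NSH
and MT_SHAREABILITY_OSH values that can be OR-ed into the attribute word (only
the MT_SHAREABILITY() extraction macro is visible in this hunk), a
Non-shareable mapping might be requested as:

	/* Hypothetical region; the base/size names are illustrative only. */
	mmap_add_region(SCRATCH_BASE_PA, SCRATCH_BASE_VA, SCRATCH_SIZE,
			MT_MEMORY | MT_RW | MT_SECURE | MT_SHAREABILITY_NSH);
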
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index 70ef39523..863470cf3 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -99,9 +99,6 @@ unsigned long long xlat_arch_get_max_supported_pa(void);
*/
bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx);
-/* Returns true if the data cache is enabled at the current EL. */
-bool is_dcache_enabled(void);
-
/*
* Returns minimum virtual address space size supported by the architecture
*/
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
index 30babc63f..9fae7e917 100644
--- a/lib/xlat_tables_v2/xlat_tables_utils.c
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -472,7 +472,7 @@ int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
/*
* Sanity checks.
*/
- for (size_t i = 0U; i < pages_count; ++i) {
+ for (unsigned int i = 0U; i < pages_count; ++i) {
const uint64_t *entry;
uint64_t desc, attr_index;
unsigned int level;
@@ -497,8 +497,8 @@ int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
(level != XLAT_TABLE_LEVEL_MAX)) {
WARN("Address 0x%lx is not mapped at the right granularity.\n",
base_va);
- WARN("Granularity is 0x%llx, should be 0x%x.\n",
- (unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
+ WARN("Granularity is 0x%lx, should be 0x%lx.\n",
+ XLAT_BLOCK_SIZE(level), PAGE_SIZE);
return -EINVAL;
}
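
The reworked WARN() prints the actual and required granularity with matching
formats; it fires when the requested range is not mapped with level 3 (page)
descriptors. A conforming call must therefore cover page-aligned, page-mapped
memory. A sketch, where SCRATCH_VA is an assumed page-aligned, page-mapped VA
(hypothetical name, not from this patch):

	static void make_scratch_readonly(void)
	{
		/* Fails with -EINVAL unless the whole range is mapped at
		 * page granularity, per the check above. */
		int rc = xlat_change_mem_attributes(SCRATCH_VA,
						    2U * PAGE_SIZE,
						    MT_RO_DATA | MT_SECURE);
		if (rc != 0) {
			ERROR("attribute change failed: %d\n", rc);
		}
	}
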