Diffstat (limited to 'lib/cpus/aarch64')
-rw-r--r--  lib/cpus/aarch64/cortex_a53.S          |  17
-rw-r--r--  lib/cpus/aarch64/cortex_a55.S          |  17
-rw-r--r--  lib/cpus/aarch64/cortex_a57.S          |  29
-rw-r--r--  lib/cpus/aarch64/cortex_a72.S          |  17
-rw-r--r--  lib/cpus/aarch64/cortex_a76.S          | 188
-rw-r--r--  lib/cpus/aarch64/cortex_a77.S          | 133
-rw-r--r--  lib/cpus/aarch64/cortex_a78.S          | 237
-rw-r--r--  lib/cpus/aarch64/cortex_a78_ae.S (renamed from lib/cpus/aarch64/cortex_hercules_ae.S) | 60
-rw-r--r--  lib/cpus/aarch64/cortex_hercules.S     | 143
-rw-r--r--  lib/cpus/aarch64/cortex_klein.S        |  77
-rw-r--r--  lib/cpus/aarch64/cortex_matterhorn.S   |  77
-rw-r--r--  lib/cpus/aarch64/cpu_helpers.S         |  74
-rw-r--r--  lib/cpus/aarch64/denver.S              |  87
-rw-r--r--  lib/cpus/aarch64/dsu_helpers.S         |  38
-rw-r--r--  lib/cpus/aarch64/generic.S             |  89
-rw-r--r--  lib/cpus/aarch64/neoverse_n1.S         | 107
-rw-r--r--  lib/cpus/aarch64/neoverse_n2.S         | 109
-rw-r--r--  lib/cpus/aarch64/neoverse_n_common.S   |  26
-rw-r--r--  lib/cpus/aarch64/neoverse_v1.S (renamed from lib/cpus/aarch64/neoverse_zeus.S) | 48
-rw-r--r--  lib/cpus/aarch64/rainier.S             | 130
20 files changed, 1402 insertions(+), 301 deletions(-)
diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S
index b105de26b..df11d8690 100644
--- a/lib/cpus/aarch64/cortex_a53.S
+++ b/lib/cpus/aarch64/cortex_a53.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -239,6 +239,20 @@ exit_check_errata_843419:
ret
endfunc check_errata_843419
+ /* --------------------------------------------------
+ * Errata workaround for Cortex A53 Errata #1530924.
+ * This applies to all revisions of Cortex A53.
+ * --------------------------------------------------
+ */
+func check_errata_1530924
+#if ERRATA_A53_1530924
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_1530924
+
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A53.
* Shall clobber: x0-x19
@@ -359,6 +373,7 @@ func cortex_a53_errata_report
report_errata ERRATA_A53_836870, cortex_a53, disable_non_temporal_hint
report_errata ERRATA_A53_843419, cortex_a53, 843419
report_errata ERRATA_A53_855873, cortex_a53, 855873
+ report_errata ERRATA_A53_1530924, cortex_a53, 1530924
ldp x8, x30, [sp], #16
ret
diff --git a/lib/cpus/aarch64/cortex_a55.S b/lib/cpus/aarch64/cortex_a55.S
index 8e138244b..783830450 100644
--- a/lib/cpus/aarch64/cortex_a55.S
+++ b/lib/cpus/aarch64/cortex_a55.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -222,6 +222,20 @@ func check_errata_1221012
b cpu_rev_var_ls
endfunc check_errata_1221012
+ /* --------------------------------------------------
+ * Errata workaround for Cortex A55 Errata #1530923.
+ * This applies to all revisions of Cortex A55.
+ * --------------------------------------------------
+ */
+func check_errata_1530923
+#if ERRATA_A55_1530923
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_1530923
+
func cortex_a55_reset_func
mov x19, x30
@@ -306,6 +320,7 @@ func cortex_a55_errata_report
report_errata ERRATA_A55_846532, cortex_a55, 846532
report_errata ERRATA_A55_903758, cortex_a55, 903758
report_errata ERRATA_A55_1221012, cortex_a55, 1221012
+ report_errata ERRATA_A55_1530923, cortex_a55, 1530923
ldp x8, x30, [sp], #16
ret
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index dd03c0f02..8ef0f922a 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -395,6 +396,20 @@ func check_errata_cve_2018_3639
ret
endfunc check_errata_cve_2018_3639
+ /* --------------------------------------------------
+ * Errata workaround for Cortex A57 Errata #1319537.
+ * This applies to all revisions of Cortex A57.
+ * --------------------------------------------------
+ */
+func check_errata_1319537
+#if ERRATA_A57_1319537
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_1319537
+
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A57.
* Shall clobber: x0-x19
@@ -469,6 +484,17 @@ func cortex_a57_reset_func
dsb sy
#endif
+#if A57_ENABLE_NONCACHEABLE_LOAD_FWD
+ /* ---------------------------------------------
+ * Enable higher performance non-cacheable load
+ * forwarding
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_A57_CPUACTLR_EL1
+ orr x0, x0, #CORTEX_A57_CPUACTLR_EL1_EN_NC_LOAD_FWD
+ msr CORTEX_A57_CPUACTLR_EL1, x0
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
@@ -601,6 +627,7 @@ func cortex_a57_errata_report
report_errata ERRATA_A57_829520, cortex_a57, 829520
report_errata ERRATA_A57_833471, cortex_a57, 833471
report_errata ERRATA_A57_859972, cortex_a57, 859972
+ report_errata ERRATA_A57_1319537, cortex_a57, 1319537
report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715
report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639
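The new A57_ENABLE_NONCACHEABLE_LOAD_FWD block above is a build-time-gated read-modify-write of the IMPLEMENTATION DEFINED CPUACTLR_EL1. A minimal C sketch of the same shape — the bit position is a stand-in, not the header's value:

    #include <stdio.h>

    #ifndef A57_ENABLE_NONCACHEABLE_LOAD_FWD
    #define A57_ENABLE_NONCACHEABLE_LOAD_FWD 0  /* platform build flag */
    #endif

    #define EN_NC_LOAD_FWD_BIT (1ull << 1)      /* stand-in bit position */

    /* Mirrors the mrs/orr/msr triplet applied to CPUACTLR_EL1 at reset. */
    static unsigned long long apply(unsigned long long cpuactlr)
    {
    #if A57_ENABLE_NONCACHEABLE_LOAD_FWD
        cpuactlr |= EN_NC_LOAD_FWD_BIT;
    #endif
        return cpuactlr;
    }

    int main(void)
    {
        printf("CPUACTLR_EL1 after reset hook: %#llx\n", apply(0x0ull));
        return 0;
    }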
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index 38b76b940..aff6072a0 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -119,6 +119,20 @@ func check_errata_cve_2018_3639
ret
endfunc check_errata_cve_2018_3639
+ /* --------------------------------------------------
+ * Errata workaround for Cortex A72 Errata #1319367.
+ * This applies to all revisions of Cortex A72.
+ * --------------------------------------------------
+ */
+func check_errata_1319367
+#if ERRATA_A72_1319367
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_1319367
+
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A72.
* -------------------------------------------------
@@ -282,6 +296,7 @@ func cortex_a72_errata_report
* checking functions of each errata.
*/
report_errata ERRATA_A72_859971, cortex_a72, 859971
+ report_errata ERRATA_A72_1319367, cortex_a72, 1319367
report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715
report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
index baefa4676..4f7f4bb9a 100644
--- a/lib/cpus/aarch64/cortex_a76.S
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -337,33 +337,83 @@ func check_errata_1262888
b cpu_rev_var_ls
endfunc check_errata_1262888
- /* --------------------------------------------------
- * Errata Workaround for Cortex A76 Errata #1275112
- * and Errata #1262606.
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A76 Errata #1286807.
* This applies only to revision <= r3p0 of Cortex A76.
+ * Due to the nature of the errata it is applied unconditionally
+ * when built in, so it is reported as applicable in this case
+ * ---------------------------------------------------
+ */
+func check_errata_1286807
+#if ERRATA_A76_1286807
+ mov x0, #ERRATA_APPLIES
+ ret
+#else
+ mov x1, #0x30
+ b cpu_rev_var_ls
+#endif
+endfunc check_errata_1286807
+
+ /* --------------------------------------------------
+ * Errata workaround for Cortex A76 Errata #1791580.
+ * This applies to revisions <= r4p0 of Cortex A76.
* Inputs:
* x0: variant[4:7] and revision[0:3] of current cpu.
* Shall clobber: x0-x17
* --------------------------------------------------
*/
-func errata_a76_1275112_1262606_wa
- /*
- * Compare x0 against revision r3p0
- */
+func errata_a76_1791580_wa
+ /* Compare x0 against revision r4p0 */
mov x17, x30
- /*
- * Since both errata #1275112 and #1262606 have the same check, we can
- * invoke any one of them for the check here.
- */
- bl check_errata_1275112
+ bl check_errata_1791580
cbz x0, 1f
+ mrs x1, CORTEX_A76_CPUACTLR2_EL1
+ orr x1, x1, CORTEX_A76_CPUACTLR2_EL1_BIT_2
+ msr CORTEX_A76_CPUACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a76_1791580_wa
+
+func check_errata_1791580
+ /* Applies to everything <=r4p0. */
+ mov x1, #0x40
+ b cpu_rev_var_ls
+endfunc check_errata_1791580
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A76 Errata #1262606,
+ * #1275112, and #1868343. #1262606 and #1275112
+ * apply to revisions <= r3p0 and #1868343 applies to
+ * revisions <= r4p0.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+
+func errata_a76_1262606_1275112_1868343_wa
+ mov x17, x30
+
+/* Check for <= r3p0 cases and branch if check passes. */
+#if ERRATA_A76_1262606 || ERRATA_A76_1275112
+ bl check_errata_1262606
+ cbnz x0, 1f
+#endif
+
+/* Check for <= r4p0 cases and branch if check fails. */
+#if ERRATA_A76_1868343
+ bl check_errata_1868343
+ cbz x0, 2f
+#endif
+1:
mrs x1, CORTEX_A76_CPUACTLR_EL1
- orr x1, x1, CORTEX_A76_CPUACTLR_EL1_BIT_13
+ orr x1, x1, #CORTEX_A76_CPUACTLR_EL1_BIT_13
msr CORTEX_A76_CPUACTLR_EL1, x1
isb
-1:
+2:
ret x17
-endfunc errata_a76_1275112_1262606_wa
+endfunc errata_a76_1262606_1275112_1868343_wa
func check_errata_1262606
mov x1, #0x30
@@ -375,22 +425,65 @@ func check_errata_1275112
b cpu_rev_var_ls
endfunc check_errata_1275112
- /* ---------------------------------------------------
- * Errata Workaround for Cortex A76 Errata #1286807.
- * This applies only to revision <= r3p0 of Cortex A76.
- * Due to the nature of the errata it is applied unconditionally
- * when built in, report it as applicable in this case
- * ---------------------------------------------------
- */
-func check_errata_1286807
-#if ERRATA_A76_1286807
- mov x0, #ERRATA_APPLIES
- ret
-#else
- mov x1, #0x30
+func check_errata_1868343
+ mov x1, #0x40
b cpu_rev_var_ls
-#endif
-endfunc check_errata_1286807
+endfunc check_errata_1868343
+
+/* --------------------------------------------------
+ * Errata Workaround for A76 Erratum 1946160.
+ * This applies to revisions r3p0 - r4p1 of A76.
+ * It also exists in r0p0 - r2p0 but there is no fix
+ * in those revisions.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a76_1946160_wa
+ /* Compare x0 against revisions r3p0 - r4p1 */
+ mov x17, x30
+ bl check_errata_1946160
+ cbz x0, 1f
+
+ mov x0, #3
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3900002
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #4
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #5
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ isb
+1:
+ ret x17
+endfunc errata_a76_1946160_wa
+
+func check_errata_1946160
+ /* Applies to revisions r3p0 - r4p1. */
+ mov x1, #0x30
+ mov x2, #0x41
+ b cpu_rev_var_range
+endfunc check_errata_1946160
func check_errata_cve_2018_3639
#if WORKAROUND_CVE_2018_3639
@@ -409,6 +502,23 @@ func cortex_a76_disable_wa_cve_2018_3639
ret
endfunc cortex_a76_disable_wa_cve_2018_3639
+ /* --------------------------------------------------------------
+ * Errata Workaround for Cortex A76 Errata #1165522.
+ * This applies only to revisions <= r3p0 of Cortex A76.
+ * Due to the nature of the errata it is applied unconditionally
+ * when built in, so it is reported as applicable in this case
+ * --------------------------------------------------------------
+ */
+func check_errata_1165522
+#if ERRATA_A76_1165522
+ mov x0, #ERRATA_APPLIES
+ ret
+#else
+ mov x1, #0x30
+ b cpu_rev_var_ls
+#endif
+endfunc check_errata_1165522
+
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A76.
* Shall clobber: x0-x19
@@ -439,9 +549,9 @@ func cortex_a76_reset_func
bl errata_a76_1257314_wa
#endif
-#if ERRATA_A76_1262606 || ERRATA_A76_1275112
+#if ERRATA_A76_1262606 || ERRATA_A76_1275112 || ERRATA_A76_1868343
mov x0, x18
- bl errata_a76_1275112_1262606_wa
+ bl errata_a76_1262606_1275112_1868343_wa
#endif
#if ERRATA_A76_1262888
@@ -449,6 +559,16 @@ func cortex_a76_reset_func
bl errata_a76_1262888_wa
#endif
+#if ERRATA_A76_1791580
+ mov x0, x18
+ bl errata_a76_1791580_wa
+#endif
+
+#if ERRATA_A76_1946160
+ mov x0, x18
+ bl errata_a76_1946160_wa
+#endif
+
#if WORKAROUND_CVE_2018_3639
/* If the PE implements SSBS, we don't need the dynamic workaround */
mrs x0, id_aa64pfr1_el1
@@ -529,6 +649,10 @@ func cortex_a76_errata_report
report_errata ERRATA_A76_1262888, cortex_a76, 1262888
report_errata ERRATA_A76_1275112, cortex_a76, 1275112
report_errata ERRATA_A76_1286807, cortex_a76, 1286807
+ report_errata ERRATA_A76_1791580, cortex_a76, 1791580
+ report_errata ERRATA_A76_1165522, cortex_a76, 1165522
+ report_errata ERRATA_A76_1868343, cortex_a76, 1868343
+ report_errata ERRATA_A76_1946160, cortex_a76, 1946160
report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
report_errata ERRATA_DSU_798953, cortex_a76, dsu_798953
report_errata ERRATA_DSU_936184, cortex_a76, dsu_936184
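The new A76 checks lean on two helpers: cpu_rev_var_ls (erratum applies to everything at or below a ceiling) and cpu_rev_var_range (applies between two bounds, inclusive). cpu_get_rev_var packs the variant into bits [7:4] and the revision into bits [3:0], so r4p0 encodes as 0x40 and r4p1 as 0x41. A small host-runnable C model of that arithmetic; the helper names mirror the assembly:

    #include <stdbool.h>
    #include <stdio.h>

    /* variant in bits [7:4], revision in bits [3:0] (rXpY -> 0xXY) */
    static unsigned int rev_var(unsigned int variant, unsigned int revision)
    {
        return ((variant & 0xfu) << 4) | (revision & 0xfu);
    }

    /* cpu_rev_var_ls: erratum applies to everything <= max */
    static bool rev_var_ls(unsigned int rv, unsigned int max)
    {
        return rv <= max;
    }

    /* cpu_rev_var_range: erratum applies to min..max inclusive */
    static bool rev_var_range(unsigned int rv, unsigned int min, unsigned int max)
    {
        return rv >= min && rv <= max;
    }

    int main(void)
    {
        unsigned int r2p0 = rev_var(2, 0), r4p0 = rev_var(4, 0);

        printf("1791580 (<= r4p0) on r4p0: %d\n", rev_var_ls(r4p0, 0x40));
        printf("1946160 (r3p0-r4p1) on r2p0: %d\n", rev_var_range(r2p0, 0x30, 0x41));
        return 0;
    }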
diff --git a/lib/cpus/aarch64/cortex_a77.S b/lib/cpus/aarch64/cortex_a77.S
index f3fd5e196..e3a6f5fbf 100644
--- a/lib/cpus/aarch64/cortex_a77.S
+++ b/lib/cpus/aarch64/cortex_a77.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -21,6 +21,122 @@
#error "Cortex-A77 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A77 Errata #1508412.
+ * This applies only to revision <= r1p0 of Cortex A77.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a77_1508412_wa
+ /*
+ * Compare x0 against revision r1p0
+ */
+ mov x17, x30
+ bl check_errata_1508412
+ cbz x0, 3f
+ /*
+ * Compare x0 against revision r0p0
+ */
+ bl check_errata_1508412_0
+ cbz x0, 1f
+ ldr x0, =0x0
+ msr CORTEX_A77_CPUPSELR_EL3, x0
+ ldr x0, =0x00E8400000
+ msr CORTEX_A77_CPUPOR_EL3, x0
+ ldr x0, =0x00FFE00000
+ msr CORTEX_A77_CPUPMR_EL3, x0
+ ldr x0, =0x4004003FF
+ msr CORTEX_A77_CPUPCR_EL3, x0
+ ldr x0, =0x1
+ msr CORTEX_A77_CPUPSELR_EL3, x0
+ ldr x0, =0x00E8C00040
+ msr CORTEX_A77_CPUPOR_EL3, x0
+ ldr x0, =0x00FFE00040
+ msr CORTEX_A77_CPUPMR_EL3, x0
+ b 2f
+1:
+ ldr x0, =0x0
+ msr CORTEX_A77_CPUPSELR_EL3, x0
+ ldr x0, =0x00E8400000
+ msr CORTEX_A77_CPUPOR_EL3, x0
+ ldr x0, =0x00FF600000
+ msr CORTEX_A77_CPUPMR_EL3, x0
+ ldr x0, =0x00E8E00080
+ msr CORTEX_A77_CPUPOR2_EL3, x0
+ ldr x0, =0x00FFE000C0
+ msr CORTEX_A77_CPUPMR2_EL3, x0
+2:
+ ldr x0, =0x04004003FF
+ msr CORTEX_A77_CPUPCR_EL3, x0
+ isb
+3:
+ ret x17
+endfunc errata_a77_1508412_wa
+
+func check_errata_1508412
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1508412
+
+func check_errata_1508412_0
+ mov x1, #0x0
+ b cpu_rev_var_ls
+endfunc check_errata_1508412_0
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A77 Errata #1925769.
+ * This applies to revision <= r1p1 of Cortex A77.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a77_1925769_wa
+ /* Compare x0 against revision <= r1p1 */
+ mov x17, x30
+ bl check_errata_1925769
+ cbz x0, 1f
+
+ /* Set bit 8 in ECTLR_EL1 */
+ mrs x1, CORTEX_A77_CPUECTLR_EL1
+ orr x1, x1, #CORTEX_A77_CPUECTLR_EL1_BIT_8
+ msr CORTEX_A77_CPUECTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a77_1925769_wa
+
+func check_errata_1925769
+ /* Applies to everything <= r1p1 */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_1925769
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A77.
+ * Shall clobber: x0-x19
+ * -------------------------------------------------
+ */
+func cortex_a77_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A77_1508412
+ mov x0, x18
+ bl errata_a77_1508412_wa
+#endif
+
+#if ERRATA_A77_1925769
+ mov x0, x18
+ bl errata_a77_1925769_wa
+#endif
+
+ ret x19
+endfunc cortex_a77_reset_func
+
/* ---------------------------------------------
* HW will do the cache maintenance while powering down
* ---------------------------------------------
@@ -42,6 +158,19 @@ endfunc cortex_a77_core_pwr_dwn
* Errata printing function for Cortex-A77. Must follow AAPCS.
*/
func cortex_a77_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_A77_1508412, cortex_a77, 1508412
+ report_errata ERRATA_A77_1925769, cortex_a77, 1925769
+
+ ldp x8, x30, [sp], #16
ret
endfunc cortex_a77_errata_report
#endif
@@ -67,5 +196,5 @@ func cortex_a77_cpu_reg_dump
endfunc cortex_a77_cpu_reg_dump
declare_cpu_ops cortex_a77, CORTEX_A77_MIDR, \
- CPU_NO_RESET_FUNC, \
+ cortex_a77_reset_func, \
cortex_a77_core_pwr_dwn
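The A77 now gains a real reset function with the usual shape: read the revision/variant once (the assembly caches it in x18), then call each workaround that was compiled in and let its own check gate the register writes. A C sketch of that control flow; the revision ceilings match the checks above, everything else (flag values, print statements) is illustrative:

    #include <stdio.h>

    #define ERRATA_A77_1508412 1   /* illustrative build flags */
    #define ERRATA_A77_1925769 1

    static unsigned int cpu_get_rev_var(void) { return 0x10; } /* pretend r1p0 */

    static void errata_a77_1508412_wa(unsigned int rv)
    {
        if (rv <= 0x10)   /* check_errata_1508412: <= r1p0 */
            printf("1508412: programming CPUPSELR/CPUPOR/CPUPMR/CPUPCR\n");
    }

    static void errata_a77_1925769_wa(unsigned int rv)
    {
        if (rv <= 0x11)   /* check_errata_1925769: <= r1p1 */
            printf("1925769: setting CPUECTLR_EL1 bit 8\n");
    }

    static void cortex_a77_reset_func(void)
    {
        unsigned int rv = cpu_get_rev_var();   /* read once, reuse for each check */

    #if ERRATA_A77_1508412
        errata_a77_1508412_wa(rv);
    #endif
    #if ERRATA_A77_1925769
        errata_a77_1925769_wa(rv);
    #endif
    }

    int main(void) { cortex_a77_reset_func(); return 0; }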
diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S
new file mode 100644
index 000000000..f61726b46
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a78.S
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2019-2021, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a78.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "cortex_a78 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+
+/* --------------------------------------------------
+ * Errata Workaround for A78 Erratum 1688305.
+ * This applies to revision r0p0 and r1p0 of A78.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_1688305_wa
+ /* Compare x0 against revision r1p0 */
+ mov x17, x30
+ bl check_errata_1688305
+ cbz x0, 1f
+ mrs x1, CORTEX_A78_ACTLR2_EL1
+ orr x1, x1, #CORTEX_A78_ACTLR2_EL1_BIT_1
+ msr CORTEX_A78_ACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a78_1688305_wa
+
+func check_errata_1688305
+ /* Applies to r0p0 and r1p0 */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1688305
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A78 Errata #1941498.
+ * This applies to revisions r0p0, r1p0, and r1p1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_1941498_wa
+ /* Compare x0 against revision <= r1p1 */
+ mov x17, x30
+ bl check_errata_1941498
+ cbz x0, 1f
+
+ /* Set bit 8 in ECTLR_EL1 */
+ mrs x1, CORTEX_A78_CPUECTLR_EL1
+ orr x1, x1, #CORTEX_A78_CPUECTLR_EL1_BIT_8
+ msr CORTEX_A78_CPUECTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a78_1941498_wa
+
+func check_errata_1941498
+ /* Check for revision <= r1p1, might need to be updated later. */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_1941498
+
+ /* --------------------------------------------------
+ * Errata Workaround for A78 Erratum 1951500.
+ * This applies to revisions r1p0 and r1p1 of A78.
+ * The issue also exists in r0p0 but there is no fix
+ * in that revision.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_1951500_wa
+ /* Compare x0 against revisions r1p0 - r1p1 */
+ mov x17, x30
+ bl check_errata_1951500
+ cbz x0, 1f
+
+ msr S3_6_c15_c8_0, xzr
+ ldr x0, =0x10E3900002
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ mov x0, #1
+ msr S3_6_c15_c8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ mov x0, #2
+ msr S3_6_c15_c8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ isb
+1:
+ ret x17
+endfunc errata_a78_1951500_wa
+
+func check_errata_1951500
+ /* Applies to revisions r1p0 and r1p1. */
+ mov x1, #CPU_REV(1, 0)
+ mov x2, #CPU_REV(1, 1)
+ b cpu_rev_var_range
+endfunc check_errata_1951500
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A78
+ * -------------------------------------------------
+ */
+func cortex_a78_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A78_1688305
+ mov x0, x18
+ bl errata_a78_1688305_wa
+#endif
+
+#if ERRATA_A78_1941498
+ mov x0, x18
+ bl errata_a78_1941498_wa
+#endif
+
+#if ERRATA_A78_1951500
+ mov x0, x18
+ bl errata_a78_1951500_wa
+#endif
+
+#if ENABLE_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ mrs x0, actlr_el3
+ bic x0, x0, #CORTEX_A78_ACTLR_TAM_BIT
+ msr actlr_el3, x0
+
+ /* Make sure accesses from non-secure EL0/EL1 are not trapped to EL2 */
+ mrs x0, actlr_el2
+ bic x0, x0, #CORTEX_A78_ACTLR_TAM_BIT
+ msr actlr_el2, x0
+
+ /* Enable group0 counters */
+ mov x0, #CORTEX_A78_AMU_GROUP0_MASK
+ msr CPUAMCNTENSET0_EL0, x0
+
+ /* Enable group1 counters */
+ mov x0, #CORTEX_A78_AMU_GROUP1_MASK
+ msr CPUAMCNTENSET1_EL0, x0
+#endif
+
+ isb
+ ret x19
+endfunc cortex_a78_reset_func
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func cortex_a78_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_A78_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A78_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
+ msr CORTEX_A78_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a78_core_pwr_dwn
+
+ /*
+ * Errata printing function for cortex_a78. Must follow AAPCS.
+ */
+#if REPORT_ERRATA
+func cortex_a78_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_A78_1688305, cortex_a78, 1688305
+ report_errata ERRATA_A78_1941498, cortex_a78, 1941498
+ report_errata ERRATA_A78_1951500, cortex_a78, 1951500
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a78_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cortex_a78 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a78_regs, "aS"
+cortex_a78_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a78_cpu_reg_dump
+ adr x6, cortex_a78_regs
+ mrs x8, CORTEX_A78_CPUECTLR_EL1
+ ret
+endfunc cortex_a78_cpu_reg_dump
+
+declare_cpu_ops cortex_a78, CORTEX_A78_MIDR, \
+ cortex_a78_reset_func, \
+ cortex_a78_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_hercules_ae.S b/lib/cpus/aarch64/cortex_a78_ae.S
index c4a216353..9aff9ac85 100644
--- a/lib/cpus/aarch64/cortex_hercules_ae.S
+++ b/lib/cpus/aarch64/cortex_a78_ae.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2020, ARM Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,71 +7,71 @@
#include <arch.h>
#include <asm_macros.S>
#include <common/bl_common.h>
-#include <cortex_hercules_ae.h>
+#include <cortex_a78_ae.h>
#include <cpu_macros.S>
#include <plat_macros.S>
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
-#error "cortex_hercules_ae must be compiled with HW_ASSISTED_COHERENCY enabled"
+#error "cortex_a78_ae must be compiled with HW_ASSISTED_COHERENCY enabled"
#endif
/* -------------------------------------------------
- * The CPU Ops reset function for Cortex-Hercules-AE
+ * The CPU Ops reset function for Cortex-A78-AE
* -------------------------------------------------
*/
#if ENABLE_AMU
-func cortex_hercules_ae_reset_func
+func cortex_a78_ae_reset_func
/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
mrs x0, actlr_el3
- bic x0, x0, #CORTEX_HERCULES_ACTLR_TAM_BIT
+ bic x0, x0, #CORTEX_A78_ACTLR_TAM_BIT
msr actlr_el3, x0
/* Make sure accesses from non-secure EL0/EL1 are not trapped to EL2 */
mrs x0, actlr_el2
- bic x0, x0, #CORTEX_HERCULES_ACTLR_TAM_BIT
+ bic x0, x0, #CORTEX_A78_ACTLR_TAM_BIT
msr actlr_el2, x0
/* Enable group0 counters */
- mov x0, #CORTEX_HERCULES_AMU_GROUP0_MASK
+ mov x0, #CORTEX_A78_AMU_GROUP0_MASK
msr CPUAMCNTENSET0_EL0, x0
/* Enable group1 counters */
- mov x0, #CORTEX_HERCULES_AMU_GROUP1_MASK
+ mov x0, #CORTEX_A78_AMU_GROUP1_MASK
msr CPUAMCNTENSET1_EL0, x0
isb
ret
-endfunc cortex_hercules_ae_reset_func
+endfunc cortex_a78_ae_reset_func
#endif
/* -------------------------------------------------------
* HW will do the cache maintenance while powering down
* -------------------------------------------------------
*/
-func cortex_hercules_ae_core_pwr_dwn
+func cortex_a78_ae_core_pwr_dwn
/* -------------------------------------------------------
* Enable CPU power down bit in power control register
* -------------------------------------------------------
*/
- mrs x0, CORTEX_HERCULES_CPUPWRCTLR_EL1
- orr x0, x0, #CORTEX_HERCULES_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
- msr CORTEX_HERCULES_CPUPWRCTLR_EL1, x0
+ mrs x0, CORTEX_A78_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A78_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
+ msr CORTEX_A78_CPUPWRCTLR_EL1, x0
isb
ret
-endfunc cortex_hercules_ae_core_pwr_dwn
+endfunc cortex_a78_ae_core_pwr_dwn
/*
- * Errata printing function for cortex_hercules_ae. Must follow AAPCS.
+ * Errata printing function for cortex_a78_ae. Must follow AAPCS.
*/
#if REPORT_ERRATA
-func cortex_hercules_ae_errata_report
+func cortex_a78_ae_errata_report
ret
-endfunc cortex_hercules_ae_errata_report
+endfunc cortex_a78_ae_errata_report
#endif
/* -------------------------------------------------------
- * This function provides cortex_hercules_ae specific
+ * This function provides cortex_a78_ae specific
* register information for crash reporting.
* It needs to return with x6 pointing to
* a list of register names in ascii and
@@ -79,22 +79,22 @@ endfunc cortex_hercules_ae_errata_report
* reported.
* -------------------------------------------------------
*/
-.section .rodata.cortex_hercules_ae_regs, "aS"
-cortex_hercules_ae_regs: /* The ascii list of register names to be reported */
+.section .rodata.cortex_a78_ae_regs, "aS"
+cortex_a78_ae_regs: /* The ascii list of register names to be reported */
.asciz "cpuectlr_el1", ""
-func cortex_hercules_ae_cpu_reg_dump
- adr x6, cortex_hercules_ae_regs
- mrs x8, CORTEX_HERCULES_CPUECTLR_EL1
+func cortex_a78_ae_cpu_reg_dump
+ adr x6, cortex_a78_ae_regs
+ mrs x8, CORTEX_A78_CPUECTLR_EL1
ret
-endfunc cortex_hercules_ae_cpu_reg_dump
+endfunc cortex_a78_ae_cpu_reg_dump
#if ENABLE_AMU
-#define HERCULES_AE_RESET_FUNC cortex_hercules_ae_reset_func
+#define A78_AE_RESET_FUNC cortex_a78_ae_reset_func
#else
-#define HERCULES_AE_RESET_FUNC CPU_NO_RESET_FUNC
+#define A78_AE_RESET_FUNC CPU_NO_RESET_FUNC
#endif
-declare_cpu_ops cortex_hercules_ae, CORTEX_HERCULES_AE_MIDR, \
- HERCULES_AE_RESET_FUNC, \
- cortex_hercules_ae_core_pwr_dwn
+declare_cpu_ops cortex_a78_ae, CORTEX_A78_AE_MIDR, \
+ A78_AE_RESET_FUNC, \
+ cortex_a78_ae_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_hercules.S b/lib/cpus/aarch64/cortex_hercules.S
deleted file mode 100644
index a23919626..000000000
--- a/lib/cpus/aarch64/cortex_hercules.S
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (c) 2019, ARM Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <common/bl_common.h>
-#include <cortex_hercules.h>
-#include <cpu_macros.S>
-#include <plat_macros.S>
-
-/* Hardware handled coherency */
-#if HW_ASSISTED_COHERENCY == 0
-#error "cortex_hercules must be compiled with HW_ASSISTED_COHERENCY enabled"
-#endif
-
-
-/* --------------------------------------------------
- * Errata Workaround for Hercules Erratum 1688305.
- * This applies to revision r0p0 and r1p0 of Hercules.
- * Inputs:
- * x0: variant[4:7] and revision[0:3] of current cpu.
- * Shall clobber: x0-x17
- * --------------------------------------------------
- */
-func errata_hercules_1688305_wa
- /* Compare x0 against revision r1p0 */
- mov x17, x30
- bl check_errata_1688305
- cbz x0, 1f
- mrs x1, CORTEX_HERCULES_ACTLR2_EL1
- orr x1, x1, CORTEX_HERCULES_ACTLR2_EL1_BIT_1
- msr CORTEX_HERCULES_ACTLR2_EL1, x1
- isb
-1:
- ret x17
-endfunc errata_hercules_1688305_wa
-
-func check_errata_1688305
- /* Applies to r0p0 and r1p0 */
- mov x1, #0x10
- b cpu_rev_var_ls
-endfunc check_errata_1688305
-
- /* -------------------------------------------------
- * The CPU Ops reset function for Cortex-Hercules
- * -------------------------------------------------
- */
-func cortex_hercules_reset_func
- mov x19, x30
- bl cpu_get_rev_var
- mov x18, x0
-
-#if ERRATA_HERCULES_1688305
- mov x0, x18
- bl errata_hercules_1688305_wa
-#endif
-
-#if ENABLE_AMU
- /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
- mrs x0, actlr_el3
- bic x0, x0, #CORTEX_HERCULES_ACTLR_TAM_BIT
- msr actlr_el3, x0
-
- /* Make sure accesses from non-secure EL0/EL1 are not trapped to EL2 */
- mrs x0, actlr_el2
- bic x0, x0, #CORTEX_HERCULES_ACTLR_TAM_BIT
- msr actlr_el2, x0
-
- /* Enable group0 counters */
- mov x0, #CORTEX_HERCULES_AMU_GROUP0_MASK
- msr CPUAMCNTENSET0_EL0, x0
-
- /* Enable group1 counters */
- mov x0, #CORTEX_HERCULES_AMU_GROUP1_MASK
- msr CPUAMCNTENSET1_EL0, x0
-#endif
-
- isb
- ret x19
-endfunc cortex_hercules_reset_func
-
- /* ---------------------------------------------
- * HW will do the cache maintenance while powering down
- * ---------------------------------------------
- */
-func cortex_hercules_core_pwr_dwn
- /* ---------------------------------------------
- * Enable CPU power down bit in power control register
- * ---------------------------------------------
- */
- mrs x0, CORTEX_HERCULES_CPUPWRCTLR_EL1
- orr x0, x0, #CORTEX_HERCULES_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
- msr CORTEX_HERCULES_CPUPWRCTLR_EL1, x0
- isb
- ret
-endfunc cortex_hercules_core_pwr_dwn
-
- /*
- * Errata printing function for cortex_hercules. Must follow AAPCS.
- */
-#if REPORT_ERRATA
-func cortex_hercules_errata_report
- stp x8, x30, [sp, #-16]!
-
- bl cpu_get_rev_var
- mov x8, x0
-
- /*
- * Report all errata. The revision-variant information is passed to
- * checking functions of each errata.
- */
- report_errata ERRATA_HERCULES_1688305, cortex_hercules, 1688305
-
- ldp x8, x30, [sp], #16
- ret
-endfunc cortex_hercules_errata_report
-#endif
-
- /* ---------------------------------------------
- * This function provides cortex_hercules specific
- * register information for crash reporting.
- * It needs to return with x6 pointing to
- * a list of register names in ascii and
- * x8 - x15 having values of registers to be
- * reported.
- * ---------------------------------------------
- */
-.section .rodata.cortex_hercules_regs, "aS"
-cortex_hercules_regs: /* The ascii list of register names to be reported */
- .asciz "cpuectlr_el1", ""
-
-func cortex_hercules_cpu_reg_dump
- adr x6, cortex_hercules_regs
- mrs x8, CORTEX_HERCULES_CPUECTLR_EL1
- ret
-endfunc cortex_hercules_cpu_reg_dump
-
-declare_cpu_ops cortex_hercules, CORTEX_HERCULES_MIDR, \
- cortex_hercules_reset_func, \
- cortex_hercules_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_klein.S b/lib/cpus/aarch64/cortex_klein.S
new file mode 100644
index 000000000..d3a8ab481
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_klein.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2020, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_klein.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex Klein must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex Klein supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_klein_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_KLEIN_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_KLEIN_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_KLEIN_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_klein_core_pwr_dwn
+
+ /*
+ * Errata printing function for Cortex Klein. Must follow AAPCS.
+ */
+#if REPORT_ERRATA
+func cortex_klein_errata_report
+ ret
+endfunc cortex_klein_errata_report
+#endif
+
+func cortex_klein_reset_func
+ /* Disable speculative loads */
+ msr SSBS, xzr
+ isb
+ ret
+endfunc cortex_klein_reset_func
+
+ /* ---------------------------------------------
+ * This function provides Cortex-Klein specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_klein_regs, "aS"
+cortex_klein_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_klein_cpu_reg_dump
+ adr x6, cortex_klein_regs
+ mrs x8, CORTEX_KLEIN_CPUECTLR_EL1
+ ret
+endfunc cortex_klein_cpu_reg_dump
+
+declare_cpu_ops cortex_klein, CORTEX_KLEIN_MIDR, \
+ cortex_klein_reset_func, \
+ cortex_klein_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_matterhorn.S b/lib/cpus/aarch64/cortex_matterhorn.S
new file mode 100644
index 000000000..4156f3cf8
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_matterhorn.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2020, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_matterhorn.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex Matterhorn must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex Matterhorn supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_matterhorn_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_MATTERHORN_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_MATTERHORN_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_MATTERHORN_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_matterhorn_core_pwr_dwn
+
+ /*
+ * Errata printing function for Cortex Matterhorn. Must follow AAPCS.
+ */
+#if REPORT_ERRATA
+func cortex_matterhorn_errata_report
+ ret
+endfunc cortex_matterhorn_errata_report
+#endif
+
+func cortex_matterhorn_reset_func
+ /* Disable speculative loads */
+ msr SSBS, xzr
+ isb
+ ret
+endfunc cortex_matterhorn_reset_func
+
+ /* ---------------------------------------------
+ * This function provides Cortex-Matterhorn specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_matterhorn_regs, "aS"
+cortex_matterhorn_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_matterhorn_cpu_reg_dump
+ adr x6, cortex_matterhorn_regs
+ mrs x8, CORTEX_MATTERHORN_CPUECTLR_EL1
+ ret
+endfunc cortex_matterhorn_cpu_reg_dump
+
+declare_cpu_ops cortex_matterhorn, CORTEX_MATTERHORN_MIDR, \
+ cortex_matterhorn_reset_func, \
+ cortex_matterhorn_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index 808c7f807..730b09beb 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2020, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -78,6 +78,10 @@ func prepare_cpu_pwr_dwn
mov x1, #CPU_PWR_DWN_OPS
add x1, x1, x2, lsl #3
ldr x1, [x0, x1]
+#if ENABLE_ASSERTIONS
+ cmp x1, #0
+ ASM_ASSERT(ne)
+#endif
br x1
endfunc prepare_cpu_pwr_dwn
@@ -136,6 +140,13 @@ endfunc do_cpu_reg_dump
* midr of the core. It reads the MIDR_EL1 and finds the matching
* entry in cpu_ops entries. Only the implementation and part number
* are used to match the entries.
+ *
+ * If cpu_ops for the MIDR_EL1 cannot be found and
+ * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
+ * default cpu_ops with an MIDR value of 0.
+ * (Implementation number 0x0 should be reserved for software use
+ * and therefore no clashes should happen with that default value).
+ *
* Return :
* x0 - The matching cpu_ops pointer on Success
* x0 - 0 on failure.
@@ -143,23 +154,26 @@ endfunc do_cpu_reg_dump
*/
.globl get_cpu_ops_ptr
func get_cpu_ops_ptr
- /* Get the cpu_ops start and end locations */
- adr x4, (__CPU_OPS_START__ + CPU_MIDR)
- adr x5, (__CPU_OPS_END__ + CPU_MIDR)
-
- /* Initialize the return parameter */
- mov x0, #0
-
/* Read the MIDR_EL1 */
mrs x2, midr_el1
mov_imm x3, CPU_IMPL_PN_MASK
/* Retain only the implementation and part number using mask */
and w2, w2, w3
+
+ /* Get the cpu_ops end location */
+ adr x5, (__CPU_OPS_END__ + CPU_MIDR)
+
+ /* Initialize the return parameter */
+ mov x0, #0
1:
+ /* Get the cpu_ops start location */
+ adr x4, (__CPU_OPS_START__ + CPU_MIDR)
+
+2:
/* Check if we have reached end of list */
cmp x4, x5
- b.eq error_exit
+ b.eq search_def_ptr
/* load the midr from the cpu_ops */
ldr x1, [x4], #CPU_OPS_SIZE
@@ -167,11 +181,35 @@ func get_cpu_ops_ptr
/* Check if midr matches to midr of this core */
cmp w1, w2
- b.ne 1b
+ b.ne 2b
/* Subtract the increment and offset to get the cpu-ops pointer */
sub x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+#ifdef SUPPORT_UNKNOWN_MPID
+ cbnz x2, exit_mpid_found
+ /* Mark the unsupported MPID flag */
+ adrp x1, unsupported_mpid_flag
+ add x1, x1, :lo12:unsupported_mpid_flag
+ str w2, [x1]
+exit_mpid_found:
+#endif
+ ret
+
+ /*
+ * Search again for a default pointer (MIDR = 0x0)
+ * or return error if already searched.
+ */
+search_def_ptr:
+#ifdef SUPPORT_UNKNOWN_MPID
+ cbz x2, error_exit
+ mov x2, #0
+ b 1b
error_exit:
+#endif
ret
endfunc get_cpu_ops_ptr
@@ -276,7 +314,15 @@ func print_errata_status
* turn.
*/
mrs x0, tpidr_el3
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
ldr x1, [x0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp x1, #0
+ ASM_ASSERT(ne)
+#endif
ldr x0, [x1, #CPU_ERRATA_FUNC]
cbz x0, .Lnoprint
@@ -326,6 +372,10 @@ func check_wa_cve_2017_5715
ASM_ASSERT(ne)
#endif
ldr x0, [x0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
ldr x0, [x0, #CPU_EXTRA1_FUNC]
/*
* If the reserved function pointer is NULL, this CPU
@@ -359,6 +409,10 @@ func wa_cve_2018_3639_get_disable_ptr
ASM_ASSERT(ne)
#endif
ldr x0, [x0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
ldr x0, [x0, #CPU_EXTRA2_FUNC]
ret
endfunc wa_cve_2018_3639_get_disable_ptr
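The restructured lookup now makes up to two passes over the cpu_ops table: first with the masked MIDR of the running core, then — if SUPPORT_UNKNOWN_MPID is enabled and nothing matched — with a key of 0 to find a default entry. A host-runnable C model of that flow; the table contents and struct layout are invented for illustration:

    #include <stddef.h>
    #include <stdio.h>

    #define CPU_IMPL_PN_MASK 0xff00fff0u   /* implementer + part number */

    struct cpu_ops { unsigned int midr; const char *name; };

    static const struct cpu_ops ops_table[] = {
        { 0x410fd0d0u, "cortex_a77" },
        { 0x00000000u, "generic" },        /* default entry: MIDR 0x0 */
    };

    static const struct cpu_ops *get_cpu_ops_ptr(unsigned int midr)
    {
        unsigned int key = midr & CPU_IMPL_PN_MASK;

        for (int pass = 0; pass < 2; pass++) {
            for (size_t i = 0; i < sizeof(ops_table) / sizeof(ops_table[0]); i++)
                if ((ops_table[i].midr & CPU_IMPL_PN_MASK) == key)
                    return &ops_table[i];
            if (key == 0)
                break;    /* already searched for the default */
            key = 0;      /* retry, looking for the MIDR 0x0 default */
        }
        return NULL;
    }

    int main(void)
    {
        const struct cpu_ops *ops = get_cpu_ops_ptr(0x412fd400u); /* unknown part */
        printf("resolved: %s\n", ops ? ops->name : "(none)");
        return 0;
    }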
diff --git a/lib/cpus/aarch64/denver.S b/lib/cpus/aarch64/denver.S
index c377b28b4..224ee2676 100644
--- a/lib/cpus/aarch64/denver.S
+++ b/lib/cpus/aarch64/denver.S
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -26,13 +27,17 @@
* table.
* -------------------------------------------------
*/
- .globl workaround_bpflush_runtime_exceptions
-
vector_base workaround_bpflush_runtime_exceptions
.macro apply_workaround
stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ /* Disable cycle counter when event counting is prohibited */
+ mrs x1, pmcr_el0
+ orr x0, x1, #PMCR_EL0_DP_BIT
+ msr pmcr_el0, x0
+ isb
+
/* -------------------------------------------------
* A new write-only system register where a write of
* 1 to bit 0 will cause the indirect branch predictor
@@ -156,13 +161,19 @@ endfunc denver_disable_ext_debug
* ----------------------------------------------------
*/
func denver_enable_dco
- mov x3, x30
+ /* DCO is not supported on PN5 and later */
+ mrs x1, midr_el1
+ mov_imm x2, DENVER_MIDR_PN4
+ cmp x1, x2
+ b.hi 1f
+
+ mov x18, x30
bl plat_my_core_pos
mov x1, #1
lsl x1, x1, x0
msr s3_0_c15_c0_2, x1
- mov x30, x3
- ret
+ mov x30, x18
+1: ret
endfunc denver_enable_dco
/* ----------------------------------------------------
@@ -170,10 +181,14 @@ endfunc denver_enable_dco
* ----------------------------------------------------
*/
func denver_disable_dco
-
- mov x3, x30
+ /* DCO is not supported on PN5 and later */
+ mrs x1, midr_el1
+ mov_imm x2, DENVER_MIDR_PN4
+ cmp x1, x2
+ b.hi 2f
/* turn off background work */
+ mov x18, x30
bl plat_my_core_pos
mov x1, #1
lsl x1, x1, x0
@@ -188,8 +203,8 @@ func denver_disable_dco
and x2, x2, x1
cbnz x2, 1b
- mov x30, x3
- ret
+ mov x30, x18
+2: ret
endfunc denver_disable_dco
func check_errata_cve_2017_5715
@@ -348,37 +363,23 @@ func denver_cpu_reg_dump
ret
endfunc denver_cpu_reg_dump
-declare_cpu_ops_wa denver, DENVER_MIDR_PN0, \
- denver_reset_func, \
- check_errata_cve_2017_5715, \
- CPU_NO_EXTRA2_FUNC, \
- denver_core_pwr_dwn, \
- denver_cluster_pwr_dwn
-
-declare_cpu_ops_wa denver, DENVER_MIDR_PN1, \
- denver_reset_func, \
- check_errata_cve_2017_5715, \
- CPU_NO_EXTRA2_FUNC, \
- denver_core_pwr_dwn, \
- denver_cluster_pwr_dwn
-
-declare_cpu_ops_wa denver, DENVER_MIDR_PN2, \
- denver_reset_func, \
- check_errata_cve_2017_5715, \
- CPU_NO_EXTRA2_FUNC, \
- denver_core_pwr_dwn, \
- denver_cluster_pwr_dwn
-
-declare_cpu_ops_wa denver, DENVER_MIDR_PN3, \
- denver_reset_func, \
- check_errata_cve_2017_5715, \
- CPU_NO_EXTRA2_FUNC, \
- denver_core_pwr_dwn, \
- denver_cluster_pwr_dwn
-
-declare_cpu_ops_wa denver, DENVER_MIDR_PN4, \
- denver_reset_func, \
- check_errata_cve_2017_5715, \
- CPU_NO_EXTRA2_FUNC, \
- denver_core_pwr_dwn, \
- denver_cluster_pwr_dwn
+/* macro to declare cpu_ops for Denver SKUs */
+.macro denver_cpu_ops_wa midr
+ declare_cpu_ops_wa denver, \midr, \
+ denver_reset_func, \
+ check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
+ denver_core_pwr_dwn, \
+ denver_cluster_pwr_dwn
+.endm
+
+denver_cpu_ops_wa DENVER_MIDR_PN0
+denver_cpu_ops_wa DENVER_MIDR_PN1
+denver_cpu_ops_wa DENVER_MIDR_PN2
+denver_cpu_ops_wa DENVER_MIDR_PN3
+denver_cpu_ops_wa DENVER_MIDR_PN4
+denver_cpu_ops_wa DENVER_MIDR_PN5
+denver_cpu_ops_wa DENVER_MIDR_PN6
+denver_cpu_ops_wa DENVER_MIDR_PN7
+denver_cpu_ops_wa DENVER_MIDR_PN8
+denver_cpu_ops_wa DENVER_MIDR_PN9
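Two behavioural points in this hunk: the five hand-written declare_cpu_ops_wa blocks collapse into one assembler macro that now also covers PN5-PN9, and the DCO enable/disable paths first compare MIDR_EL1 against DENVER_MIDR_PN4, skipping the DCO writes on PN5 and later (the b.hi branch). A C sketch of that gate — the MIDR constant is a stand-in, not the header's value:

    #include <stdbool.h>
    #include <stdio.h>

    #define DENVER_MIDR_PN4 0x4e0f0040u   /* stand-in value for illustration */

    /* Mirrors the new 'cmp x1, x2; b.hi skip' gate: DCO is only driven on
     * parts up to and including PN4. */
    static bool denver_dco_supported(unsigned int midr)
    {
        return midr <= DENVER_MIDR_PN4;
    }

    int main(void)
    {
        printf("PN4: %d, PN5+: %d\n",
               denver_dco_supported(DENVER_MIDR_PN4),
               denver_dco_supported(DENVER_MIDR_PN4 + 1));
        return 0;
    }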
diff --git a/lib/cpus/aarch64/dsu_helpers.S b/lib/cpus/aarch64/dsu_helpers.S
index 100ffaa97..da052d5c9 100644
--- a/lib/cpus/aarch64/dsu_helpers.S
+++ b/lib/cpus/aarch64/dsu_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -72,18 +72,36 @@ endfunc errata_dsu_798953_wa
* This function is called from both assembly and C environment. So it
* follows AAPCS.
*
- * Clobbers: x0-x3
+ * Clobbers: x0-x15
* -----------------------------------------------------------------------
*/
.globl check_errata_dsu_936184
.globl errata_dsu_936184_wa
+ .weak is_scu_present_in_dsu
+
+ /* --------------------------------------------------------------------
+ * Default behaviour assumes the SCU is always present with the DSU.
+ * CPUs can override this definition if required.
+ *
+ * Can clobber only: x0-x14
+ * --------------------------------------------------------------------
+ */
+func is_scu_present_in_dsu
+ mov x0, #1
+ ret
+endfunc is_scu_present_in_dsu
func check_errata_dsu_936184
- mov x2, #ERRATA_NOT_APPLIES
- mov x3, #ERRATA_APPLIES
+ mov x15, x30
+ bl is_scu_present_in_dsu
+ cmp x0, xzr
+ /* Default error status */
+ mov x0, #ERRATA_NOT_APPLIES
+
+ /* If SCU is not present, return without applying patch */
+ b.eq 1f
/* Erratum applies only if DSU has the ACP interface */
- mov x0, x2
mrs x1, CLUSTERCFR_EL1
ubfx x1, x1, #CLUSTERCFR_ACP_SHIFT, #1
cbz x1, 1f
@@ -92,13 +110,13 @@ func check_errata_dsu_936184
mrs x1, CLUSTERIDR_EL1
/* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
- ubfx x0, x1, #CLUSTERIDR_REV_SHIFT,\
+ ubfx x2, x1, #CLUSTERIDR_REV_SHIFT,\
#(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
- mov x1, #(0x2 << CLUSTERIDR_VAR_SHIFT)
- cmp x0, x1
- csel x0, x2, x3, hs
+ cmp x2, #(0x2 << CLUSTERIDR_VAR_SHIFT)
+ b.hs 1f
+ mov x0, #ERRATA_APPLIES
1:
- ret
+ ret x15
endfunc check_errata_dsu_936184
/* --------------------------------------------------
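The key mechanism in this hunk is the .weak export: dsu_helpers.S supplies a default is_scu_present_in_dsu that always reports an SCU, and any core file can ship a strong definition that replaces it at link time (neoverse_n_common.S, later in this diff, does exactly that). The same pattern in C, host-runnable as-is and overridable by linking in a strong definition:

    #include <stdio.h>

    /* Weak default, analogous to the .weak asm symbol: a strong definition
     * in another object file silently wins at link time. */
    __attribute__((weak)) int is_scu_present_in_dsu(void)
    {
        return 1;   /* default: assume an SCU is fitted */
    }

    int main(void)
    {
        printf("SCU present: %d\n", is_scu_present_in_dsu());
        return 0;
    }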
diff --git a/lib/cpus/aarch64/generic.S b/lib/cpus/aarch64/generic.S
new file mode 100644
index 000000000..ef1f048a1
--- /dev/null
+++ b/lib/cpus/aarch64/generic.S
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <generic.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+ /* ---------------------------------------------
+ * Disable L1 data cache and unified L2 cache
+ * ---------------------------------------------
+ */
+func generic_disable_dcache
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+ ret
+endfunc generic_disable_dcache
+
+func generic_core_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl generic_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ ret x18
+endfunc generic_core_pwr_dwn
+
+func generic_cluster_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl generic_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level2
+
+ ret x18
+
+endfunc generic_cluster_pwr_dwn
+
+/* ---------------------------------------------
+ * Unimplemented functions.
+ * ---------------------------------------------
+ */
+.equ generic_errata_report, 0
+.equ generic_cpu_reg_dump, 0
+.equ generic_reset_func, 0
+
+declare_cpu_ops generic, AARCH64_GENERIC_MIDR, \
+ generic_reset_func, \
+ generic_core_pwr_dwn, \
+ generic_cluster_pwr_dwn
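The ordering in the two new power-down routines matters: the data cache is disabled first so the flush cannot race with fresh allocations, L1 is flushed on both paths, and only the cluster path goes on to quiesce the optional ACP before flushing L2. A C outline of that sequencing; the helper names are stand-ins for the assembly routines:

    #include <stdio.h>

    static void disable_dcache(void) { puts("SCTLR_EL3.C <- 0"); }
    static void flush_l1(void)       { puts("dcsw_op_level1(DCCISW)"); }
    static void disable_acp(void)    { puts("plat_disable_acp()"); }
    static void flush_l2(void)       { puts("dcsw_op_level2(DCCISW)"); }

    static void core_pwr_dwn(void)
    {
        disable_dcache();   /* stop new allocations before flushing */
        flush_l1();
    }

    static void cluster_pwr_dwn(void)
    {
        disable_dcache();
        flush_l1();
        disable_acp();      /* quiesce the ACP before touching L2 */
        flush_l2();
    }

    int main(void)
    {
        core_pwr_dwn();
        cluster_pwr_dwn();
        return 0;
    }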
diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S
index d537ed6a8..9c97cf60a 100644
--- a/lib/cpus/aarch64/neoverse_n1.S
+++ b/lib/cpus/aarch64/neoverse_n1.S
@@ -1,15 +1,15 @@
/*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
-#include <neoverse_n1.h>
#include <cpuamu.h>
#include <cpu_macros.S>
#include <context.h>
+#include <neoverse_n1.h>
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
@@ -375,6 +375,95 @@ func check_errata_1542419
b cpu_rev_var_range
endfunc check_errata_1542419
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Errata #1868343.
+ * This applies to revision <= r4p0 of Neoverse N1.
+ * This workaround is the same as the workaround for
+ * errata 1262606 and 1275112 but applies to a wider
+ * revision range.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1868343_wa
+ /*
+ * Compare x0 against revision r4p0
+ */
+ mov x17, x30
+ bl check_errata_1868343
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N1_CPUACTLR_EL1
+ orr x1, x1, NEOVERSE_N1_CPUACTLR_EL1_BIT_13
+ msr NEOVERSE_N1_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_n1_1868343_wa
+
+func check_errata_1868343
+ /* Applies to everything <= r4p0 */
+ mov x1, #0x40
+ b cpu_rev_var_ls
+endfunc check_errata_1868343
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Errata #1946160.
+ * This applies to revisions r3p0, r3p1, r4p0, and
+ * r4p1 of Neoverse N1. It also exists in r0p0, r1p0,
+ * and r2p0 but there is no fix in these revisions.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1946160_wa
+ /*
+ * Compare x0 against r3p0 - r4p1
+ */
+ mov x17, x30
+ bl check_errata_1946160
+ cbz x0, 1f
+
+ mov x0, #3
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3900002
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #4
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #5
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ isb
+1:
+ ret x17
+endfunc errata_n1_1946160_wa
+
+func check_errata_1946160
+ /* Applies to r3p0 - r4p1. */
+ mov x1, #0x30
+ mov x2, #0x41
+ b cpu_rev_var_range
+endfunc check_errata_1946160
+
func neoverse_n1_reset_func
mov x19, x30
@@ -449,6 +538,16 @@ func neoverse_n1_reset_func
bl errata_n1_1542419_wa
#endif
+#if ERRATA_N1_1868343
+ mov x0, x18
+ bl errata_n1_1868343_wa
+#endif
+
+#if ERRATA_N1_1946160
+ mov x0, x18
+ bl errata_n1_1946160_wa
+#endif
+
#if ENABLE_AMU
/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
mrs x0, actlr_el3
@@ -465,7 +564,7 @@ func neoverse_n1_reset_func
msr CPUAMCNTENSET_EL0, x0
#endif
-#if NEOVERSE_N1_EXTERNAL_LLC
+#if NEOVERSE_Nx_EXTERNAL_LLC
/* Some system may have External LLC, core needs to be made aware */
mrs x0, NEOVERSE_N1_CPUECTLR_EL1
orr x0, x0, NEOVERSE_N1_CPUECTLR_EL1_EXTLLC_BIT
@@ -522,6 +621,8 @@ func neoverse_n1_errata_report
report_errata ERRATA_N1_1275112, neoverse_n1, 1275112
report_errata ERRATA_N1_1315703, neoverse_n1, 1315703
report_errata ERRATA_N1_1542419, neoverse_n1, 1542419
+ report_errata ERRATA_N1_1868343, neoverse_n1, 1868343
+ report_errata ERRATA_N1_1946160, neoverse_n1, 1946160
report_errata ERRATA_DSU_936184, neoverse_n1, dsu_936184
ldp x8, x30, [sp], #16
diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S
new file mode 100644
index 000000000..8d646cba5
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_n2.S
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpu_macros.S>
+#include <neoverse_n2.h>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Neoverse N2 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Neoverse-N2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Neoverse N2.
+ * -------------------------------------------------
+ */
+func neoverse_n2_reset_func
+ /* Check if the PE implements SSBS */
+ mrs x0, id_aa64pfr1_el1
+ tst x0, #(ID_AA64PFR1_EL1_SSBS_MASK << ID_AA64PFR1_EL1_SSBS_SHIFT)
+ b.eq 1f
+
+ /* Disable speculative loads */
+ msr SSBS, xzr
+1:
+ /* Force all cacheable atomic instructions to be near */
+ mrs x0, NEOVERSE_N2_CPUACTLR2_EL1
+ orr x0, x0, #NEOVERSE_N2_CPUACTLR2_EL1_BIT_2
+ msr NEOVERSE_N2_CPUACTLR2_EL1, x0
+
+#if ENABLE_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ mrs x0, cptr_el3
+ orr x0, x0, #TAM_BIT
+ msr cptr_el3, x0
+
+ /* Make sure accesses from EL0/EL1 are not trapped to EL2 */
+ mrs x0, cptr_el2
+ orr x0, x0, #TAM_BIT
+ msr cptr_el2, x0
+
+ /* No need to enable the counters as this would be done at el3 exit */
+#endif
+
+#if NEOVERSE_Nx_EXTERNAL_LLC
+ /* Some systems may have External LLC, core needs to be made aware */
+ mrs x0, NEOVERSE_N2_CPUECTLR_EL1
+ orr x0, x0, NEOVERSE_N2_CPUECTLR_EL1_EXTLLC_BIT
+ msr NEOVERSE_N2_CPUECTLR_EL1, x0
+#endif
+
+ isb
+ ret
+endfunc neoverse_n2_reset_func
+
+func neoverse_n2_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * No need to do cache maintenance here.
+ * ---------------------------------------------
+ */
+ mrs x0, NEOVERSE_N2_CPUPWRCTLR_EL1
+ orr x0, x0, #NEOVERSE_N2_CORE_PWRDN_EN_BIT
+ msr NEOVERSE_N2_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc neoverse_n2_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Neoverse N2 cores. Must follow AAPCS.
+ */
+func neoverse_n2_errata_report
+ /* No errata reported for Neoverse N2 cores */
+ ret
+endfunc neoverse_n2_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides Neoverse N2 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ASCII and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.neoverse_n2_regs, "aS"
+neoverse_n2_regs: /* The ASCII list of register names to be reported */
+ .asciz "cpupwrctlr_el1", ""
+
+func neoverse_n2_cpu_reg_dump
+ adr x6, neoverse_n2_regs
+ mrs x8, NEOVERSE_N2_CPUPWRCTLR_EL1
+ ret
+endfunc neoverse_n2_cpu_reg_dump
+
+declare_cpu_ops neoverse_n2, NEOVERSE_N2_MIDR, \
+ neoverse_n2_reset_func, \
+ neoverse_n2_core_pwr_dwn
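The SSBS write at the top of the N2 reset function is guarded by a feature probe: ID_AA64PFR1_EL1.SSBS (bits [7:4] in the Arm ARM) must be non-zero before the MSR SSBS is attempted. A C model of the guard; the field constants follow the architecture, the sample register values are invented:

    #include <stdio.h>

    #define ID_AA64PFR1_EL1_SSBS_SHIFT 4
    #define ID_AA64PFR1_EL1_SSBS_MASK  0xfull

    /* Mirrors the 'tst x0, #(MASK << SHIFT)' check in the reset function. */
    static int pe_implements_ssbs(unsigned long long pfr1)
    {
        return (pfr1 & (ID_AA64PFR1_EL1_SSBS_MASK <<
                        ID_AA64PFR1_EL1_SSBS_SHIFT)) != 0;
    }

    int main(void)
    {
        printf("SSBS=1 PE: %d\n", pe_implements_ssbs(0x10ull)); /* field = 1 */
        printf("SSBS=0 PE: %d\n", pe_implements_ssbs(0x00ull));
        return 0;
    }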
diff --git a/lib/cpus/aarch64/neoverse_n_common.S b/lib/cpus/aarch64/neoverse_n_common.S
new file mode 100644
index 000000000..b816342ba
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_n_common.S
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <neoverse_n_common.h>
+
+ .global is_scu_present_in_dsu
+
+/*
+ * Check whether the SCU L3 unit is present in the DSU.
+ * Returns:
+ *	1 -> SCU present
+ *	0 -> SCU not present
+ *
+ * This function is declared as weak in dsu_helpers.S and must be
+ * overridden for Neoverse Nx cores.
+ */
+
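+/*
+ * Note: CPUCFR_EL1.SCU reads as 0 when the SCU/L3 logic is present and
+ * 1 when it is absent, hence the eor below inverts the extracted bit
+ * to match the return convention documented above.
+ */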
+func is_scu_present_in_dsu
+ mrs x0, CPUCFR_EL1
+ ubfx x0, x0, #SCU_SHIFT, #1
+ eor x0, x0, #1
+ ret
+endfunc is_scu_present_in_dsu
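+
+/*
+ * Usage sketch (hypothetical caller, for illustration): a power-down
+ * path could branch on the result, e.g.
+ *
+ *	bl	is_scu_present_in_dsu
+ *	cbz	x0, 1f		/* no SCU/L3, skip L3-specific handling */
+ */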
diff --git a/lib/cpus/aarch64/neoverse_zeus.S b/lib/cpus/aarch64/neoverse_v1.S
index 44882b459..733629425 100644
--- a/lib/cpus/aarch64/neoverse_zeus.S
+++ b/lib/cpus/aarch64/neoverse_v1.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2020, ARM Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,46 +7,46 @@
#include <arch.h>
#include <asm_macros.S>
#include <common/bl_common.h>
-#include <neoverse_zeus.h>
+#include <neoverse_v1.h>
#include <cpu_macros.S>
#include <plat_macros.S>
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
-#error "Neoverse Zeus must be compiled with HW_ASSISTED_COHERENCY enabled"
+#error "Neoverse V1 must be compiled with HW_ASSISTED_COHERENCY enabled"
#endif
/* 64-bit only core */
#if CTX_INCLUDE_AARCH32_REGS == 1
-#error "Neoverse-Zeus supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#error "Neoverse-V1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
/* ---------------------------------------------
* HW will do the cache maintenance while powering down
* ---------------------------------------------
*/
-func neoverse_zeus_core_pwr_dwn
+func neoverse_v1_core_pwr_dwn
/* ---------------------------------------------
* Enable CPU power down bit in power control register
* ---------------------------------------------
*/
- mrs x0, NEOVERSE_ZEUS_CPUPWRCTLR_EL1
- orr x0, x0, #NEOVERSE_ZEUS_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
- msr NEOVERSE_ZEUS_CPUPWRCTLR_EL1, x0
+ mrs x0, NEOVERSE_V1_CPUPWRCTLR_EL1
+ orr x0, x0, #NEOVERSE_V1_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr NEOVERSE_V1_CPUPWRCTLR_EL1, x0
isb
ret
-endfunc neoverse_zeus_core_pwr_dwn
+endfunc neoverse_v1_core_pwr_dwn
/*
- * Errata printing function for Neoverse Zeus. Must follow AAPCS.
+ * Errata printing function for Neoverse V1. Must follow AAPCS.
*/
#if REPORT_ERRATA
-func neoverse_zeus_errata_report
+func neoverse_v1_errata_report
ret
-endfunc neoverse_zeus_errata_report
+endfunc neoverse_v1_errata_report
#endif
-func neoverse_zeus_reset_func
+func neoverse_v1_reset_func
mov x19, x30
/* Disable speculative loads */
@@ -54,10 +54,10 @@ func neoverse_zeus_reset_func
isb
ret x19
-endfunc neoverse_zeus_reset_func
+endfunc neoverse_v1_reset_func
/* ---------------------------------------------
- * This function provides Neoverse-Zeus specific
+ * This function provides Neoverse-V1 specific
* register information for crash reporting.
* It needs to return with x6 pointing to
* a list of register names in ascii and
@@ -65,16 +65,16 @@ endfunc neoverse_zeus_reset_func
* reported.
* ---------------------------------------------
*/
-.section .rodata.neoverse_zeus_regs, "aS"
-neoverse_zeus_regs: /* The ascii list of register names to be reported */
+.section .rodata.neoverse_v1_regs, "aS"
+neoverse_v1_regs: /* The ascii list of register names to be reported */
.asciz "cpuectlr_el1", ""
-func neoverse_zeus_cpu_reg_dump
- adr x6, neoverse_zeus_regs
- mrs x8, NEOVERSE_ZEUS_CPUECTLR_EL1
+func neoverse_v1_cpu_reg_dump
+ adr x6, neoverse_v1_regs
+ mrs x8, NEOVERSE_V1_CPUECTLR_EL1
ret
-endfunc neoverse_zeus_cpu_reg_dump
+endfunc neoverse_v1_cpu_reg_dump
-declare_cpu_ops neoverse_zeus, NEOVERSE_ZEUS_MIDR, \
- neoverse_zeus_reset_func, \
- neoverse_zeus_core_pwr_dwn
+declare_cpu_ops neoverse_v1, NEOVERSE_V1_MIDR, \
+ neoverse_v1_reset_func, \
+ neoverse_v1_core_pwr_dwn
diff --git a/lib/cpus/aarch64/rainier.S b/lib/cpus/aarch64/rainier.S
new file mode 100644
index 000000000..3017a5012
--- /dev/null
+++ b/lib/cpus/aarch64/rainier.S
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+#include <cpu_macros.S>
+#include <cpuamu.h>
+#include <rainier.h>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Rainier CPU must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Rainier CPU supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+/* --------------------------------------------------
+ * Disable speculative loads if Rainier supports
+ * SSBS.
+ *
+ * Shall clobber: x0.
+ * --------------------------------------------------
+ */
+func rainier_disable_speculative_loads
+ /* Check if the PE implements SSBS */
+ mrs x0, id_aa64pfr1_el1
+ tst x0, #(ID_AA64PFR1_EL1_SSBS_MASK << ID_AA64PFR1_EL1_SSBS_SHIFT)
+ b.eq 1f
+
+ /* Disable speculative loads */
+ msr SSBS, xzr
+
+1:
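+	/*
+	 * Note: clearing PSTATE.SSBS above tells the hardware that it is
+	 * not permitted to bypass stores speculatively, closing the
+	 * speculative load window this function's name refers to.
+	 */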
+ ret
+endfunc rainier_disable_speculative_loads
+
+func rainier_reset_func
+ mov x19, x30
+
+ bl rainier_disable_speculative_loads
+
+	/* Force all cacheable atomic instructions to be near */
+ mrs x0, RAINIER_CPUACTLR2_EL1
+ orr x0, x0, #RAINIER_CPUACTLR2_EL1_BIT_2
+ msr RAINIER_CPUACTLR2_EL1, x0
+ isb
+
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ENABLE_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ mrs x0, actlr_el3
+ orr x0, x0, #RAINIER_ACTLR_AMEN_BIT
+ msr actlr_el3, x0
+
+ /* Make sure accesses from EL0/EL1 are not trapped to EL2 */
+ mrs x0, actlr_el2
+ orr x0, x0, #RAINIER_ACTLR_AMEN_BIT
+ msr actlr_el2, x0
+
+ /* Enable group0 counters */
+ mov x0, #RAINIER_AMU_GROUP0_MASK
+ msr CPUAMCNTENSET_EL0, x0
+#endif
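+
+	/*
+	 * Note: this mirrors the Neoverse N1 AMU sequence (ACTLR AMEN
+	 * bits plus an explicit group0 counter enable through
+	 * CPUAMCNTENSET_EL0), rather than the CPTR TAM approach used in
+	 * neoverse_n2.S, which defers counter enablement to EL3 exit.
+	 */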
+
+ isb
+ ret x19
+endfunc rainier_reset_func
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func rainier_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ mrs x0, RAINIER_CPUPWRCTLR_EL1
+ orr x0, x0, #RAINIER_CORE_PWRDN_EN_MASK
+ msr RAINIER_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc rainier_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Rainier. Must follow AAPCS.
+ */
+func rainier_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	/* Stash the cpu revision in x8 for use by report_errata entries */
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/* No errata are currently reported for Rainier */
+
+	ldp	x8, x30, [sp], #16
+	ret
+endfunc rainier_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides Rainier specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.rainier_regs, "aS"
+rainier_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func rainier_cpu_reg_dump
+ adr x6, rainier_regs
+ mrs x8, RAINIER_CPUECTLR_EL1
+ ret
+endfunc rainier_cpu_reg_dump
+
+declare_cpu_ops rainier, RAINIER_MIDR, \
+ rainier_reset_func, \
+ rainier_core_pwr_dwn
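+
+/*
+ * Note: a platform opts in to this support by adding the file to its
+ * build, typically by appending lib/cpus/aarch64/rainier.S to the BL31
+ * source list in the platform makefile.
+ */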