Diffstat (limited to 'lib/extensions')
-rw-r--r--  lib/extensions/amu/aarch32/amu.c      | 178
-rw-r--r--  lib/extensions/amu/aarch64/amu.c      | 167
-rw-r--r--  lib/extensions/mpam/mpam.c            |  21
-rw-r--r--  lib/extensions/mtpmu/aarch32/mtpmu.S  | 105
-rw-r--r--  lib/extensions/mtpmu/aarch64/mtpmu.S  |  96
-rw-r--r--  lib/extensions/ras/ras_common.c       |  42
-rw-r--r--  lib/extensions/spe/spe.c              |   2
7 files changed, 496 insertions(+), 115 deletions(-)
diff --git a/lib/extensions/amu/aarch32/amu.c b/lib/extensions/amu/aarch32/amu.c
index 82d2e1864..0f75f0791 100644
--- a/lib/extensions/amu/aarch32/amu.c
+++ b/lib/extensions/amu/aarch32/amu.c
@@ -1,39 +1,73 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <assert.h>
#include <stdbool.h>
#include <arch.h>
#include <arch_helpers.h>
+
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>
-#include <plat/common/platform.h>
-
-#define AMU_GROUP0_NR_COUNTERS 4
-struct amu_ctx {
- uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
- uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
-};
+#include <plat/common/platform.h>
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
+/* Check if AMUv1 for Armv8.4 or Armv8.6 is implemented */
bool amu_supported(void)
{
- uint64_t features;
+ uint32_t features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
- features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
- return (features & ID_PFR0_AMU_MASK) == 1U;
+ features &= ID_PFR0_AMU_MASK;
+ return ((features == 1U) || (features == 2U));
}
+#if AMU_GROUP1_NR_COUNTERS
+/* Check if group 1 counters are implemented */
+bool amu_group1_supported(void)
+{
+ uint32_t features = read_amcfgr() >> AMCFGR_NCG_SHIFT;
+
+ return (features & AMCFGR_NCG_MASK) == 1U;
+}
+#endif
+
+/*
+ * Enable counters. This function is meant to be invoked
+ * by the context management library before exiting from EL3.
+ */
void amu_enable(bool el2_unused)
{
- if (!amu_supported())
+ if (!amu_supported()) {
return;
+ }
+
+#if AMU_GROUP1_NR_COUNTERS
+ /* Check and set presence of group 1 counters */
+ if (!amu_group1_supported()) {
+ ERROR("AMU Counter Group 1 is not implemented\n");
+ panic();
+ }
+
+ /* Check number of group 1 counters */
+ uint32_t cnt_num = (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
+ AMCGCR_CG1NC_MASK;
+ VERBOSE("%s%u. %s%u\n",
+ "Number of AMU Group 1 Counters ", cnt_num,
+ "Requested number ", AMU_GROUP1_NR_COUNTERS);
+
+ if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
+ ERROR("%s%u is less than %s%u\n",
+ "Number of AMU Group 1 Counters ", cnt_num,
+ "Requested number ", AMU_GROUP1_NR_COUNTERS);
+ panic();
+ }
+#endif
if (el2_unused) {
uint64_t v;
@@ -49,112 +83,156 @@ void amu_enable(bool el2_unused)
/* Enable group 0 counters */
write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
+#if AMU_GROUP1_NR_COUNTERS
/* Enable group 1 counters */
write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
+#endif
}
/* Read the group 0 counter identified by the given `idx`. */
-uint64_t amu_group0_cnt_read(int idx)
+uint64_t amu_group0_cnt_read(unsigned int idx)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
+ assert(idx < AMU_GROUP0_NR_COUNTERS);
return amu_group0_cnt_read_internal(idx);
}
-/* Write the group 0 counter identified by the given `idx` with `val`. */
-void amu_group0_cnt_write(int idx, uint64_t val)
+/* Write the group 0 counter identified by the given `idx` with `val` */
+void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
+ assert(idx < AMU_GROUP0_NR_COUNTERS);
amu_group0_cnt_write_internal(idx, val);
isb();
}
-/* Read the group 1 counter identified by the given `idx`. */
-uint64_t amu_group1_cnt_read(int idx)
+#if AMU_GROUP1_NR_COUNTERS
+/* Read the group 1 counter identified by the given `idx` */
+uint64_t amu_group1_cnt_read(unsigned int idx)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+ assert(amu_group1_supported());
+ assert(idx < AMU_GROUP1_NR_COUNTERS);
return amu_group1_cnt_read_internal(idx);
}
-/* Write the group 1 counter identified by the given `idx` with `val`. */
-void amu_group1_cnt_write(int idx, uint64_t val)
+/* Write the group 1 counter identified by the given `idx` with `val` */
+void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+ assert(amu_group1_supported());
+ assert(idx < AMU_GROUP1_NR_COUNTERS);
amu_group1_cnt_write_internal(idx, val);
isb();
}
-void amu_group1_set_evtype(int idx, unsigned int val)
+/*
+ * Program the event type register for the given `idx` with
+ * the event number `val`
+ */
+void amu_group1_set_evtype(unsigned int idx, unsigned int val)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+ assert(amu_group1_supported());
+ assert(idx < AMU_GROUP1_NR_COUNTERS);
amu_group1_set_evtype_internal(idx, val);
isb();
}
+#endif /* AMU_GROUP1_NR_COUNTERS */
static void *amu_context_save(const void *arg)
{
- struct amu_ctx *ctx;
- int i;
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ unsigned int i;
- if (!amu_supported())
+ if (!amu_supported()) {
return (void *)-1;
+ }
- ctx = &amu_ctxs[plat_my_core_pos()];
-
- /* Assert that group 0 counter configuration is what we expect */
- assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK &&
- read_amcntenset1() == AMU_GROUP1_COUNTERS_MASK);
+#if AMU_GROUP1_NR_COUNTERS
+ if (!amu_group1_supported()) {
+ return (void *)-1;
+ }
+#endif
+ /* Assert that group 0/1 counter configuration is what we expect */
+	assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK);
+#if AMU_GROUP1_NR_COUNTERS
+	assert(read_amcntenset1() == AMU_GROUP1_COUNTERS_MASK);
+#endif
/*
- * Disable group 0 counters to avoid other observers like SCP sampling
+ * Disable group 0/1 counters to avoid other observers like SCP sampling
* counter values from the future via the memory mapped view.
*/
write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);
+
+#if AMU_GROUP1_NR_COUNTERS
write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK);
+#endif
isb();
- for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+ /* Save all group 0 counters */
+ for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
ctx->group0_cnts[i] = amu_group0_cnt_read(i);
+ }
- for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
- ctx->group1_cnts[i] = amu_group1_cnt_read(i);
-
+#if AMU_GROUP1_NR_COUNTERS
+ /* Save group 1 counters */
+ for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
+ if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
+ ctx->group1_cnts[i] = amu_group1_cnt_read(i);
+ }
+ }
+#endif
return (void *)0;
}
static void *amu_context_restore(const void *arg)
{
- struct amu_ctx *ctx;
- int i;
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ unsigned int i;
- if (!amu_supported())
+ if (!amu_supported()) {
return (void *)-1;
+ }
- ctx = &amu_ctxs[plat_my_core_pos()];
-
+#if AMU_GROUP1_NR_COUNTERS
+ if (!amu_group1_supported()) {
+ return (void *)-1;
+ }
+#endif
/* Counters were disabled in `amu_context_save()` */
- assert((read_amcntenset0() == 0U) && (read_amcntenset1() == 0U));
+	assert(read_amcntenset0() == 0U);
+
+#if AMU_GROUP1_NR_COUNTERS
+	assert(read_amcntenset1() == 0U);
+#endif
- /* Restore group 0 counters */
- for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+ /* Restore all group 0 counters */
+ for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
amu_group0_cnt_write(i, ctx->group0_cnts[i]);
- for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
- amu_group1_cnt_write(i, ctx->group1_cnts[i]);
+ }
- /* Enable group 0 counters */
+ /* Restore group 0 counter configuration */
write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
- /* Enable group 1 counters */
+#if AMU_GROUP1_NR_COUNTERS
+ /* Restore group 1 counters */
+ for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
+ if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
+ amu_group1_cnt_write(i, ctx->group1_cnts[i]);
+ }
+ }
+
+ /* Restore group 1 counter configuration */
write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
+#endif
+
return (void *)0;
}
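All of the #if AMU_GROUP1_NR_COUNTERS guards above resolve at build time from platform configuration. As a reference point, here is a minimal sketch of what an opting-in platform might define (the PLAT_AMU_GROUP1_* names follow the pattern expected by include/lib/extensions/amu.h, but treat the exact macros as an assumption):

    /* platform_def.h: hypothetical platform enabling three group 1 counters */
    #define PLAT_AMU_GROUP1_COUNTERS_MASK	U(0x7)	/* counters 0, 1 and 2 */
    #define PLAT_AMU_GROUP1_NR_COUNTERS	U(3)	/* sizes the amu_ctx arrays */

If the platform defines nothing, AMU_GROUP1_NR_COUNTERS evaluates to 0 and every group 1 block above compiles out.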
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index 85f7007aa..499736345 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,38 +9,67 @@
#include <arch.h>
#include <arch_helpers.h>
+
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>
-#include <plat/common/platform.h>
-#define AMU_GROUP0_NR_COUNTERS 4
-
-struct amu_ctx {
- uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
- uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
-};
+#include <plat/common/platform.h>
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
+/* Check if AMUv1 for Armv8.4 or Armv8.6 is implemented */
bool amu_supported(void)
{
- uint64_t features;
+ uint64_t features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
+
+ features &= ID_AA64PFR0_AMU_MASK;
+ return ((features == 1U) || (features == 2U));
+}
+
+#if AMU_GROUP1_NR_COUNTERS
+/* Check if group 1 counters are implemented */
+bool amu_group1_supported(void)
+{
+ uint64_t features = read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT;
- features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
- return (features & ID_AA64PFR0_AMU_MASK) == 1U;
+ return (features & AMCFGR_EL0_NCG_MASK) == 1U;
}
+#endif
/*
- * Enable counters. This function is meant to be invoked
+ * Enable counters. This function is meant to be invoked
* by the context management library before exiting from EL3.
*/
void amu_enable(bool el2_unused)
{
uint64_t v;
- if (!amu_supported())
+ if (!amu_supported()) {
return;
+ }
+
+#if AMU_GROUP1_NR_COUNTERS
+ /* Check and set presence of group 1 counters */
+ if (!amu_group1_supported()) {
+ ERROR("AMU Counter Group 1 is not implemented\n");
+ panic();
+ }
+
+ /* Check number of group 1 counters */
+ uint64_t cnt_num = (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
+ AMCGCR_EL0_CG1NC_MASK;
+ VERBOSE("%s%llu. %s%u\n",
+ "Number of AMU Group 1 Counters ", cnt_num,
+ "Requested number ", AMU_GROUP1_NR_COUNTERS);
+
+ if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
+ ERROR("%s%llu is less than %s%u\n",
+ "Number of AMU Group 1 Counters ", cnt_num,
+ "Requested number ", AMU_GROUP1_NR_COUNTERS);
+ panic();
+ }
+#endif
if (el2_unused) {
/*
@@ -62,43 +91,49 @@ void amu_enable(bool el2_unused)
/* Enable group 0 counters */
write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+
+#if AMU_GROUP1_NR_COUNTERS
/* Enable group 1 counters */
write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
+#endif
}
/* Read the group 0 counter identified by the given `idx`. */
-uint64_t amu_group0_cnt_read(int idx)
+uint64_t amu_group0_cnt_read(unsigned int idx)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
+ assert(idx < AMU_GROUP0_NR_COUNTERS);
return amu_group0_cnt_read_internal(idx);
}
-/* Write the group 0 counter identified by the given `idx` with `val`. */
-void amu_group0_cnt_write(int idx, uint64_t val)
+/* Write the group 0 counter identified by the given `idx` with `val` */
+void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
+ assert(idx < AMU_GROUP0_NR_COUNTERS);
amu_group0_cnt_write_internal(idx, val);
isb();
}
-/* Read the group 1 counter identified by the given `idx`. */
-uint64_t amu_group1_cnt_read(int idx)
+#if AMU_GROUP1_NR_COUNTERS
+/* Read the group 1 counter identified by the given `idx` */
+uint64_t amu_group1_cnt_read(unsigned int idx)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+ assert(amu_group1_supported());
+ assert(idx < AMU_GROUP1_NR_COUNTERS);
return amu_group1_cnt_read_internal(idx);
}
-/* Write the group 1 counter identified by the given `idx` with `val`. */
-void amu_group1_cnt_write(int idx, uint64_t val)
+/* Write the group 1 counter identified by the given `idx` with `val` */
+void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+ assert(amu_group1_supported());
+ assert(idx < AMU_GROUP1_NR_COUNTERS);
amu_group1_cnt_write_internal(idx, val);
isb();
@@ -106,78 +141,106 @@ void amu_group1_cnt_write(int idx, uint64_t val)
/*
* Program the event type register for the given `idx` with
- * the event number `val`.
+ * the event number `val`
*/
-void amu_group1_set_evtype(int idx, unsigned int val)
+void amu_group1_set_evtype(unsigned int idx, unsigned int val)
{
assert(amu_supported());
- assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+ assert(amu_group1_supported());
+ assert(idx < AMU_GROUP1_NR_COUNTERS);
amu_group1_set_evtype_internal(idx, val);
isb();
}
+#endif /* AMU_GROUP1_NR_COUNTERS */
static void *amu_context_save(const void *arg)
{
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
- int i;
+ unsigned int i;
- if (!amu_supported())
+ if (!amu_supported()) {
return (void *)-1;
+ }
+#if AMU_GROUP1_NR_COUNTERS
+ if (!amu_group1_supported()) {
+ return (void *)-1;
+ }
+#endif
/* Assert that group 0/1 counter configuration is what we expect */
- assert((read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK) &&
- (read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK));
-
- assert(((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
- <= AMU_GROUP1_NR_COUNTERS);
+ assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);
+#if AMU_GROUP1_NR_COUNTERS
+ assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
+#endif
/*
* Disable group 0/1 counters to avoid other observers like SCP sampling
* counter values from the future via the memory mapped view.
*/
write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);
+
+#if AMU_GROUP1_NR_COUNTERS
write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
+#endif
isb();
- /* Save group 0 counters */
- for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+ /* Save all group 0 counters */
+ for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
ctx->group0_cnts[i] = amu_group0_cnt_read(i);
+ }
+#if AMU_GROUP1_NR_COUNTERS
/* Save group 1 counters */
- for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
- ctx->group1_cnts[i] = amu_group1_cnt_read(i);
-
+ for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
+ if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
+ ctx->group1_cnts[i] = amu_group1_cnt_read(i);
+ }
+ }
+#endif
return (void *)0;
}
static void *amu_context_restore(const void *arg)
{
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
- int i;
+ unsigned int i;
- if (!amu_supported())
+ if (!amu_supported()) {
return (void *)-1;
+ }
+#if AMU_GROUP1_NR_COUNTERS
+ if (!amu_group1_supported()) {
+ return (void *)-1;
+ }
+#endif
/* Counters were disabled in `amu_context_save()` */
- assert((read_amcntenset0_el0() == 0U) && (read_amcntenset1_el0() == 0U));
+ assert(read_amcntenset0_el0() == 0U);
- assert(((sizeof(int) * 8U) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
- <= AMU_GROUP1_NR_COUNTERS);
+#if AMU_GROUP1_NR_COUNTERS
+ assert(read_amcntenset1_el0() == 0U);
+#endif
- /* Restore group 0 counters */
- for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
- if ((AMU_GROUP0_COUNTERS_MASK & (1U << i)) != 0U)
- amu_group0_cnt_write(i, ctx->group0_cnts[i]);
+ /* Restore all group 0 counters */
+ for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
+ amu_group0_cnt_write(i, ctx->group0_cnts[i]);
+ }
+ /* Restore group 0 counter configuration */
+ write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+
+#if AMU_GROUP1_NR_COUNTERS
/* Restore group 1 counters */
- for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
- if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U)
+ for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
+ if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
amu_group1_cnt_write(i, ctx->group1_cnts[i]);
+ }
+ }
- /* Restore group 0/1 counter configuration */
- write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+ /* Restore group 1 counter configuration */
write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
+#endif
return (void *)0;
}
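Neither amu.c hunk shows the call sites for amu_context_save() and amu_context_restore(); that is what the lib/el3_runtime/pubsub_events.h include is for. The handlers are registered against the PSCI power-down events through the pubsub framework, along these lines (the standard TF-A registration pattern, shown as a sketch rather than a quotation of the file):

    SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
    SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);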
diff --git a/lib/extensions/mpam/mpam.c b/lib/extensions/mpam/mpam.c
index e794f013b..65601ddec 100644
--- a/lib/extensions/mpam/mpam.c
+++ b/lib/extensions/mpam/mpam.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,20 +7,16 @@
#include <stdbool.h>
#include <arch.h>
+#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/extensions/mpam.h>
-bool mpam_supported(void)
-{
- uint64_t features = read_id_aa64dfr0_el1() >> ID_AA64PFR0_MPAM_SHIFT;
-
- return ((features & ID_AA64PFR0_MPAM_MASK) != 0U);
-}
-
void mpam_enable(bool el2_unused)
{
- if (!mpam_supported())
+ /* Check if MPAM is implemented */
+ if (get_mpam_version() == 0U) {
return;
+ }
/*
* Enable MPAM, and disable trapping to EL3 when lower ELs access their
@@ -34,10 +30,11 @@ void mpam_enable(bool el2_unused)
* If EL2 is implemented and used, enable trapping to EL2.
*/
if (el2_unused) {
- write_mpam2_el2(0);
+ write_mpam2_el2(0ULL);
- if ((read_mpamidr_el1() & MPAMIDR_HAS_HCR_BIT) != 0U)
- write_mpamhcr_el2(0);
+ if ((read_mpamidr_el1() & MPAMIDR_HAS_HCR_BIT) != 0U) {
+ write_mpamhcr_el2(0ULL);
+ }
} else {
write_mpam2_el2(MPAM2_EL2_TRAPMPAM0EL1 |
MPAM2_EL2_TRAPMPAM1EL1);
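The function this patch deletes was subtly broken: it read ID_AA64DFR0_EL1 (the debug feature register) but masked it with ID_AA64PFR0_* constants. The replacement defers to get_mpam_version() from arch_features.h instead. A plausible shape for that helper, combining the MPAM major field of ID_AA64PFR0_EL1 with the MPAM_frac field of ID_AA64PFR1_EL1 (the exact composition and macro names are assumptions here):

    static inline unsigned int get_mpam_version(void)
    {
    	/* Zero only when both MPAM and MPAM_frac read as zero */
    	return (unsigned int)((((read_id_aa64pfr0_el1() >>
    		ID_AA64PFR0_MPAM_SHIFT) & ID_AA64PFR0_MPAM_MASK) << 4) |
    		((read_id_aa64pfr1_el1() >>
    		ID_AA64PFR1_MPAM_FRAC_SHIFT) & ID_AA64PFR1_MPAM_FRAC_MASK));
    }

Under this encoding, mpam_enable() bails out early only when no version of MPAM is implemented at all.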
diff --git a/lib/extensions/mtpmu/aarch32/mtpmu.S b/lib/extensions/mtpmu/aarch32/mtpmu.S
new file mode 100644
index 000000000..834cee3ea
--- /dev/null
+++ b/lib/extensions/mtpmu/aarch32/mtpmu.S
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .global mtpmu_disable
+
+/* -------------------------------------------------------------
+ * The functions in this file are called at entrypoint, before
+ * the CPU has decided whether this is a cold or a warm boot.
+ * Therefore there is no stack yet to rely on for a C function
+ * call.
+ * -------------------------------------------------------------
+ */
+
+/*
+ * bool mtpmu_supported(void)
+ *
+ * Return a boolean indicating whether FEAT_MTPMU is supported or not.
+ *
+ * Trash registers: r0.
+ */
+func mtpmu_supported
+ ldcopr r0, ID_DFR1
+ and r0, r0, #(ID_DFR1_MTPMU_MASK >> ID_DFR1_MTPMU_SHIFT)
+ cmp r0, #ID_DFR1_MTPMU_SUPPORTED
+ mov r0, #0
+ addeq r0, r0, #1
+ bx lr
+endfunc mtpmu_supported
+
+/*
+ * bool el_implemented(unsigned int el)
+ *
+ * Return a boolean indicating if the specified EL (2 or 3) is implemented.
+ *
+ * Trash registers: r0
+ */
+func el_implemented
+ cmp r0, #3
+ ldcopr r0, ID_PFR1
+ lsreq r0, r0, #ID_PFR1_SEC_SHIFT
+ lsrne r0, r0, #ID_PFR1_VIRTEXT_SHIFT
+ /*
+ * ID_PFR1_VIRTEXT_MASK is the same as ID_PFR1_SEC_MASK
+ * so use any one of them
+ */
+ and r0, r0, #ID_PFR1_VIRTEXT_MASK
+ cmp r0, #ID_PFR1_ELx_ENABLED
+ mov r0, #0
+ addeq r0, r0, #1
+ bx lr
+endfunc el_implemented
+
+/*
+ * void mtpmu_disable(void)
+ *
+ * Disable mtpmu feature if supported.
+ *
+ * Trash registers: r0, r1, r2
+ */
+func mtpmu_disable
+ mov r2, lr
+ bl mtpmu_supported
+ cmp r0, #0
+ bxeq r2 /* FEAT_MTPMU not supported */
+
+	/* FEAT_MTPMU supported */
+ mov r0, #3
+ bl el_implemented
+ cmp r0, #0
+ beq 1f
+
+ /* EL3 implemented */
+ ldcopr r0, SDCR
+ ldr r1, =SDCR_MTPME_BIT
+ bic r0, r0, r1
+ stcopr r0, SDCR
+
+ /*
+ * If EL3 is implemented, HDCR.MTPME is implemented as Res0 and
+ * FEAT_MTPMU is controlled only from EL3, so no need to perform
+ * any operations for EL2.
+ */
+ isb
+ bx r2
+1:
+ /* EL3 not implemented */
+ mov r0, #2
+ bl el_implemented
+ cmp r0, #0
+ bxeq r2 /* No EL2 or EL3 implemented */
+
+ /* EL2 implemented */
+ ldcopr r0, HDCR
+ ldr r1, =HDCR_MTPME_BIT
+	bic	r0, r0, r1
+ stcopr r0, HDCR
+ isb
+ bx r2
+endfunc mtpmu_disable
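For readers more comfortable in C, the control flow of mtpmu_disable above reduces to the sketch below. It is illustrative only: the real routine must stay in assembly because no stack exists at the entrypoint, and the read_sdcr()/write_hdcr() style helpers are the usual TF-A AArch32 accessor names, assumed here.

    static void mtpmu_disable_sketch(void)
    {
    	if (!mtpmu_supported())
    		return;

    	if (el_implemented(3)) {
    		/* EL3 owns the control; HDCR.MTPME is Res0 in that case */
    		write_sdcr(read_sdcr() & ~SDCR_MTPME_BIT);
    		isb();
    	} else if (el_implemented(2)) {
    		/* No EL3: clear HDCR.MTPME instead */
    		write_hdcr(read_hdcr() & ~HDCR_MTPME_BIT);
    		isb();
    	}
    }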
diff --git a/lib/extensions/mtpmu/aarch64/mtpmu.S b/lib/extensions/mtpmu/aarch64/mtpmu.S
new file mode 100644
index 000000000..0a1d57b9d
--- /dev/null
+++ b/lib/extensions/mtpmu/aarch64/mtpmu.S
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .global mtpmu_disable
+
+/* -------------------------------------------------------------
+ * The functions in this file are called at entrypoint, before
+ * the CPU has decided whether this is a cold or a warm boot.
+ * Therefore there is no stack yet to rely on for a C function
+ * call.
+ * -------------------------------------------------------------
+ */
+
+/*
+ * bool mtpmu_supported(void)
+ *
+ * Return a boolean indicating whether FEAT_MTPMU is supported or not.
+ *
+ * Trash registers: x0, x1
+ */
+func mtpmu_supported
+ mrs x0, id_aa64dfr0_el1
+ mov_imm x1, ID_AA64DFR0_MTPMU_MASK
+ and x0, x1, x0, LSR #ID_AA64DFR0_MTPMU_SHIFT
+ cmp x0, ID_AA64DFR0_MTPMU_SUPPORTED
+ cset x0, eq
+ ret
+endfunc mtpmu_supported
+
+/*
+ * bool el_implemented(unsigned int el_shift)
+ *
+ * Return a boolean indicating if the specified EL is implemented.
+ * The EL is represented as the bitmask shift on id_aa64pfr0_el1 register.
+ *
+ * Trash registers: x0, x1
+ */
+func el_implemented
+ mrs x1, id_aa64pfr0_el1
+ lsr x1, x1, x0
+ cmp x1, #ID_AA64PFR0_ELX_MASK
+ cset x0, eq
+ ret
+endfunc el_implemented
+
+/*
+ * void mtpmu_disable(void)
+ *
+ * Disable mtpmu feature if supported.
+ *
+ * Trash registers: x0, x1, x10, x30
+ */
+func mtpmu_disable
+ mov x10, x30
+ bl mtpmu_supported
+ cbz x0, exit_disable
+
+	/* FEAT_MTPMU supported */
+ mov_imm x0, ID_AA64PFR0_EL3_SHIFT
+ bl el_implemented
+ cbz x0, 1f
+
+ /* EL3 implemented */
+ mrs x0, mdcr_el3
+ mov_imm x1, MDCR_MTPME_BIT
+ bic x0, x0, x1
+ msr mdcr_el3, x0
+
+ /*
+ * If EL3 is implemented, MDCR_EL2.MTPME is implemented as Res0 and
+ * FEAT_MTPMU is controlled only from EL3, so no need to perform
+ * any operations for EL2.
+ */
+ isb
+exit_disable:
+ ret x10
+1:
+ /* EL3 not implemented */
+ mov_imm x0, ID_AA64PFR0_EL2_SHIFT
+ bl el_implemented
+ cbz x0, exit_disable
+
+ /* EL2 implemented */
+ mrs x0, mdcr_el2
+ mov_imm x1, MDCR_EL2_MTPME
+ bic x0, x0, x1
+ msr mdcr_el2, x0
+ isb
+ ret x10
+endfunc mtpmu_disable
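The AArch64 probe is a single ID register field check; written in C it would look like the sketch below (the assembly version exists only because the entrypoint has no stack for a C call). Note that the asm masks after shifting (and x0, x1, x0, LSR #...), so ID_AA64DFR0_MTPMU_MASK is a low-order mask, exactly as used here:

    static bool mtpmu_supported_sketch(void)
    {
    	/* ID_AA64DFR0_EL1.MTPMU, bits[51:48]: 0b0001 means implemented */
    	uint64_t field = (read_id_aa64dfr0_el1() >> ID_AA64DFR0_MTPMU_SHIFT) &
    		ID_AA64DFR0_MTPMU_MASK;

    	return field == ID_AA64DFR0_MTPMU_SUPPORTED;
    }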
diff --git a/lib/extensions/ras/ras_common.c b/lib/extensions/ras/ras_common.c
index 64a48524b..36f9a95b6 100644
--- a/lib/extensions/ras/ras_common.c
+++ b/lib/extensions/ras/ras_common.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -18,6 +19,47 @@
# error Platform must define RAS priority value
#endif
+/*
+ * Function to convert architecturally-defined primary error code SERR,
+ * bits[7:0] from ERR<n>STATUS to its corresponding error string.
+ */
+const char *ras_serr_to_str(unsigned int serr)
+{
+ const char *str[ERROR_STATUS_NUM_SERR] = {
+ "No error",
+ "IMPLEMENTATION DEFINED error",
+ "Data value from (non-associative) internal memory",
+ "IMPLEMENTATION DEFINED pin",
+ "Assertion failure",
+ "Error detected on internal data path",
+ "Data value from associative memory",
+ "Address/control value from associative memory",
+ "Data value from a TLB",
+ "Address/control value from a TLB",
+ "Data value from producer",
+ "Address/control value from producer",
+ "Data value from (non-associative) external memory",
+ "Illegal address (software fault)",
+ "Illegal access (software fault)",
+ "Illegal state (software fault)",
+ "Internal data register",
+ "Internal control register",
+ "Error response from slave",
+ "External timeout",
+ "Internal timeout",
+ "Deferred error from slave not supported at master"
+ };
+
+ /*
+ * All other values are reserved. Reserved values might be defined
+ * in a future version of the architecture.
+ */
+ if (serr >= ERROR_STATUS_NUM_SERR)
+ return "unknown SERR";
+
+ return str[serr];
+}
+
/* Handler that receives External Aborts on RAS-capable systems */
int ras_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
void *handle, uint64_t flags)
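As a usage illustration, here is a hypothetical diagnostic that decodes the primary error code of the currently selected standard error record with the new helper (read_erxstatus_el1() is the TF-A accessor for ERR<n>STATUS; the surrounding function is a sketch, not part of this patch):

    static void ras_log_selected_record(void)
    {
    	uint64_t status = read_erxstatus_el1();
    	/* SERR is the architecturally-defined primary error code, bits[7:0] */
    	unsigned int serr = (unsigned int)(status & 0xffU);

    	ERROR("ERR<n>STATUS: 0x%llx (%s)\n", status, ras_serr_to_str(serr));
    }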
diff --git a/lib/extensions/spe/spe.c b/lib/extensions/spe/spe.c
index 78876c66b..f0d734223 100644
--- a/lib/extensions/spe/spe.c
+++ b/lib/extensions/spe/spe.c
@@ -25,7 +25,7 @@ bool spe_supported(void)
uint64_t features;
features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
- return (features & ID_AA64DFR0_PMS_MASK) == 1U;
+ return (features & ID_AA64DFR0_PMS_MASK) > 0ULL;
}
void spe_enable(bool el2_unused)
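The relaxed comparison reflects that ID_AA64DFR0_EL1.PMSVer is a version field, not a flag: FEAT_SPE reports 1, FEAT_SPEv1p1 reports 2, and later revisions report larger values. A sketch of what the predicate now accepts (field values per the Arm architecture; the variable names are illustrative):

    uint64_t pmsver = (read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT) &
    	ID_AA64DFR0_PMS_MASK;

    /* 0: no SPE; 1: FEAT_SPE; 2: FEAT_SPEv1p1; any non-zero value passes */
    bool supported = (pmsver > 0ULL);

With the old == 1U test, a core implementing a newer SPE revision would wrongly have been treated as lacking SPE altogether.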