Enable v8.6 AMU enhancements (FEAT_AMUv1p1)

ARMv8.6 adds virtual offset registers to support virtualization of the
event counters in EL1 and EL0.  This patch enables support for this
feature in EL3 firmware.

Signed-off-by: John Powell <john.powell@arm.com>
Change-Id: I7ee1f3d9f554930bf5ef6f3d492e932e6d95b217
Author: johpow01, 2020-10-02 13:41:11 -05:00; committed by John
parent 8909fa9bbf
commit 873d4241e3
15 changed files with 536 additions and 92 deletions
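
For orientation before the diffs: with FEAT_AMUv1p1, when HCR_EL2.AMVOFFEN is
set, an AMU counter read from EL0 or EL1 returns the physical count minus the
corresponding AMEVCNTVOFF<n>_EL2 register, which is how a hypervisor can hide
time a vCPU spent descheduled. A minimal C sketch of that semantic (the helper
name is hypothetical and not part of this patch):

#include <stdint.h>

/* Value a guest observes when reading an AMU counter with offsets enabled. */
static uint64_t guest_visible_count(uint64_t physical_cnt, uint64_t voffset)
{
	return physical_cnt - voffset;
}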


@@ -895,6 +895,7 @@ $(eval $(call assert_booleans,\
DYN_DISABLE_AUTH \
EL3_EXCEPTION_HANDLING \
ENABLE_AMU \
AMU_RESTRICT_COUNTERS \
ENABLE_ASSERTIONS \
ENABLE_MPAM_FOR_LOWER_ELS \
ENABLE_PIE \
@@ -984,6 +985,7 @@ $(eval $(call add_defines,\
DECRYPTION_SUPPORT_${DECRYPTION_SUPPORT} \
DISABLE_MTPMU \
ENABLE_AMU \
AMU_RESTRICT_COUNTERS \
ENABLE_ASSERTIONS \
ENABLE_BTI \
ENABLE_MPAM_FOR_LOWER_ELS \


@@ -22,6 +22,10 @@ Common build options
directory containing the SP source, relative to the ``bl32/``; the directory
is expected to contain a makefile called ``<aarch32_sp-value>.mk``.
- ``AMU_RESTRICT_COUNTERS``: When set to 1, system register reads of the group 1
counters return zero at all but the highest implemented exception level. Reads
from the memory mapped view are unaffected by this control; a short sketch of
the behaviour follows this excerpt.
- ``ARCH`` : Choose the target build architecture for TF-A. It can take either
``aarch64`` or ``aarch32`` as values. By default, it is defined to
``aarch64``.
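
A minimal sketch of the behaviour this option compiles in (illustrative only;
the real logic is the AMCR.CG1RZ programming added to amu_enable() further
down):

#include <stdbool.h>
#include <stdint.h>

/* Modelled effect of AMCR.CG1RZ on a group 1 system register read. */
static uint64_t group1_sysreg_read(uint64_t count, bool cg1rz,
				   bool at_highest_el)
{
	if (cg1rz && !at_highest_el) {
		return 0ULL;	/* reads-as-zero below the highest EL */
	}
	return count;		/* the memory-mapped view is never zeroed */
}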


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -116,6 +116,9 @@
#define ID_PFR0_AMU_SHIFT U(20)
#define ID_PFR0_AMU_LENGTH U(4)
#define ID_PFR0_AMU_MASK U(0xf)
#define ID_PFR0_AMU_NOT_SUPPORTED U(0x0)
#define ID_PFR0_AMU_V1 U(0x1)
#define ID_PFR0_AMU_V1P1 U(0x2)
#define ID_PFR0_DIT_SHIFT U(24)
#define ID_PFR0_DIT_LENGTH U(4)
@@ -653,7 +656,7 @@
#define PAR_ADDR_MASK (BIT_64(40) - ULL(1)) /* 40-bits-wide page address */
/*******************************************************************************
* Definitions for system register interface to AMU for ARMv8.4 onwards
* Definitions for system register interface to AMU for FEAT_AMUv1
******************************************************************************/
#define AMCR p15, 0, c13, c2, 0
#define AMCFGR p15, 0, c13, c2, 1
@@ -712,6 +715,9 @@
#define AMEVTYPER1E p15, 0, c13, c15, 6
#define AMEVTYPER1F p15, 0, c13, c15, 7
/* AMCR definitions */
#define AMCR_CG1RZ_BIT (ULL(1) << 17)
/* AMCFGR definitions */
#define AMCFGR_NCG_SHIFT U(28)
#define AMCFGR_NCG_MASK U(0xf)


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -303,6 +303,7 @@ DEFINE_COPROCR_RW_FUNCS(dacr, DACR)
/* Coproc registers for 32bit AMU support */
DEFINE_COPROCR_READ_FUNC(amcfgr, AMCFGR)
DEFINE_COPROCR_READ_FUNC(amcgcr, AMCGCR)
DEFINE_COPROCR_RW_FUNCS(amcr, AMCR)
DEFINE_COPROCR_RW_FUNCS(amcntenset0, AMCNTENSET0)
DEFINE_COPROCR_RW_FUNCS(amcntenset1, AMCNTENSET1)


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -161,6 +161,9 @@
#define ID_AA64PFR0_EL3_SHIFT U(12)
#define ID_AA64PFR0_AMU_SHIFT U(44)
#define ID_AA64PFR0_AMU_MASK ULL(0xf)
#define ID_AA64PFR0_AMU_NOT_SUPPORTED U(0x0)
#define ID_AA64PFR0_AMU_V1 U(0x1)
#define ID_AA64PFR0_AMU_V1P1 U(0x2)
#define ID_AA64PFR0_ELX_MASK ULL(0xf)
#define ID_AA64PFR0_GIC_SHIFT U(24)
#define ID_AA64PFR0_GIC_WIDTH U(4)
@@ -406,9 +409,10 @@
#define SCR_RES1_BITS ((U(1) << 4) | (U(1) << 5))
#define SCR_TWEDEL_SHIFT U(30)
#define SCR_TWEDEL_MASK ULL(0xf)
#define SCR_AMVOFFEN_BIT (UL(1) << 35)
#define SCR_TWEDEn_BIT (UL(1) << 29)
#define SCR_ECVEN_BIT (UL(1) << 28)
#define SCR_FGTEN_BIT (UL(1) << 27)
#define SCR_ATA_BIT (UL(1) << 26)
#define SCR_FIEN_BIT (UL(1) << 21)
#define SCR_EEL2_BIT (UL(1) << 18)
@@ -479,6 +483,7 @@
#define VTTBR_BADDR_SHIFT U(0)
/* HCR definitions */
#define HCR_AMVOFFEN_BIT (ULL(1) << 51)
#define HCR_API_BIT (ULL(1) << 41)
#define HCR_APK_BIT (ULL(1) << 40)
#define HCR_E2H_BIT (ULL(1) << 34)
@@ -721,13 +726,13 @@
#define MAX_CACHE_LINE_SIZE U(0x800) /* 2KB */
/* Physical timer control register bit fields shifts and masks */
#define CNTP_CTL_ENABLE_SHIFT U(0)
#define CNTP_CTL_IMASK_SHIFT U(1)
#define CNTP_CTL_ISTATUS_SHIFT U(2)
#define CNTP_CTL_ENABLE_MASK U(1)
#define CNTP_CTL_IMASK_MASK U(1)
#define CNTP_CTL_ISTATUS_MASK U(1)
/* Physical timer control macros */
#define CNTP_CTL_ENABLE_BIT (U(1) << CNTP_CTL_ENABLE_SHIFT)
@@ -913,7 +918,7 @@
#define MPAM3_EL3 S3_6_C10_C5_0
/*******************************************************************************
* Definitions for system register interface to AMU for ARMv8.4 onwards
* Definitions for system register interface to AMU for FEAT_AMUv1
******************************************************************************/
#define AMCR_EL0 S3_3_C13_C2_0
#define AMCFGR_EL0 S3_3_C13_C2_1
@@ -991,6 +996,50 @@
#define MPAMIDR_HAS_HCR_BIT (ULL(1) << 17)
/*******************************************************************************
* Definitions for system register interface to AMU for FEAT_AMUv1p1
******************************************************************************/
/* Definition for register defining which virtual offsets are implemented. */
#define AMCG1IDR_EL0 S3_3_C13_C2_6
#define AMCG1IDR_CTR_MASK ULL(0xffff)
#define AMCG1IDR_CTR_SHIFT U(0)
#define AMCG1IDR_VOFF_MASK ULL(0xffff)
#define AMCG1IDR_VOFF_SHIFT U(16)
/* New bit added to AMCR_EL0 */
#define AMCR_CG1RZ_BIT (ULL(0x1) << 17)
/*
* Definitions for virtual offset registers for architected activity monitor
* event counters.
* AMEVCNTVOFF01_EL2 intentionally left undefined, as it does not exist.
*/
#define AMEVCNTVOFF00_EL2 S3_4_C13_C8_0
#define AMEVCNTVOFF02_EL2 S3_4_C13_C8_2
#define AMEVCNTVOFF03_EL2 S3_4_C13_C8_3
/*
* Definitions for virtual offset registers for auxiliary activity monitor event
* counters.
*/
#define AMEVCNTVOFF10_EL2 S3_4_C13_C10_0
#define AMEVCNTVOFF11_EL2 S3_4_C13_C10_1
#define AMEVCNTVOFF12_EL2 S3_4_C13_C10_2
#define AMEVCNTVOFF13_EL2 S3_4_C13_C10_3
#define AMEVCNTVOFF14_EL2 S3_4_C13_C10_4
#define AMEVCNTVOFF15_EL2 S3_4_C13_C10_5
#define AMEVCNTVOFF16_EL2 S3_4_C13_C10_6
#define AMEVCNTVOFF17_EL2 S3_4_C13_C10_7
#define AMEVCNTVOFF18_EL2 S3_4_C13_C11_0
#define AMEVCNTVOFF19_EL2 S3_4_C13_C11_1
#define AMEVCNTVOFF1A_EL2 S3_4_C13_C11_2
#define AMEVCNTVOFF1B_EL2 S3_4_C13_C11_3
#define AMEVCNTVOFF1C_EL2 S3_4_C13_C11_4
#define AMEVCNTVOFF1D_EL2 S3_4_C13_C11_5
#define AMEVCNTVOFF1E_EL2 S3_4_C13_C11_6
#define AMEVCNTVOFF1F_EL2 S3_4_C13_C11_7
/*******************************************************************************
* RAS system registers
******************************************************************************/


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2020, Arm Limited. All rights reserved.
* Copyright (c) 2019-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -82,6 +82,12 @@ static inline bool is_armv8_5_rng_present(void)
ID_AA64ISAR0_RNDR_MASK);
}
static inline bool is_armv8_6_feat_amuv1p1_present(void)
{
return (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
ID_AA64PFR0_AMU_MASK) >= ID_AA64PFR0_AMU_V1P1);
}
/*
* Return MPAM version:
*


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -485,6 +485,8 @@ DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sgi1r, ICC_SGI1R)
DEFINE_RENAME_SYSREG_READ_FUNC(amcfgr_el0, AMCFGR_EL0)
DEFINE_RENAME_SYSREG_READ_FUNC(amcgcr_el0, AMCGCR_EL0)
DEFINE_RENAME_SYSREG_READ_FUNC(amcg1idr_el0, AMCG1IDR_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcr_el0, AMCR_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr0_el0, AMCNTENCLR0_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset0_el0, AMCNTENSET0_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr1_el0, AMCNTENCLR1_EL0)


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -66,19 +66,31 @@ CASSERT(AMU_GROUP1_COUNTERS_MASK <= 0xffff, invalid_amu_group1_counters_mask);
struct amu_ctx {
uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
#if __aarch64__
/* Architected event counter 1 does not have an offset register. */
uint64_t group0_voffsets[AMU_GROUP0_NR_COUNTERS-1];
#endif
#if AMU_GROUP1_NR_COUNTERS
uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
#if __aarch64__
uint64_t group1_voffsets[AMU_GROUP1_NR_COUNTERS];
#endif
#endif
};
bool amu_supported(void);
unsigned int amu_get_version(void);
void amu_enable(bool el2_unused);
/* Group 0 configuration helpers */
uint64_t amu_group0_cnt_read(unsigned int idx);
void amu_group0_cnt_write(unsigned int idx, uint64_t val);
#if __aarch64__
uint64_t amu_group0_voffset_read(unsigned int idx);
void amu_group0_voffset_write(unsigned int idx, uint64_t val);
#endif
#if AMU_GROUP1_NR_COUNTERS
bool amu_group1_supported(void);
@@ -86,6 +98,12 @@ bool amu_group1_supported(void);
uint64_t amu_group1_cnt_read(unsigned int idx);
void amu_group1_cnt_write(unsigned int idx, uint64_t val);
void amu_group1_set_evtype(unsigned int idx, unsigned int val);
#if __aarch64__
uint64_t amu_group1_voffset_read(unsigned int idx);
void amu_group1_voffset_write(unsigned int idx, uint64_t val);
#endif
#endif
#endif /* AMU_H */
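
A hedged usage sketch of the new group 0 virtual-offset accessors (the caller
is hypothetical; the code must run at EL3 on a core implementing FEAT_AMUv1p1,
otherwise the assertions in the implementation fire):

#include <stdint.h>

/* Prototypes as declared in the header above. */
uint64_t amu_group0_voffset_read(unsigned int idx);
void amu_group0_voffset_write(unsigned int idx, uint64_t val);

static void voffset_demo(void)
{
	uint64_t offset;

	/* Group 0 indices are 0, 2 and 3; AMEVCNTVOFF01_EL2 does not exist. */
	amu_group0_voffset_write(2U, 1000ULL);
	offset = amu_group0_voffset_read(2U);
	(void)offset;
}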


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -16,4 +16,12 @@ uint64_t amu_group1_cnt_read_internal(unsigned int idx);
void amu_group1_cnt_write_internal(unsigned int idx, uint64_t val);
void amu_group1_set_evtype_internal(unsigned int idx, unsigned int val);
#if __aarch64__
uint64_t amu_group0_voffset_read_internal(unsigned int idx);
void amu_group0_voffset_write_internal(unsigned int idx, uint64_t val);
uint64_t amu_group1_voffset_read_internal(unsigned int idx);
void amu_group1_voffset_write_internal(unsigned int idx, uint64_t val);
#endif
#endif /* AMU_PRIVATE_H */


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -216,6 +216,16 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
scr_el3 |= SCR_EEL2_BIT;
}
/*
* FEAT_AMUv1p1 virtual offset registers are only accessible from EL3
* and EL2. When this bit is clear, accesses from EL2 are trapped, so we
* set it to 1 when EL2 is present.
*/
if (is_armv8_6_feat_amuv1p1_present() &&
(el_implemented(2) != EL_IMPL_NONE)) {
scr_el3 |= SCR_AMVOFFEN_BIT;
}
/*
* Initialise SCTLR_EL1 to the reset value corresponding to the target
* execution state, setting all fields rather than relying on the hw.


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -18,13 +18,17 @@
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
/* Check if AMUv1 for Armv8.4 or 8.6 is implemented */
bool amu_supported(void)
/*
* Get AMU version value from pfr0.
* Return values
* ID_PFR0_AMU_V1: FEAT_AMUv1 supported (introduced in ARM v8.4)
* ID_PFR0_AMU_V1P1: FEAT_AMUv1p1 supported (introduced in ARM v8.6)
* ID_PFR0_AMU_NOT_SUPPORTED: not supported
*/
unsigned int amu_get_version(void)
{
uint32_t features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
features &= ID_PFR0_AMU_MASK;
return ((features == 1U) || (features == 2U));
return (unsigned int)(read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
ID_PFR0_AMU_MASK;
}
#if AMU_GROUP1_NR_COUNTERS
@@ -43,7 +47,7 @@ bool amu_group1_supported(void)
*/
void amu_enable(bool el2_unused)
{
if (!amu_supported()) {
if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
return;
}
@@ -87,12 +91,31 @@ void amu_enable(bool el2_unused)
/* Enable group 1 counters */
write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
#endif
/* Initialize FEAT_AMUv1p1 features if present. */
if (amu_get_version() < ID_PFR0_AMU_V1P1) {
return;
}
#if AMU_RESTRICT_COUNTERS
/*
* FEAT_AMUv1p1 adds a register field to restrict access to group 1
* counters at all but the highest implemented EL. This is controlled
* with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
* register reads at lower ELs return zero. Reads from the memory
* mapped view are unaffected.
*/
VERBOSE("AMU group 1 counter access restricted.\n");
write_amcr(read_amcr() | AMCR_CG1RZ_BIT);
#else
write_amcr(read_amcr() & ~AMCR_CG1RZ_BIT);
#endif
}
/* Read the group 0 counter identified by the given `idx`. */
uint64_t amu_group0_cnt_read(unsigned int idx)
{
assert(amu_supported());
assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
assert(idx < AMU_GROUP0_NR_COUNTERS);
return amu_group0_cnt_read_internal(idx);
@@ -101,7 +124,7 @@ uint64_t amu_group0_cnt_read(unsigned int idx)
/* Write the group 0 counter identified by the given `idx` with `val` */
void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_supported());
assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
assert(idx < AMU_GROUP0_NR_COUNTERS);
amu_group0_cnt_write_internal(idx, val);
@@ -112,7 +135,7 @@ void amu_group0_cnt_write(unsigned int idx, uint64_t val)
/* Read the group 1 counter identified by the given `idx` */
uint64_t amu_group1_cnt_read(unsigned int idx)
{
assert(amu_supported());
assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
@@ -122,7 +145,7 @@ uint64_t amu_group1_cnt_read(unsigned int idx)
/* Write the group 1 counter identified by the given `idx` with `val` */
void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_supported());
assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
@@ -136,7 +159,7 @@ void amu_group1_cnt_write(unsigned int idx, uint64_t val)
*/
void amu_group1_set_evtype(unsigned int idx, unsigned int val)
{
assert(amu_supported());
assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
@@ -150,7 +173,7 @@ static void *amu_context_save(const void *arg)
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
unsigned int i;
if (!amu_supported()) {
if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
return (void *)-1;
}
@@ -197,7 +220,7 @@ static void *amu_context_restore(const void *arg)
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
unsigned int i;
if (!amu_supported()) {
if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
return (void *)-1;
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -75,13 +75,13 @@ func amu_group0_cnt_write_internal
1:
stcopr16 r2, r3, AMEVCNTR00 /* index 0 */
bx lr
stcopr16 r2, r3, AMEVCNTR01 /* index 1 */
bx lr
stcopr16 r2, r3, AMEVCNTR02 /* index 2 */
bx lr
stcopr16 r2, r3, AMEVCNTR03 /* index 3 */
bx lr
endfunc amu_group0_cnt_write_internal
/*
@@ -169,37 +169,37 @@ func amu_group1_cnt_write_internal
bx r1
1:
stcopr16 r2, r3, AMEVCNTR10 /* index 0 */
bx lr
stcopr16 r2, r3, AMEVCNTR11 /* index 1 */
bx lr
stcopr16 r2, r3, AMEVCNTR12 /* index 2 */
bx lr
stcopr16 r2, r3, AMEVCNTR13 /* index 3 */
bx lr
stcopr16 r2, r3, AMEVCNTR14 /* index 4 */
bx lr
stcopr16 r2, r3, AMEVCNTR15 /* index 5 */
bx lr
stcopr16 r2, r3, AMEVCNTR16 /* index 6 */
bx lr
stcopr16 r2, r3, AMEVCNTR17 /* index 7 */
bx lr
stcopr16 r2, r3, AMEVCNTR18 /* index 8 */
bx lr
stcopr16 r2, r3, AMEVCNTR19 /* index 9 */
bx lr
stcopr16 r2, r3, AMEVCNTR1A /* index 10 */
bx lr
stcopr16 r2, r3, AMEVCNTR1B /* index 11 */
bx lr
stcopr16 r2, r3, AMEVCNTR1C /* index 12 */
bx lr
stcopr16 r2, r3, AMEVCNTR1D /* index 13 */
bx lr
stcopr16 r2, r3, AMEVCNTR1E /* index 14 */
bx lr
stcopr16 r2, r3, AMEVCNTR1F /* index 15 */
bx lr
endfunc amu_group1_cnt_write_internal
@@ -234,36 +234,36 @@ func amu_group1_set_evtype_internal
bx r2
1:
stcopr r1, AMEVTYPER10 /* index 0 */
bx lr
stcopr r1, AMEVTYPER11 /* index 1 */
bx lr
stcopr r1, AMEVTYPER12 /* index 2 */
bx lr
stcopr r1, AMEVTYPER13 /* index 3 */
bx lr
stcopr r1, AMEVTYPER14 /* index 4 */
bx lr
stcopr r1, AMEVTYPER15 /* index 5 */
bx lr
stcopr r1, AMEVTYPER16 /* index 6 */
bx lr
stcopr r1, AMEVTYPER17 /* index 7 */
bx lr
stcopr r1, AMEVTYPER18 /* index 8 */
bx lr
stcopr r1, AMEVTYPER19 /* index 9 */
bx lr
stcopr r1, AMEVTYPER1A /* index 10 */
bx lr
stcopr r1, AMEVTYPER1B /* index 11 */
bx lr
stcopr r1, AMEVTYPER1C /* index 12 */
bx lr
stcopr r1, AMEVTYPER1D /* index 13 */
bx lr
stcopr r1, AMEVTYPER1E /* index 14 */
bx lr
stcopr r1, AMEVTYPER1F /* index 15 */
bx lr
endfunc amu_group1_set_evtype_internal


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,6 +8,7 @@
#include <stdbool.h>
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/el3_runtime/pubsub_events.h>
@@ -18,13 +19,17 @@
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
/* Check if AMUv1 for Armv8.4 or 8.6 is implemented */
bool amu_supported(void)
/*
* Get AMU version value from aa64pfr0.
* Return values
* ID_AA64PFR0_AMU_V1: FEAT_AMUv1 supported (introduced in ARM v8.4)
* ID_AA64PFR0_AMU_V1P1: FEAT_AMUv1p1 supported (introduced in ARM v8.6)
* ID_AA64PFR0_AMU_NOT_SUPPORTED: not supported
*/
unsigned int amu_get_version(void)
{
uint64_t features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
features &= ID_AA64PFR0_AMU_MASK;
return ((features == 1U) || (features == 2U));
return (unsigned int)(read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
ID_AA64PFR0_AMU_MASK;
}
#if AMU_GROUP1_NR_COUNTERS
@@ -44,8 +49,9 @@ bool amu_group1_supported(void)
void amu_enable(bool el2_unused)
{
uint64_t v;
unsigned int amu_version = amu_get_version();
if (!amu_supported()) {
if (amu_version == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
return;
}
@@ -96,12 +102,36 @@ void amu_enable(bool el2_unused)
/* Enable group 1 counters */
write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif
/* Initialize FEAT_AMUv1p1 features if present. */
if (amu_version < ID_AA64PFR0_AMU_V1P1) {
return;
}
if (el2_unused) {
/* Make sure virtual offsets are disabled if EL2 not used. */
write_hcr_el2(read_hcr_el2() & ~HCR_AMVOFFEN_BIT);
}
#if AMU_RESTRICT_COUNTERS
/*
* FEAT_AMUv1p1 adds a register field to restrict access to group 1
* counters at all but the highest implemented EL. This is controlled
* with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
* register reads at lower ELs return zero. Reads from the memory
* mapped view are unaffected.
*/
VERBOSE("AMU group 1 counter access restricted.\n");
write_amcr_el0(read_amcr_el0() | AMCR_CG1RZ_BIT);
#else
write_amcr_el0(read_amcr_el0() & ~AMCR_CG1RZ_BIT);
#endif
}
/* Read the group 0 counter identified by the given `idx`. */
uint64_t amu_group0_cnt_read(unsigned int idx)
{
assert(amu_supported());
assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
assert(idx < AMU_GROUP0_NR_COUNTERS);
return amu_group0_cnt_read_internal(idx);
@@ -110,18 +140,49 @@ uint64_t amu_group0_cnt_read(unsigned int idx)
/* Write the group 0 counter identified by the given `idx` with `val` */
void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_supported());
assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
assert(idx < AMU_GROUP0_NR_COUNTERS);
amu_group0_cnt_write_internal(idx, val);
isb();
}
/*
* Read the group 0 offset register for a given index. Index must be 0, 2,
* or 3; the offset register for index 1 does not exist.
*
* Using this function requires FEAT_AMUv1p1 support.
*/
uint64_t amu_group0_voffset_read(unsigned int idx)
{
assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
assert(idx < AMU_GROUP0_NR_COUNTERS);
assert(idx != 1U);
return amu_group0_voffset_read_internal(idx);
}
/*
* Write the group 0 offset register for a given index. Index must be 0, 2,
* or 3; the offset register for index 1 does not exist.
*
* Using this function requires FEAT_AMUv1p1 support.
*/
void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
assert(idx < AMU_GROUP0_NR_COUNTERS);
assert(idx != 1U);
amu_group0_voffset_write_internal(idx, val);
isb();
}
#if AMU_GROUP1_NR_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
uint64_t amu_group1_cnt_read(unsigned int idx)
{
assert(amu_supported());
assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
@@ -129,9 +190,9 @@ uint64_t amu_group1_cnt_read(unsigned int idx)
}
/* Write the group 1 counter identified by the given `idx` with `val` */
void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_supported());
assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
@@ -139,13 +200,46 @@ void amu_group1_cnt_write(unsigned int idx, uint64_t val)
isb();
}
/*
* Read the group 1 offset register for a given index.
*
* Using this function requires FEAT_AMUv1p1 support.
*/
uint64_t amu_group1_voffset_read(unsigned int idx)
{
assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
(1ULL << idx)) != 0ULL);
return amu_group1_voffset_read_internal(idx);
}
/*
* Write the group 1 offset register for a given index.
*
* Using this function requires FEAT_AMUv1p1 support.
*/
void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
(1ULL << idx)) != 0ULL);
amu_group1_voffset_write_internal(idx, val);
isb();
}
/*
* Program the event type register for the given `idx` with
* the event number `val`
*/
void amu_group1_set_evtype(unsigned int idx, unsigned int val)
{
assert(amu_supported());
assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
@@ -159,7 +253,7 @@ static void *amu_context_save(const void *arg)
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
unsigned int i;
if (!amu_supported()) {
if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
return (void *)-1;
}
@@ -190,13 +284,37 @@ static void *amu_context_save(const void *arg)
ctx->group0_cnts[i] = amu_group0_cnt_read(i);
}
/* Save group 0 virtual offsets if supported and enabled. */
if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
/* Not using a loop because the count is fixed and index 1 does not exist. */
ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U);
ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U);
ctx->group0_voffsets[2U] = amu_group0_voffset_read(3U);
}
#if AMU_GROUP1_NR_COUNTERS
/* Save group 1 counters */
for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
ctx->group1_cnts[i] = amu_group1_cnt_read(i);
}
}
/* Save group 1 virtual offsets if supported and enabled. */
if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
u_register_t amcg1idr = read_amcg1idr_el0() >>
AMCG1IDR_VOFF_SHIFT;
amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK;
for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
if (((amcg1idr >> i) & 1ULL) != 0ULL) {
ctx->group1_voffsets[i] =
amu_group1_voffset_read(i);
}
}
}
#endif
return (void *)0;
}
@@ -206,7 +324,7 @@ static void *amu_context_restore(const void *arg)
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
unsigned int i;
if (!amu_supported()) {
if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
return (void *)-1;
}
@@ -227,17 +345,41 @@ static void *amu_context_restore(const void *arg)
amu_group0_cnt_write(i, ctx->group0_cnts[i]);
}
/* Restore group 0 virtual offsets if supported and enabled. */
if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
/* Not using a loop because the count is fixed and index 1 does not exist. */
amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]);
amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]);
amu_group0_voffset_write(3U, ctx->group0_voffsets[2U]);
}
/* Restore group 0 counter configuration */
write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
#if AMU_GROUP1_NR_COUNTERS
/* Restore group 1 counters */
for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
amu_group1_cnt_write(i, ctx->group1_cnts[i]);
}
}
/* Restore group 1 virtual offsets if supported and enabled. */
if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
u_register_t amcg1idr = read_amcg1idr_el0() >>
AMCG1IDR_VOFF_SHIFT;
amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK;
for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
if (((amcg1idr >> i) & 1ULL) != 0ULL) {
amu_group1_voffset_write(i,
ctx->group1_voffsets[i]);
}
}
}
/* Restore group 1 counter configuration */
write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -14,6 +14,12 @@
.globl amu_group1_cnt_write_internal
.globl amu_group1_set_evtype_internal
/* FEAT_AMUv1p1 virtualisation offset register functions */
.globl amu_group0_voffset_read_internal
.globl amu_group0_voffset_write_internal
.globl amu_group1_voffset_read_internal
.globl amu_group1_voffset_write_internal
/*
* uint64_t amu_group0_cnt_read_internal(int idx);
*
@@ -211,3 +217,169 @@ func amu_group1_set_evtype_internal
write AMEVTYPER1E_EL0 /* index 14 */
write AMEVTYPER1F_EL0 /* index 15 */
endfunc amu_group1_set_evtype_internal
/*
* Accessor functions for virtual offset registers added with FEAT_AMUv1p1
*/
/*
* uint64_t amu_group0_voffset_read_internal(int idx);
*
* Given `idx`, read the corresponding AMU virtual offset register
* and return it in `x0`.
*/
func amu_group0_voffset_read_internal
adr x1, 1f
#if ENABLE_ASSERTIONS
/*
* It can be dangerous to call this function with an
* out of bounds index. Ensure `idx` is valid.
*/
tst x0, #~3
ASM_ASSERT(eq)
/* Make sure idx != 1 since AMEVCNTVOFF01_EL2 does not exist */
cmp x0, #1
ASM_ASSERT(ne)
#endif
/*
* Given `idx` calculate address of mrs/ret instruction pair
* in the table below.
*/
add x1, x1, x0, lsl #3 /* each mrs/ret sequence is 8 bytes */
#if ENABLE_BTI
add x1, x1, x0, lsl #2 /* + "bti j" instruction */
#endif
br x1
1: read AMEVCNTVOFF00_EL2 /* index 0 */
.skip 8 /* AMEVCNTVOFF01_EL2 does not exist */
#if ENABLE_BTI
.skip 4
#endif
read AMEVCNTVOFF02_EL2 /* index 2 */
read AMEVCNTVOFF03_EL2 /* index 3 */
endfunc amu_group0_voffset_read_internal
/*
* void amu_group0_voffset_write_internal(int idx, uint64_t val);
*
* Given `idx`, write `val` to the corresponding AMU virtual offset register.
*/
func amu_group0_voffset_write_internal
adr x2, 1f
#if ENABLE_ASSERTIONS
/*
* It can be dangerous to call this function with an
* out of bounds index. Ensure `idx` is valid.
*/
tst x0, #~3
ASM_ASSERT(eq)
/* Make sure idx != 1 since AMEVCNTVOFF01_EL2 does not exist */
cmp x0, #1
ASM_ASSERT(ne)
#endif
/*
* Given `idx` calculate address of mrs/ret instruction pair
* in the table below.
*/
add x2, x2, x0, lsl #3 /* each msr/ret sequence is 8 bytes */
#if ENABLE_BTI
add x2, x2, x0, lsl #2 /* + "bti j" instruction */
#endif
br x2
1: write AMEVCNTVOFF00_EL2 /* index 0 */
.skip 8 /* AMEVCNTVOFF01_EL2 does not exist */
#if ENABLE_BTI
.skip 4
#endif
write AMEVCNTVOFF02_EL2 /* index 2 */
write AMEVCNTVOFF03_EL2 /* index 3 */
endfunc amu_group0_voffset_write_internal
/*
* uint64_t amu_group1_voffset_read_internal(int idx);
*
* Given `idx`, read the corresponding AMU virtual offset register
* and return it in `x0`.
*/
func amu_group1_voffset_read_internal
adr x1, 1f
#if ENABLE_ASSERTIONS
/*
* It can be dangerous to call this function with an
* out of bounds index. Ensure `idx` is valid.
*/
tst x0, #~0xF
ASM_ASSERT(eq)
#endif
/*
* Given `idx` calculate address of mrs/ret instruction pair
* in the table below.
*/
add x1, x1, x0, lsl #3 /* each mrs/ret sequence is 8 bytes */
#if ENABLE_BTI
add x1, x1, x0, lsl #2 /* + "bti j" instruction */
#endif
br x1
1: read AMEVCNTVOFF10_EL2 /* index 0 */
read AMEVCNTVOFF11_EL2 /* index 1 */
read AMEVCNTVOFF12_EL2 /* index 2 */
read AMEVCNTVOFF13_EL2 /* index 3 */
read AMEVCNTVOFF14_EL2 /* index 4 */
read AMEVCNTVOFF15_EL2 /* index 5 */
read AMEVCNTVOFF16_EL2 /* index 6 */
read AMEVCNTVOFF17_EL2 /* index 7 */
read AMEVCNTVOFF18_EL2 /* index 8 */
read AMEVCNTVOFF19_EL2 /* index 9 */
read AMEVCNTVOFF1A_EL2 /* index 10 */
read AMEVCNTVOFF1B_EL2 /* index 11 */
read AMEVCNTVOFF1C_EL2 /* index 12 */
read AMEVCNTVOFF1D_EL2 /* index 13 */
read AMEVCNTVOFF1E_EL2 /* index 14 */
read AMEVCNTVOFF1F_EL2 /* index 15 */
endfunc amu_group1_voffset_read_internal
/*
* void amu_group1_voffset_write_internal(int idx, uint64_t val);
*
* Given `idx`, write `val` to the corresponding AMU virtual offset register.
*/
func amu_group1_voffset_write_internal
adr x2, 1f
#if ENABLE_ASSERTIONS
/*
* It can be dangerous to call this function with an
* out of bounds index. Ensure `idx` is valid.
*/
tst x0, #~0xF
ASM_ASSERT(eq)
#endif
/*
* Given `idx` calculate address of mrs/ret instruction pair
* in the table below.
*/
add x2, x2, x0, lsl #3 /* each msr/ret sequence is 8 bytes */
#if ENABLE_BTI
add x2, x2, x0, lsl #2 /* + "bti j" instruction */
#endif
br x2
1: write AMEVCNTVOFF10_EL2 /* index 0 */
write AMEVCNTVOFF11_EL2 /* index 1 */
write AMEVCNTVOFF12_EL2 /* index 2 */
write AMEVCNTVOFF13_EL2 /* index 3 */
write AMEVCNTVOFF14_EL2 /* index 4 */
write AMEVCNTVOFF15_EL2 /* index 5 */
write AMEVCNTVOFF16_EL2 /* index 6 */
write AMEVCNTVOFF17_EL2 /* index 7 */
write AMEVCNTVOFF18_EL2 /* index 8 */
write AMEVCNTVOFF19_EL2 /* index 9 */
write AMEVCNTVOFF1A_EL2 /* index 10 */
write AMEVCNTVOFF1B_EL2 /* index 11 */
write AMEVCNTVOFF1C_EL2 /* index 12 */
write AMEVCNTVOFF1D_EL2 /* index 13 */
write AMEVCNTVOFF1E_EL2 /* index 14 */
write AMEVCNTVOFF1F_EL2 /* index 15 */
endfunc amu_group1_voffset_write_internal


@@ -291,9 +291,10 @@ endif
# Include Memory Tagging Extension registers in cpu context. This must be set
# to 1 if the platform wants to use this feature in the Secure world and MTE is
# enabled at ELX.
CTX_INCLUDE_MTE_REGS := 0
ENABLE_AMU := 0
AMU_RESTRICT_COUNTERS := 0
# By default, enable Scalable Vector Extension if implemented for Non-secure
# lower ELs