fix(amu): limit virtual offset register access to NS world

Previously, the SCR_EL3.AMVOFFEN bit was set for all contexts. This
behavior is incorrect because it allows the secure world to access the
virtual offset registers when it should not be able to. This patch sets
AMVOFFEN only for the non-secure world.

Signed-off-by: John Powell <john.powell@arm.com>
Change-Id: I2c61fe0a8a0092df089f1cb2c0d8a45c8c8ad0d3
John Powell 2022-03-29 00:25:59 -05:00
parent 942b039221
commit a4c394561a
3 changed files with 21 additions and 13 deletions
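
Illustrative sketch (not part of the patch): the snippet below models the behavior described in the commit message using a plain uint64_t in place of the saved SCR_EL3 context register. The SCR_AMVOFFEN_SHIFT/SCR_AMVOFFEN_BIT encoding and the read-modify-write pattern are taken from the diff below; model_ctx_t, model_set_amvoffen() and main() are hypothetical names chosen only to make the example standalone and compilable, and do not exist in Trusted Firmware-A.

#include <stdint.h>
#include <stdio.h>

/* Field encoding, as introduced in arch.h by this patch. */
#define SCR_AMVOFFEN_SHIFT	35
#define SCR_AMVOFFEN_BIT	(UINT64_C(1) << SCR_AMVOFFEN_SHIFT)

/* Hypothetical stand-in for cpu_context_t: only the saved SCR_EL3 image. */
typedef struct {
	uint64_t scr_el3;
} model_ctx_t;

/* Mirrors the read-modify-write done by ctx_write_scr_el3_amvoffen(). */
static void model_set_amvoffen(model_ctx_t *ctx, uint64_t amvoffen)
{
	uint64_t value = ctx->scr_el3;

	value &= ~SCR_AMVOFFEN_BIT;
	value |= (amvoffen << SCR_AMVOFFEN_SHIFT) & SCR_AMVOFFEN_BIT;

	ctx->scr_el3 = value;
}

int main(void)
{
	model_ctx_t secure_ctx = { 0 };
	model_ctx_t ns_ctx = { 0 };

	/*
	 * After this patch only the non-secure context has AMVOFFEN set;
	 * the secure context's SCR_EL3 image keeps the bit clear, so the
	 * secure world can no longer use the virtual offset registers.
	 */
	model_set_amvoffen(&ns_ctx, 1U);

	printf("secure  AMVOFFEN = %u\n",
	       (unsigned int)((secure_ctx.scr_el3 >> SCR_AMVOFFEN_SHIFT) & 1U));
	printf("non-sec AMVOFFEN = %u\n",
	       (unsigned int)((ns_ctx.scr_el3 >> SCR_AMVOFFEN_SHIFT) & 1U));

	return 0;
}

Built with a standard C toolchain, this prints AMVOFFEN = 0 for the secure context and 1 for the non-secure one, matching the intent of the fix.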


@@ -483,7 +483,8 @@
 #define SCR_HXEn_BIT		(UL(1) << 38)
 #define SCR_ENTP2_SHIFT		U(41)
 #define SCR_ENTP2_BIT		(UL(1) << SCR_ENTP2_SHIFT)
-#define SCR_AMVOFFEN_BIT	(UL(1) << 35)
+#define SCR_AMVOFFEN_SHIFT	U(35)
+#define SCR_AMVOFFEN_BIT	(UL(1) << SCR_AMVOFFEN_SHIFT)
 #define SCR_TWEDEn_BIT		(UL(1) << 29)
 #define SCR_ECVEN_BIT		(UL(1) << 28)
 #define SCR_FGTEN_BIT		(UL(1) << 27)


@@ -282,16 +282,6 @@ static void setup_context_common(cpu_context_t *ctx, const entry_point_info_t *e
 		}
 	}
 
-	/*
-	 * FEAT_AMUv1p1 virtual offset registers are only accessible from EL3
-	 * and EL2, when clear, this bit traps accesses from EL2 so we set it
-	 * to 1 when EL2 is present.
-	 */
-	if (is_armv8_6_feat_amuv1p1_present() &&
-		(el_implemented(2) != EL_IMPL_NONE)) {
-		scr_el3 |= SCR_AMVOFFEN_BIT;
-	}
-
 	/*
 	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
 	 * execution state setting all fields rather than relying on the hw.


@@ -75,7 +75,7 @@ static inline __unused void write_cptr_el2_tam(uint64_t value)
 			((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
 }
 
-static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
+static inline __unused void ctx_write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
 {
 	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);
@@ -85,6 +85,16 @@ static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
 	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
 }
 
+static inline __unused void ctx_write_scr_el3_amvoffen(cpu_context_t *ctx, uint64_t amvoffen)
+{
+	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
+
+	value &= ~SCR_AMVOFFEN_BIT;
+	value |= (amvoffen << SCR_AMVOFFEN_SHIFT) & SCR_AMVOFFEN_BIT;
+
+	write_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3, value);
+}
+
 static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
 {
 	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
@@ -226,7 +236,7 @@ void amu_enable(bool el2_unused, cpu_context_t *ctx)
 	 * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
 	 * the Activity Monitor registers do not trap to EL3.
 	 */
-	write_cptr_el3_tam(ctx, 0U);
+	ctx_write_cptr_el3_tam(ctx, 0U);
 
 	/*
 	 * Retrieve the number of architected counters. All of these counters
@@ -285,6 +295,13 @@ void amu_enable(bool el2_unused, cpu_context_t *ctx)
 		 * used.
 		 */
 		write_hcr_el2_amvoffen(0U);
+	} else {
+		/*
+		 * Virtual offset registers are only accessible from EL3
+		 * and EL2, when clear, this bit traps accesses from EL2
+		 * so we set it to 1 when EL2 is present.
+		 */
+		ctx_write_scr_el3_amvoffen(ctx, 1U);
 	}
 
 #if AMU_RESTRICT_COUNTERS