Merge pull request #1397 from dp-arm/dp/cortex-a76

Add support for Cortex-A76 and Cortex-Ares
Dimitris Papastamos 2018-06-08 14:01:38 +01:00 committed by GitHub
commit 608529aa24
9 changed files with 557 additions and 2 deletions


@@ -0,0 +1,29 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __CORTEX_A76_H__
#define __CORTEX_A76_H__
/* Cortex-A76 MIDR for revision 0 */
#define CORTEX_A76_MIDR 0x410fd0b0
/*******************************************************************************
* CPU Extended Control register specific definitions.
******************************************************************************/
#define CORTEX_A76_CPUPWRCTLR_EL1 S3_0_C15_C2_7
#define CORTEX_A76_CPUECTLR_EL1 S3_0_C15_C1_4
/*******************************************************************************
* CPU Auxiliary Control register specific definitions.
******************************************************************************/
#define CORTEX_A76_CPUACTLR2_EL1 S3_0_C15_C1_1
#define CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE (1 << 16)
/* Definitions of register field mask in CORTEX_A76_CPUPWRCTLR_EL1 */
#define CORTEX_A76_CORE_PWRDN_EN_MASK 0x1
#endif /* __CORTEX_A76_H__ */
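
For reference, the MIDR value above follows the architectural MIDR_EL1 layout (implementer, variant, architecture, part number, revision). A minimal C sketch of the decode, purely illustrative and not part of this patch:

/* Illustrative decode of CORTEX_A76_MIDR (0x410fd0b0); the field
 * offsets are the architectural MIDR_EL1 layout. */
#include <stdint.h>
#include <stdio.h>

#define CORTEX_A76_MIDR 0x410fd0b0u

int main(void)
{
	uint32_t midr = CORTEX_A76_MIDR;
	unsigned int implementer = (midr >> 24) & 0xffu;  /* 0x41: Arm */
	unsigned int variant     = (midr >> 20) & 0xfu;   /* 0x0: r0 */
	unsigned int partnum     = (midr >> 4)  & 0xfffu; /* 0xd0b: Cortex-A76 */
	unsigned int revision    = midr & 0xfu;           /* 0x0: p0 */

	printf("impl=0x%x variant=%u part=0x%x rev=%u\n",
	       implementer, variant, partnum, revision);
	return 0;
}

CPU drivers match on the implementer and part-number fields and extract variant/revision separately for errata checks, which is why the header only needs the revision-0 value.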


@@ -0,0 +1,33 @@
/*
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __CORTEX_ARES_H__
#define __CORTEX_ARES_H__
/* Cortex-ARES MIDR for revision 0 */
#define CORTEX_ARES_MIDR 0x410fd0c0
/*******************************************************************************
* CPU Extended Control register specific definitions.
******************************************************************************/
#define CORTEX_ARES_CPUPWRCTLR_EL1 S3_0_C15_C2_7
#define CORTEX_ARES_CPUECTLR_EL1 S3_0_C15_C1_4
/* Definitions of register field mask in CORTEX_ARES_CPUPWRCTLR_EL1 */
#define CORTEX_ARES_CORE_PWRDN_EN_MASK 0x1
#define CORTEX_ARES_ACTLR_AMEN_BIT (U(1) << 4)
#define CORTEX_ARES_AMU_NR_COUNTERS U(5)
#define CORTEX_ARES_AMU_GROUP0_MASK U(0x1f)
/* Instruction patching registers */
#define CPUPSELR_EL3 S3_6_C15_C8_0
#define CPUPCR_EL3 S3_6_C15_C8_1
#define CPUPOR_EL3 S3_6_C15_C8_2
#define CPUPMR_EL3 S3_6_C15_C8_3
#endif /* __CORTEX_ARES_H__ */
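
The S<op0>_<op1>_C<n>_C<m>_<op2> spellings above are the generic system-register operand syntax, used because these are implementation-defined registers that assemblers may not know by name. As a side note on the two AMU constants, the group-0 mask is simply one bit per counter; a small, purely illustrative compile-time check (not part of this patch):

/* Not part of this patch: checks that the group-0 counter mask
 * covers exactly CORTEX_ARES_AMU_NR_COUNTERS counters. */
#define U(x) (x##u)
#define CORTEX_ARES_AMU_NR_COUNTERS	U(5)
#define CORTEX_ARES_AMU_GROUP0_MASK	U(0x1f)

_Static_assert(CORTEX_ARES_AMU_GROUP0_MASK ==
	       ((U(1) << CORTEX_ARES_AMU_NR_COUNTERS) - U(1)),
	       "AMU group-0 mask must cover all group-0 counters");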


@@ -271,6 +271,7 @@ typedef struct cpu_context {
#endif
#define get_sysregs_ctx(h) (&((cpu_context_t *) h)->sysregs_ctx)
#define get_gpregs_ctx(h) (&((cpu_context_t *) h)->gpregs_ctx)
#define get_cve_2018_3639_ctx(h) (&((cpu_context_t *) h)->cve_2018_3639_ctx)
/*
* Compile time assertions related to the 'cpu_context' structure to


@@ -0,0 +1,290 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <arm_arch_svc.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <cortex_a76.h>
#include <cpu_macros.S>
#include <plat_macros.S>
#if !DYNAMIC_WORKAROUND_CVE_2018_3639
#error Cortex A76 requires DYNAMIC_WORKAROUND_CVE_2018_3639=1
#endif
#define ESR_EL3_A64_SMC0 0x5e000000
#define ESR_EL3_A32_SMC0 0x4e000000
/*
* This macro applies the mitigation for CVE-2018-3639.
* It implements a fast path where `SMCCC_ARCH_WORKAROUND_2`
* SMC calls from a lower EL running in AArch32 or AArch64
* will go through the fast path and return early.
*
* The macro saves x2-x3 to the context. In the fast path
* x0-x3 registers do not need to be restored as the calling
* context will have saved them.
*/
.macro apply_cve_2018_3639_wa _is_sync_exception _esr_el3_val
stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
.if \_is_sync_exception
/*
* Ensure SMC is coming from A64/A32 state on #0
* with W0 = SMCCC_ARCH_WORKAROUND_2
*
* This sequence evaluates as:
* (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
* allowing use of a single branch operation
*/
orr w2, wzr, #SMCCC_ARCH_WORKAROUND_2
cmp x0, x2
mrs x3, esr_el3
mov_imm w2, \_esr_el3_val
ccmp w2, w3, #0, eq
/*
* Static predictor will predict a fall-through, optimizing
* the `SMCCC_ARCH_WORKAROUND_2` fast path.
*/
bne 1f
/*
* The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
* fast path.
*/
cmp x1, xzr /* enable/disable check */
/*
* When the calling context wants mitigation disabled,
* we program the mitigation disable function in the
* CPU context, which gets invoked on subsequent exits from
* EL3 via the `el3_exit` function. Otherwise NULL is
* programmed in the CPU context, which results in the caller
* inheriting the EL3 mitigation state (enabled) on subsequent
* `el3_exit`.
*/
mov x0, xzr
adr x1, cortex_a76_disable_wa_cve_2018_3639
csel x1, x1, x0, eq
str x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
mrs x2, CORTEX_A76_CPUACTLR2_EL1
orr x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
bic x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
csel x3, x3, x1, eq
msr CORTEX_A76_CPUACTLR2_EL1, x3
eret /* ERET implies ISB */
.endif
1:
/*
* Always enable the CVE-2018-3639 (variant 4) mitigation during EL3
* execution. This is not required for the fast path above because it
* does not perform any memory loads.
*/
mrs x2, CORTEX_A76_CPUACTLR2_EL1
orr x2, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
msr CORTEX_A76_CPUACTLR2_EL1, x2
isb
/*
* The caller may have passed arguments to EL3 via x2-x3.
* Restore these registers from the context before jumping to the
* main runtime vector table entry.
*/
ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
.endm
vector_base cortex_a76_wa_cve_2018_3639_a76_vbar
/* ---------------------------------------------------------------------
* Current EL with SP_EL0 : 0x0 - 0x200
* ---------------------------------------------------------------------
*/
vector_entry cortex_a76_sync_exception_sp_el0
b sync_exception_sp_el0
check_vector_size cortex_a76_sync_exception_sp_el0
vector_entry cortex_a76_irq_sp_el0
b irq_sp_el0
check_vector_size cortex_a76_irq_sp_el0
vector_entry cortex_a76_fiq_sp_el0
b fiq_sp_el0
check_vector_size cortex_a76_fiq_sp_el0
vector_entry cortex_a76_serror_sp_el0
b serror_sp_el0
check_vector_size cortex_a76_serror_sp_el0
/* ---------------------------------------------------------------------
* Current EL with SP_ELx: 0x200 - 0x400
* ---------------------------------------------------------------------
*/
vector_entry cortex_a76_sync_exception_sp_elx
b sync_exception_sp_elx
check_vector_size cortex_a76_sync_exception_sp_elx
vector_entry cortex_a76_irq_sp_elx
b irq_sp_elx
check_vector_size cortex_a76_irq_sp_elx
vector_entry cortex_a76_fiq_sp_elx
b fiq_sp_elx
check_vector_size cortex_a76_fiq_sp_elx
vector_entry cortex_a76_serror_sp_elx
b serror_sp_elx
check_vector_size cortex_a76_serror_sp_elx
/* ---------------------------------------------------------------------
* Lower EL using AArch64 : 0x400 - 0x600
* ---------------------------------------------------------------------
*/
vector_entry cortex_a76_sync_exception_aarch64
apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
b sync_exception_aarch64
check_vector_size cortex_a76_sync_exception_aarch64
vector_entry cortex_a76_irq_aarch64
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
b irq_aarch64
check_vector_size cortex_a76_irq_aarch64
vector_entry cortex_a76_fiq_aarch64
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
b fiq_aarch64
check_vector_size cortex_a76_fiq_aarch64
vector_entry cortex_a76_serror_aarch64
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
b serror_aarch64
check_vector_size cortex_a76_serror_aarch64
/* ---------------------------------------------------------------------
* Lower EL using AArch32 : 0x600 - 0x800
* ---------------------------------------------------------------------
*/
vector_entry cortex_a76_sync_exception_aarch32
apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
b sync_exception_aarch32
check_vector_size cortex_a76_sync_exception_aarch32
vector_entry cortex_a76_irq_aarch32
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
b irq_aarch32
check_vector_size cortex_a76_irq_aarch32
vector_entry cortex_a76_fiq_aarch32
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
b fiq_aarch32
check_vector_size cortex_a76_fiq_aarch32
vector_entry cortex_a76_serror_aarch32
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
b serror_aarch32
check_vector_size cortex_a76_serror_aarch32
func check_errata_cve_2018_3639
#if WORKAROUND_CVE_2018_3639
mov x0, #ERRATA_APPLIES
#else
mov x0, #ERRATA_MISSING
#endif
ret
endfunc check_errata_cve_2018_3639
func cortex_a76_disable_wa_cve_2018_3639
mrs x0, CORTEX_A76_CPUACTLR2_EL1
bic x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
msr CORTEX_A76_CPUACTLR2_EL1, x0
isb
ret
endfunc cortex_a76_disable_wa_cve_2018_3639
func cortex_a76_reset_func
#if WORKAROUND_CVE_2018_3639
mrs x0, CORTEX_A76_CPUACTLR2_EL1
orr x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
msr CORTEX_A76_CPUACTLR2_EL1, x0
isb
#endif
#if IMAGE_BL31 && WORKAROUND_CVE_2018_3639
/*
* The Cortex-A76 generic vectors are overwritten to use the vectors
* defined above. This is required in order to apply mitigation
* against CVE-2018-3639 on exception entry from lower ELs.
*/
adr x0, cortex_a76_wa_cve_2018_3639_a76_vbar
msr vbar_el3, x0
isb
#endif
ret
endfunc cortex_a76_reset_func
/* ---------------------------------------------
* HW will do the cache maintenance while powering down
* ---------------------------------------------
*/
func cortex_a76_core_pwr_dwn
/* ---------------------------------------------
* Enable CPU power down bit in power control register
* ---------------------------------------------
*/
mrs x0, CORTEX_A76_CPUPWRCTLR_EL1
orr x0, x0, #CORTEX_A76_CORE_PWRDN_EN_MASK
msr CORTEX_A76_CPUPWRCTLR_EL1, x0
isb
ret
endfunc cortex_a76_core_pwr_dwn
#if REPORT_ERRATA
/*
* Errata printing function for Cortex-A76. Must follow AAPCS.
*/
func cortex_a76_errata_report
stp x8, x30, [sp, #-16]!
bl cpu_get_rev_var
mov x8, x0
/*
* Report all errata. The revision-variant information is passed to
* checking functions of each errata.
*/
report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
ldp x8, x30, [sp], #16
ret
endfunc cortex_a76_errata_report
#endif
/* ---------------------------------------------
* This function provides cortex_a76 specific
* register information for crash reporting.
* It needs to return with x6 pointing to
* a list of register names in ascii and
* x8 - x15 having values of registers to be
* reported.
* ---------------------------------------------
*/
.section .rodata.cortex_a76_regs, "aS"
cortex_a76_regs: /* The ascii list of register names to be reported */
.asciz "cpuectlr_el1", ""
func cortex_a76_cpu_reg_dump
adr x6, cortex_a76_regs
mrs x8, CORTEX_A76_CPUECTLR_EL1
ret
endfunc cortex_a76_cpu_reg_dump
declare_cpu_ops_wa cortex_a76, CORTEX_A76_MIDR, \
cortex_a76_reset_func, \
CPU_NO_EXTRA1_FUNC, \
cortex_a76_disable_wa_cve_2018_3639, \
cortex_a76_core_pwr_dwn
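
To make the ccmp/csel idiom in apply_cve_2018_3639_wa easier to follow, here is a hypothetical C rendering of the decisions it encodes; the names and types are illustrative and the assembly above remains the actual implementation:

#include <stdbool.h>
#include <stdint.h>

/* The orr/cmp/mrs/ccmp/bne sequence: fall through to the fast path
 * only for an SMC #0 from a lower EL with w0 == SMCCC_ARCH_WORKAROUND_2,
 * otherwise branch to label 1 (the slow path). */
static bool take_fast_path(uint32_t w0, uint32_t smccc_arch_workaround_2,
			   uint32_t esr_el3, uint32_t esr_el3_smc0)
{
	return (w0 == smccc_arch_workaround_2) && (esr_el3 == esr_el3_smc0);
}

/* The cmp/csel pair on x1: a zero argument means the caller wants the
 * mitigation disabled, so the disable hook is recorded in the CPU
 * context (invoked later from el3_exit) and the CPUACTLR2_EL1
 * DISABLE_LOAD_PASS_STORE bit is cleared before ERET; a non-zero
 * argument records NULL and leaves the bit set, so the caller returns
 * with the mitigation enabled. */
static uint64_t context_hook_for(uint64_t x1, uint64_t disable_fn_addr)
{
	return (x1 == 0) ? disable_fn_addr : 0;
}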


@@ -0,0 +1,136 @@
/*
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
#include <cortex_ares.h>
#include <cpuamu.h>
#include <cpu_macros.S>
/* --------------------------------------------------
* Errata Workaround for Cortex-Ares Erratum 1043202.
* This applies to revision r0p0 and r1p0 of Cortex-Ares.
* Inputs:
* x0: variant[4:7] and revision[0:3] of current cpu.
* Shall clobber: x0-x17
* --------------------------------------------------
*/
func errata_ares_1043202_wa
/* Compare x0 against revision r1p0 */
mov x17, x30
bl check_errata_1043202
cbz x0, 1f
/* Apply instruction patching sequence */
ldr x0, =0x0
msr CPUPSELR_EL3, x0
ldr x0, =0xF3BF8F2F
msr CPUPOR_EL3, x0
ldr x0, =0xFFFFFFFF
msr CPUPMR_EL3, x0
ldr x0, =0x800200071
msr CPUPCR_EL3, x0
isb
1:
ret x17
endfunc errata_ares_1043202_wa
func check_errata_1043202
/* Applies to r0p0 and r1p0 */
mov x1, #0x10
b cpu_rev_var_ls
endfunc check_errata_1043202
func cortex_ares_reset_func
mov x19, x30
bl cpu_get_rev_var
mov x18, x0
#if ERRATA_ARES_1043202
mov x0, x18
bl errata_ares_1043202_wa
#endif
#if ENABLE_AMU
/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
mrs x0, actlr_el3
orr x0, x0, #CORTEX_ARES_ACTLR_AMEN_BIT
msr actlr_el3, x0
isb
/* Make sure accesses from EL0/EL1 are not trapped to EL2 */
mrs x0, actlr_el2
orr x0, x0, #CORTEX_ARES_ACTLR_AMEN_BIT
msr actlr_el2, x0
isb
/* Enable group0 counters */
mov x0, #CORTEX_ARES_AMU_GROUP0_MASK
msr CPUAMCNTENSET_EL0, x0
isb
#endif
ret x19
endfunc cortex_ares_reset_func
/* ---------------------------------------------
* HW will do the cache maintenance while powering down
* ---------------------------------------------
*/
func cortex_ares_core_pwr_dwn
/* ---------------------------------------------
* Enable CPU power down bit in power control register
* ---------------------------------------------
*/
mrs x0, CORTEX_ARES_CPUPWRCTLR_EL1
orr x0, x0, #CORTEX_ARES_CORE_PWRDN_EN_MASK
msr CORTEX_ARES_CPUPWRCTLR_EL1, x0
isb
ret
endfunc cortex_ares_core_pwr_dwn
#if REPORT_ERRATA
/*
* Errata printing function for Cortex-Ares. Must follow AAPCS.
*/
func cortex_ares_errata_report
stp x8, x30, [sp, #-16]!
bl cpu_get_rev_var
mov x8, x0
/*
* Report all errata. The revision-variant information is passed to
* checking functions of each errata.
*/
report_errata ERRATA_ARES_1043202, cortex_ares, 1043202
ldp x8, x30, [sp], #16
ret
endfunc cortex_ares_errata_report
#endif
/* ---------------------------------------------
* This function provides cortex_ares specific
* register information for crash reporting.
* It needs to return with x6 pointing to
* a list of register names in ascii and
* x8 - x15 having values of registers to be
* reported.
* ---------------------------------------------
*/
.section .rodata.cortex_ares_regs, "aS"
cortex_ares_regs: /* The ascii list of register names to be reported */
.asciz "cpuectlr_el1", ""
func cortex_ares_cpu_reg_dump
adr x6, cortex_ares_regs
mrs x8, CORTEX_ARES_CPUECTLR_EL1
ret
endfunc cortex_ares_cpu_reg_dump
declare_cpu_ops cortex_ares, CORTEX_ARES_MIDR, \
cortex_ares_reset_func, \
cortex_ares_core_pwr_dwn
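
check_errata_1043202 above passes 0x10 to cpu_rev_var_ls, i.e. the packed (variant << 4) | revision value must be at or below r1p0. A hedged C sketch of that comparison (helper and constant values are assumptions, not taken from this patch):

#include <stdbool.h>

/* Illustrative values; the real definitions live in the cpu framework. */
#define ERRATA_APPLIES		1
#define ERRATA_NOT_APPLIES	0

/* Assumed behaviour of cpu_rev_var_ls: the erratum applies when the
 * packed revision-variant value is less than or equal to the limit. */
static int cpu_rev_var_ls_sketch(unsigned int rev_var, unsigned int limit)
{
	return (rev_var <= limit) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES;
}

/* r0p0 packs to 0x00 and r1p0 packs to 0x10, so a limit of 0x10
 * covers exactly the affected revisions. */
static bool ares_1043202_applies(unsigned int rev_var)
{
	return cpu_rev_var_ls_sketch(rev_var, 0x10) == ERRATA_APPLIES;
}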


@@ -0,0 +1,26 @@
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <cortex_ares.h>
#include <cpuamu.h>
#include <pubsub_events.h>
static void *cortex_ares_context_save(const void *arg)
{
if (midr_match(CORTEX_ARES_MIDR) != 0)
cpuamu_context_save(CORTEX_ARES_AMU_NR_COUNTERS);
return 0;
}
static void *cortex_ares_context_restore(const void *arg)
{
if (midr_match(CORTEX_ARES_MIDR) != 0)
cpuamu_context_restore(CORTEX_ARES_AMU_NR_COUNTERS);
return 0;
}
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, cortex_ares_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, cortex_ares_context_restore);
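
The midr_match() guard above keeps the AMU save/restore from running on other cores in a heterogeneous system. A sketch of such a check, under the assumption that it compares the implementer and part-number fields of MIDR_EL1 while ignoring variant and revision:

#include <stdbool.h>
#include <stdint.h>

/* Assumption: only implementer [31:24] and part number [15:4] are
 * compared; variant and revision are ignored. */
#define MIDR_IMPL_PN_MASK	0xff00fff0u

static bool midr_match_sketch(uint32_t midr_el1, uint32_t cpu_midr)
{
	return (midr_el1 & MIDR_IMPL_PN_MASK) ==
	       (cpu_midr & MIDR_IMPL_PN_MASK);
}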


@@ -1,5 +1,5 @@
#
# Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
# Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -119,6 +119,10 @@ ERRATA_A57_859972 ?=0
# only to revision <= r0p3 of the Cortex A72 cpu.
ERRATA_A72_859971 ?=0
# Flag to apply T32 CLREX workaround during reset. This erratum applies
# only to r0p0 and r1p0 of the Ares cpu.
ERRATA_ARES_1043202 ?=1
# Process ERRATA_A53_826319 flag
$(eval $(call assert_boolean,ERRATA_A53_826319))
$(eval $(call add_define,ERRATA_A53_826319))
@@ -179,6 +183,10 @@ $(eval $(call add_define,ERRATA_A57_859972))
$(eval $(call assert_boolean,ERRATA_A72_859971))
$(eval $(call add_define,ERRATA_A72_859971))
# Process ERRATA_ARES_1043202 flag
$(eval $(call assert_boolean,ERRATA_ARES_1043202))
$(eval $(call add_define,ERRATA_ARES_1043202))
# Errata build flags
ifneq (${ERRATA_A53_843419},0)
TF_LDFLAGS_aarch64 += --fix-cortex-a53-843419
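
Like the existing errata switches in this file, the new flag can be overridden from the build command line, for example make PLAT=fvp ERRATA_ARES_1043202=0 all (illustrative invocation). assert_boolean rejects any value other than 0 or 1, and add_define turns the flag into the preprocessor symbol that the #if ERRATA_ARES_1043202 guard in cortex_ares.S tests.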


@@ -114,7 +114,9 @@ FVP_CPU_LIBS += lib/cpus/aarch64/cortex_a35.S \
lib/cpus/aarch64/cortex_a57.S \
lib/cpus/aarch64/cortex_a72.S \
lib/cpus/aarch64/cortex_a73.S \
lib/cpus/aarch64/cortex_a75.S
lib/cpus/aarch64/cortex_a75.S \
lib/cpus/aarch64/cortex_a76.S \
lib/cpus/aarch64/cortex_ares.S
else
FVP_CPU_LIBS += lib/cpus/aarch32/cortex_a32.S
endif
@@ -204,8 +206,12 @@ ENABLE_PLAT_COMPAT := 0
# Enable Activity Monitor Unit extensions by default
ENABLE_AMU := 1
# Enable dynamic mitigation support by default
DYNAMIC_WORKAROUND_CVE_2018_3639 := 1
ifeq (${ENABLE_AMU},1)
BL31_SOURCES += lib/cpus/aarch64/cortex_a75_pubsub.c \
lib/cpus/aarch64/cortex_ares_pubsub.c \
lib/cpus/aarch64/cpuamu.c \
lib/cpus/aarch64/cpuamu_helpers.S
endif


@@ -39,6 +39,11 @@ typedef struct sdei_dispatch_context {
/* Exception state registers */
uint64_t elr_el3;
uint64_t spsr_el3;
#if DYNAMIC_WORKAROUND_CVE_2018_3639
/* CVE-2018-3639 mitigation state */
uint64_t disable_cve_2018_3639;
#endif
} sdei_dispatch_context_t;
/* Per-CPU SDEI state data */
@@ -170,6 +175,18 @@ static void save_event_ctx(sdei_ev_map_t *map, void *tgt_ctx, int sec_state,
memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);
#if DYNAMIC_WORKAROUND_CVE_2018_3639
cve_2018_3639_t *tgt_cve_2018_3639;
tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);
/* Save CVE-2018-3639 mitigation state */
disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
CTX_CVE_2018_3639_DISABLE);
/* Force SDEI handler to execute with mitigation enabled by default */
write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
#endif
}
static void restore_event_ctx(sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
@@ -188,6 +205,15 @@ static void restore_event_ctx(sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);
#if DYNAMIC_WORKAROUND_CVE_2018_3639
cve_2018_3639_t *tgt_cve_2018_3639;
tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);
/* Restore CVE-2018-3639 mitigation state */
write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
disp_ctx->disable_cve_2018_3639);
#endif
}
static void save_secure_context(void)