Merge "fix(security): workaround for CVE-2022-23960" into integration

This commit is contained in:
Madhukar Pappireddy 2022-03-12 01:39:37 +01:00 committed by TrustedFirmware Code Review
commit 29ba22e8ed
18 changed files with 378 additions and 38 deletions

View File

@ -29,6 +29,10 @@ vulnerability workarounds should be applied at runtime.
platform contains at least 1 CPU that requires dynamic mitigation.
Defaults to 0.
- ``WORKAROUND_CVE_2022_23960``: Enables mitigation for `CVE-2022-23960`_.
This build option should be set to 1 if the target platform contains at
least 1 CPU that requires this mitigation. Defaults to 0.
.. _arm_cpu_macros_errata_workarounds:
CPU Errata Workarounds
@ -585,6 +589,7 @@ architecture that can be enabled by the platform as desired.
.. _CVE-2017-5715: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-5715
.. _CVE-2018-3639: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-3639
.. _CVE-2022-23960: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23960
.. _Cortex-A53 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm048406/index.html
.. _Cortex-A57 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm049219/index.html
.. _Cortex-A72 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm012079/index.html

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Arm Limited. All rights reserved.
* Copyright (c) 2021-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -9,6 +9,9 @@
#define CORTEX_A710_MIDR U(0x410FD470)
/* Cortex-A710 loop count for CVE-2022-23960 mitigation */
#define CORTEX_A710_BHB_LOOP_COUNT U(32)
/*******************************************************************************
* CPU Extended Control register specific definitions
******************************************************************************/

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -12,6 +12,9 @@
/* Cortex-A77 MIDR */
#define CORTEX_A77_MIDR U(0x410FD0D0)
/* Cortex-A77 loop count for CVE-2022-23960 mitigation */
#define CORTEX_A77_BHB_LOOP_COUNT U(24)
/*******************************************************************************
* CPU Extended Control register specific definitions.
******************************************************************************/

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2021, ARM Limited. All rights reserved.
* Copyright (c) 2019-2022, ARM Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -11,6 +11,9 @@
#define CORTEX_A78_MIDR U(0x410FD410)
/* Cortex-A78 loop count for CVE-2022-23960 mitigation */
#define CORTEX_A78_BHB_LOOP_COUNT U(32)
/*******************************************************************************
* CPU Extended Control register specific definitions.
******************************************************************************/

View File

@ -9,6 +9,9 @@
#define CORTEX_X2_MIDR U(0x410FD480)
/* Cortex-X2 loop count for CVE-2022-23960 mitigation */
#define CORTEX_X2_BHB_LOOP_COUNT U(32)
/*******************************************************************************
* CPU Extended Control register specific definitions
******************************************************************************/

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -10,58 +10,61 @@
#include <lib/utils_def.h>
/* Neoverse N1 MIDR for revision 0 */
#define NEOVERSE_N1_MIDR U(0x410fd0c0)
#define NEOVERSE_N1_MIDR U(0x410fd0c0)
/* Neoverse N1 loop count for CVE-2022-23960 mitigation */
#define NEOVERSE_N1_BHB_LOOP_COUNT U(24)
/* Exception Syndrome register EC code for IC Trap */
#define NEOVERSE_N1_EC_IC_TRAP U(0x1f)
#define NEOVERSE_N1_EC_IC_TRAP U(0x1f)
/*******************************************************************************
* CPU Power Control register specific definitions.
******************************************************************************/
#define NEOVERSE_N1_CPUPWRCTLR_EL1 S3_0_C15_C2_7
#define NEOVERSE_N1_CPUPWRCTLR_EL1 S3_0_C15_C2_7
/* Definitions of register field mask in NEOVERSE_N1_CPUPWRCTLR_EL1 */
#define NEOVERSE_N1_CORE_PWRDN_EN_MASK U(0x1)
#define NEOVERSE_N1_CORE_PWRDN_EN_MASK U(0x1)
#define NEOVERSE_N1_ACTLR_AMEN_BIT (U(1) << 4)
#define NEOVERSE_N1_ACTLR_AMEN_BIT (U(1) << 4)
#define NEOVERSE_N1_AMU_NR_COUNTERS U(5)
#define NEOVERSE_N1_AMU_GROUP0_MASK U(0x1f)
#define NEOVERSE_N1_AMU_NR_COUNTERS U(5)
#define NEOVERSE_N1_AMU_GROUP0_MASK U(0x1f)
/*******************************************************************************
* CPU Extended Control register specific definitions.
******************************************************************************/
#define NEOVERSE_N1_CPUECTLR_EL1 S3_0_C15_C1_4
#define NEOVERSE_N1_CPUECTLR_EL1 S3_0_C15_C1_4
#define NEOVERSE_N1_WS_THR_L2_MASK (ULL(3) << 24)
#define NEOVERSE_N1_WS_THR_L2_MASK (ULL(3) << 24)
#define NEOVERSE_N1_CPUECTLR_EL1_MM_TLBPF_DIS_BIT (ULL(1) << 51)
#define NEOVERSE_N1_CPUECTLR_EL1_EXTLLC_BIT (ULL(1) << 0)
/*******************************************************************************
* CPU Auxiliary Control register specific definitions.
******************************************************************************/
#define NEOVERSE_N1_CPUACTLR_EL1 S3_0_C15_C1_0
#define NEOVERSE_N1_CPUACTLR_EL1 S3_0_C15_C1_0
#define NEOVERSE_N1_CPUACTLR_EL1_BIT_6 (ULL(1) << 6)
#define NEOVERSE_N1_CPUACTLR_EL1_BIT_13 (ULL(1) << 13)
#define NEOVERSE_N1_CPUACTLR_EL1_BIT_6 (ULL(1) << 6)
#define NEOVERSE_N1_CPUACTLR_EL1_BIT_13 (ULL(1) << 13)
#define NEOVERSE_N1_CPUACTLR2_EL1 S3_0_C15_C1_1
#define NEOVERSE_N1_CPUACTLR2_EL1 S3_0_C15_C1_1
#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_0 (ULL(1) << 0)
#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_2 (ULL(1) << 2)
#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_11 (ULL(1) << 11)
#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_15 (ULL(1) << 15)
#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_16 (ULL(1) << 16)
#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_59 (ULL(1) << 59)
#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_0 (ULL(1) << 0)
#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_2 (ULL(1) << 2)
#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_11 (ULL(1) << 11)
#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_15 (ULL(1) << 15)
#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_16 (ULL(1) << 16)
#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_59 (ULL(1) << 59)
#define NEOVERSE_N1_CPUACTLR3_EL1 S3_0_C15_C1_2
#define NEOVERSE_N1_CPUACTLR3_EL1 S3_0_C15_C1_2
#define NEOVERSE_N1_CPUACTLR3_EL1_BIT_10 (ULL(1) << 10)
#define NEOVERSE_N1_CPUACTLR3_EL1_BIT_10 (ULL(1) << 10)
/* Instruction patching registers */
#define CPUPSELR_EL3 S3_6_C15_C8_0
#define CPUPCR_EL3 S3_6_C15_C8_1
#define CPUPOR_EL3 S3_6_C15_C8_2
#define CPUPMR_EL3 S3_6_C15_C8_3
#define CPUPSELR_EL3 S3_6_C15_C8_0
#define CPUPCR_EL3 S3_6_C15_C8_1
#define CPUPOR_EL3 S3_6_C15_C8_2
#define CPUPMR_EL3 S3_6_C15_C8_3
#endif /* NEOVERSE_N1_H */

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2021, Arm Limited. All rights reserved.
* Copyright (c) 2020-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -10,6 +10,9 @@
/* Neoverse N2 ID register for revision r0p0 */
#define NEOVERSE_N2_MIDR U(0x410FD490)
/* Neoverse N2 loop count for CVE-2022-23960 mitigation */
#define NEOVERSE_N2_BHB_LOOP_COUNT U(32)
/*******************************************************************************
* CPU Power control register
******************************************************************************/

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2021, ARM Limited. All rights reserved.
* Copyright (c) 2019-2022, ARM Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -9,6 +9,9 @@
#define NEOVERSE_V1_MIDR U(0x410FD400)
/* Neoverse V1 loop count for CVE-2022-23960 mitigation */
#define NEOVERSE_V1_BHB_LOOP_COUNT U(32)
/*******************************************************************************
* CPU Extended Control register specific definitions.
******************************************************************************/

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Arm Limited. All rights reserved.
* Copyright (c) 2021-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -10,6 +10,7 @@
#include <cortex_a710.h>
#include <cpu_macros.S>
#include <plat_macros.S>
#include "wa_cve_2022_23960_bhb_vector.S"
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
@ -21,6 +22,10 @@
#error "Cortex A710 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
#if WORKAROUND_CVE_2022_23960
wa_cve_2022_23960_bhb_vector_table CORTEX_A710_BHB_LOOP_COUNT, cortex_a710
#endif /* WORKAROUND_CVE_2022_23960 */
/* --------------------------------------------------
* Errata Workaround for Cortex-A710 Erratum 1987031.
* This applies to revision r0p0, r1p0 and r2p0 of Cortex-A710. It is still
@ -305,6 +310,15 @@ func check_errata_2282622
b cpu_rev_var_ls
endfunc check_errata_2282622
/*
 * Report whether the CVE-2022-23960 mitigation is compiled in for
 * Cortex-A710. Out: x0 = ERRATA_APPLIES when built with
 * WORKAROUND_CVE_2022_23960, ERRATA_MISSING otherwise. Clobbers: x0.
 */
func check_errata_cve_2022_23960
#if WORKAROUND_CVE_2022_23960
mov x0, #ERRATA_APPLIES
#else
mov x0, #ERRATA_MISSING
#endif
ret
endfunc check_errata_cve_2022_23960
/* ----------------------------------------------------
* HW will do the cache maintenance while powering down
* ----------------------------------------------------
@ -344,6 +358,7 @@ func cortex_a710_errata_report
report_errata ERRATA_A710_2267065, cortex_a710, 2267065
report_errata ERRATA_A710_2136059, cortex_a710, 2136059
report_errata ERRATA_A710_2282622, cortex_a710, 2282622
report_errata WORKAROUND_CVE_2022_23960, cortex_a710, cve_2022_23960
ldp x8, x30, [sp], #16
ret
@ -404,6 +419,15 @@ func cortex_a710_reset_func
bl errata_a710_2282622_wa
#endif
#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
/*
* The Cortex-A710 generic vectors are overridden to apply errata
* mitigation on exception entry from lower ELs.
*/
adr x0, wa_cve_vbar_cortex_a710
msr vbar_el3, x0
#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
isb
ret x19
endfunc cortex_a710_reset_func

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -10,6 +10,7 @@
#include <cortex_a77.h>
#include <cpu_macros.S>
#include <plat_macros.S>
#include "wa_cve_2022_23960_bhb_vector.S"
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
@ -21,6 +22,10 @@
#error "Cortex-A77 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
#if WORKAROUND_CVE_2022_23960
wa_cve_2022_23960_bhb_vector_table CORTEX_A77_BHB_LOOP_COUNT, cortex_a77
#endif /* WORKAROUND_CVE_2022_23960 */
/* --------------------------------------------------
* Errata Workaround for Cortex A77 Errata #1508412.
* This applies only to revision <= r1p0 of Cortex A77.
@ -194,6 +199,15 @@ func check_errata_1791578
b cpu_rev_var_ls
endfunc check_errata_1791578
/*
 * Report whether the CVE-2022-23960 mitigation is compiled in for
 * Cortex-A77. Out: x0 = ERRATA_APPLIES when built with
 * WORKAROUND_CVE_2022_23960, ERRATA_MISSING otherwise. Clobbers: x0.
 */
func check_errata_cve_2022_23960
#if WORKAROUND_CVE_2022_23960
mov x0, #ERRATA_APPLIES
#else
mov x0, #ERRATA_MISSING
#endif
ret
endfunc check_errata_cve_2022_23960
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A77.
* Shall clobber: x0-x19
@ -224,6 +238,16 @@ func cortex_a77_reset_func
bl errata_a77_1791578_wa
#endif
#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
/*
* The Cortex-A77 generic vectors are overridden to apply errata
* mitigation on exception entry from lower ELs.
*/
adr x0, wa_cve_vbar_cortex_a77
msr vbar_el3, x0
#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
isb
ret x19
endfunc cortex_a77_reset_func
@ -261,6 +285,7 @@ func cortex_a77_errata_report
report_errata ERRATA_A77_1925769, cortex_a77, 1925769
report_errata ERRATA_A77_1946167, cortex_a77, 1946167
report_errata ERRATA_A77_1791578, cortex_a77, 1791578
report_errata WORKAROUND_CVE_2022_23960, cortex_a77, cve_2022_23960
ldp x8, x30, [sp], #16
ret

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2021, ARM Limited. All rights reserved.
* Copyright (c) 2019-2022, ARM Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -10,12 +10,16 @@
#include <cortex_a78.h>
#include <cpu_macros.S>
#include <plat_macros.S>
#include "wa_cve_2022_23960_bhb_vector.S"
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
#error "cortex_a78 must be compiled with HW_ASSISTED_COHERENCY enabled"
#endif
#if WORKAROUND_CVE_2022_23960
wa_cve_2022_23960_bhb_vector_table CORTEX_A78_BHB_LOOP_COUNT, cortex_a78
#endif /* WORKAROUND_CVE_2022_23960 */
/* --------------------------------------------------
* Errata Workaround for A78 Erratum 1688305.
@ -263,6 +267,15 @@ func check_errata_2242635
b cpu_rev_var_range
endfunc check_errata_2242635
/*
 * Report whether the CVE-2022-23960 mitigation is compiled in for
 * Cortex-A78. Out: x0 = ERRATA_APPLIES when built with
 * WORKAROUND_CVE_2022_23960, ERRATA_MISSING otherwise. Clobbers: x0.
 */
func check_errata_cve_2022_23960
#if WORKAROUND_CVE_2022_23960
mov x0, #ERRATA_APPLIES
#else
mov x0, #ERRATA_MISSING
#endif
ret
endfunc check_errata_cve_2022_23960
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A78
* -------------------------------------------------
@ -327,6 +340,15 @@ func cortex_a78_reset_func
msr CPUAMCNTENSET1_EL0, x0
#endif
#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
/*
* The Cortex-A78 generic vectors are overridden to apply errata
* mitigation on exception entry from lower ELs.
*/
adr x0, wa_cve_vbar_cortex_a78
msr vbar_el3, x0
#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
isb
ret x19
endfunc cortex_a78_reset_func
@ -368,6 +390,7 @@ func cortex_a78_errata_report
report_errata ERRATA_A78_1952683, cortex_a78, 1952683
report_errata ERRATA_A78_2132060, cortex_a78, 2132060
report_errata ERRATA_A78_2242635, cortex_a78, 2242635
report_errata WORKAROUND_CVE_2022_23960, cortex_a78, cve_2022_23960
ldp x8, x30, [sp], #16
ret

View File

@ -10,6 +10,7 @@
#include <cortex_x2.h>
#include <cpu_macros.S>
#include <plat_macros.S>
#include "wa_cve_2022_23960_bhb_vector.S"
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
@ -21,6 +22,10 @@
#error "Cortex X2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
#if WORKAROUND_CVE_2022_23960
wa_cve_2022_23960_bhb_vector_table CORTEX_X2_BHB_LOOP_COUNT, cortex_x2
#endif /* WORKAROUND_CVE_2022_23960 */
/* --------------------------------------------------
* Errata Workaround for Cortex X2 Errata #2002765.
* This applies to revisions r0p0, r1p0, and r2p0 and
@ -222,6 +227,16 @@ func check_errata_2216384
mov x1, #0x20
b cpu_rev_var_ls
endfunc check_errata_2216384
/*
 * Report whether the CVE-2022-23960 mitigation is compiled in for
 * Cortex-X2. Out: x0 = ERRATA_APPLIES when built with
 * WORKAROUND_CVE_2022_23960, ERRATA_MISSING otherwise. Clobbers: x0.
 */
func check_errata_cve_2022_23960
#if WORKAROUND_CVE_2022_23960
mov x0, #ERRATA_APPLIES
#else
mov x0, #ERRATA_MISSING
#endif
ret
endfunc check_errata_cve_2022_23960
/* ----------------------------------------------------
* HW will do the cache maintenance while powering down
* ----------------------------------------------------
@ -258,6 +273,7 @@ func cortex_x2_errata_report
report_errata ERRATA_X2_2017096, cortex_x2, 2017096
report_errata ERRATA_X2_2081180, cortex_x2, 2081180
report_errata ERRATA_X2_2216384, cortex_x2, 2216384
report_errata WORKAROUND_CVE_2022_23960, cortex_x2, cve_2022_23960
ldp x8, x30, [sp], #16
ret
@ -305,6 +321,16 @@ func cortex_x2_reset_func
bl errata_x2_2216384_wa
#endif
#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
/*
* The Cortex-X2 generic vectors are overridden to apply errata
* mitigation on exception entry from lower ELs.
*/
adr x0, wa_cve_vbar_cortex_x2
msr vbar_el3, x0
#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
isb
ret x19
endfunc cortex_x2_reset_func

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -8,8 +8,8 @@
#include <asm_macros.S>
#include <cpuamu.h>
#include <cpu_macros.S>
#include <context.h>
#include <neoverse_n1.h>
#include "wa_cve_2022_23960_bhb_vector.S"
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
@ -23,6 +23,10 @@
.global neoverse_n1_errata_ic_trap_handler
#if WORKAROUND_CVE_2022_23960
wa_cve_2022_23960_bhb_vector_table NEOVERSE_N1_BHB_LOOP_COUNT, neoverse_n1
#endif /* WORKAROUND_CVE_2022_23960 */
/* --------------------------------------------------
* Errata Workaround for Neoverse N1 Erratum 1043202.
* This applies to revision r0p0 and r1p0 of Neoverse N1.
@ -464,6 +468,15 @@ func check_errata_1946160
b cpu_rev_var_range
endfunc check_errata_1946160
/*
 * Report whether the CVE-2022-23960 mitigation is compiled in for
 * Neoverse N1. Out: x0 = ERRATA_APPLIES when built with
 * WORKAROUND_CVE_2022_23960, ERRATA_MISSING otherwise. Clobbers: x0.
 */
func check_errata_cve_2022_23960
#if WORKAROUND_CVE_2022_23960
mov x0, #ERRATA_APPLIES
#else
mov x0, #ERRATA_MISSING
#endif
ret
endfunc check_errata_cve_2022_23960
func neoverse_n1_reset_func
mov x19, x30
@ -575,6 +588,15 @@ func neoverse_n1_reset_func
bl errata_dsu_936184_wa
#endif
#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
/*
* The Neoverse-N1 generic vectors are overridden to apply errata
* mitigation on exception entry from lower ELs.
*/
adr x0, wa_cve_vbar_neoverse_n1
msr vbar_el3, x0
#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
isb
ret x19
endfunc neoverse_n1_reset_func
@ -624,6 +646,7 @@ func neoverse_n1_errata_report
report_errata ERRATA_N1_1868343, neoverse_n1, 1868343
report_errata ERRATA_N1_1946160, neoverse_n1, 1946160
report_errata ERRATA_DSU_936184, neoverse_n1, dsu_936184
report_errata WORKAROUND_CVE_2022_23960, neoverse_n1, cve_2022_23960
ldp x8, x30, [sp], #16
ret

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2021, Arm Limited. All rights reserved.
* Copyright (c) 2020-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -8,6 +8,7 @@
#include <asm_macros.S>
#include <cpu_macros.S>
#include <neoverse_n2.h>
#include "wa_cve_2022_23960_bhb_vector.S"
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
@ -19,6 +20,10 @@
#error "Neoverse-N2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
#if WORKAROUND_CVE_2022_23960
wa_cve_2022_23960_bhb_vector_table NEOVERSE_N2_BHB_LOOP_COUNT, neoverse_n2
#endif /* WORKAROUND_CVE_2022_23960 */
/* --------------------------------------------------
* Errata Workaround for Neoverse N2 Erratum 2002655.
* This applies to revision r0p0 of Neoverse N2. it is still open.
@ -333,6 +338,15 @@ func check_errata_2280757
b cpu_rev_var_ls
endfunc check_errata_2280757
/*
 * Report whether the CVE-2022-23960 mitigation is compiled in for
 * Neoverse N2. Out: x0 = ERRATA_APPLIES when built with
 * WORKAROUND_CVE_2022_23960, ERRATA_MISSING otherwise. Clobbers: x0.
 */
func check_errata_cve_2022_23960
#if WORKAROUND_CVE_2022_23960
mov x0, #ERRATA_APPLIES
#else
mov x0, #ERRATA_MISSING
#endif
ret
endfunc check_errata_cve_2022_23960
/* -------------------------------------------
* The CPU Ops reset function for Neoverse N2.
* -------------------------------------------
@ -428,6 +442,15 @@ func neoverse_n2_reset_func
bl errata_n2_2002655_wa
#endif
#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
/*
* The Neoverse-N2 generic vectors are overridden to apply errata
* mitigation on exception entry from lower ELs.
*/
adr x0, wa_cve_vbar_neoverse_n2
msr vbar_el3, x0
#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
isb
ret x19
endfunc neoverse_n2_reset_func
@ -469,6 +492,7 @@ func neoverse_n2_errata_report
report_errata ERRATA_N2_2138958, neoverse_n2, 2138958
report_errata ERRATA_N2_2242400, neoverse_n2, 2242400
report_errata ERRATA_N2_2280757, neoverse_n2, 2280757
report_errata WORKAROUND_CVE_2022_23960, neoverse_n2, cve_2022_23960
ldp x8, x30, [sp], #16
ret

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2021, Arm Limited. All rights reserved.
* Copyright (c) 2019-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -10,6 +10,7 @@
#include <neoverse_v1.h>
#include <cpu_macros.S>
#include <plat_macros.S>
#include "wa_cve_2022_23960_bhb_vector.S"
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
@ -21,6 +22,10 @@
#error "Neoverse-V1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
#if WORKAROUND_CVE_2022_23960
wa_cve_2022_23960_bhb_vector_table NEOVERSE_V1_BHB_LOOP_COUNT, neoverse_v1
#endif /* WORKAROUND_CVE_2022_23960 */
/* --------------------------------------------------
* Errata Workaround for Neoverse V1 Errata #1774420.
* This applies to revisions r0p0 and r1p0, fixed in r1p1.
@ -325,6 +330,15 @@ func check_errata_2216392
b cpu_rev_var_range
endfunc check_errata_2216392
/*
 * Report whether the CVE-2022-23960 mitigation is compiled in for
 * Neoverse V1. Out: x0 = ERRATA_APPLIES when built with
 * WORKAROUND_CVE_2022_23960, ERRATA_MISSING otherwise. Clobbers: x0.
 */
func check_errata_cve_2022_23960
#if WORKAROUND_CVE_2022_23960
mov x0, #ERRATA_APPLIES
#else
mov x0, #ERRATA_MISSING
#endif
ret
endfunc check_errata_cve_2022_23960
/* ---------------------------------------------
* HW will do the cache maintenance while powering down
* ---------------------------------------------
@ -364,6 +378,7 @@ func neoverse_v1_errata_report
report_errata ERRATA_V1_2139242, neoverse_v1, 2139242
report_errata ERRATA_V1_2108267, neoverse_v1, 2108267
report_errata ERRATA_V1_2216392, neoverse_v1, 2216392
report_errata WORKAROUND_CVE_2022_23960, neoverse_v1, cve_2022_23960
ldp x8, x30, [sp], #16
ret
@ -422,6 +437,16 @@ func neoverse_v1_reset_func
bl errata_neoverse_v1_2216392_wa
#endif
#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
/*
* The Neoverse-V1 generic vectors are overridden to apply errata
* mitigation on exception entry from lower ELs.
*/
adr x0, wa_cve_vbar_neoverse_v1
msr vbar_el3, x0
#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
isb
ret x19
endfunc neoverse_v1_reset_func

View File

@ -0,0 +1,31 @@
/*
* Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <context.h>
#if WORKAROUND_CVE_2022_23960
/*
* This macro applies the mitigation for CVE-2022-23960.
* The macro saves x2-x3 to the CPU context.
* SP should point to the CPU context.
*/
.macro apply_cve_2022_23960_bhb_wa _bhb_loop_count
/*
 * Preserve x2/x3 into the saved CPU context (SP must point at it, per
 * the note above); both are restored before the macro ends. Only x2 is
 * used below; x3 rides along because stp/ldp work on register pairs.
 */
stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
/* CVE-BHB-NUM loop count */
mov x2, \_bhb_loop_count
1:
/* b pc+4 part of the workaround */
b 2f
2:
subs x2, x2, #1
bne 1b
/* DSB + ISB: ensure the branch loop above fully completes and takes
 * effect before execution continues. */
dsb sy
isb
/* Restore the scratch registers saved at entry. */
ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
.endm
#endif /* WORKAROUND_CVE_2022_23960 */

View File

@ -0,0 +1,108 @@
/*
* Copyright (c) 2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
#include <services/arm_arch_svc.h>
#include "wa_cve_2022_23960_bhb.S"
/*
* This macro is used to isolate the vector table for relevant CPUs
* used in the mitigation for CVE_2022_23960.
*/
/*
 * Emit a complete EL3 exception vector table, named wa_cve_vbar_\_cpu,
 * carrying the CVE-2022-23960 mitigation for one CPU type. Entries
 * taken from lower ELs run apply_cve_2022_23960_bhb_wa with the
 * CPU-specific \_bhb_loop_count before branching to the generic
 * runtime handler; current-EL entries branch to the generic handlers
 * unchanged.
 */
.macro wa_cve_2022_23960_bhb_vector_table _bhb_loop_count, _cpu
/* Exported so the CPU reset code can install the table into VBAR_EL3. */
.globl wa_cve_vbar_\_cpu
vector_base wa_cve_vbar_\_cpu
/* ---------------------------------------------------------------------
* Current EL with SP_EL0 : 0x0 - 0x200
* No mitigation needed: these entries are not reached from a lower EL.
* ---------------------------------------------------------------------
*/
vector_entry bhb_sync_exception_sp_el0_\_cpu
b sync_exception_sp_el0
end_vector_entry bhb_sync_exception_sp_el0_\_cpu
vector_entry bhb_irq_sp_el0_\_cpu
b irq_sp_el0
end_vector_entry bhb_irq_sp_el0_\_cpu
vector_entry bhb_fiq_sp_el0_\_cpu
b fiq_sp_el0
end_vector_entry bhb_fiq_sp_el0_\_cpu
vector_entry bhb_serror_sp_el0_\_cpu
b serror_sp_el0
end_vector_entry bhb_serror_sp_el0_\_cpu
/* ---------------------------------------------------------------------
* Current EL with SP_ELx: 0x200 - 0x400
* ---------------------------------------------------------------------
*/
vector_entry bhb_sync_exception_sp_elx_\_cpu
b sync_exception_sp_elx
end_vector_entry bhb_sync_exception_sp_elx_\_cpu
vector_entry bhb_irq_sp_elx_\_cpu
b irq_sp_elx
end_vector_entry bhb_irq_sp_elx_\_cpu
vector_entry bhb_fiq_sp_elx_\_cpu
b fiq_sp_elx
end_vector_entry bhb_fiq_sp_elx_\_cpu
vector_entry bhb_serror_sp_elx_\_cpu
b serror_sp_elx
end_vector_entry bhb_serror_sp_elx_\_cpu
/* ---------------------------------------------------------------------
* Lower EL using AArch64 : 0x400 - 0x600
* The BHB workaround loop runs before the generic handler on every
* entry from a lower EL.
* ---------------------------------------------------------------------
*/
vector_entry bhb_sync_exception_aarch64_\_cpu
apply_cve_2022_23960_bhb_wa \_bhb_loop_count
b sync_exception_aarch64
end_vector_entry bhb_sync_exception_aarch64_\_cpu
vector_entry bhb_irq_aarch64_\_cpu
apply_cve_2022_23960_bhb_wa \_bhb_loop_count
b irq_aarch64
end_vector_entry bhb_irq_aarch64_\_cpu
vector_entry bhb_fiq_aarch64_\_cpu
apply_cve_2022_23960_bhb_wa \_bhb_loop_count
b fiq_aarch64
end_vector_entry bhb_fiq_aarch64_\_cpu
vector_entry bhb_serror_aarch64_\_cpu
apply_cve_2022_23960_bhb_wa \_bhb_loop_count
b serror_aarch64
end_vector_entry bhb_serror_aarch64_\_cpu
/* ---------------------------------------------------------------------
* Lower EL using AArch32 : 0x600 - 0x800
* ---------------------------------------------------------------------
*/
vector_entry bhb_sync_exception_aarch32_\_cpu
apply_cve_2022_23960_bhb_wa \_bhb_loop_count
b sync_exception_aarch32
end_vector_entry bhb_sync_exception_aarch32_\_cpu
vector_entry bhb_irq_aarch32_\_cpu
apply_cve_2022_23960_bhb_wa \_bhb_loop_count
b irq_aarch32
end_vector_entry bhb_irq_aarch32_\_cpu
vector_entry bhb_fiq_aarch32_\_cpu
apply_cve_2022_23960_bhb_wa \_bhb_loop_count
b fiq_aarch32
end_vector_entry bhb_fiq_aarch32_\_cpu
vector_entry bhb_serror_aarch32_\_cpu
apply_cve_2022_23960_bhb_wa \_bhb_loop_count
b serror_aarch32
end_vector_entry bhb_serror_aarch32_\_cpu
.endm

View File

@ -24,6 +24,7 @@ A57_ENABLE_NONCACHEABLE_LOAD_FWD ?= 0
# CVE mitigation build flags (boolean, overridable by the platform).
# WORKAROUND_CVE_2022_23960 is an opt-in mitigation: it defaults to 0,
# and platforms containing affected CPUs enable it explicitly. The "?=1"
# here contradicted the TF-A default (cpus.mk sets "?=0") and would have
# silently enabled the BHB loop vectors on every build.
WORKAROUND_CVE_2017_5715 ?=1
WORKAROUND_CVE_2018_3639 ?=1
DYNAMIC_WORKAROUND_CVE_2018_3639 ?=0
WORKAROUND_CVE_2022_23960 ?=0
# Flags to indicate internal or external Last level cache
# By default internal
@ -56,6 +57,10 @@ $(eval $(call add_define,WORKAROUND_CVE_2018_3639))
$(eval $(call assert_boolean,DYNAMIC_WORKAROUND_CVE_2018_3639))
$(eval $(call add_define,DYNAMIC_WORKAROUND_CVE_2018_3639))
# Process WORKAROUND_CVE_2022_23960 flag
$(eval $(call assert_boolean,WORKAROUND_CVE_2022_23960))
$(eval $(call add_define,WORKAROUND_CVE_2022_23960))
$(eval $(call assert_boolean,NEOVERSE_Nx_EXTERNAL_LLC))
$(eval $(call add_define,NEOVERSE_Nx_EXTERNAL_LLC))