From 2c3a10780df3317c004de74fbe85df53daab94e5 Mon Sep 17 00:00:00 2001 From: Dimitris Papastamos Date: Fri, 6 Apr 2018 15:29:34 +0100 Subject: [PATCH 1/4] Rename symbols and files relating to CVE-2017-5715 This patch renames symbols and files relating to CVE-2017-5715 to make it easier to introduce new symbols and files for new CVE mitigations. Change-Id: I24c23822862ca73648c772885f1690bed043dbc7 Signed-off-by: Dimitris Papastamos --- bl31/bl31.mk | 4 +- bl32/sp_min/sp_min.mk | 4 +- ...715_bpiall.S => wa_cve_2017_5715_bpiall.S} | 4 +- ...he_inv.S => wa_cve_2017_5715_icache_inv.S} | 4 +- include/lib/cpus/wa_cve_2017_5715.h | 12 ++ include/lib/cpus/workaround_cve_2017_5715.h | 12 -- lib/cpus/aarch64/cortex_a57.S | 2 +- lib/cpus/aarch64/cortex_a72.S | 2 +- lib/cpus/aarch64/cortex_a73.S | 2 +- lib/cpus/aarch64/cortex_a75.S | 2 +- lib/cpus/aarch64/cpu_helpers.S | 8 +- ...715_bpiall.S => wa_cve_2017_5715_bpiall.S} | 164 +++++++++--------- ...2017_5715_mmu.S => wa_cve_2017_5715_mmu.S} | 86 ++++----- services/arm_arch_svc/arm_arch_svc_setup.c | 4 +- 14 files changed, 155 insertions(+), 155 deletions(-) rename bl32/sp_min/{workaround_cve_2017_5715_bpiall.S => wa_cve_2017_5715_bpiall.S} (94%) rename bl32/sp_min/{workaround_cve_2017_5715_icache_inv.S => wa_cve_2017_5715_icache_inv.S} (94%) create mode 100644 include/lib/cpus/wa_cve_2017_5715.h delete mode 100644 include/lib/cpus/workaround_cve_2017_5715.h rename lib/cpus/aarch64/{workaround_cve_2017_5715_bpiall.S => wa_cve_2017_5715_bpiall.S} (66%) rename lib/cpus/aarch64/{workaround_cve_2017_5715_mmu.S => wa_cve_2017_5715_mmu.S} (55%) diff --git a/bl31/bl31.mk b/bl31/bl31.mk index 0e47ddf68..a6c0a9a07 100644 --- a/bl31/bl31.mk +++ b/bl31/bl31.mk @@ -61,8 +61,8 @@ BL31_SOURCES += lib/extensions/sve/sve.c endif ifeq (${WORKAROUND_CVE_2017_5715},1) -BL31_SOURCES += lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S \ - lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S +BL31_SOURCES += lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S \ + lib/cpus/aarch64/wa_cve_2017_5715_mmu.S endif BL31_LINKERFILE := bl31/bl31.ld.S diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk index 193b1d5ea..6233299d7 100644 --- a/bl32/sp_min/sp_min.mk +++ b/bl32/sp_min/sp_min.mk @@ -29,8 +29,8 @@ BL32_SOURCES += lib/extensions/amu/aarch32/amu.c\ endif ifeq (${WORKAROUND_CVE_2017_5715},1) -BL32_SOURCES += bl32/sp_min/workaround_cve_2017_5715_bpiall.S \ - bl32/sp_min/workaround_cve_2017_5715_icache_inv.S +BL32_SOURCES += bl32/sp_min/wa_cve_2017_5715_bpiall.S \ + bl32/sp_min/wa_cve_2017_5715_icache_inv.S endif BL32_LINKERFILE := bl32/sp_min/sp_min.ld.S diff --git a/bl32/sp_min/workaround_cve_2017_5715_bpiall.S b/bl32/sp_min/wa_cve_2017_5715_bpiall.S similarity index 94% rename from bl32/sp_min/workaround_cve_2017_5715_bpiall.S rename to bl32/sp_min/wa_cve_2017_5715_bpiall.S index 5387cefc9..385f3d4b3 100644 --- a/bl32/sp_min/workaround_cve_2017_5715_bpiall.S +++ b/bl32/sp_min/wa_cve_2017_5715_bpiall.S @@ -6,9 +6,9 @@ #include - .globl workaround_bpiall_runtime_exceptions + .globl wa_cve_2017_5715_bpiall_vbar -vector_base workaround_bpiall_runtime_exceptions +vector_base wa_cve_2017_5715_bpiall_vbar /* We encode the exception entry in the bottom 3 bits of SP */ add sp, sp, #1 /* Reset: 0b111 */ add sp, sp, #1 /* Undef: 0b110 */ diff --git a/bl32/sp_min/workaround_cve_2017_5715_icache_inv.S b/bl32/sp_min/wa_cve_2017_5715_icache_inv.S similarity index 94% rename from bl32/sp_min/workaround_cve_2017_5715_icache_inv.S rename to bl32/sp_min/wa_cve_2017_5715_icache_inv.S index 
9102b02f9..d0a46250f 100644 --- a/bl32/sp_min/workaround_cve_2017_5715_icache_inv.S +++ b/bl32/sp_min/wa_cve_2017_5715_icache_inv.S @@ -6,9 +6,9 @@ #include - .globl workaround_icache_inv_runtime_exceptions + .globl wa_cve_2017_5715_icache_inv_vbar -vector_base workaround_icache_inv_runtime_exceptions +vector_base wa_cve_2017_5715_icache_inv_vbar /* We encode the exception entry in the bottom 3 bits of SP */ add sp, sp, #1 /* Reset: 0b111 */ add sp, sp, #1 /* Undef: 0b110 */ diff --git a/include/lib/cpus/wa_cve_2017_5715.h b/include/lib/cpus/wa_cve_2017_5715.h new file mode 100644 index 000000000..0a65a5692 --- /dev/null +++ b/include/lib/cpus/wa_cve_2017_5715.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __WA_CVE_2017_5715_H__ +#define __WA_CVE_2017_5715_H__ + +int check_wa_cve_2017_5715(void); + +#endif /* __WA_CVE_2017_5715_H__ */ diff --git a/include/lib/cpus/workaround_cve_2017_5715.h b/include/lib/cpus/workaround_cve_2017_5715.h deleted file mode 100644 index e837a673a..000000000 --- a/include/lib/cpus/workaround_cve_2017_5715.h +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#ifndef __WORKAROUND_CVE_2017_5715_H__ -#define __WORKAROUND_CVE_2017_5715_H__ - -int check_workaround_cve_2017_5715(void); - -#endif /* __WORKAROUND_CVE_2017_5715_H__ */ diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S index 4d072e11c..8470c6c5f 100644 --- a/lib/cpus/aarch64/cortex_a57.S +++ b/lib/cpus/aarch64/cortex_a57.S @@ -393,7 +393,7 @@ func cortex_a57_reset_func #endif #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715 - adr x0, workaround_mmu_runtime_exceptions + adr x0, wa_cve_2017_5715_mmu_vbar msr vbar_el3, x0 #endif diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S index 29fa77b90..b67c98776 100644 --- a/lib/cpus/aarch64/cortex_a72.S +++ b/lib/cpus/aarch64/cortex_a72.S @@ -126,7 +126,7 @@ func cortex_a72_reset_func #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715 cpu_check_csv2 x0, 1f - adr x0, workaround_mmu_runtime_exceptions + adr x0, wa_cve_2017_5715_mmu_vbar msr vbar_el3, x0 1: #endif diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S index 0a961ea33..c66067d70 100644 --- a/lib/cpus/aarch64/cortex_a73.S +++ b/lib/cpus/aarch64/cortex_a73.S @@ -38,7 +38,7 @@ endfunc cortex_a73_disable_smp func cortex_a73_reset_func #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715 cpu_check_csv2 x0, 1f - adr x0, workaround_bpiall_vbar0_runtime_exceptions + adr x0, wa_cve_2017_5715_bpiall_vbar msr vbar_el3, x0 1: #endif diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S index 288f5afed..f92e4ed02 100644 --- a/lib/cpus/aarch64/cortex_a75.S +++ b/lib/cpus/aarch64/cortex_a75.S @@ -13,7 +13,7 @@ func cortex_a75_reset_func #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715 cpu_check_csv2 x0, 1f - adr x0, workaround_bpiall_vbar0_runtime_exceptions + adr x0, wa_cve_2017_5715_bpiall_vbar msr vbar_el3, x0 1: #endif diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S index 9f13ed2ca..78c66e652 100644 --- a/lib/cpus/aarch64/cpu_helpers.S +++ b/lib/cpus/aarch64/cpu_helpers.S @@ -285,7 +285,7 @@ endfunc print_errata_status #endif /* - * int check_workaround_cve_2017_5715(void); + * int check_wa_cve_2017_5715(void); * * This function returns: * - ERRATA_APPLIES when firmware mitigation is required. 
@@ -296,8 +296,8 @@ endfunc print_errata_status * NOTE: Must be called only after cpu_ops have been initialized * in per-CPU data. */ - .globl check_workaround_cve_2017_5715 -func check_workaround_cve_2017_5715 + .globl check_wa_cve_2017_5715 +func check_wa_cve_2017_5715 mrs x0, tpidr_el3 #if ENABLE_ASSERTIONS cmp x0, #0 @@ -315,4 +315,4 @@ func check_workaround_cve_2017_5715 1: mov x0, #ERRATA_NOT_APPLIES ret -endfunc check_workaround_cve_2017_5715 +endfunc check_wa_cve_2017_5715 diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S similarity index 66% rename from lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S rename to lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S index cd8249732..843715515 100644 --- a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S +++ b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S @@ -9,13 +9,13 @@ #include #include - .globl workaround_bpiall_vbar0_runtime_exceptions + .globl wa_cve_2017_5715_bpiall_vbar #define EMIT_BPIALL 0xee070fd5 #define EMIT_SMC 0xe1600070 #define ESR_EL3_A64_SMC0 0x5e000000 - .macro enter_workaround _from_vector + .macro apply_cve_2017_5715_wa _from_vector /* * Save register state to enable a call to AArch32 S-EL1 and return * Identify the original calling vector in w2 (==_from_vector) @@ -66,7 +66,7 @@ movz w8, SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, SPSR_AIF_MASK) /* Switch EL3 exception vectors while the workaround is executing. */ - adr x9, workaround_bpiall_vbar1_runtime_exceptions + adr x9, wa_cve_2017_5715_bpiall_ret_vbar /* Setup SCTLR_EL1 with MMU off and I$ on */ ldr x10, stub_sel1_sctlr @@ -93,13 +93,13 @@ * is not enabled, the existing runtime exception vector table is used. * --------------------------------------------------------------------- */ -vector_base workaround_bpiall_vbar0_runtime_exceptions +vector_base wa_cve_2017_5715_bpiall_vbar /* --------------------------------------------------------------------- * Current EL with SP_EL0 : 0x0 - 0x200 * --------------------------------------------------------------------- */ -vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0 +vector_entry bpiall_sync_exception_sp_el0 b sync_exception_sp_el0 nop /* to force 8 byte alignment for the following stub */ @@ -114,79 +114,79 @@ aarch32_stub: .word EMIT_BPIALL .word EMIT_SMC - check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0 + check_vector_size bpiall_sync_exception_sp_el0 -vector_entry workaround_bpiall_vbar0_irq_sp_el0 +vector_entry bpiall_irq_sp_el0 b irq_sp_el0 - check_vector_size workaround_bpiall_vbar0_irq_sp_el0 + check_vector_size bpiall_irq_sp_el0 -vector_entry workaround_bpiall_vbar0_fiq_sp_el0 +vector_entry bpiall_fiq_sp_el0 b fiq_sp_el0 - check_vector_size workaround_bpiall_vbar0_fiq_sp_el0 + check_vector_size bpiall_fiq_sp_el0 -vector_entry workaround_bpiall_vbar0_serror_sp_el0 +vector_entry bpiall_serror_sp_el0 b serror_sp_el0 - check_vector_size workaround_bpiall_vbar0_serror_sp_el0 + check_vector_size bpiall_serror_sp_el0 /* --------------------------------------------------------------------- * Current EL with SP_ELx: 0x200 - 0x400 * --------------------------------------------------------------------- */ -vector_entry workaround_bpiall_vbar0_sync_exception_sp_elx +vector_entry bpiall_sync_exception_sp_elx b sync_exception_sp_elx - check_vector_size workaround_bpiall_vbar0_sync_exception_sp_elx + check_vector_size bpiall_sync_exception_sp_elx -vector_entry workaround_bpiall_vbar0_irq_sp_elx +vector_entry bpiall_irq_sp_elx b 
irq_sp_elx - check_vector_size workaround_bpiall_vbar0_irq_sp_elx + check_vector_size bpiall_irq_sp_elx -vector_entry workaround_bpiall_vbar0_fiq_sp_elx +vector_entry bpiall_fiq_sp_elx b fiq_sp_elx - check_vector_size workaround_bpiall_vbar0_fiq_sp_elx + check_vector_size bpiall_fiq_sp_elx -vector_entry workaround_bpiall_vbar0_serror_sp_elx +vector_entry bpiall_serror_sp_elx b serror_sp_elx - check_vector_size workaround_bpiall_vbar0_serror_sp_elx + check_vector_size bpiall_serror_sp_elx /* --------------------------------------------------------------------- * Lower EL using AArch64 : 0x400 - 0x600 * --------------------------------------------------------------------- */ -vector_entry workaround_bpiall_vbar0_sync_exception_aarch64 - enter_workaround 1 - check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64 +vector_entry bpiall_sync_exception_aarch64 + apply_cve_2017_5715_wa 1 + check_vector_size bpiall_sync_exception_aarch64 -vector_entry workaround_bpiall_vbar0_irq_aarch64 - enter_workaround 2 - check_vector_size workaround_bpiall_vbar0_irq_aarch64 +vector_entry bpiall_irq_aarch64 + apply_cve_2017_5715_wa 2 + check_vector_size bpiall_irq_aarch64 -vector_entry workaround_bpiall_vbar0_fiq_aarch64 - enter_workaround 4 - check_vector_size workaround_bpiall_vbar0_fiq_aarch64 +vector_entry bpiall_fiq_aarch64 + apply_cve_2017_5715_wa 4 + check_vector_size bpiall_fiq_aarch64 -vector_entry workaround_bpiall_vbar0_serror_aarch64 - enter_workaround 8 - check_vector_size workaround_bpiall_vbar0_serror_aarch64 +vector_entry bpiall_serror_aarch64 + apply_cve_2017_5715_wa 8 + check_vector_size bpiall_serror_aarch64 /* --------------------------------------------------------------------- * Lower EL using AArch32 : 0x600 - 0x800 * --------------------------------------------------------------------- */ -vector_entry workaround_bpiall_vbar0_sync_exception_aarch32 - enter_workaround 1 - check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32 +vector_entry bpiall_sync_exception_aarch32 + apply_cve_2017_5715_wa 1 + check_vector_size bpiall_sync_exception_aarch32 -vector_entry workaround_bpiall_vbar0_irq_aarch32 - enter_workaround 2 - check_vector_size workaround_bpiall_vbar0_irq_aarch32 +vector_entry bpiall_irq_aarch32 + apply_cve_2017_5715_wa 2 + check_vector_size bpiall_irq_aarch32 -vector_entry workaround_bpiall_vbar0_fiq_aarch32 - enter_workaround 4 - check_vector_size workaround_bpiall_vbar0_fiq_aarch32 +vector_entry bpiall_fiq_aarch32 + apply_cve_2017_5715_wa 4 + check_vector_size bpiall_fiq_aarch32 -vector_entry workaround_bpiall_vbar0_serror_aarch32 - enter_workaround 8 - check_vector_size workaround_bpiall_vbar0_serror_aarch32 +vector_entry bpiall_serror_aarch32 + apply_cve_2017_5715_wa 8 + check_vector_size bpiall_serror_aarch32 /* --------------------------------------------------------------------- * This vector table is used while the workaround is executing. It @@ -195,73 +195,73 @@ vector_entry workaround_bpiall_vbar0_serror_aarch32 * EL3 state before proceeding with the normal runtime exception vector. 
* --------------------------------------------------------------------- */ -vector_base workaround_bpiall_vbar1_runtime_exceptions +vector_base wa_cve_2017_5715_bpiall_ret_vbar /* --------------------------------------------------------------------- * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED) * --------------------------------------------------------------------- */ -vector_entry workaround_bpiall_vbar1_sync_exception_sp_el0 +vector_entry bpiall_ret_sync_exception_sp_el0 b report_unhandled_exception - check_vector_size workaround_bpiall_vbar1_sync_exception_sp_el0 + check_vector_size bpiall_ret_sync_exception_sp_el0 -vector_entry workaround_bpiall_vbar1_irq_sp_el0 +vector_entry bpiall_ret_irq_sp_el0 b report_unhandled_interrupt - check_vector_size workaround_bpiall_vbar1_irq_sp_el0 + check_vector_size bpiall_ret_irq_sp_el0 -vector_entry workaround_bpiall_vbar1_fiq_sp_el0 +vector_entry bpiall_ret_fiq_sp_el0 b report_unhandled_interrupt - check_vector_size workaround_bpiall_vbar1_fiq_sp_el0 + check_vector_size bpiall_ret_fiq_sp_el0 -vector_entry workaround_bpiall_vbar1_serror_sp_el0 +vector_entry bpiall_ret_serror_sp_el0 b report_unhandled_exception - check_vector_size workaround_bpiall_vbar1_serror_sp_el0 + check_vector_size bpiall_ret_serror_sp_el0 /* --------------------------------------------------------------------- * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED) * --------------------------------------------------------------------- */ -vector_entry workaround_bpiall_vbar1_sync_exception_sp_elx +vector_entry bpiall_ret_sync_exception_sp_elx b report_unhandled_exception - check_vector_size workaround_bpiall_vbar1_sync_exception_sp_elx + check_vector_size bpiall_ret_sync_exception_sp_elx -vector_entry workaround_bpiall_vbar1_irq_sp_elx +vector_entry bpiall_ret_irq_sp_elx b report_unhandled_interrupt - check_vector_size workaround_bpiall_vbar1_irq_sp_elx + check_vector_size bpiall_ret_irq_sp_elx -vector_entry workaround_bpiall_vbar1_fiq_sp_elx +vector_entry bpiall_ret_fiq_sp_elx b report_unhandled_interrupt - check_vector_size workaround_bpiall_vbar1_fiq_sp_elx + check_vector_size bpiall_ret_fiq_sp_elx -vector_entry workaround_bpiall_vbar1_serror_sp_elx +vector_entry bpiall_ret_serror_sp_elx b report_unhandled_exception - check_vector_size workaround_bpiall_vbar1_serror_sp_elx + check_vector_size bpiall_ret_serror_sp_elx /* --------------------------------------------------------------------- * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED) * --------------------------------------------------------------------- */ -vector_entry workaround_bpiall_vbar1_sync_exception_aarch64 +vector_entry bpiall_ret_sync_exception_aarch64 b report_unhandled_exception - check_vector_size workaround_bpiall_vbar1_sync_exception_aarch64 + check_vector_size bpiall_ret_sync_exception_aarch64 -vector_entry workaround_bpiall_vbar1_irq_aarch64 +vector_entry bpiall_ret_irq_aarch64 b report_unhandled_interrupt - check_vector_size workaround_bpiall_vbar1_irq_aarch64 + check_vector_size bpiall_ret_irq_aarch64 -vector_entry workaround_bpiall_vbar1_fiq_aarch64 +vector_entry bpiall_ret_fiq_aarch64 b report_unhandled_interrupt - check_vector_size workaround_bpiall_vbar1_fiq_aarch64 + check_vector_size bpiall_ret_fiq_aarch64 -vector_entry workaround_bpiall_vbar1_serror_aarch64 +vector_entry bpiall_ret_serror_aarch64 b report_unhandled_exception - check_vector_size workaround_bpiall_vbar1_serror_aarch64 + check_vector_size bpiall_ret_serror_aarch64 /* 
--------------------------------------------------------------------- * Lower EL using AArch32 : 0x600 - 0x800 * --------------------------------------------------------------------- */ -vector_entry workaround_bpiall_vbar1_sync_exception_aarch32 +vector_entry bpiall_ret_sync_exception_aarch32 /* * w2 indicates which SEL1 stub was run and thus which original vector was used * w3-w6 contain saved system register state (esr_el3 in w3) @@ -281,7 +281,7 @@ vector_entry workaround_bpiall_vbar1_sync_exception_aarch32 * to workaround entry table in preparation for subsequent * Sync/IRQ/FIQ/SError exceptions. */ - adr x0, workaround_bpiall_vbar0_runtime_exceptions + adr x0, wa_cve_2017_5715_bpiall_vbar msr vbar_el3, x0 /* @@ -324,34 +324,34 @@ vector_entry workaround_bpiall_vbar1_sync_exception_aarch32 1: ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] b sync_exception_aarch64 - check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32 + check_vector_size bpiall_ret_sync_exception_aarch32 -vector_entry workaround_bpiall_vbar1_irq_aarch32 +vector_entry bpiall_ret_irq_aarch32 b report_unhandled_interrupt /* * Post-workaround fan-out for non-sync exceptions */ workaround_not_sync: - tbnz w2, #3, workaround_bpiall_vbar1_serror - tbnz w2, #2, workaround_bpiall_vbar1_fiq + tbnz w2, #3, bpiall_ret_serror + tbnz w2, #2, bpiall_ret_fiq /* IRQ */ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] b irq_aarch64 -workaround_bpiall_vbar1_fiq: +bpiall_ret_fiq: ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] b fiq_aarch64 -workaround_bpiall_vbar1_serror: +bpiall_ret_serror: ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] b serror_aarch64 - check_vector_size workaround_bpiall_vbar1_irq_aarch32 + check_vector_size bpiall_ret_irq_aarch32 -vector_entry workaround_bpiall_vbar1_fiq_aarch32 +vector_entry bpiall_ret_fiq_aarch32 b report_unhandled_interrupt - check_vector_size workaround_bpiall_vbar1_fiq_aarch32 + check_vector_size bpiall_ret_fiq_aarch32 -vector_entry workaround_bpiall_vbar1_serror_aarch32 +vector_entry bpiall_ret_serror_aarch32 b report_unhandled_exception - check_vector_size workaround_bpiall_vbar1_serror_aarch32 + check_vector_size bpiall_ret_serror_aarch32 diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S similarity index 55% rename from lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S rename to lib/cpus/aarch64/wa_cve_2017_5715_mmu.S index b24b620c8..039e373c1 100644 --- a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S +++ b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S @@ -9,13 +9,13 @@ #include #include - .globl workaround_mmu_runtime_exceptions + .globl wa_cve_2017_5715_mmu_vbar #define ESR_EL3_A64_SMC0 0x5e000000 -vector_base workaround_mmu_runtime_exceptions +vector_base wa_cve_2017_5715_mmu_vbar - .macro apply_workaround _is_sync_exception + .macro apply_cve_2017_5715_wa _is_sync_exception stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] mrs x1, sctlr_el3 /* Disable MMU */ @@ -63,86 +63,86 @@ vector_base workaround_mmu_runtime_exceptions * Current EL with SP_EL0 : 0x0 - 0x200 * --------------------------------------------------------------------- */ -vector_entry workaround_mmu_sync_exception_sp_el0 +vector_entry mmu_sync_exception_sp_el0 b sync_exception_sp_el0 - check_vector_size workaround_mmu_sync_exception_sp_el0 + check_vector_size mmu_sync_exception_sp_el0 -vector_entry workaround_mmu_irq_sp_el0 +vector_entry mmu_irq_sp_el0 b irq_sp_el0 - check_vector_size workaround_mmu_irq_sp_el0 + check_vector_size mmu_irq_sp_el0 
-vector_entry workaround_mmu_fiq_sp_el0 +vector_entry mmu_fiq_sp_el0 b fiq_sp_el0 - check_vector_size workaround_mmu_fiq_sp_el0 + check_vector_size mmu_fiq_sp_el0 -vector_entry workaround_mmu_serror_sp_el0 +vector_entry mmu_serror_sp_el0 b serror_sp_el0 - check_vector_size workaround_mmu_serror_sp_el0 + check_vector_size mmu_serror_sp_el0 /* --------------------------------------------------------------------- * Current EL with SP_ELx: 0x200 - 0x400 * --------------------------------------------------------------------- */ -vector_entry workaround_mmu_sync_exception_sp_elx +vector_entry mmu_sync_exception_sp_elx b sync_exception_sp_elx - check_vector_size workaround_mmu_sync_exception_sp_elx + check_vector_size mmu_sync_exception_sp_elx -vector_entry workaround_mmu_irq_sp_elx +vector_entry mmu_irq_sp_elx b irq_sp_elx - check_vector_size workaround_mmu_irq_sp_elx + check_vector_size mmu_irq_sp_elx -vector_entry workaround_mmu_fiq_sp_elx +vector_entry mmu_fiq_sp_elx b fiq_sp_elx - check_vector_size workaround_mmu_fiq_sp_elx + check_vector_size mmu_fiq_sp_elx -vector_entry workaround_mmu_serror_sp_elx +vector_entry mmu_serror_sp_elx b serror_sp_elx - check_vector_size workaround_mmu_serror_sp_elx + check_vector_size mmu_serror_sp_elx /* --------------------------------------------------------------------- * Lower EL using AArch64 : 0x400 - 0x600 * --------------------------------------------------------------------- */ -vector_entry workaround_mmu_sync_exception_aarch64 - apply_workaround _is_sync_exception=1 +vector_entry mmu_sync_exception_aarch64 + apply_cve_2017_5715_wa _is_sync_exception=1 b sync_exception_aarch64 - check_vector_size workaround_mmu_sync_exception_aarch64 + check_vector_size mmu_sync_exception_aarch64 -vector_entry workaround_mmu_irq_aarch64 - apply_workaround _is_sync_exception=0 +vector_entry mmu_irq_aarch64 + apply_cve_2017_5715_wa _is_sync_exception=0 b irq_aarch64 - check_vector_size workaround_mmu_irq_aarch64 + check_vector_size mmu_irq_aarch64 -vector_entry workaround_mmu_fiq_aarch64 - apply_workaround _is_sync_exception=0 +vector_entry mmu_fiq_aarch64 + apply_cve_2017_5715_wa _is_sync_exception=0 b fiq_aarch64 - check_vector_size workaround_mmu_fiq_aarch64 + check_vector_size mmu_fiq_aarch64 -vector_entry workaround_mmu_serror_aarch64 - apply_workaround _is_sync_exception=0 +vector_entry mmu_serror_aarch64 + apply_cve_2017_5715_wa _is_sync_exception=0 b serror_aarch64 - check_vector_size workaround_mmu_serror_aarch64 + check_vector_size mmu_serror_aarch64 /* --------------------------------------------------------------------- * Lower EL using AArch32 : 0x600 - 0x800 * --------------------------------------------------------------------- */ -vector_entry workaround_mmu_sync_exception_aarch32 - apply_workaround _is_sync_exception=1 +vector_entry mmu_sync_exception_aarch32 + apply_cve_2017_5715_wa _is_sync_exception=1 b sync_exception_aarch32 - check_vector_size workaround_mmu_sync_exception_aarch32 + check_vector_size mmu_sync_exception_aarch32 -vector_entry workaround_mmu_irq_aarch32 - apply_workaround _is_sync_exception=0 +vector_entry mmu_irq_aarch32 + apply_cve_2017_5715_wa _is_sync_exception=0 b irq_aarch32 - check_vector_size workaround_mmu_irq_aarch32 + check_vector_size mmu_irq_aarch32 -vector_entry workaround_mmu_fiq_aarch32 - apply_workaround _is_sync_exception=0 +vector_entry mmu_fiq_aarch32 + apply_cve_2017_5715_wa _is_sync_exception=0 b fiq_aarch32 - check_vector_size workaround_mmu_fiq_aarch32 + check_vector_size mmu_fiq_aarch32 -vector_entry 
workaround_mmu_serror_aarch32 - apply_workaround _is_sync_exception=0 +vector_entry mmu_serror_aarch32 + apply_cve_2017_5715_wa _is_sync_exception=0 b serror_aarch32 - check_vector_size workaround_mmu_serror_aarch32 + check_vector_size mmu_serror_aarch32 diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c index eb736c060..c357ebdb9 100644 --- a/services/arm_arch_svc/arm_arch_svc_setup.c +++ b/services/arm_arch_svc/arm_arch_svc_setup.c @@ -10,7 +10,7 @@ #include #include #include -#include <workaround_cve_2017_5715.h> +#include <wa_cve_2017_5715.h> static int32_t smccc_version(void) { @@ -25,7 +25,7 @@ static int32_t smccc_arch_features(u_register_t arg) return SMC_OK; #if WORKAROUND_CVE_2017_5715 case SMCCC_ARCH_WORKAROUND_1: - if (check_workaround_cve_2017_5715() == ERRATA_NOT_APPLIES) + if (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES) return 1; return 0; /* ERRATA_APPLIES || ERRATA_MISSING */ #endif From b8a25bbb0bab4e4afdbfb04bee98f0bf28141c4b Mon Sep 17 00:00:00 2001 From: Dimitris Papastamos Date: Thu, 5 Apr 2018 14:38:26 +0100 Subject: [PATCH 2/4] Implement static workaround for CVE-2018-3639 For affected CPUs, this approach enables the mitigation during EL3 initialization, following every PE reset. No mechanism is provided to disable the mitigation at runtime. This approach permanently mitigates the entire software stack and no additional mitigation code is required in other software components. TF-A implements this approach for the following affected CPUs: * Cortex-A57 and Cortex-A72, by setting bit 55 (Disable load pass store) of `CPUACTLR_EL1` (`S3_1_C15_C2_0`). * Cortex-A73, by setting bit 3 of `S3_0_C15_C0_0` (not documented in the Technical Reference Manual (TRM)). * Cortex-A75, by setting bit 35 (reserved in TRM) of `CPUACTLR_EL1` (`S3_0_C15_C1_0`). Additionally, a new SMC interface is implemented to allow software executing in lower ELs to discover whether the system is mitigated against CVE-2018-3639. Refer to "Firmware interfaces for mitigating cache speculation vulnerabilities System Software on Arm Systems"[0] for more information. [0] https://developer.arm.com/cache-speculation-vulnerability-firmware-specification Change-Id: I084aa7c3bc7c26bf2df2248301270f77bed22ceb Signed-off-by: Dimitris Papastamos --- docs/cpu-specific-build-macros.rst | 6 ++++++ include/lib/cpus/aarch64/cortex_a57.h | 1 + include/lib/cpus/aarch64/cortex_a72.h | 1 + include/lib/cpus/aarch64/cortex_a73.h | 7 +++++++ include/lib/cpus/aarch64/cortex_a75.h | 7 +++++++ include/services/arm_arch_svc.h | 3 +++ lib/cpus/aarch64/cortex_a57.S | 18 ++++++++++++++++++ lib/cpus/aarch64/cortex_a72.S | 18 ++++++++++++++++++ lib/cpus/aarch64/cortex_a73.S | 17 +++++++++++++++++ lib/cpus/aarch64/cortex_a75.S | 17 +++++++++++++++++ lib/cpus/cpu-ops.mk | 5 +++++ services/arm_arch_svc/arm_arch_svc_setup.c | 14 ++++++++++++++ 12 files changed, 114 insertions(+) diff --git a/docs/cpu-specific-build-macros.rst b/docs/cpu-specific-build-macros.rst index 65f6adb8d..a89305b7d 100644 --- a/docs/cpu-specific-build-macros.rst +++ b/docs/cpu-specific-build-macros.rst @@ -24,6 +24,12 @@ vulnerability workarounds should be applied at runtime. with the recommendation in the spec regarding workaround discovery. Defaults to 1. +- ``WORKAROUND_CVE_2018_3639``: Enables the security workaround for + `CVE-2018-3639`_. Defaults to 1. The TF-A project recommends keeping + the default value of 1 even on platforms that are unaffected by + CVE-2018-3639, in order to comply with the recommendation in the spec + regarding workaround discovery. 
+ CPU Errata Workarounds ---------------------- diff --git a/include/lib/cpus/aarch64/cortex_a57.h b/include/lib/cpus/aarch64/cortex_a57.h index 6c45c0669..83ec93426 100644 --- a/include/lib/cpus/aarch64/cortex_a57.h +++ b/include/lib/cpus/aarch64/cortex_a57.h @@ -44,6 +44,7 @@ #define CORTEX_A57_CPUACTLR_EL1 S3_1_C15_C2_0 #define CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_DMB (ULL(1) << 59) +#define CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_STORE (ULL(1) << 55) #define CORTEX_A57_CPUACTLR_EL1_GRE_NGRE_AS_NGNRE (ULL(1) << 54) #define CORTEX_A57_CPUACTLR_EL1_DIS_OVERREAD (ULL(1) << 52) #define CORTEX_A57_CPUACTLR_EL1_NO_ALLOC_WBWA (ULL(1) << 49) diff --git a/include/lib/cpus/aarch64/cortex_a72.h b/include/lib/cpus/aarch64/cortex_a72.h index 6fbb70760..9f1847061 100644 --- a/include/lib/cpus/aarch64/cortex_a72.h +++ b/include/lib/cpus/aarch64/cortex_a72.h @@ -32,6 +32,7 @@ #define CORTEX_A72_CPUACTLR_EL1 S3_1_C15_C2_0 #define CORTEX_A72_CPUACTLR_EL1_DISABLE_L1_DCACHE_HW_PFTCH (ULL(1) << 56) +#define CORTEX_A72_CPUACTLR_EL1_DIS_LOAD_PASS_STORE (ULL(1) << 55) #define CORTEX_A72_CPUACTLR_EL1_NO_ALLOC_WBWA (ULL(1) << 49) #define CORTEX_A72_CPUACTLR_EL1_DCC_AS_DCCI (ULL(1) << 44) #define CORTEX_A72_CPUACTLR_EL1_DIS_INSTR_PREFETCH (ULL(1) << 32) diff --git a/include/lib/cpus/aarch64/cortex_a73.h b/include/lib/cpus/aarch64/cortex_a73.h index faff5fef6..4db0cae23 100644 --- a/include/lib/cpus/aarch64/cortex_a73.h +++ b/include/lib/cpus/aarch64/cortex_a73.h @@ -22,4 +22,11 @@ ******************************************************************************/ #define CORTEX_A73_L2MERRSR_EL1 S3_1_C15_C2_3 /* Instruction def. */ +/******************************************************************************* + * CPU implementation defined register specific definitions. + ******************************************************************************/ +#define CORTEX_A73_IMP_DEF_REG1 S3_0_C15_C0_0 + +#define CORTEX_A73_IMP_DEF_REG1_DISABLE_LOAD_PASS_STORE (1 << 3) + #endif /* __CORTEX_A73_H__ */ diff --git a/include/lib/cpus/aarch64/cortex_a75.h b/include/lib/cpus/aarch64/cortex_a75.h index 20f02518a..493c7d472 100644 --- a/include/lib/cpus/aarch64/cortex_a75.h +++ b/include/lib/cpus/aarch64/cortex_a75.h @@ -16,6 +16,13 @@ #define CORTEX_A75_CPUPWRCTLR_EL1 S3_0_C15_C2_7 #define CORTEX_A75_CPUECTLR_EL1 S3_0_C15_C1_4 +/******************************************************************************* + * CPU Auxiliary Control register specific definitions. 
+ ******************************************************************************/ +#define CORTEX_A75_CPUACTLR_EL1 S3_0_C15_C1_0 + +#define CORTEX_A75_CPUACTLR_EL1_DISABLE_LOAD_PASS_STORE (1 << 35) + /* Definitions of register field mask in CORTEX_A75_CPUPWRCTLR_EL1 */ #define CORTEX_A75_CORE_PWRDN_EN_MASK 0x1 diff --git a/include/services/arm_arch_svc.h b/include/services/arm_arch_svc.h index 29616013a..0d2f47745 100644 --- a/include/services/arm_arch_svc.h +++ b/include/services/arm_arch_svc.h @@ -10,5 +10,8 @@ #define SMCCC_VERSION U(0x80000000) #define SMCCC_ARCH_FEATURES U(0x80000001) #define SMCCC_ARCH_WORKAROUND_1 U(0x80008000) +#define SMCCC_ARCH_WORKAROUND_2 U(0x80007FFF) + +#define SMCCC_ARCH_NOT_REQUIRED -2 #endif /* __ARM_ARCH_SVC_H__ */ diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S index 8470c6c5f..721bb49ab 100644 --- a/lib/cpus/aarch64/cortex_a57.S +++ b/lib/cpus/aarch64/cortex_a57.S @@ -337,6 +337,15 @@ func check_errata_cve_2017_5715 ret endfunc check_errata_cve_2017_5715 +func check_errata_cve_2018_3639 +#if WORKAROUND_CVE_2018_3639 + mov x0, #ERRATA_APPLIES +#else + mov x0, #ERRATA_MISSING +#endif + ret +endfunc check_errata_cve_2018_3639 + /* ------------------------------------------------- * The CPU Ops reset function for Cortex-A57. * Shall clobber: x0-x19 @@ -397,6 +406,14 @@ func cortex_a57_reset_func msr vbar_el3, x0 #endif +#if WORKAROUND_CVE_2018_3639 + mrs x0, CORTEX_A57_CPUACTLR_EL1 + orr x0, x0, #CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_STORE + msr CORTEX_A57_CPUACTLR_EL1, x0 + isb + dsb sy +#endif + /* --------------------------------------------- * Enable the SMP bit. * --------------------------------------------- @@ -528,6 +545,7 @@ func cortex_a57_errata_report report_errata ERRATA_A57_833471, cortex_a57, 833471 report_errata ERRATA_A57_859972, cortex_a57, 859972 report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715 + report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639 ldp x8, x30, [sp], #16 ret diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S index b67c98776..6ef35cfcf 100644 --- a/lib/cpus/aarch64/cortex_a72.S +++ b/lib/cpus/aarch64/cortex_a72.S @@ -110,6 +110,15 @@ func check_errata_cve_2017_5715 ret endfunc check_errata_cve_2017_5715 +func check_errata_cve_2018_3639 +#if WORKAROUND_CVE_2018_3639 + mov x0, #ERRATA_APPLIES +#else + mov x0, #ERRATA_MISSING +#endif + ret +endfunc check_errata_cve_2018_3639 + /* ------------------------------------------------- * The CPU Ops reset function for Cortex-A72. * ------------------------------------------------- @@ -131,6 +140,14 @@ func cortex_a72_reset_func 1: #endif +#if WORKAROUND_CVE_2018_3639 + mrs x0, CORTEX_A72_CPUACTLR_EL1 + orr x0, x0, #CORTEX_A72_CPUACTLR_EL1_DIS_LOAD_PASS_STORE + msr CORTEX_A72_CPUACTLR_EL1, x0 + isb + dsb sy +#endif + /* --------------------------------------------- * Enable the SMP bit. 
* --------------------------------------------- @@ -265,6 +282,7 @@ func cortex_a72_errata_report */ report_errata ERRATA_A72_859971, cortex_a72, 859971 report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715 + report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639 ldp x8, x30, [sp], #16 ret diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S index c66067d70..2dbd515f8 100644 --- a/lib/cpus/aarch64/cortex_a73.S +++ b/lib/cpus/aarch64/cortex_a73.S @@ -43,6 +43,13 @@ func cortex_a73_reset_func 1: #endif +#if WORKAROUND_CVE_2018_3639 + mrs x0, CORTEX_A73_IMP_DEF_REG1 + orr x0, x0, #CORTEX_A73_IMP_DEF_REG1_DISABLE_LOAD_PASS_STORE + msr CORTEX_A73_IMP_DEF_REG1, x0 + isb +#endif + /* --------------------------------------------- * Enable the SMP bit. * Clobbers : x0 @@ -129,6 +136,15 @@ func check_errata_cve_2017_5715 ret endfunc check_errata_cve_2017_5715 +func check_errata_cve_2018_3639 +#if WORKAROUND_CVE_2018_3639 + mov x0, #ERRATA_APPLIES +#else + mov x0, #ERRATA_MISSING +#endif + ret +endfunc check_errata_cve_2018_3639 + #if REPORT_ERRATA /* * Errata printing function for Cortex A75. Must follow AAPCS. @@ -144,6 +160,7 @@ func cortex_a73_errata_report * checking functions of each errata. */ report_errata WORKAROUND_CVE_2017_5715, cortex_a73, cve_2017_5715 + report_errata WORKAROUND_CVE_2018_3639, cortex_a73, cve_2018_3639 ldp x8, x30, [sp], #16 ret diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S index f92e4ed02..9cc2c01ed 100644 --- a/lib/cpus/aarch64/cortex_a75.S +++ b/lib/cpus/aarch64/cortex_a75.S @@ -18,6 +18,13 @@ func cortex_a75_reset_func 1: #endif +#if WORKAROUND_CVE_2018_3639 + mrs x0, CORTEX_A75_CPUACTLR_EL1 + orr x0, x0, #CORTEX_A75_CPUACTLR_EL1_DISABLE_LOAD_PASS_STORE + msr CORTEX_A75_CPUACTLR_EL1, x0 + isb +#endif + #if ENABLE_AMU /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */ mrs x0, actlr_el3 @@ -57,6 +64,15 @@ func check_errata_cve_2017_5715 ret endfunc check_errata_cve_2017_5715 +func check_errata_cve_2018_3639 +#if WORKAROUND_CVE_2018_3639 + mov x0, #ERRATA_APPLIES +#else + mov x0, #ERRATA_MISSING +#endif + ret +endfunc check_errata_cve_2018_3639 + /* --------------------------------------------- * HW will do the cache maintenance while powering down * --------------------------------------------- @@ -88,6 +104,7 @@ func cortex_a75_errata_report * checking functions of each errata. */ report_errata WORKAROUND_CVE_2017_5715, cortex_a75, cve_2017_5715 + report_errata WORKAROUND_CVE_2018_3639, cortex_a75, cve_2018_3639 ldp x8, x30, [sp], #16 ret diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk index 3ba8c1fcc..31cd837be 100644 --- a/lib/cpus/cpu-ops.mk +++ b/lib/cpus/cpu-ops.mk @@ -17,6 +17,7 @@ A53_DISABLE_NON_TEMPORAL_HINT ?=1 A57_DISABLE_NON_TEMPORAL_HINT ?=1 WORKAROUND_CVE_2017_5715 ?=1 +WORKAROUND_CVE_2018_3639 ?=1 # Process SKIP_A57_L1_FLUSH_PWR_DWN flag $(eval $(call assert_boolean,SKIP_A57_L1_FLUSH_PWR_DWN)) @@ -34,6 +35,10 @@ $(eval $(call add_define,A57_DISABLE_NON_TEMPORAL_HINT)) $(eval $(call assert_boolean,WORKAROUND_CVE_2017_5715)) $(eval $(call add_define,WORKAROUND_CVE_2017_5715)) +# Process WORKAROUND_CVE_2018_3639 flag +$(eval $(call assert_boolean,WORKAROUND_CVE_2018_3639)) +$(eval $(call add_define,WORKAROUND_CVE_2018_3639)) + # CPU Errata Build flags. # These should be enabled by the platform if the erratum workaround needs to be # applied. 
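Note: with the static approach, the SMCCC_ARCH_FEATURES query for SMCCC_ARCH_WORKAROUND_2 (wired up in the next diff) reports SMCCC_ARCH_NOT_REQUIRED, telling lower ELs that no per-context mitigation calls are needed. A minimal sketch of the discovery sequence a lower-EL OS might perform, assuming a hypothetical smc_call() conduit helper that is not part of this series:

	#define SMCCC_VERSION			0x80000000U
	#define SMCCC_ARCH_FEATURES		0x80000001U
	#define SMCCC_ARCH_WORKAROUND_2		0x80007FFFU
	#define SMCCC_ARCH_NOT_REQUIRED		(-2)

	/* Hypothetical OS-provided conduit: function id in w0, argument in w1. */
	extern long smc_call(unsigned long fid, unsigned long arg1);

	static long cve_2018_3639_discovery(void)
	{
		/*
		 * SMCCC_ARCH_FEATURES only exists from SMCCC v1.1 (0x10001) on;
		 * v1.0 firmware returns SMC_UNK (-1) to SMCCC_VERSION itself,
		 * so this check also rejects such firmware.
		 */
		if (smc_call(SMCCC_VERSION, 0) < 0x10001)
			return -1;	/* cannot discover; assume unmitigated */

		/*
		 * Expected results:
		 *   SMCCC_ARCH_NOT_REQUIRED (-2): PE unaffected, or (as with
		 *   this patch) permanently mitigated by firmware at reset.
		 *   SMC_UNK (-1): firmware predates the workaround interface.
		 *   0: PE relies on SMCCC_ARCH_WORKAROUND_2 calls; 1: the call
		 *   exists but this PE does not need it (both return values are
		 *   introduced by the dynamic mitigation patch in this series).
		 */
		return smc_call(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_2);
	}

Reporting the distinct SMCCC_ARCH_NOT_REQUIRED code instead of SMC_UNK lets callers tell "nothing to do" apart from "firmware too old to say".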
diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c index c357ebdb9..6089cf6ac 100644 --- a/services/arm_arch_svc/arm_arch_svc_setup.c +++ b/services/arm_arch_svc/arm_arch_svc_setup.c @@ -28,6 +28,10 @@ static int32_t smccc_arch_features(u_register_t arg) if (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES) return 1; return 0; /* ERRATA_APPLIES || ERRATA_MISSING */ +#endif +#if WORKAROUND_CVE_2018_3639 + case SMCCC_ARCH_WORKAROUND_2: + return SMCCC_ARCH_NOT_REQUIRED; #endif default: return SMC_UNK; @@ -59,6 +63,16 @@ static uintptr_t arm_arch_svc_smc_handler(uint32_t smc_fid, * has no effect. */ SMC_RET0(handle); +#endif +#if WORKAROUND_CVE_2018_3639 + case SMCCC_ARCH_WORKAROUND_2: + /* + * The workaround has already been applied on affected PEs + * requiring dynamic mitigation during entry to EL3. + * On unaffected or statically mitigated PEs, this function + * has no effect. + */ + SMC_RET0(handle); #endif default: WARN("Unimplemented Arm Architecture Service Call: 0x%x \n", From e0865708155826a70e2199a54cab8e90e8d07a32 Mon Sep 17 00:00:00 2001 From: Dimitris Papastamos Date: Thu, 17 May 2018 14:41:13 +0100 Subject: [PATCH 3/4] aarch32: Implement static workaround for CVE-2018-3639 Implement static mitigation for CVE-2018-3639 on Cortex A57 and A72. Change-Id: I83409a16238729b84142b19e258c23737cc1ddc3 Signed-off-by: Dimitris Papastamos --- include/lib/cpus/aarch32/cortex_a57.h | 1 + include/lib/cpus/aarch32/cortex_a72.h | 1 + lib/cpus/aarch32/cortex_a57.S | 18 ++++++++++++++++++ lib/cpus/aarch32/cortex_a72.S | 19 +++++++++++++++++++ 4 files changed, 39 insertions(+) diff --git a/include/lib/cpus/aarch32/cortex_a57.h b/include/lib/cpus/aarch32/cortex_a57.h index 3fac9c7be..18cabe11b 100644 --- a/include/lib/cpus/aarch32/cortex_a57.h +++ b/include/lib/cpus/aarch32/cortex_a57.h @@ -44,6 +44,7 @@ #define CORTEX_A57_CPUACTLR p15, 0, c15 #define CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_DMB (ULL(1) << 59) +#define CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_STORE (ULL(1) << 55) #define CORTEX_A57_CPUACTLR_GRE_NGRE_AS_NGNRE (ULL(1) << 54) #define CORTEX_A57_CPUACTLR_DIS_OVERREAD (ULL(1) << 52) #define CORTEX_A57_CPUACTLR_NO_ALLOC_WBWA (ULL(1) << 49) diff --git a/include/lib/cpus/aarch32/cortex_a72.h b/include/lib/cpus/aarch32/cortex_a72.h index f7da1f013..0331ace7c 100644 --- a/include/lib/cpus/aarch32/cortex_a72.h +++ b/include/lib/cpus/aarch32/cortex_a72.h @@ -32,6 +32,7 @@ #define CORTEX_A72_CPUACTLR p15, 0, c15 #define CORTEX_A72_CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH (ULL(1) << 56) +#define CORTEX_A72_CPUACTLR_DIS_LOAD_PASS_STORE (ULL(1) << 55) #define CORTEX_A72_CPUACTLR_NO_ALLOC_WBWA (ULL(1) << 49) #define CORTEX_A72_CPUACTLR_DCC_AS_DCCI (ULL(1) << 44) #define CORTEX_A72_CPUACTLR_DIS_INSTR_PREFETCH (ULL(1) << 32) diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S index f446bfffa..dff86be76 100644 --- a/lib/cpus/aarch32/cortex_a57.S +++ b/lib/cpus/aarch32/cortex_a57.S @@ -337,6 +337,15 @@ func check_errata_cve_2017_5715 bx lr endfunc check_errata_cve_2017_5715 +func check_errata_cve_2018_3639 +#if WORKAROUND_CVE_2018_3639 + mov r0, #ERRATA_APPLIES +#else + mov r0, #ERRATA_MISSING +#endif + bx lr +endfunc check_errata_cve_2018_3639 + /* ------------------------------------------------- * The CPU Ops reset function for Cortex-A57. 
* Shall clobber: r0-r6 @@ -392,6 +401,14 @@ func cortex_a57_reset_func bl errata_a57_859972_wa #endif +#if WORKAROUND_CVE_2018_3639 + ldcopr16 r0, r1, CORTEX_A57_CPUACTLR + orr64_imm r0, r1, CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_STORE + stcopr16 r0, r1, CORTEX_A57_CPUACTLR + isb + dsb sy +#endif + /* --------------------------------------------- * Enable the SMP bit. * --------------------------------------------- @@ -525,6 +542,7 @@ func cortex_a57_errata_report report_errata ERRATA_A57_833471, cortex_a57, 833471 report_errata ERRATA_A57_859972, cortex_a57, 859972 report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715 + report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639 pop {r12, lr} bx lr diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S index 56e91f5c0..3bc3388bc 100644 --- a/lib/cpus/aarch32/cortex_a72.S +++ b/lib/cpus/aarch32/cortex_a72.S @@ -92,6 +92,15 @@ func check_errata_cve_2017_5715 bx lr endfunc check_errata_cve_2017_5715 +func check_errata_cve_2018_3639 +#if WORKAROUND_CVE_2018_3639 + mov r0, #ERRATA_APPLIES +#else + mov r0, #ERRATA_MISSING +#endif + bx lr +endfunc check_errata_cve_2018_3639 + /* ------------------------------------------------- * The CPU Ops reset function for Cortex-A72. * ------------------------------------------------- @@ -105,6 +114,15 @@ func cortex_a72_reset_func mov r0, r4 bl errata_a72_859971_wa #endif + +#if WORKAROUND_CVE_2018_3639 + ldcopr16 r0, r1, CORTEX_A72_CPUACTLR + orr64_imm r0, r1, CORTEX_A72_CPUACTLR_DIS_LOAD_PASS_STORE + stcopr16 r0, r1, CORTEX_A72_CPUACTLR + isb + dsb sy +#endif + /* --------------------------------------------- * Enable the SMP bit. * --------------------------------------------- @@ -241,6 +259,7 @@ func cortex_a72_errata_report */ report_errata ERRATA_A72_859971, cortex_a72, 859971 report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715 + report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639 pop {r12, lr} bx lr From fe007b2e15ec7b569c07fedbd9bfccb5ed742eec Mon Sep 17 00:00:00 2001 From: Dimitris Papastamos Date: Wed, 16 May 2018 11:36:14 +0100 Subject: [PATCH 4/4] Add support for dynamic mitigation for CVE-2018-3639 Some CPUs may benefit from using a dynamic mitigation approach for CVE-2018-3639. A new SMC interface is defined to allow software executing in lower ELs to enable or disable the mitigation for their execution context. It should be noted that regardless of the state of the mitigation for lower ELs, code executing in EL3 is always mitigated against CVE-2018-3639. NOTE: This change is a compatibility break for any platform using the declare_cpu_ops_workaround_cve_2017_5715 macro. Migrate to the declare_cpu_ops_wa macro instead. 
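For example, this patch migrates the in-tree Cortex-A57 declaration as follows; statically mitigated CPUs pass CPU_NO_EXTRA2_FUNC in the new _extra2 slot, while CPUs opting in to dynamic mitigation supply their disable function there:

	-declare_cpu_ops_workaround_cve_2017_5715 cortex_a57, CORTEX_A57_MIDR, \
	-	cortex_a57_reset_func, \
	-	check_errata_cve_2017_5715, \
	-	cortex_a57_core_pwr_dwn, \
	-	cortex_a57_cluster_pwr_dwn
	+declare_cpu_ops_wa cortex_a57, CORTEX_A57_MIDR, \
	+	cortex_a57_reset_func, \
	+	check_errata_cve_2017_5715, \
	+	CPU_NO_EXTRA2_FUNC, \
	+	cortex_a57_core_pwr_dwn, \
	+	cortex_a57_cluster_pwr_dwn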
Change-Id: I3509a9337ad217bbd96de9f380c4ff8bf7917013 Signed-off-by: Dimitris Papastamos --- docs/cpu-specific-build-macros.rst | 5 +++++ include/lib/cpus/aarch64/cpu_macros.S | 21 ++++++++++++++----- include/lib/cpus/wa_cve_2018_3639.h | 12 +++++++++++ include/lib/el3_runtime/aarch64/context.h | 15 +++++++++++++- lib/cpus/aarch64/cortex_a57.S | 3 ++- lib/cpus/aarch64/cortex_a72.S | 3 ++- lib/cpus/aarch64/cortex_a73.S | 3 ++- lib/cpus/aarch64/cortex_a75.S | 3 ++- lib/cpus/aarch64/cpu_helpers.S | 24 ++++++++++++++++++++++ lib/cpus/cpu-ops.mk | 10 +++++++++ lib/el3_runtime/aarch64/context.S | 9 ++++++++ services/arm_arch_svc/arm_arch_svc_setup.c | 18 ++++++++++++++++ 12 files changed, 116 insertions(+), 10 deletions(-) create mode 100644 include/lib/cpus/wa_cve_2018_3639.h diff --git a/docs/cpu-specific-build-macros.rst b/docs/cpu-specific-build-macros.rst index a89305b7d..c11f64039 100644 --- a/docs/cpu-specific-build-macros.rst +++ b/docs/cpu-specific-build-macros.rst @@ -30,6 +30,11 @@ vulnerability workarounds should be applied at runtime. CVE-2018-3639, in order to comply with the recommendation in the spec regarding workaround discovery. +- ``DYNAMIC_WORKAROUND_CVE_2018_3639``: Enables dynamic mitigation for + `CVE-2018-3639`_. This build option should be set to 1 if the target + platform contains at least 1 CPU that requires dynamic mitigation. + Defaults to 0. + CPU Errata Workarounds ---------------------- diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S index bfe2449e9..cd8f3e8fb 100644 --- a/include/lib/cpus/aarch64/cpu_macros.S +++ b/include/lib/cpus/aarch64/cpu_macros.S @@ -18,6 +18,9 @@ /* Special constant to specify that CPU has no reset function */ #define CPU_NO_RESET_FUNC 0 +#define CPU_NO_EXTRA1_FUNC 0 +#define CPU_NO_EXTRA2_FUNC 0 + /* Word size for 64-bit CPUs */ #define CPU_WORD_SIZE 8 @@ -48,6 +51,8 @@ CPU_RESET_FUNC: /* cpu_ops reset_func */ #endif CPU_EXTRA1_FUNC: .space 8 +CPU_EXTRA2_FUNC: + .space 8 #ifdef IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */ CPU_PWR_DWN_OPS: /* cpu_ops power down functions */ .space (8 * CPU_MAX_PWR_DWN_OPS) @@ -119,6 +124,10 @@ CPU_OPS_SIZE = . * This is a placeholder for future per CPU operations. Currently, * some CPUs use this entry to set a test function to determine if * the workaround for CVE-2017-5715 needs to be applied or not. + * _extra2: + * This is a placeholder for future per CPU operations. Currently + * some CPUs use this entry to set a function to disable the + * workaround for CVE-2018-3639. * _power_down_ops: * Comma-separated list of functions to perform power-down * operatios on the CPU. At least one, and up to @@ -129,7 +138,7 @@ CPU_OPS_SIZE = . * used to handle power down at subsequent levels */ .macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \ - _extra1:req, _power_down_ops:vararg + _extra1:req, _extra2:req, _power_down_ops:vararg .section cpu_ops, "a" .align 3 .type cpu_ops_\_name, %object @@ -138,6 +147,7 @@ CPU_OPS_SIZE = . .quad \_resetfunc #endif .quad \_extra1 + .quad \_extra2 #ifdef IMAGE_BL31 1: /* Insert list of functions */ @@ -196,14 +206,15 @@ CPU_OPS_SIZE = . 
.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \ _power_down_ops:vararg - declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, \ + declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, \ \_power_down_ops .endm - .macro declare_cpu_ops_workaround_cve_2017_5715 _name:req, _midr:req, \ - _resetfunc:req, _extra1:req, _power_down_ops:vararg + .macro declare_cpu_ops_wa _name:req, _midr:req, \ + _resetfunc:req, _extra1:req, _extra2:req, \ + _power_down_ops:vararg declare_cpu_ops_base \_name, \_midr, \_resetfunc, \ - \_extra1, \_power_down_ops + \_extra1, \_extra2, \_power_down_ops .endm #if REPORT_ERRATA diff --git a/include/lib/cpus/wa_cve_2018_3639.h b/include/lib/cpus/wa_cve_2018_3639.h new file mode 100644 index 000000000..36546f70d --- /dev/null +++ b/include/lib/cpus/wa_cve_2018_3639.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __WA_CVE_2018_3639_H__ +#define __WA_CVE_2018_3639_H__ + +void *wa_cve_2018_3639_get_disable_ptr(void); + +#endif /* __WA_CVE_2018_3639_H__ */ diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h index cdd74a34c..a4f3ea1bb 100644 --- a/include/lib/el3_runtime/aarch64/context.h +++ b/include/lib/el3_runtime/aarch64/context.h @@ -128,8 +128,8 @@ * Constants that allow assembler code to access members of and the 'fp_regs' * structure at their correct offsets. ******************************************************************************/ -#if CTX_INCLUDE_FPREGS #define CTX_FPREGS_OFFSET (CTX_SYSREGS_OFFSET + CTX_SYSREGS_END) +#if CTX_INCLUDE_FPREGS #define CTX_FP_Q0 U(0x0) #define CTX_FP_Q1 U(0x10) #define CTX_FP_Q2 U(0x20) @@ -170,8 +170,14 @@ #else #define CTX_FPREGS_END U(0x210) /* Align to the next 16 byte boundary */ #endif +#else +#define CTX_FPREGS_END U(0) #endif +#define CTX_CVE_2018_3639_OFFSET (CTX_FPREGS_OFFSET + CTX_FPREGS_END) +#define CTX_CVE_2018_3639_DISABLE U(0) +#define CTX_CVE_2018_3639_END U(0x10) /* Align to the next 16 byte boundary */ + #ifndef __ASSEMBLY__ #include @@ -195,6 +201,7 @@ #define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT) #endif #define CTX_EL3STATE_ALL (CTX_EL3STATE_END >> DWORD_SHIFT) +#define CTX_CVE_2018_3639_ALL (CTX_CVE_2018_3639_END >> DWORD_SHIFT) /* * AArch64 general purpose register context structure. 
Usually x0-x18, @@ -227,6 +234,9 @@ DEFINE_REG_STRUCT(fp_regs, CTX_FPREG_ALL); */ DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL); +/* Function pointer used by CVE-2018-3639 dynamic mitigation */ +DEFINE_REG_STRUCT(cve_2018_3639, CTX_CVE_2018_3639_ALL); + /* * Macros to access members of any of the above structures using their * offsets @@ -251,6 +261,7 @@ typedef struct cpu_context { #if CTX_INCLUDE_FPREGS fp_regs_t fpregs_ctx; #endif + cve_2018_3639_t cve_2018_3639_ctx; } cpu_context_t; /* Macros to access members of the 'cpu_context_t' structure */ @@ -276,6 +287,8 @@ CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx), \ #endif CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx), \ assert_core_context_el3state_offset_mismatch); +CASSERT(CTX_CVE_2018_3639_OFFSET == __builtin_offsetof(cpu_context_t, cve_2018_3639_ctx), \ + assert_core_context_cve_2018_3639_offset_mismatch); /* * Helper macro to set the general purpose registers that correspond to diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S index 721bb49ab..07fadd154 100644 --- a/lib/cpus/aarch64/cortex_a57.S +++ b/lib/cpus/aarch64/cortex_a57.S @@ -573,8 +573,9 @@ func cortex_a57_cpu_reg_dump ret endfunc cortex_a57_cpu_reg_dump -declare_cpu_ops_workaround_cve_2017_5715 cortex_a57, CORTEX_A57_MIDR, \ +declare_cpu_ops_wa cortex_a57, CORTEX_A57_MIDR, \ cortex_a57_reset_func, \ check_errata_cve_2017_5715, \ + CPU_NO_EXTRA2_FUNC, \ cortex_a57_core_pwr_dwn, \ cortex_a57_cluster_pwr_dwn diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S index 6ef35cfcf..bb9381d17 100644 --- a/lib/cpus/aarch64/cortex_a72.S +++ b/lib/cpus/aarch64/cortex_a72.S @@ -310,8 +310,9 @@ func cortex_a72_cpu_reg_dump ret endfunc cortex_a72_cpu_reg_dump -declare_cpu_ops_workaround_cve_2017_5715 cortex_a72, CORTEX_A72_MIDR, \ +declare_cpu_ops_wa cortex_a72, CORTEX_A72_MIDR, \ cortex_a72_reset_func, \ check_errata_cve_2017_5715, \ + CPU_NO_EXTRA2_FUNC, \ cortex_a72_core_pwr_dwn, \ cortex_a72_cluster_pwr_dwn diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S index 2dbd515f8..d595f128f 100644 --- a/lib/cpus/aarch64/cortex_a73.S +++ b/lib/cpus/aarch64/cortex_a73.S @@ -187,8 +187,9 @@ func cortex_a73_cpu_reg_dump ret endfunc cortex_a73_cpu_reg_dump -declare_cpu_ops_workaround_cve_2017_5715 cortex_a73, CORTEX_A73_MIDR, \ +declare_cpu_ops_wa cortex_a73, CORTEX_A73_MIDR, \ cortex_a73_reset_func, \ check_errata_cve_2017_5715, \ + CPU_NO_EXTRA2_FUNC, \ cortex_a73_core_pwr_dwn, \ cortex_a73_cluster_pwr_dwn diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S index 9cc2c01ed..20ec32ce2 100644 --- a/lib/cpus/aarch64/cortex_a75.S +++ b/lib/cpus/aarch64/cortex_a75.S @@ -130,7 +130,8 @@ func cortex_a75_cpu_reg_dump ret endfunc cortex_a75_cpu_reg_dump -declare_cpu_ops_workaround_cve_2017_5715 cortex_a75, CORTEX_A75_MIDR, \ +declare_cpu_ops_wa cortex_a75, CORTEX_A75_MIDR, \ cortex_a75_reset_func, \ check_errata_cve_2017_5715, \ + CPU_NO_EXTRA2_FUNC, \ cortex_a75_core_pwr_dwn diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S index 78c66e652..69ece8fff 100644 --- a/lib/cpus/aarch64/cpu_helpers.S +++ b/lib/cpus/aarch64/cpu_helpers.S @@ -316,3 +316,27 @@ func check_wa_cve_2017_5715 mov x0, #ERRATA_NOT_APPLIES ret endfunc check_wa_cve_2017_5715 + +/* + * void *wa_cve_2018_3639_get_disable_ptr(void); + * + * Returns a function pointer which is used to disable mitigation + * for CVE-2018-3639. 
+ * The function pointer is only returned on cores that employ + * dynamic mitigation. If the core uses static mitigation or is + * unaffected by CVE-2018-3639 this function returns NULL. + * + * NOTE: Must be called only after cpu_ops have been initialized + * in per-CPU data. + */ + .globl wa_cve_2018_3639_get_disable_ptr +func wa_cve_2018_3639_get_disable_ptr + mrs x0, tpidr_el3 +#if ENABLE_ASSERTIONS + cmp x0, #0 + ASM_ASSERT(ne) +#endif + ldr x0, [x0, #CPU_DATA_CPU_OPS_PTR] + ldr x0, [x0, #CPU_EXTRA2_FUNC] + ret +endfunc wa_cve_2018_3639_get_disable_ptr diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk index 31cd837be..434c13ea0 100644 --- a/lib/cpus/cpu-ops.mk +++ b/lib/cpus/cpu-ops.mk @@ -18,6 +18,7 @@ A57_DISABLE_NON_TEMPORAL_HINT ?=1 WORKAROUND_CVE_2017_5715 ?=1 WORKAROUND_CVE_2018_3639 ?=1 +DYNAMIC_WORKAROUND_CVE_2018_3639 ?=0 # Process SKIP_A57_L1_FLUSH_PWR_DWN flag $(eval $(call assert_boolean,SKIP_A57_L1_FLUSH_PWR_DWN)) @@ -39,6 +40,15 @@ $(eval $(call add_define,WORKAROUND_CVE_2017_5715)) $(eval $(call assert_boolean,WORKAROUND_CVE_2018_3639)) $(eval $(call add_define,WORKAROUND_CVE_2018_3639)) +$(eval $(call assert_boolean,DYNAMIC_WORKAROUND_CVE_2018_3639)) +$(eval $(call add_define,DYNAMIC_WORKAROUND_CVE_2018_3639)) + +ifneq (${DYNAMIC_WORKAROUND_CVE_2018_3639},0) + ifeq (${WORKAROUND_CVE_2018_3639},0) + $(error "Error: WORKAROUND_CVE_2018_3639 must be 1 if DYNAMIC_WORKAROUND_CVE_2018_3639 is 1") + endif +endif + # CPU Errata Build flags. # These should be enabled by the platform if the erratum workaround needs to be # applied. diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S index 121ca4d30..707e6dbd4 100644 --- a/lib/el3_runtime/aarch64/context.S +++ b/lib/el3_runtime/aarch64/context.S @@ -404,6 +404,15 @@ func el3_exit msr spsr_el3, x16 msr elr_el3, x17 +#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639 + /* Restore mitigation state as it was on entry to EL3 */ + ldr x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE] + cmp x17, xzr + beq 1f + blr x17 +#endif + +1: /* Restore saved general purpose registers and return */ b restore_gp_registers_eret endfunc el3_exit diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c index 6089cf6ac..45c4704ee 100644 --- a/services/arm_arch_svc/arm_arch_svc_setup.c +++ b/services/arm_arch_svc/arm_arch_svc_setup.c @@ -11,6 +11,7 @@ #include #include #include +#include <wa_cve_2018_3639.h> static int32_t smccc_version(void) { @@ -31,7 +32,24 @@ static int32_t smccc_arch_features(u_register_t arg) #endif #if WORKAROUND_CVE_2018_3639 case SMCCC_ARCH_WORKAROUND_2: +#if DYNAMIC_WORKAROUND_CVE_2018_3639 + /* + * On a platform where at least one CPU requires + * dynamic mitigation but others are either unaffected + * or permanently mitigated, report the latter as not + * needing dynamic mitigation. + */ + if (wa_cve_2018_3639_get_disable_ptr() == NULL) + return 1; + /* + * If we get here, this CPU requires dynamic mitigation + * so report it as such. + */ + return 0; +#else + /* Either the CPUs are unaffected or permanently mitigated */ return SMCCC_ARCH_NOT_REQUIRED; +#endif #endif default: return SMC_UNK;
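As a usage sketch of the caller's side of the dynamic mitigation (illustrative only: the W1 argument semantics follow the firmware interfaces specification referenced in patch 2, and smc_call() is again a hypothetical OS-provided conduit helper, not part of this series):

	#define SMCCC_ARCH_WORKAROUND_2		0x80007FFFU

	/* Hypothetical conduit: function id in w0, argument in w1. */
	extern long smc_call(unsigned long fid, unsigned long arg1);

	/* Run an untrusted function with the CVE-2018-3639 mitigation enabled. */
	static void run_mitigated(void (*untrusted_fn)(void))
	{
		smc_call(SMCCC_ARCH_WORKAROUND_2, 1);	/* w1 == 1: enable mitigation */
		untrusted_fn();
		smc_call(SMCCC_ARCH_WORKAROUND_2, 0);	/* w1 == 0: disable mitigation */
	}

On unaffected or statically mitigated PEs the call is a harmless no-op, as the handler comments above note, so a caller that skips the SMCCC_ARCH_FEATURES discovery step still gets correct, if slightly slower, behaviour.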