arm-trusted-firmware/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S


/*
 * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>
#include <services/arm_arch_svc.h>

	.globl	wa_cve_2017_5715_mmu_vbar

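/*
 * ESR_EL3 syndrome values for an SMC #0 taken from AArch64 (EC = 0x17)
 * and from AArch32 (EC = 0x13), each with the IL bit set and a zero ISS.
 */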
#define ESR_EL3_A64_SMC0 0x5e000000
#define ESR_EL3_A32_SMC0 0x4e000000
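
/*
 * This vector table applies the CVE-2017-5715 (Spectre variant 2) workaround
 * on every entry to EL3 from a lower EL: toggling the MMU enable bit in
 * SCTLR_EL3 invalidates the branch predictor on the CPUs that install this
 * vector table, so branch targets trained by lower-EL code cannot steer
 * speculation at EL3.
 */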
vector_base wa_cve_2017_5715_mmu_vbar
	.macro	apply_cve_2017_5715_wa _is_sync_exception _esr_el3_val
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	mrs	x1, sctlr_el3
	/* Disable MMU */
	bic	x1, x1, #SCTLR_M_BIT
	msr	sctlr_el3, x1
	isb
	/* Enable MMU */
	orr	x1, x1, #SCTLR_M_BIT
	msr	sctlr_el3, x1
	/*
	 * Defer ISB to avoid synchronizing twice in case we hit
	 * the workaround SMC call which will implicitly synchronize
	 * because of the ERET instruction.
	 */

	/*
	 * Ensure SMC is coming from A64/A32 state on #0
	 * with W0 = SMCCC_ARCH_WORKAROUND_1 or W0 = SMCCC_ARCH_WORKAROUND_3
	 *
	 * This sequence evaluates as:
	 *	(W0==SMCCC_ARCH_WORKAROUND_1) || (W0==SMCCC_ARCH_WORKAROUND_3) ?
	 *	(ESR_EL3==SMC#0) : (NE)
	 * allowing use of a single branch operation
	 */
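	/*
	 * Each CCMP immediate is the NZCV value used when its condition is
	 * false: #4 sets Z (EQ), preserving a match on SMCCC_ARCH_WORKAROUND_1
	 * across the second compare; #0 clears the flags (NE), so any other
	 * function ID makes the BNE below skip the early return.
	 */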
	.if \_is_sync_exception
	orr	w1, wzr, #SMCCC_ARCH_WORKAROUND_1
	cmp	w0, w1
	orr	w1, wzr, #SMCCC_ARCH_WORKAROUND_3
	ccmp	w0, w1, #4, ne
	mrs	x0, esr_el3
	mov_imm	w1, \_esr_el3_val
	ccmp	w0, w1, #0, eq
	/* Static predictor will predict a fall through */
	bne	1f
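	/*
	 * Return using exception_return rather than a bare ERET, so that a
	 * speculation barrier follows the ERET; CPUs can otherwise
	 * speculatively execute the instructions placed after an ERET.
	 */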
	exception_return
1:
	.endif

	/*
	 * Synchronize now to enable the MMU. This is required
	 * to ensure the load pair below reads the data stored earlier.
	 */
	isb
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	.endm

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
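	/*
	 * The workaround is only applied on entry from a lower EL; exceptions
	 * taken from EL3 itself just branch to the default runtime handlers.
	 */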
vector_entry mmu_sync_exception_sp_el0
	b	sync_exception_sp_el0
end_vector_entry mmu_sync_exception_sp_el0

vector_entry mmu_irq_sp_el0
	b	irq_sp_el0
end_vector_entry mmu_irq_sp_el0

vector_entry mmu_fiq_sp_el0
	b	fiq_sp_el0
end_vector_entry mmu_fiq_sp_el0

vector_entry mmu_serror_sp_el0
	b	serror_sp_el0
end_vector_entry mmu_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry mmu_sync_exception_sp_elx
	b	sync_exception_sp_elx
end_vector_entry mmu_sync_exception_sp_elx

vector_entry mmu_irq_sp_elx
	b	irq_sp_elx
end_vector_entry mmu_irq_sp_elx

vector_entry mmu_fiq_sp_elx
	b	fiq_sp_elx
end_vector_entry mmu_fiq_sp_elx

vector_entry mmu_serror_sp_elx
	b	serror_sp_elx
end_vector_entry mmu_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry mmu_sync_exception_aarch64
	apply_cve_2017_5715_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
	b	sync_exception_aarch64
end_vector_entry mmu_sync_exception_aarch64

vector_entry mmu_irq_aarch64
	apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	irq_aarch64
end_vector_entry mmu_irq_aarch64

vector_entry mmu_fiq_aarch64
	apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	fiq_aarch64
end_vector_entry mmu_fiq_aarch64

vector_entry mmu_serror_aarch64
	apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	serror_aarch64
end_vector_entry mmu_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry mmu_sync_exception_aarch32
	apply_cve_2017_5715_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
	b	sync_exception_aarch32
end_vector_entry mmu_sync_exception_aarch32

vector_entry mmu_irq_aarch32
	apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	irq_aarch32
end_vector_entry mmu_irq_aarch32

vector_entry mmu_fiq_aarch32
	apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	fiq_aarch32
end_vector_entry mmu_fiq_aarch32

vector_entry mmu_serror_aarch32
	apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	serror_aarch32
end_vector_entry mmu_serror_aarch32