From ef653d93ccd6ba1888c61706469021fc623c3318 Mon Sep 17 00:00:00 2001
From: Jeenu Viswambharan
Date: Wed, 29 Nov 2017 16:59:34 +0000
Subject: [PATCH] AArch64: Refactor GP register restore to separate function

At present, the function that restores general purpose registers also
does ERET. Refactor the restore code so that it restores general
purpose registers without ERET, to complement the save function.

The macro save_x4_to_x29_sp_el0 was used only once, and is therefore
removed, and its contents expanded inline for readability.

No functional changes, but with this patch:

  - The SMC return path will incur a branch-return and an additional
    register load.

  - The unknown SMC path restores registers x0 to x3.

Change-Id: I7a1a63e17f34f9cde810685d70a0ad13ca3b7c50
Signed-off-by: Jeenu Viswambharan
---
 bl31/aarch64/runtime_exceptions.S | 45 +++++++++++++------------------
 lib/el3_runtime/aarch64/context.S | 32 ++++++++++++++--------
 2 files changed, 40 insertions(+), 37 deletions(-)

diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 1c3ed3f33..e8cde5470 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -154,25 +154,6 @@ interrupt_exit_\label:
 	.endm
 
 
-	.macro save_x4_to_x29_sp_el0
-	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
-	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
-	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
-	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
-	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
-	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
-	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
-	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
-	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
-	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
-	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
-	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
-	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
-	mrs	x18, sp_el0
-	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
-	.endm
-
-
 	vector_base	runtime_exceptions
 
 	/* ---------------------------------------------------------------------
@@ -345,7 +326,21 @@ smc_handler64:
 	 *
 	 * Save x4-x29 and sp_el0.
 	 */
-	save_x4_to_x29_sp_el0
+	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+	mrs	x18, sp_el0
+	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
 
 	mov	x5, xzr
 	mov	x6, sp
@@ -431,14 +426,12 @@ compat_or_vendor:
 
 smc_unknown:
 	/*
-	 * Here we restore x4-x18 regardless of where we came from. AArch32
-	 * callers will find the registers contents unchanged, but AArch64
-	 * callers will find the registers modified (with stale earlier NS
-	 * content). Either way, we aren't leaking any secure information
-	 * through them.
+	 * Unknown SMC call. Populate return value with SMC_UNK, restore
+	 * GP registers, and return to caller.
 	 */
 	mov	x0, #SMC_UNK
-	b	restore_gp_registers_callee_eret
+	str	x0, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	b	restore_gp_registers_eret
 
 smc_prohibited:
 	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 620ec16ff..9a53b76c1 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -15,8 +15,8 @@
 	.global	fpregs_context_restore
 #endif
 	.global	save_gp_registers
+	.global	restore_gp_registers
 	.global	restore_gp_registers_eret
-	.global	restore_gp_registers_callee_eret
 	.global	el3_exit
 
 /* -----------------------------------------------------
@@ -332,30 +332,40 @@ func save_gp_registers
 	ret
 endfunc save_gp_registers
 
-func restore_gp_registers_eret
+/*
+ * This function restores all general purpose registers except x30 from the
+ * CPU context. x30 register must be explicitly restored by the caller.
+ */
+func restore_gp_registers
 	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
-	b	restore_gp_registers_callee_eret
-endfunc restore_gp_registers_eret
-
-func restore_gp_registers_callee_eret
 	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
 	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
 	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
 	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
 	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
 	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
 	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
 	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
 	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
 	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
 	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+	ldr	x28, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
+	msr	sp_el0, x28
 	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
-	ldp	x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-	msr	sp_el0, x17
-	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+	ret
+endfunc restore_gp_registers
+
+/*
+ * Restore general purpose registers (including x30), and exit EL3 via. ERET to
+ * a lower exception level.
+ */
+func restore_gp_registers_eret
+	bl	restore_gp_registers
+	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 	eret
-endfunc restore_gp_registers_callee_eret
+endfunc restore_gp_registers_eret
 
 /* -----------------------------------------------------
  * This routine assumes that the SP_EL3 is pointing to
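
Usage illustration (not part of the patch): after this change, an EL3 code path can pair
save_gp_registers with either restore entry point, depending on whether it is ready to leave
EL3. The sketch below is a minimal, hypothetical example of the new call shape. It assumes
TF-A's asm_macros.S and context.h definitions, a vector entry that has already stashed the
caller's x30 at CTX_GPREG_LR (as runtime_exceptions.S does before reaching smc_handler64),
and an SMC calling convention header providing SMC_UNK; the function name example_smc_path
and the include set are assumptions for illustration, not code from this series.

	#include <asm_macros.S>
	#include <context.h>
	#include <smcc.h>	/* assumed location of SMC_UNK in this era */

	.global	example_smc_path

	/*
	 * Hypothetical EL3 path, for illustration only. SP_EL3 is assumed to
	 * point at the current cpu_context_t, and the vector entry code is
	 * assumed to have stored the caller's x30 at CTX_GPREG_LR already.
	 */
func example_smc_path
	/* Save the caller's GP registers into the context area */
	bl	save_gp_registers

	/* ... EL3 work would go here ... */

	/* Write a return value into the saved x0 slot, as smc_unknown now does */
	mov	x0, #SMC_UNK
	str	x0, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]

	/* Restore x0-x29 and sp_el0, then x30, and ERET to the lower EL */
	b	restore_gp_registers_eret
endfunc example_smc_path

The split keeps restore_gp_registers reusable by EL3 callers that are not yet ready to ERET,
at the cost the commit message notes: the common SMC return path now takes an extra
branch-return and one extra load of x30.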