From 28f39f02ade1bd3ae86c8a472d01873ba0cdacb7 Mon Sep 17 00:00:00 2001 From: Max Shvetsov Date: Tue, 25 Feb 2020 13:56:19 +0000 Subject: [PATCH 1/6] SPMD: save/restore EL2 system registers. NOTE: Not all EL2 system registers are saved/restored. This subset includes the registers recognized by ARMv8.0. Change-Id: I9993c7d78d8f5f8e72d1c6c8d6fd871283aa3ce0 Signed-off-by: Jose Marinho Signed-off-by: Olivier Deprez Signed-off-by: Artsem Artsemenka Signed-off-by: Max Shvetsov --- Makefile | 61 ++-- include/arch/aarch64/arch.h | 27 ++ include/lib/el3_runtime/aarch64/context.h | 99 +++++- include/lib/el3_runtime/context_mgmt.h | 5 + lib/el3_runtime/aarch64/context.S | 391 +++++++++++++++++++++- lib/el3_runtime/aarch64/context_mgmt.c | 46 +++ make_helpers/defaults.mk | 5 + services/std_svc/spmd/spmd_main.c | 10 + 8 files changed, 608 insertions(+), 36 deletions(-) diff --git a/Makefile b/Makefile index 03f9fc6d8..a84c413b8 100644 --- a/Makefile +++ b/Makefile @@ -412,40 +412,45 @@ INCLUDE_TBBR_MK := 1 ################################################################################ ifneq (${SPD},none) -ifeq (${ARCH},aarch32) + ifeq (${ARCH},aarch32) $(error "Error: SPD is incompatible with AArch32.") -endif -ifdef EL3_PAYLOAD_BASE + endif + + ifdef EL3_PAYLOAD_BASE $(warning "SPD and EL3_PAYLOAD_BASE are incompatible build options.") $(warning "The SPD and its BL32 companion will be present but ignored.") -endif - ifeq (${SPD},spmd) - # SPMD is located in std_svc directory - SPD_DIR := std_svc - else - # All other SPDs in spd directory - SPD_DIR := spd - endif + endif - # We expect to locate an spd.mk under the specified SPD directory - SPD_MAKE := $(wildcard services/${SPD_DIR}/${SPD}/${SPD}.mk) + ifeq (${SPD},spmd) + # SPMD is located in std_svc directory + SPD_DIR := std_svc - - ifeq (${SPD_MAKE},) - $(error Error: No services/${SPD_DIR}/${SPD}/${SPD}.mk located) + ifeq ($(CTX_INCLUDE_EL2_REGS),0) + $(error spmd requires CTX_INCLUDE_EL2_REGS option) endif - $(info Including ${SPD_MAKE}) - include ${SPD_MAKE} + else + # All other SPDs in spd directory + SPD_DIR := spd + endif - # If there's BL32 companion for the chosen SPD, we expect that the SPD's - # Makefile would set NEED_BL32 to "yes". In this case, the build system - # supports two mutually exclusive options: - # * BL32 is built from source: then BL32_SOURCES must contain the list - # of source files to build BL32 - # * BL32 is a prebuilt binary: then BL32 must point to the image file - # that will be included in the FIP - # If both BL32_SOURCES and BL32 are defined, the binary takes precedence - # over the sources. + # We expect to locate an spd.mk under the specified SPD directory + SPD_MAKE := $(wildcard services/${SPD_DIR}/${SPD}/${SPD}.mk) + + ifeq (${SPD_MAKE},) + $(error Error: No services/${SPD_DIR}/${SPD}/${SPD}.mk located) + endif + $(info Including ${SPD_MAKE}) + include ${SPD_MAKE} + + # If there's BL32 companion for the chosen SPD, we expect that the SPD's + # Makefile would set NEED_BL32 to "yes". In this case, the build system + # supports two mutually exclusive options: + # * BL32 is built from source: then BL32_SOURCES must contain the list + # of source files to build BL32 + # * BL32 is a prebuilt binary: then BL32 must point to the image file + # that will be included in the FIP + # If both BL32_SOURCES and BL32 are defined, the binary takes precedence + # over the sources. 
endif ################################################################################ @@ -761,6 +766,7 @@ $(eval $(call assert_boolean,CTX_INCLUDE_AARCH32_REGS)) $(eval $(call assert_boolean,CTX_INCLUDE_FPREGS)) $(eval $(call assert_boolean,CTX_INCLUDE_PAUTH_REGS)) $(eval $(call assert_boolean,CTX_INCLUDE_MTE_REGS)) +$(eval $(call assert_boolean,CTX_INCLUDE_EL2_REGS)) $(eval $(call assert_boolean,DEBUG)) $(eval $(call assert_boolean,DYN_DISABLE_AUTH)) $(eval $(call assert_boolean,EL3_EXCEPTION_HANDLING)) @@ -832,6 +838,7 @@ $(eval $(call add_define,CTX_INCLUDE_FPREGS)) $(eval $(call add_define,CTX_INCLUDE_PAUTH_REGS)) $(eval $(call add_define,EL3_EXCEPTION_HANDLING)) $(eval $(call add_define,CTX_INCLUDE_MTE_REGS)) +$(eval $(call add_define,CTX_INCLUDE_EL2_REGS)) $(eval $(call add_define,ENABLE_AMU)) $(eval $(call add_define,ENABLE_ASSERTIONS)) $(eval $(call add_define,ENABLE_BTI)) diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h index 1faddbedc..d5939971e 100644 --- a/include/arch/aarch64/arch.h +++ b/include/arch/aarch64/arch.h @@ -96,6 +96,33 @@ #define ICC_EOIR1_EL1 S3_0_c12_c12_1 #define ICC_SGI0R_EL1 S3_0_c12_c11_7 +/******************************************************************************* + * Definitions for EL2 system registers for save/restore routine + ******************************************************************************/ + +#define CNTPOFF_EL2 S3_4_C14_C0_6 +#define HAFGRTR_EL2 S3_4_C3_C1_6 +#define HDFGRTR_EL2 S3_4_C3_C1_4 +#define HDFGWTR_EL2 S3_4_C3_C1_5 +#define HFGITR_EL2 S3_4_C1_C1_6 +#define HFGRTR_EL2 S3_4_C1_C1_4 +#define HFGWTR_EL2 S3_4_C1_C1_5 +#define ICH_EISR_EL2 S3_4_C12_C11_3 +#define ICH_ELRSR_EL2 S3_4_C12_C11_5 +#define ICH_HCR_EL2 S3_4_C12_C11_0 +#define ICH_MISR_EL2 S3_4_C12_C11_2 +#define ICH_VMCR_EL2 S3_4_C12_C11_7 +#define ICH_VTR_EL2 S3_4_C12_C11_1 +#define MPAMVPM0_EL2 S3_4_C10_C5_0 +#define MPAMVPM1_EL2 S3_4_C10_C5_1 +#define MPAMVPM2_EL2 S3_4_C10_C5_2 +#define MPAMVPM3_EL2 S3_4_C10_C5_3 +#define MPAMVPM4_EL2 S3_4_C10_C5_4 +#define MPAMVPM5_EL2 S3_4_C10_C5_5 +#define MPAMVPM6_EL2 S3_4_C10_C5_6 +#define MPAMVPM7_EL2 S3_4_C10_C5_7 +#define MPAMVPMV_EL2 S3_4_C10_C4_1 + /******************************************************************************* * Generic timer memory mapped registers & offsets ******************************************************************************/ diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h index 4158c023e..6559b60a3 100644 --- a/include/lib/el3_runtime/aarch64/context.h +++ b/include/lib/el3_runtime/aarch64/context.h @@ -135,10 +135,88 @@ #define CTX_MTE_REGS_END CTX_TIMER_SYSREGS_END #endif /* CTX_INCLUDE_MTE_REGS */ +/* + * S-EL2 register set + */ + +#if CTX_INCLUDE_EL2_REGS +/* For later discussion + * ICH_AP0R_EL2 + * ICH_AP1R_EL2 + * AMEVCNTVOFF0_EL2 + * AMEVCNTVOFF1_EL2 + * ICH_LR_EL2 + */ +#define CTX_ACTLR_EL2 (CTX_MTE_REGS_END + U(0x0)) +#define CTX_AFSR0_EL2 (CTX_MTE_REGS_END + U(0x8)) +#define CTX_AFSR1_EL2 (CTX_MTE_REGS_END + U(0x10)) +#define CTX_AMAIR_EL2 (CTX_MTE_REGS_END + U(0x18)) +#define CTX_CNTHCTL_EL2 (CTX_MTE_REGS_END + U(0x20)) +#define CTX_CNTHP_CTL_EL2 (CTX_MTE_REGS_END + U(0x28)) +#define CTX_CNTHP_CVAL_EL2 (CTX_MTE_REGS_END + U(0x30)) +#define CTX_CNTHP_TVAL_EL2 (CTX_MTE_REGS_END + U(0x38)) +#define CTX_CNTPOFF_EL2 (CTX_MTE_REGS_END + U(0x40)) +#define CTX_CNTVOFF_EL2 (CTX_MTE_REGS_END + U(0x48)) +#define CTX_CPTR_EL2 (CTX_MTE_REGS_END + U(0x50)) +#define CTX_DBGVCR32_EL2 (CTX_MTE_REGS_END + U(0x58)) +#define 
CTX_ELR_EL2 (CTX_MTE_REGS_END + U(0x60)) +#define CTX_ESR_EL2 (CTX_MTE_REGS_END + U(0x68)) +#define CTX_FAR_EL2 (CTX_MTE_REGS_END + U(0x70)) +#define CTX_FPEXC32_EL2 (CTX_MTE_REGS_END + U(0x78)) +#define CTX_HACR_EL2 (CTX_MTE_REGS_END + U(0x80)) +#define CTX_HAFGRTR_EL2 (CTX_MTE_REGS_END + U(0x88)) +#define CTX_HCR_EL2 (CTX_MTE_REGS_END + U(0x90)) +#define CTX_HDFGRTR_EL2 (CTX_MTE_REGS_END + U(0x98)) +#define CTX_HDFGWTR_EL2 (CTX_MTE_REGS_END + U(0xA0)) +#define CTX_HFGITR_EL2 (CTX_MTE_REGS_END + U(0xA8)) +#define CTX_HFGRTR_EL2 (CTX_MTE_REGS_END + U(0xB0)) +#define CTX_HFGWTR_EL2 (CTX_MTE_REGS_END + U(0xB8)) +#define CTX_HPFAR_EL2 (CTX_MTE_REGS_END + U(0xC0)) +#define CTX_HSTR_EL2 (CTX_MTE_REGS_END + U(0xC8)) +#define CTX_ICC_SRE_EL2 (CTX_MTE_REGS_END + U(0xD0)) +#define CTX_ICH_EISR_EL2 (CTX_MTE_REGS_END + U(0xD8)) +#define CTX_ICH_ELRSR_EL2 (CTX_MTE_REGS_END + U(0xE0)) +#define CTX_ICH_HCR_EL2 (CTX_MTE_REGS_END + U(0xE8)) +#define CTX_ICH_MISR_EL2 (CTX_MTE_REGS_END + U(0xF0)) +#define CTX_ICH_VMCR_EL2 (CTX_MTE_REGS_END + U(0xF8)) +#define CTX_ICH_VTR_EL2 (CTX_MTE_REGS_END + U(0x100)) +#define CTX_MAIR_EL2 (CTX_MTE_REGS_END + U(0x108)) +#define CTX_MDCR_EL2 (CTX_MTE_REGS_END + U(0x110)) +#define CTX_MPAM2_EL2 (CTX_MTE_REGS_END + U(0x118)) +#define CTX_MPAMHCR_EL2 (CTX_MTE_REGS_END + U(0x120)) +#define CTX_MPAMVPM0_EL2 (CTX_MTE_REGS_END + U(0x128)) +#define CTX_MPAMVPM1_EL2 (CTX_MTE_REGS_END + U(0x130)) +#define CTX_MPAMVPM2_EL2 (CTX_MTE_REGS_END + U(0x138)) +#define CTX_MPAMVPM3_EL2 (CTX_MTE_REGS_END + U(0x140)) +#define CTX_MPAMVPM4_EL2 (CTX_MTE_REGS_END + U(0x148)) +#define CTX_MPAMVPM5_EL2 (CTX_MTE_REGS_END + U(0x150)) +#define CTX_MPAMVPM6_EL2 (CTX_MTE_REGS_END + U(0x158)) +#define CTX_MPAMVPM7_EL2 (CTX_MTE_REGS_END + U(0x160)) +#define CTX_MPAMVPMV_EL2 (CTX_MTE_REGS_END + U(0x168)) +#define CTX_RMR_EL2 (CTX_MTE_REGS_END + U(0x170)) +#define CTX_SCTLR_EL2 (CTX_MTE_REGS_END + U(0x178)) +#define CTX_SPSR_EL2 (CTX_MTE_REGS_END + U(0x180)) +#define CTX_SP_EL2 (CTX_MTE_REGS_END + U(0x188)) +#define CTX_TCR_EL2 (CTX_MTE_REGS_END + U(0x190)) +#define CTX_TPIDR_EL2 (CTX_MTE_REGS_END + U(0x198)) +#define CTX_TTBR0_EL2 (CTX_MTE_REGS_END + U(0x1A0)) +#define CTX_VBAR_EL2 (CTX_MTE_REGS_END + U(0x1A8)) +#define CTX_VMPIDR_EL2 (CTX_MTE_REGS_END + U(0x1B0)) +#define CTX_VPIDR_EL2 (CTX_MTE_REGS_END + U(0x1B8)) +#define CTX_VTCR_EL2 (CTX_MTE_REGS_END + U(0x1C0)) +#define CTX_VTTBR_EL2 (CTX_MTE_REGS_END + U(0x1C8)) +#define CTX_ZCR_EL2 (CTX_MTE_REGS_END + U(0x1D0)) + +/* Align to the next 16 byte boundary */ +#define CTX_EL2_REGS_END (CTX_MTE_REGS_END + U(0x1E0)) +#else +#define CTX_EL2_REGS_END CTX_MTE_REGS_END +#endif /* CTX_INCLUDE_EL2_REGS */ + /* * End of system registers. */ -#define CTX_SYSREGS_END CTX_MTE_REGS_END +#define CTX_SYSREGS_END CTX_EL2_REGS_END /******************************************************************************* * Constants that allow assembler code to access members of and the 'fp_regs' @@ -255,11 +333,10 @@ DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL); /* - * AArch64 EL1 system register context structure for preserving the - * architectural state during switches from one security state to - * another in EL1. + * AArch64 EL1/EL2 system register context structure for preserving the + * architectural state during world switches. 
*/ -DEFINE_REG_STRUCT(el1_sys_regs, CTX_SYSREG_ALL); +DEFINE_REG_STRUCT(sys_regs, CTX_SYSREG_ALL); /* * AArch64 floating point register context structure for preserving @@ -304,7 +381,7 @@ DEFINE_REG_STRUCT(pauth, CTX_PAUTH_REGS_ALL); typedef struct cpu_context { gp_regs_t gpregs_ctx; el3_state_t el3state_ctx; - el1_sys_regs_t sysregs_ctx; + sys_regs_t sysregs_ctx; #if CTX_INCLUDE_FPREGS fp_regs_t fpregs_ctx; #endif @@ -387,8 +464,14 @@ CASSERT(CTX_PAUTH_REGS_OFFSET == __builtin_offsetof(cpu_context_t, pauth_ctx), \ /******************************************************************************* * Function prototypes ******************************************************************************/ -void el1_sysregs_context_save(el1_sys_regs_t *regs); -void el1_sysregs_context_restore(el1_sys_regs_t *regs); +void el1_sysregs_context_save(sys_regs_t *regs); +void el1_sysregs_context_restore(sys_regs_t *regs); + +#if CTX_INCLUDE_EL2_REGS +void el2_sysregs_context_save(sys_regs_t *regs); +void el2_sysregs_context_restore(sys_regs_t *regs); +#endif + #if CTX_INCLUDE_FPREGS void fpregs_context_save(fp_regs_t *regs); void fpregs_context_restore(fp_regs_t *regs); diff --git a/include/lib/el3_runtime/context_mgmt.h b/include/lib/el3_runtime/context_mgmt.h index 17955e3a8..b36cd3d70 100644 --- a/include/lib/el3_runtime/context_mgmt.h +++ b/include/lib/el3_runtime/context_mgmt.h @@ -36,6 +36,11 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep); void cm_prepare_el3_exit(uint32_t security_state); #ifdef __aarch64__ +#if CTX_INCLUDE_EL2_REGS +void cm_el2_sysregs_context_save(uint32_t security_state); +void cm_el2_sysregs_context_restore(uint32_t security_state); +#endif + void cm_el1_sysregs_context_save(uint32_t security_state); void cm_el1_sysregs_context_restore(uint32_t security_state); void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint); diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S index 9bd25bac9..bcc7eef9e 100644 --- a/lib/el3_runtime/aarch64/context.S +++ b/lib/el3_runtime/aarch64/context.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -9,6 +9,11 @@ #include #include +#if CTX_INCLUDE_EL2_REGS + .global el2_sysregs_context_save + .global el2_sysregs_context_restore +#endif + .global el1_sysregs_context_save .global el1_sysregs_context_restore #if CTX_INCLUDE_FPREGS @@ -19,6 +24,390 @@ .global restore_gp_pmcr_pauth_regs .global el3_exit +#if CTX_INCLUDE_EL2_REGS + +/* ----------------------------------------------------- + * The following function strictly follows the AArch64 + * PCS to use x9-x17 (temporary caller-saved registers) + * to save EL1 system register context. It assumes that + * 'x0' is pointing to a 'el1_sys_regs' structure where + * the register context will be saved. 
+ * ----------------------------------------------------- + */ +func el2_sysregs_context_save + + mrs x9, actlr_el2 + str x9, [x0, #CTX_ACTLR_EL2] + + mrs x9, afsr0_el2 + str x9, [x0, #CTX_AFSR0_EL2] + + mrs x9, afsr1_el2 + str x9, [x0, #CTX_AFSR1_EL2] + + mrs x9, amair_el2 + str x9, [x0, #CTX_AMAIR_EL2] + + mrs x9, cnthctl_el2 + str x9, [x0, #CTX_CNTHCTL_EL2] + + mrs x9, cnthp_ctl_el2 + str x9, [x0, #CTX_CNTHP_CTL_EL2] + + mrs x9, cnthp_cval_el2 + str x9, [x0, #CTX_CNTHP_CVAL_EL2] + + mrs x9, cnthp_tval_el2 + str x9, [x0, #CTX_CNTHP_TVAL_EL2] + + mrs x9, CNTPOFF_EL2 + str x9, [x0, #CTX_CNTPOFF_EL2] + + mrs x9, cntvoff_el2 + str x9, [x0, #CTX_CNTVOFF_EL2] + + mrs x9, cptr_el2 + str x9, [x0, #CTX_CPTR_EL2] + + mrs x9, dbgvcr32_el2 + str x9, [x0, #CTX_DBGVCR32_EL2] + + mrs x9, elr_el2 + str x9, [x0, #CTX_ELR_EL2] + + mrs x9, esr_el2 + str x9, [x0, #CTX_ESR_EL2] + + mrs x9, far_el2 + str x9, [x0, #CTX_FAR_EL2] + + mrs x9, fpexc32_el2 + str x9, [x0, #CTX_FPEXC32_EL2] + + mrs x9, hacr_el2 + str x9, [x0, #CTX_HACR_EL2] + + mrs x9, HAFGRTR_EL2 + str x9, [x0, #CTX_HAFGRTR_EL2] + + mrs x9, hcr_el2 + str x9, [x0, #CTX_HCR_EL2] + + mrs x9, HDFGRTR_EL2 + str x9, [x0, #CTX_HDFGRTR_EL2] + + mrs x9, HDFGWTR_EL2 + str x9, [x0, #CTX_HDFGWTR_EL2] + + mrs x9, HFGITR_EL2 + str x9, [x0, #CTX_HFGITR_EL2] + + mrs x9, HFGRTR_EL2 + str x9, [x0, #CTX_HFGRTR_EL2] + + mrs x9, HFGWTR_EL2 + str x9, [x0, #CTX_HFGWTR_EL2] + + mrs x9, hpfar_el2 + str x9, [x0, #CTX_HPFAR_EL2] + + mrs x9, hstr_el2 + str x9, [x0, #CTX_HSTR_EL2] + + mrs x9, ICC_SRE_EL2 + str x9, [x0, #CTX_ICC_SRE_EL2] + + mrs x9, ICH_EISR_EL2 + str x9, [x0, #CTX_ICH_EISR_EL2] + + mrs x9, ICH_ELRSR_EL2 + str x9, [x0, #CTX_ICH_ELRSR_EL2] + + mrs x9, ICH_HCR_EL2 + str x9, [x0, #CTX_ICH_HCR_EL2] + + mrs x9, ICH_MISR_EL2 + str x9, [x0, #CTX_ICH_MISR_EL2] + + mrs x9, ICH_VMCR_EL2 + str x9, [x0, #CTX_ICH_VMCR_EL2] + + mrs x9, ICH_VTR_EL2 + str x9, [x0, #CTX_ICH_VTR_EL2] + + mrs x9, mair_el2 + str x9, [x0, #CTX_MAIR_EL2] + + mrs x9, mdcr_el2 + str x9, [x0, #CTX_MDCR_EL2] + + mrs x9, MPAM2_EL2 + str x9, [x0, #CTX_MPAM2_EL2] + + mrs x9, MPAMHCR_EL2 + str x9, [x0, #CTX_MPAMHCR_EL2] + + mrs x9, MPAMVPM0_EL2 + str x9, [x0, #CTX_MPAMVPM0_EL2] + + mrs x9, MPAMVPM1_EL2 + str x9, [x0, #CTX_MPAMVPM1_EL2] + + mrs x9, MPAMVPM2_EL2 + str x9, [x0, #CTX_MPAMVPM2_EL2] + + mrs x9, MPAMVPM3_EL2 + str x9, [x0, #CTX_MPAMVPM3_EL2] + + mrs x9, MPAMVPM4_EL2 + str x9, [x0, #CTX_MPAMVPM4_EL2] + + mrs x9, MPAMVPM5_EL2 + str x9, [x0, #CTX_MPAMVPM5_EL2] + + mrs x9, MPAMVPM6_EL2 + str x9, [x0, #CTX_MPAMVPM6_EL2] + + mrs x9, MPAMVPM7_EL2 + str x9, [x0, #CTX_MPAMVPM7_EL2] + + mrs x9, MPAMVPMV_EL2 + str x9, [x0, #CTX_MPAMVPMV_EL2] + + mrs x9, rmr_el2 + str x9, [x0, #CTX_RMR_EL2] + + mrs x9, sctlr_el2 + str x9, [x0, #CTX_SCTLR_EL2] + + mrs x9, spsr_el2 + str x9, [x0, #CTX_SPSR_EL2] + + mrs x9, sp_el2 + str x9, [x0, #CTX_SP_EL2] + + mrs x9, tcr_el2 + str x9, [x0, #CTX_TCR_EL2] + + mrs x9, tpidr_el2 + str x9, [x0, #CTX_TPIDR_EL2] + + mrs x9, ttbr0_el2 + str x9, [x0, #CTX_TTBR0_EL2] + + mrs x9, vbar_el2 + str x9, [x0, #CTX_VBAR_EL2] + + mrs x9, vmpidr_el2 + str x9, [x0, #CTX_VMPIDR_EL2] + + mrs x9, vpidr_el2 + str x9, [x0, #CTX_VPIDR_EL2] + + mrs x9, vtcr_el2 + str x9, [x0, #CTX_VTCR_EL2] + + mrs x9, vttbr_el2 + str x9, [x0, #CTX_VTTBR_EL2] + + mrs x9, ZCR_EL2 + str x9, [x0, #CTX_ZCR_EL2] + + ret +endfunc el2_sysregs_context_save + +/* ----------------------------------------------------- + * The following function strictly follows the AArch64 + * PCS to use x9-x17 (temporary caller-saved registers) + 
* to restore EL1 system register context. It assumes + * that 'x0' is pointing to a 'el1_sys_regs' structure + * from where the register context will be restored + * ----------------------------------------------------- + */ +func el2_sysregs_context_restore + + ldr x9, [x0, #CTX_ACTLR_EL2] + msr actlr_el2, x9 + + ldr x9, [x0, #CTX_AFSR0_EL2] + msr afsr0_el2, x9 + + ldr x9, [x0, #CTX_AFSR1_EL2] + msr afsr1_el2, x9 + + ldr x9, [x0, #CTX_AMAIR_EL2] + msr amair_el2, x9 + + ldr x9, [x0, #CTX_CNTHCTL_EL2] + msr cnthctl_el2, x9 + + ldr x9, [x0, #CTX_CNTHP_CTL_EL2] + msr cnthp_ctl_el2, x9 + + ldr x9, [x0, #CTX_CNTHP_CVAL_EL2] + msr cnthp_cval_el2, x9 + + ldr x9, [x0, #CTX_CNTHP_TVAL_EL2] + msr cnthp_tval_el2, x9 + + ldr x9, [x0, #CTX_CNTPOFF_EL2] + msr CNTPOFF_EL2, x9 + + ldr x9, [x0, #CTX_CNTVOFF_EL2] + msr cntvoff_el2, x9 + + ldr x9, [x0, #CTX_CPTR_EL2] + msr cptr_el2, x9 + + ldr x9, [x0, #CTX_DBGVCR32_EL2] + msr dbgvcr32_el2, x9 + + ldr x9, [x0, #CTX_ELR_EL2] + msr elr_el2, x9 + + ldr x9, [x0, #CTX_ESR_EL2] + msr esr_el2, x9 + + ldr x9, [x0, #CTX_FAR_EL2] + msr far_el2, x9 + + ldr x9, [x0, #CTX_FPEXC32_EL2] + msr fpexc32_el2, x9 + + ldr x9, [x0, #CTX_HACR_EL2] + msr hacr_el2, x9 + + ldr x9, [x0, #CTX_HAFGRTR_EL2] + msr HAFGRTR_EL2, x9 + + ldr x9, [x0, #CTX_HCR_EL2] + msr hcr_el2, x9 + + ldr x9, [x0, #CTX_HDFGRTR_EL2] + msr HDFGRTR_EL2, x9 + + ldr x9, [x0, #CTX_HDFGWTR_EL2] + msr HDFGWTR_EL2, x9 + + ldr x9, [x0, #CTX_HFGITR_EL2] + msr HFGITR_EL2, x9 + + ldr x9, [x0, #CTX_HFGRTR_EL2] + msr HFGRTR_EL2, x9 + + ldr x9, [x0, #CTX_HFGWTR_EL2] + msr HFGWTR_EL2, x9 + + ldr x9, [x0, #CTX_HPFAR_EL2] + msr hpfar_el2, x9 + + ldr x9, [x0, #CTX_HSTR_EL2] + msr hstr_el2, x9 + + ldr x9, [x0, #CTX_ICC_SRE_EL2] + msr ICC_SRE_EL2, x9 + + ldr x9, [x0, #CTX_ICH_EISR_EL2] + msr ICH_EISR_EL2, x9 + + ldr x9, [x0, #CTX_ICH_ELRSR_EL2] + msr ICH_ELRSR_EL2, x9 + + ldr x9, [x0, #CTX_ICH_HCR_EL2] + msr ICH_HCR_EL2, x9 + + ldr x9, [x0, #CTX_ICH_MISR_EL2] + msr ICH_MISR_EL2, x9 + + ldr x9, [x0, #CTX_ICH_VMCR_EL2] + msr ICH_VMCR_EL2, x9 + + ldr x9, [x0, #CTX_ICH_VTR_EL2] + msr ICH_VTR_EL2, x9 + + ldr x9, [x0, #CTX_MAIR_EL2] + msr mair_el2, x9 + + ldr x9, [x0, #CTX_MDCR_EL2] + msr mdcr_el2, x9 + + ldr x9, [x0, #CTX_MPAM2_EL2] + msr MPAM2_EL2, x9 + + ldr x9, [x0, #CTX_MPAMHCR_EL2] + msr MPAMHCR_EL2, x9 + + ldr x9, [x0, #CTX_MPAMVPM0_EL2] + msr MPAMVPM0_EL2, x9 + + ldr x9, [x0, #CTX_MPAMVPM1_EL2] + msr MPAMVPM1_EL2, x9 + + ldr x9, [x0, #CTX_MPAMVPM2_EL2] + msr MPAMVPM2_EL2, x9 + + ldr x9, [x0, #CTX_MPAMVPM3_EL2] + msr MPAMVPM3_EL2, x9 + + ldr x9, [x0, #CTX_MPAMVPM4_EL2] + msr MPAMVPM4_EL2, x9 + + ldr x9, [x0, #CTX_MPAMVPM5_EL2] + msr MPAMVPM5_EL2, x9 + + ldr x9, [x0, #CTX_MPAMVPM6_EL2] + msr MPAMVPM6_EL2, x9 + + ldr x9, [x0, #CTX_MPAMVPM7_EL2] + msr MPAMVPM7_EL2, x9 + + ldr x9, [x0, #CTX_MPAMVPMV_EL2] + msr MPAMVPMV_EL2, x9 + + ldr x9, [x0, #CTX_RMR_EL2] + msr rmr_el2, x9 + + ldr x9, [x0, #CTX_SCTLR_EL2] + msr sctlr_el2, x9 + + ldr x9, [x0, #CTX_SPSR_EL2] + msr spsr_el2, x9 + + ldr x9, [x0, #CTX_SP_EL2] + msr sp_el2, x9 + + ldr x9, [x0, #CTX_TCR_EL2] + msr tcr_el2, x9 + + ldr x9, [x0, #CTX_TPIDR_EL2] + msr tpidr_el2, x9 + + ldr x9, [x0, #CTX_TTBR0_EL2] + msr ttbr0_el2, x9 + + ldr x9, [x0, #CTX_VBAR_EL2] + msr vbar_el2, x9 + + ldr x9, [x0, #CTX_VMPIDR_EL2] + msr vmpidr_el2, x9 + + ldr x9, [x0, #CTX_VPIDR_EL2] + msr vpidr_el2, x9 + + ldr x9, [x0, #CTX_VTCR_EL2] + msr vtcr_el2, x9 + + ldr x9, [x0, #CTX_VTTBR_EL2] + msr vttbr_el2, x9 + + ldr x9, [x0, #CTX_ZCR_EL2] + msr ZCR_EL2, x9 + + ret +endfunc el2_sysregs_context_restore + 
+#endif /* CTX_INCLUDE_EL2_REGS */ + /* ------------------------------------------------------------------ * The following function strictly follows the AArch64 PCS to use * x9-x17 (temporary caller-saved registers) to save EL1 system diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c index 546e39e16..f59bcfcd9 100644 --- a/lib/el3_runtime/aarch64/context_mgmt.c +++ b/lib/el3_runtime/aarch64/context_mgmt.c @@ -530,6 +530,52 @@ void cm_prepare_el3_exit(uint32_t security_state) cm_set_next_eret_context(security_state); } +#if CTX_INCLUDE_EL2_REGS +/******************************************************************************* + * Save EL2 sysreg context + ******************************************************************************/ +void cm_el2_sysregs_context_save(uint32_t security_state) +{ + u_register_t scr_el3 = read_scr(); + + /* + * Always save the non-secure EL2 context, only save the + * S-EL2 context if S-EL2 is enabled. + */ + if ((security_state == NON_SECURE) || + ((scr_el3 & SCR_EEL2_BIT) != 0U)) { + cpu_context_t *ctx; + + ctx = cm_get_context(security_state); + assert(ctx != NULL); + + el2_sysregs_context_save(get_sysregs_ctx(ctx)); + } +} + +/******************************************************************************* + * Restore EL2 sysreg context + ******************************************************************************/ +void cm_el2_sysregs_context_restore(uint32_t security_state) +{ + u_register_t scr_el3 = read_scr(); + + /* + * Always restore the non-secure EL2 context, only restore the + * S-EL2 context if S-EL2 is enabled. + */ + if ((security_state == NON_SECURE) || + ((scr_el3 & SCR_EEL2_BIT) != 0U)) { + cpu_context_t *ctx; + + ctx = cm_get_context(security_state); + assert(ctx != NULL); + + el2_sysregs_context_restore(get_sysregs_ctx(ctx)); + } +} +#endif /* CTX_INCLUDE_EL2_REGS */ + /******************************************************************************* * The next four functions are used by runtime services to save and restore * EL1 context on the 'cpu_context' structure for the specified security diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk index 60958a1d1..8e1f273a3 100644 --- a/make_helpers/defaults.mk +++ b/make_helpers/defaults.mk @@ -262,3 +262,8 @@ USE_SPINLOCK_CAS := 0 # Enable Link Time Optimization ENABLE_LTO := 0 + +# Build flag to include EL2 registers in cpu context save and restore during +# S-EL2 firmware entry/exit. This flag is to be used with SPD=spmd option. +# Default is 0. +CTX_INCLUDE_EL2_REGS := 0 diff --git a/services/std_svc/spmd/spmd_main.c b/services/std_svc/spmd/spmd_main.c index 677f63968..110719020 100644 --- a/services/std_svc/spmd/spmd_main.c +++ b/services/std_svc/spmd/spmd_main.c @@ -49,6 +49,7 @@ uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx) /* Restore the context assigned above */ cm_el1_sysregs_context_restore(SECURE); + cm_el2_sysregs_context_restore(SECURE); cm_set_next_eret_context(SECURE); /* Invalidate TLBs at EL1. 
*/
@@ -60,6 +61,7 @@ uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
 
 /* Save secure state */
 cm_el1_sysregs_context_save(SECURE);
+ cm_el2_sysregs_context_save(SECURE);
 
 return rc;
 }
@@ -321,9 +323,11 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
 
 /* Save incoming security state */
 cm_el1_sysregs_context_save(in_sstate);
+ cm_el2_sysregs_context_save(in_sstate);
 
 /* Restore outgoing security state */
 cm_el1_sysregs_context_restore(out_sstate);
+ cm_el2_sysregs_context_restore(out_sstate);
 cm_set_next_eret_context(out_sstate);
 
 SMC_RET8(cm_get_context(out_sstate), smc_fid, x1, x2, x3, x4,
@@ -366,9 +370,11 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
 if (in_sstate == NON_SECURE) {
 /* Save incoming security state */
 cm_el1_sysregs_context_save(in_sstate);
+ cm_el2_sysregs_context_save(in_sstate);
 
 /* Restore outgoing security state */
 cm_el1_sysregs_context_restore(out_sstate);
+ cm_el2_sysregs_context_restore(out_sstate);
 cm_set_next_eret_context(out_sstate);
 
 SMC_RET8(cm_get_context(out_sstate), smc_fid,
@@ -432,9 +438,11 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
 
 /* Save incoming security state */
 cm_el1_sysregs_context_save(in_sstate);
+ cm_el2_sysregs_context_save(in_sstate);
 
 /* Restore outgoing security state */
 cm_el1_sysregs_context_restore(out_sstate);
+ cm_el2_sysregs_context_restore(out_sstate);
 cm_set_next_eret_context(out_sstate);
 
 SMC_RET8(cm_get_context(out_sstate), smc_fid, x1, x2, x3, x4,
@@ -466,9 +474,11 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
 
 /* Save incoming security state */
 cm_el1_sysregs_context_save(in_sstate);
+ cm_el2_sysregs_context_save(in_sstate);
 
 /* Restore outgoing security state */
 cm_el1_sysregs_context_restore(out_sstate);
+ cm_el2_sysregs_context_restore(out_sstate);
 cm_set_next_eret_context(out_sstate);
 
 SMC_RET8(cm_get_context(out_sstate), smc_fid, x1, x2, x3, x4,

From 2825946e92c0bb14482a1a23e2304aed95e72718 Mon Sep 17 00:00:00 2001
From: Max Shvetsov
Date: Mon, 17 Feb 2020 16:15:47 +0000
Subject: [PATCH 2/6] SPMD: Adds partially supported EL2 registers.

This patch adds EL2 registers that are supported up to ARMv8.6.
ARM_ARCH_MINOR has to be specified to enable the save/restore routine.

Note: The following registers are still not covered by save/restore. 
* AMEVCNTVOFF0_EL2 * AMEVCNTVOFF1_EL2 * ICH_AP0R_EL2 * ICH_AP1R_EL2 * ICH_LR_EL2 Change-Id: I4813f3243e56e21cb297b31ef549a4b38d4876e1 Signed-off-by: Max Shvetsov --- include/arch/aarch64/arch.h | 7 +- include/lib/el3_runtime/aarch64/context.h | 211 +++++--- lib/el3_runtime/aarch64/context.S | 613 +++++++++++----------- lib/el3_runtime/aarch64/context_mgmt.c | 14 +- services/spd/trusty/trusty.c | 6 +- services/std_svc/spm_mm/spm_mm_setup.c | 16 +- 6 files changed, 455 insertions(+), 412 deletions(-) diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h index d5939971e..b0c265047 100644 --- a/include/arch/aarch64/arch.h +++ b/include/arch/aarch64/arch.h @@ -107,12 +107,8 @@ #define HFGITR_EL2 S3_4_C1_C1_6 #define HFGRTR_EL2 S3_4_C1_C1_4 #define HFGWTR_EL2 S3_4_C1_C1_5 -#define ICH_EISR_EL2 S3_4_C12_C11_3 -#define ICH_ELRSR_EL2 S3_4_C12_C11_5 #define ICH_HCR_EL2 S3_4_C12_C11_0 -#define ICH_MISR_EL2 S3_4_C12_C11_2 #define ICH_VMCR_EL2 S3_4_C12_C11_7 -#define ICH_VTR_EL2 S3_4_C12_C11_1 #define MPAMVPM0_EL2 S3_4_C10_C5_0 #define MPAMVPM1_EL2 S3_4_C10_C5_1 #define MPAMVPM2_EL2 S3_4_C10_C5_2 @@ -122,6 +118,9 @@ #define MPAMVPM6_EL2 S3_4_C10_C5_6 #define MPAMVPM7_EL2 S3_4_C10_C5_7 #define MPAMVPMV_EL2 S3_4_C10_C4_1 +#define TRFCR_EL2 S3_4_C1_C2_1 +#define PMSCR_EL2 S3_4_C9_C9_0 +#define TFSR_EL2 S3_4_C5_C6_0 /******************************************************************************* * Generic timer memory mapped registers & offsets diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h index 6559b60a3..e061950c8 100644 --- a/include/lib/el3_runtime/aarch64/context.h +++ b/include/lib/el3_runtime/aarch64/context.h @@ -68,7 +68,7 @@ * registers are only 32-bits wide but are stored as 64-bit values for * convenience ******************************************************************************/ -#define CTX_SYSREGS_OFFSET (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END) +#define CTX_EL1_SYSREGS_OFFSET (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END) #define CTX_SPSR_EL1 U(0x0) #define CTX_ELR_EL1 U(0x8) #define CTX_SCTLR_EL1 U(0x10) @@ -136,7 +136,12 @@ #endif /* CTX_INCLUDE_MTE_REGS */ /* - * S-EL2 register set + * End of system registers. 
+ */
+#define CTX_EL1_SYSREGS_END CTX_MTE_REGS_END
+
+/*
+ * EL2 register set
  */
 
 #if CTX_INCLUDE_EL2_REGS
@@ -147,82 +152,104 @@
 * AMEVCNTVOFF1_EL2
 * ICH_LR_EL2
 */
-#define CTX_ACTLR_EL2 (CTX_MTE_REGS_END + U(0x0))
-#define CTX_AFSR0_EL2 (CTX_MTE_REGS_END + U(0x8))
-#define CTX_AFSR1_EL2 (CTX_MTE_REGS_END + U(0x10))
-#define CTX_AMAIR_EL2 (CTX_MTE_REGS_END + U(0x18))
-#define CTX_CNTHCTL_EL2 (CTX_MTE_REGS_END + U(0x20))
-#define CTX_CNTHP_CTL_EL2 (CTX_MTE_REGS_END + U(0x28))
-#define CTX_CNTHP_CVAL_EL2 (CTX_MTE_REGS_END + U(0x30))
-#define CTX_CNTHP_TVAL_EL2 (CTX_MTE_REGS_END + U(0x38))
-#define CTX_CNTPOFF_EL2 (CTX_MTE_REGS_END + U(0x40))
-#define CTX_CNTVOFF_EL2 (CTX_MTE_REGS_END + U(0x48))
-#define CTX_CPTR_EL2 (CTX_MTE_REGS_END + U(0x50))
-#define CTX_DBGVCR32_EL2 (CTX_MTE_REGS_END + U(0x58))
-#define CTX_ELR_EL2 (CTX_MTE_REGS_END + U(0x60))
-#define CTX_ESR_EL2 (CTX_MTE_REGS_END + U(0x68))
-#define CTX_FAR_EL2 (CTX_MTE_REGS_END + U(0x70))
-#define CTX_FPEXC32_EL2 (CTX_MTE_REGS_END + U(0x78))
-#define CTX_HACR_EL2 (CTX_MTE_REGS_END + U(0x80))
-#define CTX_HAFGRTR_EL2 (CTX_MTE_REGS_END + U(0x88))
-#define CTX_HCR_EL2 (CTX_MTE_REGS_END + U(0x90))
-#define CTX_HDFGRTR_EL2 (CTX_MTE_REGS_END + U(0x98))
-#define CTX_HDFGWTR_EL2 (CTX_MTE_REGS_END + U(0xA0))
-#define CTX_HFGITR_EL2 (CTX_MTE_REGS_END + U(0xA8))
-#define CTX_HFGRTR_EL2 (CTX_MTE_REGS_END + U(0xB0))
-#define CTX_HFGWTR_EL2 (CTX_MTE_REGS_END + U(0xB8))
-#define CTX_HPFAR_EL2 (CTX_MTE_REGS_END + U(0xC0))
-#define CTX_HSTR_EL2 (CTX_MTE_REGS_END + U(0xC8))
-#define CTX_ICC_SRE_EL2 (CTX_MTE_REGS_END + U(0xD0))
-#define CTX_ICH_EISR_EL2 (CTX_MTE_REGS_END + U(0xD8))
-#define CTX_ICH_ELRSR_EL2 (CTX_MTE_REGS_END + U(0xE0))
-#define CTX_ICH_HCR_EL2 (CTX_MTE_REGS_END + U(0xE8))
-#define CTX_ICH_MISR_EL2 (CTX_MTE_REGS_END + U(0xF0))
-#define CTX_ICH_VMCR_EL2 (CTX_MTE_REGS_END + U(0xF8))
-#define CTX_ICH_VTR_EL2 (CTX_MTE_REGS_END + U(0x100))
-#define CTX_MAIR_EL2 (CTX_MTE_REGS_END + U(0x108))
-#define CTX_MDCR_EL2 (CTX_MTE_REGS_END + U(0x110))
-#define CTX_MPAM2_EL2 (CTX_MTE_REGS_END + U(0x118))
-#define CTX_MPAMHCR_EL2 (CTX_MTE_REGS_END + U(0x120))
-#define CTX_MPAMVPM0_EL2 (CTX_MTE_REGS_END + U(0x128))
-#define CTX_MPAMVPM1_EL2 (CTX_MTE_REGS_END + U(0x130))
-#define CTX_MPAMVPM2_EL2 (CTX_MTE_REGS_END + U(0x138))
-#define CTX_MPAMVPM3_EL2 (CTX_MTE_REGS_END + U(0x140))
-#define CTX_MPAMVPM4_EL2 (CTX_MTE_REGS_END + U(0x148))
-#define CTX_MPAMVPM5_EL2 (CTX_MTE_REGS_END + U(0x150))
-#define CTX_MPAMVPM6_EL2 (CTX_MTE_REGS_END + U(0x158))
-#define CTX_MPAMVPM7_EL2 (CTX_MTE_REGS_END + U(0x160))
-#define CTX_MPAMVPMV_EL2 (CTX_MTE_REGS_END + U(0x168))
-#define CTX_RMR_EL2 (CTX_MTE_REGS_END + U(0x170))
-#define CTX_SCTLR_EL2 (CTX_MTE_REGS_END + U(0x178))
-#define CTX_SPSR_EL2 (CTX_MTE_REGS_END + U(0x180))
-#define CTX_SP_EL2 (CTX_MTE_REGS_END + U(0x188))
-#define CTX_TCR_EL2 (CTX_MTE_REGS_END + U(0x190))
-#define CTX_TPIDR_EL2 (CTX_MTE_REGS_END + U(0x198))
-#define CTX_TTBR0_EL2 (CTX_MTE_REGS_END + U(0x1A0))
-#define CTX_VBAR_EL2 (CTX_MTE_REGS_END + U(0x1A8))
-#define CTX_VMPIDR_EL2 (CTX_MTE_REGS_END + U(0x1B0))
-#define CTX_VPIDR_EL2 (CTX_MTE_REGS_END + U(0x1B8))
-#define CTX_VTCR_EL2 (CTX_MTE_REGS_END + U(0x1C0))
-#define CTX_VTTBR_EL2 (CTX_MTE_REGS_END + U(0x1C8))
-#define CTX_ZCR_EL2 (CTX_MTE_REGS_END + U(0x1D0))
+#define CTX_EL2_SYSREGS_OFFSET (CTX_EL1_SYSREGS_OFFSET + CTX_EL1_SYSREGS_END)
+#define CTX_ACTLR_EL2 U(0x0)
+#define CTX_AFSR0_EL2 U(0x8)
+#define CTX_AFSR1_EL2 U(0x10)
+#define CTX_AMAIR_EL2 U(0x18)
+#define CTX_CNTHCTL_EL2 U(0x20) 
+#define CTX_CNTHP_CTL_EL2 U(0x28)
+#define CTX_CNTHP_CVAL_EL2 U(0x30)
+#define CTX_CNTHP_TVAL_EL2 U(0x38)
+#define CTX_CNTVOFF_EL2 U(0x40)
+#define CTX_CPTR_EL2 U(0x48)
+#define CTX_DBGVCR32_EL2 U(0x50)
+#define CTX_ELR_EL2 U(0x58)
+#define CTX_ESR_EL2 U(0x60)
+#define CTX_FAR_EL2 U(0x68)
+#define CTX_FPEXC32_EL2 U(0x70)
+#define CTX_HACR_EL2 U(0x78)
+#define CTX_HCR_EL2 U(0x80)
+#define CTX_HPFAR_EL2 U(0x88)
+#define CTX_HSTR_EL2 U(0x90)
+#define CTX_ICC_SRE_EL2 U(0x98)
+#define CTX_ICH_HCR_EL2 U(0xa0)
+#define CTX_ICH_VMCR_EL2 U(0xa8)
+#define CTX_MAIR_EL2 U(0xb0)
+#define CTX_MDCR_EL2 U(0xb8)
+#define CTX_PMSCR_EL2 U(0xc0)
+#define CTX_SCTLR_EL2 U(0xc8)
+#define CTX_SPSR_EL2 U(0xd0)
+#define CTX_SP_EL2 U(0xd8)
+#define CTX_TCR_EL2 U(0xe0)
+#define CTX_TRFCR_EL2 U(0xe8)
+#define CTX_TTBR0_EL2 U(0xf0)
+#define CTX_VBAR_EL2 U(0xf8)
+#define CTX_VMPIDR_EL2 U(0x100)
+#define CTX_VPIDR_EL2 U(0x108)
+#define CTX_VTCR_EL2 U(0x110)
+#define CTX_VTTBR_EL2 U(0x118)
+
+// Only if MTE registers in use
+#define CTX_TFSR_EL2 U(0x120)
+
+// Only if ENABLE_MPAM_FOR_LOWER_ELS==1
+#define CTX_MPAM2_EL2 U(0x128)
+#define CTX_MPAMHCR_EL2 U(0x130)
+#define CTX_MPAMVPM0_EL2 U(0x138)
+#define CTX_MPAMVPM1_EL2 U(0x140)
+#define CTX_MPAMVPM2_EL2 U(0x148)
+#define CTX_MPAMVPM3_EL2 U(0x150)
+#define CTX_MPAMVPM4_EL2 U(0x158)
+#define CTX_MPAMVPM5_EL2 U(0x160)
+#define CTX_MPAMVPM6_EL2 U(0x168)
+#define CTX_MPAMVPM7_EL2 U(0x170)
+#define CTX_MPAMVPMV_EL2 U(0x178)
+
+// Starting with Armv8.6
+#define CTX_HAFGRTR_EL2 U(0x180)
+#define CTX_HDFGRTR_EL2 U(0x188)
+#define CTX_HDFGWTR_EL2 U(0x190)
+#define CTX_HFGITR_EL2 U(0x198)
+#define CTX_HFGRTR_EL2 U(0x1a0)
+#define CTX_HFGWTR_EL2 U(0x1a8)
+#define CTX_CNTPOFF_EL2 U(0x1b0)
+
+// Starting with Armv8.4
+#define CTX_CNTHPS_CTL_EL2 U(0x1b8)
+#define CTX_CNTHPS_CVAL_EL2 U(0x1c0)
+#define CTX_CNTHPS_TVAL_EL2 U(0x1c8)
+#define CTX_CNTHVS_CTL_EL2 U(0x1d0)
+#define CTX_CNTHVS_CVAL_EL2 U(0x1d8)
+#define CTX_CNTHVS_TVAL_EL2 U(0x1e0)
+#define CTX_CNTHV_CTL_EL2 U(0x1e8)
+#define CTX_CNTHV_CVAL_EL2 U(0x1f0)
+#define CTX_CNTHV_TVAL_EL2 U(0x1f8)
+#define CTX_CONTEXTIDR_EL2 U(0x200)
+#define CTX_SDER32_EL2 U(0x208)
+#define CTX_TTBR1_EL2 U(0x210)
+#define CTX_VDISR_EL2 U(0x218)
+#define CTX_VNCR_EL2 U(0x220)
+#define CTX_VSESR_EL2 U(0x228)
+#define CTX_VSTCR_EL2 U(0x230)
+#define CTX_VSTTBR_EL2 U(0x238)
+
+// Starting with Armv8.5
+#define CTX_SCXTNUM_EL2 U(0x240)
 
 /* Align to the next 16 byte boundary */
-#define CTX_EL2_REGS_END (CTX_MTE_REGS_END + U(0x1E0))
-#else
-#define CTX_EL2_REGS_END CTX_MTE_REGS_END
+#define CTX_EL2_SYSREGS_END U(0x250)
 #endif /* CTX_INCLUDE_EL2_REGS */
 
-/*
- * End of system registers.
- */
-#define CTX_SYSREGS_END CTX_EL2_REGS_END
-
 /*******************************************************************************
 * Constants that allow assembler code to access members of and the 'fp_regs'
 * structure at their correct offsets. 
******************************************************************************/ -#define CTX_FPREGS_OFFSET (CTX_SYSREGS_OFFSET + CTX_SYSREGS_END) +#if CTX_INCLUDE_EL2_REGS +# define CTX_FPREGS_OFFSET (CTX_EL2_SYSREGS_OFFSET + CTX_EL2_SYSREGS_END) +#else +# define CTX_FPREGS_OFFSET (CTX_EL1_SYSREGS_OFFSET + CTX_EL1_SYSREGS_END) +#endif #if CTX_INCLUDE_FPREGS #define CTX_FP_Q0 U(0x0) #define CTX_FP_Q1 U(0x10) @@ -313,7 +340,10 @@ /* Constants to determine the size of individual context structures */ #define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT) -#define CTX_SYSREG_ALL (CTX_SYSREGS_END >> DWORD_SHIFT) +#define CTX_EL1_SYSREGS_ALL (CTX_EL1_SYSREGS_END >> DWORD_SHIFT) +#if CTX_INCLUDE_EL2_REGS +# define CTX_EL2_SYSREGS_ALL (CTX_EL2_SYSREGS_END >> DWORD_SHIFT) +#endif #if CTX_INCLUDE_FPREGS # define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT) #endif @@ -333,10 +363,19 @@ DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL); /* - * AArch64 EL1/EL2 system register context structure for preserving the + * AArch64 EL1 system register context structure for preserving the * architectural state during world switches. */ -DEFINE_REG_STRUCT(sys_regs, CTX_SYSREG_ALL); +DEFINE_REG_STRUCT(el1_sysregs, CTX_EL1_SYSREGS_ALL); + + +/* + * AArch64 EL2 system register context structure for preserving the + * architectural state during world switches. + */ +#if CTX_INCLUDE_EL2_REGS +DEFINE_REG_STRUCT(el2_sysregs, CTX_EL2_SYSREGS_ALL); +#endif /* * AArch64 floating point register context structure for preserving @@ -381,7 +420,10 @@ DEFINE_REG_STRUCT(pauth, CTX_PAUTH_REGS_ALL); typedef struct cpu_context { gp_regs_t gpregs_ctx; el3_state_t el3state_ctx; - sys_regs_t sysregs_ctx; + el1_sysregs_t el1_sysregs_ctx; +#if CTX_INCLUDE_EL2_REGS + el2_sysregs_t el2_sysregs_ctx; +#endif #if CTX_INCLUDE_FPREGS fp_regs_t fpregs_ctx; #endif @@ -396,7 +438,10 @@ typedef struct cpu_context { #if CTX_INCLUDE_FPREGS # define get_fpregs_ctx(h) (&((cpu_context_t *) h)->fpregs_ctx) #endif -#define get_sysregs_ctx(h) (&((cpu_context_t *) h)->sysregs_ctx) +#define get_el1_sysregs_ctx(h) (&((cpu_context_t *) h)->el1_sysregs_ctx) +#if CTX_INCLUDE_EL2_REGS +# define get_el2_sysregs_ctx(h) (&((cpu_context_t *) h)->el2_sysregs_ctx) +#endif #define get_gpregs_ctx(h) (&((cpu_context_t *) h)->gpregs_ctx) #define get_cve_2018_3639_ctx(h) (&((cpu_context_t *) h)->cve_2018_3639_ctx) #if CTX_INCLUDE_PAUTH_REGS @@ -410,8 +455,12 @@ typedef struct cpu_context { */ CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx), \ assert_core_context_gp_offset_mismatch); -CASSERT(CTX_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, sysregs_ctx), \ - assert_core_context_sys_offset_mismatch); +CASSERT(CTX_EL1_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, el1_sysregs_ctx), \ + assert_core_context_el1_sys_offset_mismatch); +#if CTX_INCLUDE_EL2_REGS +CASSERT(CTX_EL2_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, el2_sysregs_ctx), \ + assert_core_context_el2_sys_offset_mismatch); +#endif #if CTX_INCLUDE_FPREGS CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx), \ assert_core_context_fp_offset_mismatch); @@ -464,12 +513,12 @@ CASSERT(CTX_PAUTH_REGS_OFFSET == __builtin_offsetof(cpu_context_t, pauth_ctx), \ /******************************************************************************* * Function prototypes ******************************************************************************/ -void el1_sysregs_context_save(sys_regs_t *regs); -void el1_sysregs_context_restore(sys_regs_t *regs); +void 
el1_sysregs_context_save(el1_sysregs_t *regs); +void el1_sysregs_context_restore(el1_sysregs_t *regs); #if CTX_INCLUDE_EL2_REGS -void el2_sysregs_context_save(sys_regs_t *regs); -void el2_sysregs_context_restore(sys_regs_t *regs); +void el2_sysregs_context_save(el2_sysregs_t *regs); +void el2_sysregs_context_restore(el2_sysregs_t *regs); #endif #if CTX_INCLUDE_FPREGS diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S index bcc7eef9e..30ad7b7d1 100644 --- a/lib/el3_runtime/aarch64/context.S +++ b/lib/el3_runtime/aarch64/context.S @@ -29,189 +29,187 @@ /* ----------------------------------------------------- * The following function strictly follows the AArch64 * PCS to use x9-x17 (temporary caller-saved registers) - * to save EL1 system register context. It assumes that - * 'x0' is pointing to a 'el1_sys_regs' structure where + * to save EL2 system register context. It assumes that + * 'x0' is pointing to a 'el2_sys_regs' structure where * the register context will be saved. + * + * The following registers are not added. + * AMEVCNTVOFF0_EL2 + * AMEVCNTVOFF1_EL2 + * ICH_AP0R_EL2 + * ICH_AP1R_EL2 + * ICH_LR_EL2 * ----------------------------------------------------- */ + func el2_sysregs_context_save - mrs x9, actlr_el2 - str x9, [x0, #CTX_ACTLR_EL2] + mrs x10, afsr0_el2 + stp x9, x10, [x0, #CTX_ACTLR_EL2] - mrs x9, afsr0_el2 - str x9, [x0, #CTX_AFSR0_EL2] + mrs x11, afsr1_el2 + mrs x12, amair_el2 + stp x11, x12, [x0, #CTX_AFSR1_EL2] - mrs x9, afsr1_el2 - str x9, [x0, #CTX_AFSR1_EL2] + mrs x13, cnthctl_el2 + mrs x14, cnthp_ctl_el2 + stp x13, x14, [x0, #CTX_CNTHCTL_EL2] - mrs x9, amair_el2 - str x9, [x0, #CTX_AMAIR_EL2] - - mrs x9, cnthctl_el2 - str x9, [x0, #CTX_CNTHCTL_EL2] - - mrs x9, cnthp_ctl_el2 - str x9, [x0, #CTX_CNTHP_CTL_EL2] - - mrs x9, cnthp_cval_el2 - str x9, [x0, #CTX_CNTHP_CVAL_EL2] - - mrs x9, cnthp_tval_el2 - str x9, [x0, #CTX_CNTHP_TVAL_EL2] - - mrs x9, CNTPOFF_EL2 - str x9, [x0, #CTX_CNTPOFF_EL2] - - mrs x9, cntvoff_el2 - str x9, [x0, #CTX_CNTVOFF_EL2] + mrs x15, cnthp_cval_el2 + mrs x16, cnthp_tval_el2 + stp x15, x16, [x0, #CTX_CNTHP_CVAL_EL2] + mrs x17, cntvoff_el2 mrs x9, cptr_el2 - str x9, [x0, #CTX_CPTR_EL2] + stp x17, x9, [x0, #CTX_CNTVOFF_EL2] - mrs x9, dbgvcr32_el2 - str x9, [x0, #CTX_DBGVCR32_EL2] + mrs x10, dbgvcr32_el2 + mrs x11, elr_el2 + stp x10, x11, [x0, #CTX_DBGVCR32_EL2] - mrs x9, elr_el2 - str x9, [x0, #CTX_ELR_EL2] + mrs x14, esr_el2 + mrs x15, far_el2 + stp x14, x15, [x0, #CTX_ESR_EL2] - mrs x9, esr_el2 - str x9, [x0, #CTX_ESR_EL2] - - mrs x9, far_el2 - str x9, [x0, #CTX_FAR_EL2] - - mrs x9, fpexc32_el2 - str x9, [x0, #CTX_FPEXC32_EL2] - - mrs x9, hacr_el2 - str x9, [x0, #CTX_HACR_EL2] - - mrs x9, HAFGRTR_EL2 - str x9, [x0, #CTX_HAFGRTR_EL2] + mrs x16, fpexc32_el2 + mrs x17, hacr_el2 + stp x16, x17, [x0, #CTX_FPEXC32_EL2] mrs x9, hcr_el2 - str x9, [x0, #CTX_HCR_EL2] + mrs x10, hpfar_el2 + stp x9, x10, [x0, #CTX_HCR_EL2] - mrs x9, HDFGRTR_EL2 - str x9, [x0, #CTX_HDFGRTR_EL2] + mrs x11, hstr_el2 + mrs x12, ICC_SRE_EL2 + stp x11, x12, [x0, #CTX_HSTR_EL2] - mrs x9, HDFGWTR_EL2 - str x9, [x0, #CTX_HDFGWTR_EL2] + mrs x13, ICH_HCR_EL2 + mrs x14, ICH_VMCR_EL2 + stp x13, x14, [x0, #CTX_ICH_HCR_EL2] - mrs x9, HFGITR_EL2 - str x9, [x0, #CTX_HFGITR_EL2] - - mrs x9, HFGRTR_EL2 - str x9, [x0, #CTX_HFGRTR_EL2] - - mrs x9, HFGWTR_EL2 - str x9, [x0, #CTX_HFGWTR_EL2] - - mrs x9, hpfar_el2 - str x9, [x0, #CTX_HPFAR_EL2] - - mrs x9, hstr_el2 - str x9, [x0, #CTX_HSTR_EL2] - - mrs x9, ICC_SRE_EL2 - str x9, [x0, #CTX_ICC_SRE_EL2] - - mrs x9, 
ICH_EISR_EL2 - str x9, [x0, #CTX_ICH_EISR_EL2] - - mrs x9, ICH_ELRSR_EL2 - str x9, [x0, #CTX_ICH_ELRSR_EL2] - - mrs x9, ICH_HCR_EL2 - str x9, [x0, #CTX_ICH_HCR_EL2] - - mrs x9, ICH_MISR_EL2 - str x9, [x0, #CTX_ICH_MISR_EL2] - - mrs x9, ICH_VMCR_EL2 - str x9, [x0, #CTX_ICH_VMCR_EL2] - - mrs x9, ICH_VTR_EL2 - str x9, [x0, #CTX_ICH_VTR_EL2] - - mrs x9, mair_el2 - str x9, [x0, #CTX_MAIR_EL2] - - mrs x9, mdcr_el2 - str x9, [x0, #CTX_MDCR_EL2] - - mrs x9, MPAM2_EL2 - str x9, [x0, #CTX_MPAM2_EL2] - - mrs x9, MPAMHCR_EL2 - str x9, [x0, #CTX_MPAMHCR_EL2] - - mrs x9, MPAMVPM0_EL2 - str x9, [x0, #CTX_MPAMVPM0_EL2] - - mrs x9, MPAMVPM1_EL2 - str x9, [x0, #CTX_MPAMVPM1_EL2] - - mrs x9, MPAMVPM2_EL2 - str x9, [x0, #CTX_MPAMVPM2_EL2] - - mrs x9, MPAMVPM3_EL2 - str x9, [x0, #CTX_MPAMVPM3_EL2] - - mrs x9, MPAMVPM4_EL2 - str x9, [x0, #CTX_MPAMVPM4_EL2] - - mrs x9, MPAMVPM5_EL2 - str x9, [x0, #CTX_MPAMVPM5_EL2] - - mrs x9, MPAMVPM6_EL2 - str x9, [x0, #CTX_MPAMVPM6_EL2] - - mrs x9, MPAMVPM7_EL2 - str x9, [x0, #CTX_MPAMVPM7_EL2] - - mrs x9, MPAMVPMV_EL2 - str x9, [x0, #CTX_MPAMVPMV_EL2] - - mrs x9, rmr_el2 - str x9, [x0, #CTX_RMR_EL2] + mrs x15, mair_el2 + mrs x16, mdcr_el2 + stp x15, x16, [x0, #CTX_MAIR_EL2] + mrs x17, PMSCR_EL2 mrs x9, sctlr_el2 - str x9, [x0, #CTX_SCTLR_EL2] + stp x17, x9, [x0, #CTX_PMSCR_EL2] - mrs x9, spsr_el2 - str x9, [x0, #CTX_SPSR_EL2] + mrs x10, spsr_el2 + mrs x11, sp_el2 + stp x10, x11, [x0, #CTX_SPSR_EL2] - mrs x9, sp_el2 - str x9, [x0, #CTX_SP_EL2] + mrs x12, tcr_el2 + mrs x13, TRFCR_EL2 + stp x12, x13, [x0, #CTX_TCR_EL2] - mrs x9, tcr_el2 - str x9, [x0, #CTX_TCR_EL2] + mrs x14, ttbr0_el2 + mrs x15, vbar_el2 + stp x14, x15, [x0, #CTX_TTBR0_EL2] - mrs x9, tpidr_el2 - str x9, [x0, #CTX_TPIDR_EL2] - - mrs x9, ttbr0_el2 - str x9, [x0, #CTX_TTBR0_EL2] - - mrs x9, vbar_el2 - str x9, [x0, #CTX_VBAR_EL2] - - mrs x9, vmpidr_el2 - str x9, [x0, #CTX_VMPIDR_EL2] - - mrs x9, vpidr_el2 - str x9, [x0, #CTX_VPIDR_EL2] + mrs x16, vmpidr_el2 + mrs x17, vpidr_el2 + stp x16, x17, [x0, #CTX_VMPIDR_EL2] mrs x9, vtcr_el2 - str x9, [x0, #CTX_VTCR_EL2] + mrs x10, vttbr_el2 + stp x9, x10, [x0, #CTX_VTCR_EL2] - mrs x9, vttbr_el2 - str x9, [x0, #CTX_VTTBR_EL2] +#if CTX_INCLUDE_MTE_REGS + mrs x11, TFSR_EL2 + str x11, [x0, #CTX_TFSR_EL2] +#endif - mrs x9, ZCR_EL2 - str x9, [x0, #CTX_ZCR_EL2] +#if ENABLE_MPAM_FOR_LOWER_ELS + mrs x9, MPAM2_EL2 + mrs x10, MPAMHCR_EL2 + stp x9, x10, [x0, #CTX_MPAM2_EL2] + + mrs x11, MPAMVPM0_EL2 + mrs x12, MPAMVPM1_EL2 + stp x11, x12, [x0, #CTX_MPAMVPM0_EL2] + + mrs x13, MPAMVPM2_EL2 + mrs x14, MPAMVPM3_EL2 + stp x13, x14, [x0, #CTX_MPAMVPM2_EL2] + + mrs x15, MPAMVPM4_EL2 + mrs x16, MPAMVPM5_EL2 + stp x15, x16, [x0, #CTX_MPAMVPM4_EL2] + + mrs x17, MPAMVPM6_EL2 + mrs x9, MPAMVPM7_EL2 + stp x17, x9, [x0, #CTX_MPAMVPM6_EL2] + + mrs x10, MPAMVPMV_EL2 + str x10, [x0, #CTX_MPAMVPMV_EL2] +#endif + + +#if ARM_ARCH_AT_LEAST(8, 6) + mrs x11, HAFGRTR_EL2 + mrs x12, HDFGRTR_EL2 + stp x11, x12, [x0, #CTX_HAFGRTR_EL2] + + mrs x13, HDFGWTR_EL2 + mrs x14, HFGITR_EL2 + stp x13, x14, [x0, #CTX_HDFGWTR_EL2] + + mrs x15, HFGRTR_EL2 + mrs x16, HFGWTR_EL2 + stp x15, x16, [x0, #CTX_HFGRTR_EL2] + + mrs x17, CNTPOFF_EL2 + str x17, [x0, #CTX_CNTPOFF_EL2] +#endif + +#if ARM_ARCH_AT_LEAST(8, 4) + mrs x9, cnthps_ctl_el2 + mrs x10, cnthps_cval_el2 + stp x9, x10, [x0, #CTX_CNTHPS_CTL_EL2] + + mrs x11, cnthps_tval_el2 + mrs x12, cnthvs_ctl_el2 + stp x11, x12, [x0, #CTX_CNTHPS_TVAL_EL2] + + mrs x13, cnthvs_cval_el2 + mrs x14, cnthvs_tval_el2 + stp x13, x14, [x0, #CTX_CNTHVS_CVAL_EL2] + + mrs x15, cnthv_ctl_el2 + 
mrs x16, cnthv_cval_el2 + stp x15, x16, [x0, #CTX_CNTHV_CTL_EL2] + + mrs x17, cnthv_tval_el2 + mrs x9, contextidr_el2 + stp x17, x9, [x0, #CTX_CNTHV_TVAL_EL2] + + mrs x10, sder32_el2 + str x10, [x0, #CTX_SDER32_EL2] + + mrs x11, ttbr1_el2 + str x11, [x0, #CTX_TTBR1_EL2] + + mrs x12, vdisr_el2 + str x12, [x0, #CTX_VDISR_EL2] + + mrs x13, vncr_el2 + str x13, [x0, #CTX_VNCR_EL2] + + mrs x14, vsesr_el2 + str x14, [x0, #CTX_VSESR_EL2] + + mrs x15, vstcr_el2 + str x15, [x0, #CTX_VSTCR_EL2] + + mrs x16, vsttbr_el2 + str x16, [x0, #CTX_VSTTBR_EL2] +#endif + +#if ARM_ARCH_AT_LEAST(8, 5) + mrs x17, scxtnum_el2 + str x17, [x0, #CTX_SCXTNUM_EL2] +#endif ret endfunc el2_sysregs_context_save @@ -219,189 +217,186 @@ endfunc el2_sysregs_context_save /* ----------------------------------------------------- * The following function strictly follows the AArch64 * PCS to use x9-x17 (temporary caller-saved registers) - * to restore EL1 system register context. It assumes - * that 'x0' is pointing to a 'el1_sys_regs' structure + * to restore EL2 system register context. It assumes + * that 'x0' is pointing to a 'el2_sys_regs' structure * from where the register context will be restored + + * The following registers are not restored + * AMEVCNTVOFF0_EL2 + * AMEVCNTVOFF1_EL2 + * ICH_AP0R_EL2 + * ICH_AP1R_EL2 + * ICH_LR_EL2 * ----------------------------------------------------- */ func el2_sysregs_context_restore - ldr x9, [x0, #CTX_ACTLR_EL2] + ldp x9, x10, [x0, #CTX_ACTLR_EL2] msr actlr_el2, x9 + msr afsr0_el2, x10 - ldr x9, [x0, #CTX_AFSR0_EL2] - msr afsr0_el2, x9 + ldp x11, x12, [x0, #CTX_AFSR1_EL2] + msr afsr1_el2, x11 + msr amair_el2, x12 - ldr x9, [x0, #CTX_AFSR1_EL2] - msr afsr1_el2, x9 + ldp x13, x14, [x0, #CTX_CNTHCTL_EL2] + msr cnthctl_el2, x13 + msr cnthp_ctl_el2, x14 - ldr x9, [x0, #CTX_AMAIR_EL2] - msr amair_el2, x9 + ldp x15, x16, [x0, #CTX_CNTHP_CVAL_EL2] + msr cnthp_cval_el2, x15 + msr cnthp_tval_el2, x16 - ldr x9, [x0, #CTX_CNTHCTL_EL2] - msr cnthctl_el2, x9 - - ldr x9, [x0, #CTX_CNTHP_CTL_EL2] - msr cnthp_ctl_el2, x9 - - ldr x9, [x0, #CTX_CNTHP_CVAL_EL2] - msr cnthp_cval_el2, x9 - - ldr x9, [x0, #CTX_CNTHP_TVAL_EL2] - msr cnthp_tval_el2, x9 - - ldr x9, [x0, #CTX_CNTPOFF_EL2] - msr CNTPOFF_EL2, x9 - - ldr x9, [x0, #CTX_CNTVOFF_EL2] - msr cntvoff_el2, x9 - - ldr x9, [x0, #CTX_CPTR_EL2] + ldp x17, x9, [x0, #CTX_CNTVOFF_EL2] + msr cntvoff_el2, x17 msr cptr_el2, x9 - ldr x9, [x0, #CTX_DBGVCR32_EL2] - msr dbgvcr32_el2, x9 + ldp x10, x11, [x0, #CTX_DBGVCR32_EL2] + msr dbgvcr32_el2, x10 + msr elr_el2, x11 - ldr x9, [x0, #CTX_ELR_EL2] - msr elr_el2, x9 + ldp x14, x15, [x0, #CTX_ESR_EL2] + msr esr_el2, x14 + msr far_el2, x15 - ldr x9, [x0, #CTX_ESR_EL2] - msr esr_el2, x9 + ldp x16, x17, [x0, #CTX_FPEXC32_EL2] + msr fpexc32_el2, x16 + msr hacr_el2, x17 - ldr x9, [x0, #CTX_FAR_EL2] - msr far_el2, x9 - - ldr x9, [x0, #CTX_FPEXC32_EL2] - msr fpexc32_el2, x9 - - ldr x9, [x0, #CTX_HACR_EL2] - msr hacr_el2, x9 - - ldr x9, [x0, #CTX_HAFGRTR_EL2] - msr HAFGRTR_EL2, x9 - - ldr x9, [x0, #CTX_HCR_EL2] + ldp x9, x10, [x0, #CTX_HCR_EL2] msr hcr_el2, x9 + msr hpfar_el2, x10 - ldr x9, [x0, #CTX_HDFGRTR_EL2] - msr HDFGRTR_EL2, x9 + ldp x11, x12, [x0, #CTX_HSTR_EL2] + msr hstr_el2, x11 + msr ICC_SRE_EL2, x12 - ldr x9, [x0, #CTX_HDFGWTR_EL2] - msr HDFGWTR_EL2, x9 + ldp x13, x14, [x0, #CTX_ICH_HCR_EL2] + msr ICH_HCR_EL2, x13 + msr ICH_VMCR_EL2, x14 - ldr x9, [x0, #CTX_HFGITR_EL2] - msr HFGITR_EL2, x9 + ldp x15, x16, [x0, #CTX_MAIR_EL2] + msr mair_el2, x15 + msr mdcr_el2, x16 - ldr x9, [x0, #CTX_HFGRTR_EL2] - msr HFGRTR_EL2, 
x9 - - ldr x9, [x0, #CTX_HFGWTR_EL2] - msr HFGWTR_EL2, x9 - - ldr x9, [x0, #CTX_HPFAR_EL2] - msr hpfar_el2, x9 - - ldr x9, [x0, #CTX_HSTR_EL2] - msr hstr_el2, x9 - - ldr x9, [x0, #CTX_ICC_SRE_EL2] - msr ICC_SRE_EL2, x9 - - ldr x9, [x0, #CTX_ICH_EISR_EL2] - msr ICH_EISR_EL2, x9 - - ldr x9, [x0, #CTX_ICH_ELRSR_EL2] - msr ICH_ELRSR_EL2, x9 - - ldr x9, [x0, #CTX_ICH_HCR_EL2] - msr ICH_HCR_EL2, x9 - - ldr x9, [x0, #CTX_ICH_MISR_EL2] - msr ICH_MISR_EL2, x9 - - ldr x9, [x0, #CTX_ICH_VMCR_EL2] - msr ICH_VMCR_EL2, x9 - - ldr x9, [x0, #CTX_ICH_VTR_EL2] - msr ICH_VTR_EL2, x9 - - ldr x9, [x0, #CTX_MAIR_EL2] - msr mair_el2, x9 - - ldr x9, [x0, #CTX_MDCR_EL2] - msr mdcr_el2, x9 - - ldr x9, [x0, #CTX_MPAM2_EL2] - msr MPAM2_EL2, x9 - - ldr x9, [x0, #CTX_MPAMHCR_EL2] - msr MPAMHCR_EL2, x9 - - ldr x9, [x0, #CTX_MPAMVPM0_EL2] - msr MPAMVPM0_EL2, x9 - - ldr x9, [x0, #CTX_MPAMVPM1_EL2] - msr MPAMVPM1_EL2, x9 - - ldr x9, [x0, #CTX_MPAMVPM2_EL2] - msr MPAMVPM2_EL2, x9 - - ldr x9, [x0, #CTX_MPAMVPM3_EL2] - msr MPAMVPM3_EL2, x9 - - ldr x9, [x0, #CTX_MPAMVPM4_EL2] - msr MPAMVPM4_EL2, x9 - - ldr x9, [x0, #CTX_MPAMVPM5_EL2] - msr MPAMVPM5_EL2, x9 - - ldr x9, [x0, #CTX_MPAMVPM6_EL2] - msr MPAMVPM6_EL2, x9 - - ldr x9, [x0, #CTX_MPAMVPM7_EL2] - msr MPAMVPM7_EL2, x9 - - ldr x9, [x0, #CTX_MPAMVPMV_EL2] - msr MPAMVPMV_EL2, x9 - - ldr x9, [x0, #CTX_RMR_EL2] - msr rmr_el2, x9 - - ldr x9, [x0, #CTX_SCTLR_EL2] + ldp x17, x9, [x0, #CTX_PMSCR_EL2] + msr PMSCR_EL2, x17 msr sctlr_el2, x9 - ldr x9, [x0, #CTX_SPSR_EL2] - msr spsr_el2, x9 + ldp x10, x11, [x0, #CTX_SPSR_EL2] + msr spsr_el2, x10 + msr sp_el2, x11 - ldr x9, [x0, #CTX_SP_EL2] - msr sp_el2, x9 + ldp x12, x13, [x0, #CTX_TCR_EL2] + msr tcr_el2, x12 + msr TRFCR_EL2, x13 - ldr x9, [x0, #CTX_TCR_EL2] - msr tcr_el2, x9 + ldp x14, x15, [x0, #CTX_TTBR0_EL2] + msr ttbr0_el2, x14 + msr vbar_el2, x15 - ldr x9, [x0, #CTX_TPIDR_EL2] - msr tpidr_el2, x9 + ldp x16, x17, [x0, #CTX_VMPIDR_EL2] + msr vmpidr_el2, x16 + msr vpidr_el2, x17 - ldr x9, [x0, #CTX_TTBR0_EL2] - msr ttbr0_el2, x9 - - ldr x9, [x0, #CTX_VBAR_EL2] - msr vbar_el2, x9 - - ldr x9, [x0, #CTX_VMPIDR_EL2] - msr vmpidr_el2, x9 - - ldr x9, [x0, #CTX_VPIDR_EL2] - msr vpidr_el2, x9 - - ldr x9, [x0, #CTX_VTCR_EL2] + ldp x9, x10, [x0, #CTX_VTCR_EL2] msr vtcr_el2, x9 + msr vttbr_el2, x10 - ldr x9, [x0, #CTX_VTTBR_EL2] - msr vttbr_el2, x9 +#if CTX_INCLUDE_MTE_REGS + ldr x11, [x0, #CTX_TFSR_EL2] + msr TFSR_EL2, x11 +#endif - ldr x9, [x0, #CTX_ZCR_EL2] - msr ZCR_EL2, x9 +#if ENABLE_MPAM_FOR_LOWER_ELS + ldp x9, x10, [x0, #CTX_MPAM2_EL2] + msr MPAM2_EL2, x9 + msr MPAMHCR_EL2, x10 + + ldp x11, x12, [x0, #CTX_MPAMVPM0_EL2] + msr MPAMVPM0_EL2, x11 + msr MPAMVPM1_EL2, x12 + + ldp x13, x14, [x0, #CTX_MPAMVPM2_EL2] + msr MPAMVPM2_EL2, x13 + msr MPAMVPM3_EL2, x14 + + ldp x15, x16, [x0, #CTX_MPAMVPM4_EL2] + msr MPAMVPM4_EL2, x15 + msr MPAMVPM5_EL2, x16 + + ldp x17, x9, [x0, #CTX_MPAMVPM6_EL2] + msr MPAMVPM6_EL2, x17 + msr MPAMVPM7_EL2, x9 + + ldr x10, [x0, #CTX_MPAMVPMV_EL2] + msr MPAMVPMV_EL2, x10 +#endif + +#if ARM_ARCH_AT_LEAST(8, 6) + ldp x11, x12, [x0, #CTX_HAFGRTR_EL2] + msr HAFGRTR_EL2, x11 + msr HDFGRTR_EL2, x12 + + ldp x13, x14, [x0, #CTX_HDFGWTR_EL2] + msr HDFGWTR_EL2, x13 + msr HFGITR_EL2, x14 + + ldp x15, x16, [x0, #CTX_HFGRTR_EL2] + msr HFGRTR_EL2, x15 + msr HFGWTR_EL2, x16 + + ldr x17, [x0, #CTX_CNTPOFF_EL2] + msr CNTPOFF_EL2, x17 +#endif + +#if ARM_ARCH_AT_LEAST(8, 4) + ldp x9, x10, [x0, #CTX_CNTHPS_CTL_EL2] + msr cnthps_ctl_el2, x9 + msr cnthps_cval_el2, x10 + + ldp x11, x12, [x0, #CTX_CNTHPS_TVAL_EL2] + msr cnthps_tval_el2, x11 
+ msr cnthvs_ctl_el2, x12 + + ldp x13, x14, [x0, #CTX_CNTHVS_CVAL_EL2] + msr cnthvs_cval_el2, x13 + msr cnthvs_tval_el2, x14 + + ldp x15, x16, [x0, #CTX_CNTHV_CTL_EL2] + msr cnthv_ctl_el2, x15 + msr cnthv_cval_el2, x16 + + ldp x17, x9, [x0, #CTX_CNTHV_TVAL_EL2] + msr cnthv_tval_el2, x17 + msr contextidr_el2, x9 + + ldr x10, [x0, #CTX_SDER32_EL2] + msr sder32_el2, x10 + + ldr x11, [x0, #CTX_TTBR1_EL2] + msr ttbr1_el2, x11 + + ldr x12, [x0, #CTX_VDISR_EL2] + msr vdisr_el2, x12 + + ldr x13, [x0, #CTX_VNCR_EL2] + msr vncr_el2, x13 + + ldr x14, [x0, #CTX_VSESR_EL2] + msr vsesr_el2, x14 + + ldr x15, [x0, #CTX_VSTCR_EL2] + msr vstcr_el2, x15 + + ldr x16, [x0, #CTX_VSTTBR_EL2] + msr vsttbr_el2, x16 +#endif + +#if ARM_ARCH_AT_LEAST(8, 5) + ldr x17, [x0, #CTX_SCXTNUM_EL2] + msr scxtnum_el2, x17 +#endif ret endfunc el2_sysregs_context_restore diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c index f59bcfcd9..0314a8511 100644 --- a/lib/el3_runtime/aarch64/context_mgmt.c +++ b/lib/el3_runtime/aarch64/context_mgmt.c @@ -234,7 +234,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep) * and other EL2 registers are set up by cm_prepare_ns_entry() as they * are not part of the stored cpu_context. */ - write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx); + write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx); /* * Base the context ACTLR_EL1 on the current value, as it is @@ -244,7 +244,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep) * be zero. */ actlr_elx = read_actlr_el1(); - write_ctx_reg((get_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx)); + write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx)); /* * Populate EL3 state so that we've the right context @@ -336,7 +336,7 @@ void cm_prepare_el3_exit(uint32_t security_state) CTX_SCR_EL3); if ((scr_el3 & SCR_HCE_BIT) != 0U) { /* Use SCTLR_EL1.EE value to initialise sctlr_el2 */ - sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx), + sctlr_elx = read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1); sctlr_elx &= SCTLR_EE_BIT; sctlr_elx |= SCTLR_EL2_RES1; @@ -549,7 +549,7 @@ void cm_el2_sysregs_context_save(uint32_t security_state) ctx = cm_get_context(security_state); assert(ctx != NULL); - el2_sysregs_context_save(get_sysregs_ctx(ctx)); + el2_sysregs_context_save(get_el2_sysregs_ctx(ctx)); } } @@ -571,7 +571,7 @@ void cm_el2_sysregs_context_restore(uint32_t security_state) ctx = cm_get_context(security_state); assert(ctx != NULL); - el2_sysregs_context_restore(get_sysregs_ctx(ctx)); + el2_sysregs_context_restore(get_el2_sysregs_ctx(ctx)); } } #endif /* CTX_INCLUDE_EL2_REGS */ @@ -588,7 +588,7 @@ void cm_el1_sysregs_context_save(uint32_t security_state) ctx = cm_get_context(security_state); assert(ctx != NULL); - el1_sysregs_context_save(get_sysregs_ctx(ctx)); + el1_sysregs_context_save(get_el1_sysregs_ctx(ctx)); #if IMAGE_BL31 if (security_state == SECURE) @@ -605,7 +605,7 @@ void cm_el1_sysregs_context_restore(uint32_t security_state) ctx = cm_get_context(security_state); assert(ctx != NULL); - el1_sysregs_context_restore(get_sysregs_ctx(ctx)); + el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx)); #if IMAGE_BL31 if (security_state == SECURE) diff --git a/services/spd/trusty/trusty.c b/services/spd/trusty/trusty.c index 092ffa8eb..ba2f4a6e4 100644 --- a/services/spd/trusty/trusty.c +++ b/services/spd/trusty/trusty.c @@ -150,9 +150,9 @@ static uint64_t trusty_fiq_handler(uint32_t id, (void)memcpy(&ctx->fiq_gpregs, 
get_gpregs_ctx(handle),
 sizeof(ctx->fiq_gpregs));
 ctx->fiq_pc = SMC_GET_EL3(handle, CTX_ELR_EL3);
 ctx->fiq_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
- ctx->fiq_sp_el1 = read_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1);
+ ctx->fiq_sp_el1 = read_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1);
 
- write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
+ write_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
 cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc,
 (uint32_t)ctx->fiq_handler_cpsr);
 
 SMC_RET0(handle);
@@ -211,7 +211,7 @@ static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t
 */
 (void)memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
 ctx->fiq_handler_active = 0;
- write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
+ write_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
 cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, (uint32_t)ctx->fiq_cpsr);
 
 SMC_RET0(handle);
diff --git a/services/std_svc/spm_mm/spm_mm_setup.c b/services/std_svc/spm_mm/spm_mm_setup.c
index ccb2f9058..468e5b3af 100644
--- a/services/std_svc/spm_mm/spm_mm_setup.c
+++ b/services/std_svc/spm_mm/spm_mm_setup.c
@@ -116,17 +116,17 @@ void spm_sp_setup(sp_context_t *sp_ctx)
 xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
 EL1_EL0_REGIME);
 
- write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1,
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_MAIR_EL1,
 mmu_cfg_params[MMU_CFG_MAIR]);
 
- write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1,
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TCR_EL1,
 mmu_cfg_params[MMU_CFG_TCR]);
 
- write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TTBR0_EL1,
 mmu_cfg_params[MMU_CFG_TTBR0]);
 
 /* Setup SCTLR_EL1 */
- u_register_t sctlr_el1 = read_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1);
+ u_register_t sctlr_el1 = read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1);
 
 sctlr_el1 |=
 /*SCTLR_EL1_RES1 |*/
@@ -160,7 +160,7 @@ void spm_sp_setup(sp_context_t *sp_ctx)
 SCTLR_UMA_BIT
 );
 
- write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
 
 /*
 * Setup other system registers
@@ -168,10 +168,10 @@ void spm_sp_setup(sp_context_t *sp_ctx)
 */
 
 /* Shim Exception Vector Base Address */
- write_ctx_reg(get_sysregs_ctx(ctx), CTX_VBAR_EL1,
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_VBAR_EL1,
 SPM_SHIM_EXCEPTIONS_PTR);
 
- write_ctx_reg(get_sysregs_ctx(ctx), CTX_CNTKCTL_EL1,
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CNTKCTL_EL1,
 EL0PTEN_BIT | EL0VTEN_BIT | EL0PCTEN_BIT | EL0VCTEN_BIT);
 
 /*
@@ -181,7 +181,7 @@ void spm_sp_setup(sp_context_t *sp_ctx)
 * TTA: Enable access to trace registers.
 * ZEN (v8.2): Trap SVE instructions and access to SVE registers.
 */
- write_ctx_reg(get_sysregs_ctx(ctx), CTX_CPACR_EL1,
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CPACR_EL1,
 CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));
 
 /*

From e0f924a52925c8e2baf20d3c04b29234d948ea51 Mon Sep 17 00:00:00 2001
From: Max Shvetsov
Date: Fri, 24 Jan 2020 13:48:53 +0000
Subject: [PATCH 3/6] SPMD: [tegra] rename el1_sys_regs structure to el1_sysregs

Renamed the structure according to the SPMD refactoring introduced
earlier in this series, since the system register context is now split
to service both EL1 and EL2 as opposed to serving only EL1. 
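
As a sketch of the call-site migration this rename implies (for
illustration in this commit message only; the snippet is hypothetical
and not part of the diff below):

    cpu_context_t *ctx = cm_get_context(NON_SECURE);

    /* Before this series: generic accessor and combined type */
    const el1_sys_regs_t *sysregs = get_sysregs_ctx(ctx);
    u_register_t sp_el1 = read_ctx_reg(sysregs, CTX_SP_EL1);

    /* After: EL1-specific type and accessor introduced by the
     * context refactoring */
    const el1_sysregs_t *el1_ctx = get_el1_sysregs_ctx(ctx);
    u_register_t sp_el1 = read_ctx_reg(el1_ctx, CTX_SP_EL1);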
Change-Id: I23b7c089e53f617157a4b4e6443acce50d85c3b5 Signed-off-by: Max Shvetsov --- plat/nvidia/tegra/common/tegra_fiq_glue.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plat/nvidia/tegra/common/tegra_fiq_glue.c b/plat/nvidia/tegra/common/tegra_fiq_glue.c index 60b559556..8e198ae76 100644 --- a/plat/nvidia/tegra/common/tegra_fiq_glue.c +++ b/plat/nvidia/tegra/common/tegra_fiq_glue.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -155,7 +155,7 @@ int32_t tegra_fiq_get_intr_context(void) { cpu_context_t *ctx = cm_get_context(NON_SECURE); gp_regs_t *gpregs_ctx = get_gpregs_ctx(ctx); - const el1_sys_regs_t *el1state_ctx = get_sysregs_ctx(ctx); + const el1_sysregs_t *el1state_ctx = get_el1_sysregs_ctx(ctx); uint32_t cpu = plat_my_core_pos(); uint64_t val; From 0f14d02f8fdc23aae7da452cbb18a92eba1feda2 Mon Sep 17 00:00:00 2001 From: Max Shvetsov Date: Thu, 27 Feb 2020 14:54:21 +0000 Subject: [PATCH 4/6] SPMD: SPMC init, SMC handler cosmetic changes Change-Id: I8881d489994aea667e3dd59932ab4123f511d6ba Signed-off-by: Artsem Artsemenka Signed-off-by: Max Shvetsov --- include/services/spmd_svc.h | 2 +- services/std_svc/spmd/spmd_main.c | 398 ++++++++++++++++-------------- 2 files changed, 211 insertions(+), 189 deletions(-) diff --git a/include/services/spmd_svc.h b/include/services/spmd_svc.h index 6e4caf266..a766dcf8f 100644 --- a/include/services/spmd_svc.h +++ b/include/services/spmd_svc.h @@ -11,7 +11,7 @@ #include #include -int32_t spmd_setup(void); +int spmd_setup(void); uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, diff --git a/services/std_svc/spmd/spmd_main.c b/services/std_svc/spmd/spmd_main.c index 110719020..50c32fc83 100644 --- a/services/std_svc/spmd/spmd_main.c +++ b/services/std_svc/spmd/spmd_main.c @@ -33,7 +33,24 @@ spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT]; /******************************************************************************* * SPM Core attribute information read from its manifest. ******************************************************************************/ -spmc_manifest_sect_attribute_t spmc_attrs; +static spmc_manifest_sect_attribute_t spmc_attrs; + +/******************************************************************************* + * SPM Core entry point information. Discovered on the primary core and reused + * on secondary cores. + ******************************************************************************/ +static entry_point_info_t *spmc_ep_info; + +/******************************************************************************* + * Static function declaration. + ******************************************************************************/ +static int32_t spmd_init(void); +static int spmd_spmc_init(void *rd_base, size_t rd_size); +static uint64_t spmd_spci_error_return(void *handle, int error_code); +static uint64_t spmd_smc_forward(uint32_t smc_fid, uint32_t in_sstate, + uint32_t out_sstate, uint64_t x1, + uint64_t x2, uint64_t x3, uint64_t x4, + void *handle); /******************************************************************************* * This function takes an SP context pointer and performs a synchronous entry @@ -110,18 +127,137 @@ static int32_t spmd_init(void) return 1; } +/******************************************************************************* + * Load SPMC manifest, init SPMC. 
+ ******************************************************************************/ +static int spmd_spmc_init(void *rd_base, size_t rd_size) +{ + int rc; + uint32_t ep_attr; + unsigned int linear_id = plat_my_core_pos(); + spmd_spm_core_context_t *spm_ctx = &spm_core_context[linear_id]; + + /* Load the SPM core manifest */ + rc = plat_spm_core_manifest_load(&spmc_attrs, rd_base, rd_size); + if (rc != 0) { + WARN("No or invalid SPM core manifest image provided by BL2 " + "boot loader. "); + return 1; + } + + /* + * Ensure that the SPM core version is compatible with the SPM + * dispatcher version + */ + if ((spmc_attrs.major_version != SPCI_VERSION_MAJOR) || + (spmc_attrs.minor_version > SPCI_VERSION_MINOR)) { + WARN("Unsupported SPCI version (%x.%x) specified in SPM core " + "manifest image provided by BL2 boot loader.\n", + spmc_attrs.major_version, spmc_attrs.minor_version); + return 1; + } + + INFO("SPCI version (%x.%x).\n", spmc_attrs.major_version, + spmc_attrs.minor_version); + + /* Validate the SPM core runtime EL */ + if ((spmc_attrs.runtime_el != MODE_EL1) && + (spmc_attrs.runtime_el != MODE_EL2)) { + WARN("Unsupported SPM core run time EL%x specified in " + "manifest image provided by BL2 boot loader.\n", + spmc_attrs.runtime_el); + return 1; + } + + INFO("SPM core run time EL%x.\n", spmc_attrs.runtime_el); + + /* Validate the SPM core execution state */ + if ((spmc_attrs.exec_state != MODE_RW_64) && + (spmc_attrs.exec_state != MODE_RW_32)) { + WARN("Unsupported SPM core execution state %x specified in " + "manifest image provided by BL2 boot loader.\n", + spmc_attrs.exec_state); + return 1; + } + + INFO("SPM core execution state %x.\n", spmc_attrs.exec_state); + + /* Ensure manifest has not requested S-EL2 in AArch32 state */ + if ((spmc_attrs.exec_state == MODE_RW_32) && + (spmc_attrs.runtime_el == MODE_EL2)) { + WARN("Invalid combination of SPM core execution state (%x) " + "and run time EL (%x).\n", spmc_attrs.exec_state, + spmc_attrs.runtime_el); + return 1; + } + + /* + * Check if S-EL2 is supported on this system if S-EL2 + * is required for SPM + */ + if (spmc_attrs.runtime_el == MODE_EL2) { + uint64_t sel2 = read_id_aa64pfr0_el1(); + + sel2 >>= ID_AA64PFR0_SEL2_SHIFT; + sel2 &= ID_AA64PFR0_SEL2_MASK; + + if (!sel2) { + WARN("SPM core run time EL: S-EL%x is not supported " + "but specified in manifest image provided by " + "BL2 boot loader.\n", spmc_attrs.runtime_el); + return 1; + } + } + + /* Initialise an entrypoint to set up the CPU context */ + ep_attr = SECURE | EP_ST_ENABLE; + if (read_sctlr_el3() & SCTLR_EE_BIT) { + ep_attr |= EP_EE_BIG; + } + + SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr); + assert(spmc_ep_info->pc == BL32_BASE); + + /* + * Populate SPSR for SPM core based upon validated parameters from the + * manifest + */ + if (spmc_attrs.exec_state == MODE_RW_32) { + spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM, + SPSR_E_LITTLE, + DAIF_FIQ_BIT | + DAIF_IRQ_BIT | + DAIF_ABT_BIT); + } else { + spmc_ep_info->spsr = SPSR_64(spmc_attrs.runtime_el, + MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS); + } + + /* Initialise SPM core context with this entry point information */ + cm_setup_context(&spm_ctx->cpu_ctx, spmc_ep_info); + + /* Reuse PSCI affinity states to mark this SPMC context as off */ + spm_ctx->state = AFF_STATE_OFF; + + INFO("SPM core setup done.\n"); + + /* Register init function for deferred init. 
*/ + bl31_register_bl32_init(&spmd_init); + + return 0; +} + /******************************************************************************* * Initialize context of SPM core. ******************************************************************************/ -int32_t spmd_setup(void) +int spmd_setup(void) { int rc; void *rd_base; size_t rd_size; - entry_point_info_t *spmc_ep_info; uintptr_t rd_base_align; uintptr_t rd_size_align; - uint32_t ep_attr; spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE); if (!spmc_ep_info) { @@ -157,130 +293,66 @@ int32_t spmd_setup(void) (uintptr_t) rd_base_align, rd_size_align, MT_RO_DATA); - if (rc < 0) { + if (rc != 0) { ERROR("Error while mapping SPM core manifest (%d).\n", rc); panic(); } - /* Load the SPM core manifest */ - rc = plat_spm_core_manifest_load(&spmc_attrs, rd_base, rd_size); - if (rc < 0) { - WARN("No or invalid SPM core manifest image provided by BL2 " - "boot loader. "); - goto error; - } + /* Load manifest, init SPMC */ + rc = spmd_spmc_init(rd_base, rd_size); + if (rc != 0) { + int mmap_rc; - /* - * Ensure that the SPM core version is compatible with the SPM - * dispatcher version - */ - if ((spmc_attrs.major_version != SPCI_VERSION_MAJOR) || - (spmc_attrs.minor_version > SPCI_VERSION_MINOR)) { - WARN("Unsupported SPCI version (%x.%x) specified in SPM core " - "manifest image provided by BL2 boot loader.\n", - spmc_attrs.major_version, spmc_attrs.minor_version); - goto error; - } + WARN("Booting device without SPM initialization. " + "SPCI SMCs destined for SPM core will return " + "ENOTSUPPORTED\n"); - INFO("SPCI version (%x.%x).\n", spmc_attrs.major_version, - spmc_attrs.minor_version); - - /* Validate the SPM core runtime EL */ - if ((spmc_attrs.runtime_el != MODE_EL1) && - (spmc_attrs.runtime_el != MODE_EL2)) { - WARN("Unsupported SPM core run time EL%x specified in " - "manifest image provided by BL2 boot loader.\n", - spmc_attrs.runtime_el); - goto error; - } - - INFO("SPM core run time EL%x.\n", spmc_attrs.runtime_el); - - /* Validate the SPM core execution state */ - if ((spmc_attrs.exec_state != MODE_RW_64) && - (spmc_attrs.exec_state != MODE_RW_32)) { - WARN("Unsupported SPM core execution state %x specified in " - "manifest image provided by BL2 boot loader.\n", - spmc_attrs.exec_state); - goto error; - } - - INFO("SPM core execution state %x.\n", spmc_attrs.exec_state); - - /* Ensure manifest has not requested S-EL2 in AArch32 state */ - if ((spmc_attrs.exec_state == MODE_RW_32) && - (spmc_attrs.runtime_el == MODE_EL2)) { - WARN("Invalid combination of SPM core execution state (%x) " - "and run time EL (%x).\n", spmc_attrs.exec_state, - spmc_attrs.runtime_el); - goto error; - } - - /* - * Check if S-EL2 is supported on this system if S-EL2 - * is required for SPM - */ - if (spmc_attrs.runtime_el == MODE_EL2) { - uint64_t sel2 = read_id_aa64pfr0_el1(); - - sel2 >>= ID_AA64PFR0_SEL2_SHIFT; - sel2 &= ID_AA64PFR0_SEL2_MASK; - - if (!sel2) { - WARN("SPM core run time EL: S-EL%x is not supported " - "but specified in manifest image provided by " - "BL2 boot loader.\n", spmc_attrs.runtime_el); - goto error; + mmap_rc = mmap_remove_dynamic_region(rd_base_align, + rd_size_align); + if (mmap_rc != 0) { + ERROR("Error while unmapping SPM core manifest (%d).\n", + mmap_rc); + panic(); } + + return rc; } - /* Initialise an entrypoint to set up the CPU context */ - ep_attr = SECURE | EP_ST_ENABLE; - if (read_sctlr_el3() & SCTLR_EE_BIT) - ep_attr |= EP_EE_BIG; - SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr); - 
assert(spmc_ep_info->pc == BL32_BASE); - - /* - * Populate SPSR for SPM core based upon validated parameters from the - * manifest - */ - if (spmc_attrs.exec_state == MODE_RW_32) { - spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM, - SPSR_E_LITTLE, - DAIF_FIQ_BIT | - DAIF_IRQ_BIT | - DAIF_ABT_BIT); - } else { - spmc_ep_info->spsr = SPSR_64(spmc_attrs.runtime_el, - MODE_SP_ELX, - DISABLE_ALL_EXCEPTIONS); - } - - /* Initialise SPM core context with this entry point information */ - cm_setup_context(&(spm_core_context[plat_my_core_pos()].cpu_ctx), - spmc_ep_info); - - INFO("SPM core setup done.\n"); - - /* Register init function for deferred init. */ - bl31_register_bl32_init(&spmd_init); - return 0; +} -error: - WARN("Booting device without SPM initialization. " - "SPCI SMCs destined for SPM core will return " - "ENOTSUPPORTED\n"); +/******************************************************************************* + * Forward SMC to the other security state + ******************************************************************************/ +static uint64_t spmd_smc_forward(uint32_t smc_fid, uint32_t in_sstate, + uint32_t out_sstate, uint64_t x1, + uint64_t x2, uint64_t x3, uint64_t x4, + void *handle) +{ + /* Save incoming security state */ + cm_el1_sysregs_context_save(in_sstate); + cm_el2_sysregs_context_save(in_sstate); - rc = mmap_remove_dynamic_region(rd_base_align, rd_size_align); - if (rc < 0) { - ERROR("Error while unmapping SPM core manifest (%d).\n", - rc); - panic(); - } + /* Restore outgoing security state */ + cm_el1_sysregs_context_restore(out_sstate); + cm_el2_sysregs_context_restore(out_sstate); + cm_set_next_eret_context(out_sstate); - return 1; + SMC_RET8(cm_get_context(out_sstate), smc_fid, x1, x2, x3, x4, + SMC_GET_GP(handle, CTX_GPREG_X5), + SMC_GET_GP(handle, CTX_GPREG_X6), + SMC_GET_GP(handle, CTX_GPREG_X7)); +} + +/******************************************************************************* + * Return SPCI_ERROR with specified error code + ******************************************************************************/ +static uint64_t spmd_spci_error_return(void *handle, int error_code) +{ + SMC_RET8(handle, SPCI_ERROR, + SPCI_TARGET_INFO_MBZ, error_code, + SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, + SPCI_PARAM_MBZ, SPCI_PARAM_MBZ); } /******************************************************************************* @@ -318,22 +390,13 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, * this CPU. If so, then indicate that the SPM core initialised * unsuccessfully. */ - if ((in_sstate == SECURE) && (ctx->state == SPMC_STATE_RESET)) + if ((in_sstate == SECURE) && + (ctx->state == SPMC_STATE_RESET)) { spmd_spm_core_sync_exit(x2); + } - /* Save incoming security state */ - cm_el1_sysregs_context_save(in_sstate); - cm_el2_sysregs_context_save(in_sstate); - - /* Restore outgoing security state */ - cm_el1_sysregs_context_restore(out_sstate); - cm_el2_sysregs_context_restore(out_sstate); - cm_set_next_eret_context(out_sstate); - - SMC_RET8(cm_get_context(out_sstate), smc_fid, x1, x2, x3, x4, - SMC_GET_GP(handle, CTX_GPREG_X5), - SMC_GET_GP(handle, CTX_GPREG_X6), - SMC_GET_GP(handle, CTX_GPREG_X7)); + return spmd_smc_forward(smc_fid, in_sstate, out_sstate, + x1, x2, x3, x4, handle); break; /* not reached */ case SPCI_VERSION: @@ -357,31 +420,18 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, */ /* - * Check if w1 holds a valid SPCI fid. This is an + * Check if x1 holds a valid SPCI fid. This is an * optimization. 
*/ - if (!is_spci_fid(x1)) - SMC_RET8(handle, SPCI_ERROR, - SPCI_TARGET_INFO_MBZ, SPCI_ERROR_NOT_SUPPORTED, - SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, - SPCI_PARAM_MBZ, SPCI_PARAM_MBZ); + if (!is_spci_fid(x1)) { + return spmd_spci_error_return(handle, + SPCI_ERROR_NOT_SUPPORTED); + } /* Forward SMC from Normal world to the SPM core */ if (in_sstate == NON_SECURE) { - /* Save incoming security state */ - cm_el1_sysregs_context_save(in_sstate); - cm_el2_sysregs_context_save(in_sstate); - - /* Restore outgoing security state */ - cm_el1_sysregs_context_restore(out_sstate); - cm_el2_sysregs_context_restore(out_sstate); - cm_set_next_eret_context(out_sstate); - - SMC_RET8(cm_get_context(out_sstate), smc_fid, - x1, x2, x3, x4, - SMC_GET_GP(handle, CTX_GPREG_X5), - SMC_GET_GP(handle, CTX_GPREG_X6), - SMC_GET_GP(handle, CTX_GPREG_X7)); + return spmd_smc_forward(smc_fid, in_sstate, out_sstate, + x1, x2, x3, x4, handle); } else { /* * Return success if call was from secure world i.e. all @@ -393,6 +443,7 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, SMC_GET_GP(handle, CTX_GPREG_X6), SMC_GET_GP(handle, CTX_GPREG_X7)); } + break; /* not reached */ case SPCI_RX_RELEASE: @@ -402,10 +453,8 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, case SPCI_MSG_RUN: /* This interface must be invoked only by the Normal world */ if (in_sstate == SECURE) { - SMC_RET8(handle, SPCI_ERROR, - SPCI_TARGET_INFO_MBZ, SPCI_ERROR_NOT_SUPPORTED, - SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, - SPCI_PARAM_MBZ, SPCI_PARAM_MBZ); + return spmd_spci_error_return(handle, + SPCI_ERROR_NOT_SUPPORTED); } /* Fall through to forward the call to the other world */ @@ -436,19 +485,8 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, * simply forward the call to the Normal world. 
*/ - /* Save incoming security state */ - cm_el1_sysregs_context_save(in_sstate); - cm_el2_sysregs_context_save(in_sstate); - - /* Restore outgoing security state */ - cm_el1_sysregs_context_restore(out_sstate); - cm_el2_sysregs_context_restore(out_sstate); - cm_set_next_eret_context(out_sstate); - - SMC_RET8(cm_get_context(out_sstate), smc_fid, x1, x2, x3, x4, - SMC_GET_GP(handle, CTX_GPREG_X5), - SMC_GET_GP(handle, CTX_GPREG_X6), - SMC_GET_GP(handle, CTX_GPREG_X7)); + return spmd_smc_forward(smc_fid, in_sstate, out_sstate, + x1, x2, x3, x4, handle); break; /* not reached */ case SPCI_MSG_WAIT: @@ -461,37 +499,21 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, spmd_spm_core_sync_exit(0); } - /* Intentional fall-through */ + /* Fall through to forward the call to the other world */ case SPCI_MSG_YIELD: /* This interface must be invoked only by the Secure world */ if (in_sstate == NON_SECURE) { - SMC_RET8(handle, SPCI_ERROR, - SPCI_TARGET_INFO_MBZ, SPCI_ERROR_NOT_SUPPORTED, - SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, - SPCI_PARAM_MBZ, SPCI_PARAM_MBZ); + return spmd_spci_error_return(handle, + SPCI_ERROR_NOT_SUPPORTED); } - /* Save incoming security state */ - cm_el1_sysregs_context_save(in_sstate); - cm_el2_sysregs_context_save(in_sstate); - - /* Restore outgoing security state */ - cm_el1_sysregs_context_restore(out_sstate); - cm_el2_sysregs_context_restore(out_sstate); - cm_set_next_eret_context(out_sstate); - - SMC_RET8(cm_get_context(out_sstate), smc_fid, x1, x2, x3, x4, - SMC_GET_GP(handle, CTX_GPREG_X5), - SMC_GET_GP(handle, CTX_GPREG_X6), - SMC_GET_GP(handle, CTX_GPREG_X7)); + return spmd_smc_forward(smc_fid, in_sstate, out_sstate, + x1, x2, x3, x4, handle); break; /* not reached */ default: WARN("SPM: Unsupported call 0x%08x\n", smc_fid); - SMC_RET8(handle, SPCI_ERROR, - SPCI_TARGET_INFO_MBZ, SPCI_ERROR_NOT_SUPPORTED, - SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, - SPCI_PARAM_MBZ, SPCI_PARAM_MBZ); + return spmd_spci_error_return(handle, SPCI_ERROR_NOT_SUPPORTED); } } From 93ff138b59d493fe93ba7fee99e9f1d0f1acb361 Mon Sep 17 00:00:00 2001 From: Olivier Deprez Date: Mon, 23 Dec 2019 16:21:12 +0100 Subject: [PATCH 5/6] SPMD: smc handler qualify secure origin using booleans Change-Id: Icc8f73660453a2cbb2241583684b615d5d1af9d4 Signed-off-by: Olivier Deprez --- services/std_svc/spmd/spmd_main.c | 61 ++++++++++++++----------------- 1 file changed, 27 insertions(+), 34 deletions(-) diff --git a/services/std_svc/spmd/spmd_main.c b/services/std_svc/spmd/spmd_main.c index 50c32fc83..f49d23610 100644 --- a/services/std_svc/spmd/spmd_main.c +++ b/services/std_svc/spmd/spmd_main.c @@ -47,10 +47,9 @@ static entry_point_info_t *spmc_ep_info; static int32_t spmd_init(void); static int spmd_spmc_init(void *rd_base, size_t rd_size); static uint64_t spmd_spci_error_return(void *handle, int error_code); -static uint64_t spmd_smc_forward(uint32_t smc_fid, uint32_t in_sstate, - uint32_t out_sstate, uint64_t x1, - uint64_t x2, uint64_t x3, uint64_t x4, - void *handle); +static uint64_t spmd_smc_forward(uint32_t smc_fid, bool secure_origin, + uint64_t x1, uint64_t x2, uint64_t x3, + uint64_t x4, void *handle); /******************************************************************************* * This function takes an SP context pointer and performs a synchronous entry @@ -324,21 +323,23 @@ int spmd_setup(void) /******************************************************************************* * Forward SMC to the other security state 
******************************************************************************/ -static uint64_t spmd_smc_forward(uint32_t smc_fid, uint32_t in_sstate, - uint32_t out_sstate, uint64_t x1, - uint64_t x2, uint64_t x3, uint64_t x4, - void *handle) +static uint64_t spmd_smc_forward(uint32_t smc_fid, bool secure_origin, + uint64_t x1, uint64_t x2, uint64_t x3, + uint64_t x4, void *handle) { + uint32_t secure_state_in = (secure_origin) ? SECURE : NON_SECURE; + uint32_t secure_state_out = (!secure_origin) ? SECURE : NON_SECURE; + /* Save incoming security state */ - cm_el1_sysregs_context_save(in_sstate); - cm_el2_sysregs_context_save(in_sstate); + cm_el1_sysregs_context_save(secure_state_in); + cm_el2_sysregs_context_save(secure_state_in); /* Restore outgoing security state */ - cm_el1_sysregs_context_restore(out_sstate); - cm_el2_sysregs_context_restore(out_sstate); - cm_set_next_eret_context(out_sstate); + cm_el1_sysregs_context_restore(secure_state_out); + cm_el2_sysregs_context_restore(secure_state_out); + cm_set_next_eret_context(secure_state_out); - SMC_RET8(cm_get_context(out_sstate), smc_fid, x1, x2, x3, x4, + SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5), SMC_GET_GP(handle, CTX_GPREG_X6), SMC_GET_GP(handle, CTX_GPREG_X7)); @@ -363,19 +364,12 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, void *cookie, void *handle, uint64_t flags) { - uint32_t in_sstate; - uint32_t out_sstate; - int32_t ret; spmd_spm_core_context_t *ctx = &spm_core_context[plat_my_core_pos()]; + bool secure_origin; + int32_t ret; /* Determine which security state this SMC originated from */ - if (is_caller_secure(flags)) { - in_sstate = SECURE; - out_sstate = NON_SECURE; - } else { - in_sstate = NON_SECURE; - out_sstate = SECURE; - } + secure_origin = is_caller_secure(flags); INFO("SPM: 0x%x, 0x%llx, 0x%llx, 0x%llx, 0x%llx, " "0x%llx, 0x%llx, 0x%llx\n", @@ -390,12 +384,11 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, * this CPU. If so, then indicate that the SPM core initialised * unsuccessfully. */ - if ((in_sstate == SECURE) && - (ctx->state == SPMC_STATE_RESET)) { + if (secure_origin && (ctx->state == SPMC_STATE_RESET)) { spmd_spm_core_sync_exit(x2); } - return spmd_smc_forward(smc_fid, in_sstate, out_sstate, + return spmd_smc_forward(smc_fid, secure_origin, x1, x2, x3, x4, handle); break; /* not reached */ @@ -429,8 +422,8 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, } /* Forward SMC from Normal world to the SPM core */ - if (in_sstate == NON_SECURE) { - return spmd_smc_forward(smc_fid, in_sstate, out_sstate, + if (!secure_origin) { + return spmd_smc_forward(smc_fid, secure_origin, x1, x2, x3, x4, handle); } else { /* @@ -452,7 +445,7 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, case SPCI_RXTX_UNMAP: case SPCI_MSG_RUN: /* This interface must be invoked only by the Normal world */ - if (in_sstate == SECURE) { + if (secure_origin) { return spmd_spci_error_return(handle, SPCI_ERROR_NOT_SUPPORTED); } @@ -485,7 +478,7 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, * simply forward the call to the Normal world. */ - return spmd_smc_forward(smc_fid, in_sstate, out_sstate, + return spmd_smc_forward(smc_fid, secure_origin, x1, x2, x3, x4, handle); break; /* not reached */ @@ -495,7 +488,7 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, * this CPU from the Secure world. 
If so, then indicate that the
 	 * SPM core initialised successfully.
 	 */
-		if ((in_sstate == SECURE) && (ctx->state == SPMC_STATE_RESET)) {
+		if (secure_origin && (ctx->state == SPMC_STATE_RESET)) {
 			spmd_spm_core_sync_exit(0);
 		}
@@ -503,12 +496,12 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
 	case SPCI_MSG_YIELD:
 		/* This interface must be invoked only by the Secure world */
-		if (in_sstate == NON_SECURE) {
+		if (!secure_origin) {
 			return spmd_spci_error_return(handle,
 						      SPCI_ERROR_NOT_SUPPORTED);
 		}
 
-		return spmd_smc_forward(smc_fid, in_sstate, out_sstate,
+		return spmd_smc_forward(smc_fid, secure_origin,
 					x1, x2, x3, x4, handle);
 		break; /* not reached */

From 033039f8e5ad0ff231261e316f27bf22bc5713a2 Mon Sep 17 00:00:00 2001
From: Max Shvetsov
Date: Tue, 25 Feb 2020 13:55:00 +0000
Subject: [PATCH 6/6] SPMD: add command line parameter to run SPM at S-EL2 or S-EL1

Added the SPMD_SPM_AT_SEL2 build command line parameter. Set to 1 to
run SPM at S-EL2. Set to 0 to run SPM at S-EL1 (for pre-Armv8.4
platforms, or when S-EL2 is disabled).
Removed the runtime EL field from the SPM core manifest.

Change-Id: Icb4f5ea4c800f266880db1d410d63fe27a1171c0
Signed-off-by: Artsem Artsemenka
Signed-off-by: Max Shvetsov
---
 Makefile                                      |  9 ++-
 include/services/spm_core_manifest.h          |  7 ---
 make_helpers/defaults.mk                      |  3 +
 plat/arm/board/fvp/fdts/fvp_spmc_manifest.dts |  1 -
 plat/common/plat_spmd_manifest.c              |  7 ---
 services/std_svc/spmd/spmd_main.c             | 61 +++++++++----------
 6 files changed, 39 insertions(+), 49 deletions(-)

diff --git a/Makefile b/Makefile
index a84c413b8..f3cb9be6b 100644
--- a/Makefile
+++ b/Makefile
@@ -422,11 +422,14 @@ ifneq (${SPD},none)
     endif
 
     ifeq (${SPD},spmd)
+        $(warning "SPMD is an experimental feature")
         # SPMD is located in std_svc directory
         SPD_DIR := std_svc
 
-        ifeq ($(CTX_INCLUDE_EL2_REGS),0)
-            $(error spmd requires CTX_INCLUDE_EL2_REGS option)
+        ifeq ($(SPMD_SPM_AT_SEL2),1)
+            ifeq ($(CTX_INCLUDE_EL2_REGS),0)
+                $(error SPMD with SPM at S-EL2 requires CTX_INCLUDE_EL2_REGS option)
+            endif
         endif
     else
         # All other SPDs in spd directory
@@ -799,6 +802,7 @@ $(eval $(call assert_boolean,SEPARATE_CODE_AND_RODATA))
 $(eval $(call assert_boolean,SEPARATE_NOBITS_REGION))
 $(eval $(call assert_boolean,SPIN_ON_BL1_EXIT))
 $(eval $(call assert_boolean,SPM_MM))
+$(eval $(call assert_boolean,SPMD_SPM_AT_SEL2))
 $(eval $(call assert_boolean,TRUSTED_BOARD_BOOT))
 $(eval $(call assert_boolean,USE_COHERENT_MEM))
 $(eval $(call assert_boolean,USE_DEBUGFS))
@@ -870,6 +874,7 @@ $(eval $(call add_define,RECLAIM_INIT_CODE))
 $(eval $(call add_define,SPD_${SPD}))
 $(eval $(call add_define,SPIN_ON_BL1_EXIT))
 $(eval $(call add_define,SPM_MM))
+$(eval $(call add_define,SPMD_SPM_AT_SEL2))
 $(eval $(call add_define,TRUSTED_BOARD_BOOT))
 $(eval $(call add_define,USE_COHERENT_MEM))
 $(eval $(call add_define,USE_DEBUGFS))
diff --git a/include/services/spm_core_manifest.h b/include/services/spm_core_manifest.h
index 06ecc1391..78748826d 100644
--- a/include/services/spm_core_manifest.h
+++ b/include/services/spm_core_manifest.h
@@ -20,13 +20,6 @@ typedef struct spm_core_manifest_sect_attribute {
 	uint32_t major_version;
 	uint32_t minor_version;
 
-	/*
-	 * Run-Time Exception Level (mandatory):
-	 * - 1: SEL1
-	 * - 2: SEL2
-	 */
-	uint32_t runtime_el;
-
 	/*
 	 * Run-Time Execution state (optional):
 	 * - 0: AArch64 (default)
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index 8e1f273a3..9273469e2 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -188,6 +188,9 @@ SPD := none
 
 # Enable the Management Mode (MM)-based Secure Partition 
Manager implementation SPM_MM := 0 +# Use SPM at S-EL2 as a default config for SPMD +SPMD_SPM_AT_SEL2 := 1 + # Flag to introduce an infinite loop in BL1 just before it exits into the next # image. This is meant to help debugging the post-BL2 phase. SPIN_ON_BL1_EXIT := 0 diff --git a/plat/arm/board/fvp/fdts/fvp_spmc_manifest.dts b/plat/arm/board/fvp/fdts/fvp_spmc_manifest.dts index e1c106f1e..c94a209fd 100644 --- a/plat/arm/board/fvp/fdts/fvp_spmc_manifest.dts +++ b/plat/arm/board/fvp/fdts/fvp_spmc_manifest.dts @@ -11,7 +11,6 @@ attribute { maj_ver = <0x0>; min_ver = <0x9>; - runtime_el = <0x1>; exec_state = <0x0>; load_address = <0x0 0x6000000>; entrypoint = <0x0 0x6000000>; diff --git a/plat/common/plat_spmd_manifest.c b/plat/common/plat_spmd_manifest.c index 4c789795e..9c3dc7177 100644 --- a/plat/common/plat_spmd_manifest.c +++ b/plat/common/plat_spmd_manifest.c @@ -37,12 +37,6 @@ static int manifest_parse_attribute(spmc_manifest_sect_attribute_t *attr, return -ENOENT; } - rc = fdtw_read_cells(fdt, node, "runtime_el", 1, &attr->runtime_el); - if (rc) { - ERROR("Missing SPM core runtime EL in manifest.\n"); - return -ENOENT; - } - rc = fdtw_read_cells(fdt, node, "exec_state", 1, &attr->exec_state); if (rc) NOTICE("Execution state not specified in SPM core manifest.\n"); @@ -61,7 +55,6 @@ static int manifest_parse_attribute(spmc_manifest_sect_attribute_t *attr, VERBOSE("SPM core manifest attribute section:\n"); VERBOSE(" version: %x.%x\n", attr->major_version, attr->minor_version); - VERBOSE(" runtime_el: 0x%x\n", attr->runtime_el); VERBOSE(" binary_size: 0x%x\n", attr->binary_size); VERBOSE(" load_address: 0x%llx\n", attr->load_address); VERBOSE(" entrypoint: 0x%llx\n", attr->entrypoint); diff --git a/services/std_svc/spmd/spmd_main.c b/services/std_svc/spmd/spmd_main.c index f49d23610..2cdf4f5ff 100644 --- a/services/std_svc/spmd/spmd_main.c +++ b/services/std_svc/spmd/spmd_main.c @@ -65,19 +65,19 @@ uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx) /* Restore the context assigned above */ cm_el1_sysregs_context_restore(SECURE); +#if SPMD_SPM_AT_SEL2 cm_el2_sysregs_context_restore(SECURE); +#endif cm_set_next_eret_context(SECURE); - /* Invalidate TLBs at EL1. */ - tlbivmalle1(); - dsbish(); - - /* Enter Secure Partition */ + /* Enter SPMC */ rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx); /* Save secure state */ cm_el1_sysregs_context_save(SECURE); +#if SPMD_SPM_AT_SEL2 cm_el2_sysregs_context_save(SECURE); +#endif return rc; } @@ -159,16 +159,8 @@ static int spmd_spmc_init(void *rd_base, size_t rd_size) INFO("SPCI version (%x.%x).\n", spmc_attrs.major_version, spmc_attrs.minor_version); - /* Validate the SPM core runtime EL */ - if ((spmc_attrs.runtime_el != MODE_EL1) && - (spmc_attrs.runtime_el != MODE_EL2)) { - WARN("Unsupported SPM core run time EL%x specified in " - "manifest image provided by BL2 boot loader.\n", - spmc_attrs.runtime_el); - return 1; - } - - INFO("SPM core run time EL%x.\n", spmc_attrs.runtime_el); + INFO("SPM core run time EL%x.\n", + SPMD_SPM_AT_SEL2 ? 
MODE_EL2 : MODE_EL1); /* Validate the SPM core execution state */ if ((spmc_attrs.exec_state != MODE_RW_64) && @@ -181,12 +173,10 @@ static int spmd_spmc_init(void *rd_base, size_t rd_size) INFO("SPM core execution state %x.\n", spmc_attrs.exec_state); - /* Ensure manifest has not requested S-EL2 in AArch32 state */ - if ((spmc_attrs.exec_state == MODE_RW_32) && - (spmc_attrs.runtime_el == MODE_EL2)) { - WARN("Invalid combination of SPM core execution state (%x) " - "and run time EL (%x).\n", spmc_attrs.exec_state, - spmc_attrs.runtime_el); +#if SPMD_SPM_AT_SEL2 + /* Ensure manifest has not requested AArch32 state in S-EL2 */ + if (spmc_attrs.exec_state == MODE_RW_32) { + WARN("AArch32 state at S-EL2 is not supported.\n"); return 1; } @@ -194,19 +184,16 @@ static int spmd_spmc_init(void *rd_base, size_t rd_size) * Check if S-EL2 is supported on this system if S-EL2 * is required for SPM */ - if (spmc_attrs.runtime_el == MODE_EL2) { - uint64_t sel2 = read_id_aa64pfr0_el1(); + uint64_t sel2 = read_id_aa64pfr0_el1(); - sel2 >>= ID_AA64PFR0_SEL2_SHIFT; - sel2 &= ID_AA64PFR0_SEL2_MASK; + sel2 >>= ID_AA64PFR0_SEL2_SHIFT; + sel2 &= ID_AA64PFR0_SEL2_MASK; - if (!sel2) { - WARN("SPM core run time EL: S-EL%x is not supported " - "but specified in manifest image provided by " - "BL2 boot loader.\n", spmc_attrs.runtime_el); - return 1; - } + if (!sel2) { + WARN("SPM core run time S-EL2 is not supported."); + return 1; } +#endif /* SPMD_SPM_AT_SEL2 */ /* Initialise an entrypoint to set up the CPU context */ ep_attr = SECURE | EP_ST_ENABLE; @@ -228,7 +215,13 @@ static int spmd_spmc_init(void *rd_base, size_t rd_size) DAIF_IRQ_BIT | DAIF_ABT_BIT); } else { - spmc_ep_info->spsr = SPSR_64(spmc_attrs.runtime_el, + +#if SPMD_SPM_AT_SEL2 + static const uint32_t runtime_el = MODE_EL2; +#else + static const uint32_t runtime_el = MODE_EL1; +#endif + spmc_ep_info->spsr = SPSR_64(runtime_el, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); } @@ -332,11 +325,15 @@ static uint64_t spmd_smc_forward(uint32_t smc_fid, bool secure_origin, /* Save incoming security state */ cm_el1_sysregs_context_save(secure_state_in); +#if SPMD_SPM_AT_SEL2 cm_el2_sysregs_context_save(secure_state_in); +#endif /* Restore outgoing security state */ cm_el1_sysregs_context_restore(secure_state_out); +#if SPMD_SPM_AT_SEL2 cm_el2_sysregs_context_restore(secure_state_out); +#endif cm_set_next_eret_context(secure_state_out); SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,