Merge changes from topic "spmd-sel2" into integration

* changes:
  SPMD: add command line parameter to run SPM at S-EL2 or S-EL1
  SPMD: smc handler qualify secure origin using booleans
  SPMD: SPMC init, SMC handler cosmetic changes
  SPMD: [tegra] rename el1_sys_regs structure to sys_regs
  SPMD: Adds partially supported EL2 registers.
  SPMD: save/restore EL2 system registers.
Olivier Deprez, 2020-03-06 08:18:03 +00:00; committed by TrustedFirmware Code Review
commit d95f7a7287
15 changed files with 894 additions and 274 deletions


@@ -412,40 +412,48 @@ INCLUDE_TBBR_MK := 1
################################################################################
ifneq (${SPD},none)
ifeq (${ARCH},aarch32)
ifeq (${ARCH},aarch32)
$(error "Error: SPD is incompatible with AArch32.")
endif
ifdef EL3_PAYLOAD_BASE
endif
ifdef EL3_PAYLOAD_BASE
$(warning "SPD and EL3_PAYLOAD_BASE are incompatible build options.")
$(warning "The SPD and its BL32 companion will be present but ignored.")
endif
ifeq (${SPD},spmd)
# SPMD is located in std_svc directory
SPD_DIR := std_svc
else
# All other SPDs in spd directory
SPD_DIR := spd
endif
endif
# We expect to locate an spd.mk under the specified SPD directory
SPD_MAKE := $(wildcard services/${SPD_DIR}/${SPD}/${SPD}.mk)
ifeq (${SPD},spmd)
$(warning "SPMD is an experimental feature")
# SPMD is located in std_svc directory
SPD_DIR := std_svc
ifeq (${SPD_MAKE},)
$(error Error: No services/${SPD_DIR}/${SPD}/${SPD}.mk located)
ifeq ($(SPMD_SPM_AT_SEL2),1)
ifeq ($(CTX_INCLUDE_EL2_REGS),0)
$(error SPMD with SPM at S-EL2 requires CTX_INCLUDE_EL2_REGS option)
endif
endif
$(info Including ${SPD_MAKE})
include ${SPD_MAKE}
else
# All other SPDs in spd directory
SPD_DIR := spd
endif
# If there's BL32 companion for the chosen SPD, we expect that the SPD's
# Makefile would set NEED_BL32 to "yes". In this case, the build system
# supports two mutually exclusive options:
# * BL32 is built from source: then BL32_SOURCES must contain the list
# of source files to build BL32
# * BL32 is a prebuilt binary: then BL32 must point to the image file
# that will be included in the FIP
# If both BL32_SOURCES and BL32 are defined, the binary takes precedence
# over the sources.
# We expect to locate an spd.mk under the specified SPD directory
SPD_MAKE := $(wildcard services/${SPD_DIR}/${SPD}/${SPD}.mk)
ifeq (${SPD_MAKE},)
$(error Error: No services/${SPD_DIR}/${SPD}/${SPD}.mk located)
endif
$(info Including ${SPD_MAKE})
include ${SPD_MAKE}
# If there's BL32 companion for the chosen SPD, we expect that the SPD's
# Makefile would set NEED_BL32 to "yes". In this case, the build system
# supports two mutually exclusive options:
# * BL32 is built from source: then BL32_SOURCES must contain the list
# of source files to build BL32
# * BL32 is a prebuilt binary: then BL32 must point to the image file
# that will be included in the FIP
# If both BL32_SOURCES and BL32 are defined, the binary takes precedence
# over the sources.
endif
################################################################################
@@ -761,6 +769,7 @@ $(eval $(call assert_boolean,CTX_INCLUDE_AARCH32_REGS))
$(eval $(call assert_boolean,CTX_INCLUDE_FPREGS))
$(eval $(call assert_boolean,CTX_INCLUDE_PAUTH_REGS))
$(eval $(call assert_boolean,CTX_INCLUDE_MTE_REGS))
$(eval $(call assert_boolean,CTX_INCLUDE_EL2_REGS))
$(eval $(call assert_boolean,DEBUG))
$(eval $(call assert_boolean,DYN_DISABLE_AUTH))
$(eval $(call assert_boolean,EL3_EXCEPTION_HANDLING))
@@ -793,6 +802,7 @@ $(eval $(call assert_boolean,SEPARATE_CODE_AND_RODATA))
$(eval $(call assert_boolean,SEPARATE_NOBITS_REGION))
$(eval $(call assert_boolean,SPIN_ON_BL1_EXIT))
$(eval $(call assert_boolean,SPM_MM))
$(eval $(call assert_boolean,SPMD_SPM_AT_SEL2))
$(eval $(call assert_boolean,TRUSTED_BOARD_BOOT))
$(eval $(call assert_boolean,USE_COHERENT_MEM))
$(eval $(call assert_boolean,USE_DEBUGFS))
@@ -832,6 +842,7 @@ $(eval $(call add_define,CTX_INCLUDE_FPREGS))
$(eval $(call add_define,CTX_INCLUDE_PAUTH_REGS))
$(eval $(call add_define,EL3_EXCEPTION_HANDLING))
$(eval $(call add_define,CTX_INCLUDE_MTE_REGS))
$(eval $(call add_define,CTX_INCLUDE_EL2_REGS))
$(eval $(call add_define,ENABLE_AMU))
$(eval $(call add_define,ENABLE_ASSERTIONS))
$(eval $(call add_define,ENABLE_BTI))
@@ -863,6 +874,7 @@ $(eval $(call add_define,RECLAIM_INIT_CODE))
$(eval $(call add_define,SPD_${SPD}))
$(eval $(call add_define,SPIN_ON_BL1_EXIT))
$(eval $(call add_define,SPM_MM))
$(eval $(call add_define,SPMD_SPM_AT_SEL2))
$(eval $(call add_define,TRUSTED_BOARD_BOOT))
$(eval $(call add_define,USE_COHERENT_MEM))
$(eval $(call add_define,USE_DEBUGFS))


@@ -96,6 +96,32 @@
#define ICC_EOIR1_EL1 S3_0_c12_c12_1
#define ICC_SGI0R_EL1 S3_0_c12_c11_7
/*******************************************************************************
* Definitions for EL2 system registers for save/restore routine
******************************************************************************/
#define CNTPOFF_EL2 S3_4_C14_C0_6
#define HAFGRTR_EL2 S3_4_C3_C1_6
#define HDFGRTR_EL2 S3_4_C3_C1_4
#define HDFGWTR_EL2 S3_4_C3_C1_5
#define HFGITR_EL2 S3_4_C1_C1_6
#define HFGRTR_EL2 S3_4_C1_C1_4
#define HFGWTR_EL2 S3_4_C1_C1_5
#define ICH_HCR_EL2 S3_4_C12_C11_0
#define ICH_VMCR_EL2 S3_4_C12_C11_7
#define MPAMVPM0_EL2 S3_4_C10_C5_0
#define MPAMVPM1_EL2 S3_4_C10_C5_1
#define MPAMVPM2_EL2 S3_4_C10_C5_2
#define MPAMVPM3_EL2 S3_4_C10_C5_3
#define MPAMVPM4_EL2 S3_4_C10_C5_4
#define MPAMVPM5_EL2 S3_4_C10_C5_5
#define MPAMVPM6_EL2 S3_4_C10_C5_6
#define MPAMVPM7_EL2 S3_4_C10_C5_7
#define MPAMVPMV_EL2 S3_4_C10_C4_1
#define TRFCR_EL2 S3_4_C1_C2_1
#define PMSCR_EL2 S3_4_C9_C9_0
#define TFSR_EL2 S3_4_C5_C6_0
/*******************************************************************************
* Generic timer memory mapped registers & offsets
******************************************************************************/
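These definitions use the generic S<op0>_<op1>_<Cn>_<Cm>_<op2> encoding form, so the EL2 registers stay accessible even when the toolchain does not yet know them by name; the save/restore routines in context.S below rely on exactly this for their mrs/msr instructions. As a minimal illustration, a hypothetical C-level accessor (not part of this change) could read one of these registers through the same encoding:

#include <stdint.h>

/* Hypothetical helper, not part of this change: read ICH_VMCR_EL2 via its
 * generic encoding S3_4_C12_C11_7 (defined above), which assemblers that
 * predate the named register still accept. */
static inline uint64_t read_ich_vmcr_el2(void)
{
	uint64_t val;

	__asm__ volatile("mrs %0, S3_4_C12_C11_7" : "=r" (val));
	return val;
}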


@@ -68,7 +68,7 @@
* registers are only 32-bits wide but are stored as 64-bit values for
* convenience
******************************************************************************/
#define CTX_SYSREGS_OFFSET (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
#define CTX_EL1_SYSREGS_OFFSET (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
#define CTX_SPSR_EL1 U(0x0)
#define CTX_ELR_EL1 U(0x8)
#define CTX_SCTLR_EL1 U(0x10)
@@ -138,13 +138,118 @@
/*
* End of system registers.
*/
#define CTX_SYSREGS_END CTX_MTE_REGS_END
#define CTX_EL1_SYSREGS_END CTX_MTE_REGS_END
/*
* EL2 register set
*/
#if CTX_INCLUDE_EL2_REGS
/* For later discussion
* ICH_AP0R<n>_EL2
* ICH_AP1R<n>_EL2
* AMEVCNTVOFF0<n>_EL2
* AMEVCNTVOFF1<n>_EL2
* ICH_LR<n>_EL2
*/
#define CTX_EL2_SYSREGS_OFFSET (CTX_EL1_SYSREGS_OFFSET + CTX_EL1_SYSREGS_END)
#define CTX_ACTLR_EL2 U(0x0)
#define CTX_AFSR0_EL2 U(0x8)
#define CTX_AFSR1_EL2 U(0x10)
#define CTX_AMAIR_EL2 U(0x18)
#define CTX_CNTHCTL_EL2 U(0x20)
#define CTX_CNTHP_CTL_EL2 U(0x28)
#define CTX_CNTHP_CVAL_EL2 U(0x30)
#define CTX_CNTHP_TVAL_EL2 U(0x38)
#define CTX_CNTVOFF_EL2 U(0x40)
#define CTX_CPTR_EL2 U(0x48)
#define CTX_DBGVCR32_EL2 U(0x50)
#define CTX_ELR_EL2 U(0x58)
#define CTX_ESR_EL2 U(0x60)
#define CTX_FAR_EL2 U(0x68)
#define CTX_FPEXC32_EL2 U(0x70)
#define CTX_HACR_EL2 U(0x78)
#define CTX_HCR_EL2 U(0x80)
#define CTX_HPFAR_EL2 U(0x88)
#define CTX_HSTR_EL2 U(0x90)
#define CTX_ICC_SRE_EL2 U(0x98)
#define CTX_ICH_HCR_EL2 U(0xa0)
#define CTX_ICH_VMCR_EL2 U(0xa8)
#define CTX_MAIR_EL2 U(0xb0)
#define CTX_MDCR_EL2 U(0xb8)
#define CTX_PMSCR_EL2 U(0xc0)
#define CTX_SCTLR_EL2 U(0xc8)
#define CTX_SPSR_EL2 U(0xd0)
#define CTX_SP_EL2 U(0xd8)
#define CTX_TCR_EL2 U(0xe0)
#define CTX_TRFCR_EL2 U(0xe8)
#define CTX_TTBR0_EL2 U(0xf0)
#define CTX_VBAR_EL2 U(0xf8)
#define CTX_VMPIDR_EL2 U(0x100)
#define CTX_VPIDR_EL2 U(0x108)
#define CTX_VTCR_EL2 U(0x110)
#define CTX_VTTBR_EL2 U(0x118)
// Only if MTE registers in use
#define CTX_TFSR_EL2 U(0x120)
// Only if ENABLE_MPAM_FOR_LOWER_ELS==1
#define CTX_MPAM2_EL2 U(0x128)
#define CTX_MPAMHCR_EL2 U(0x130)
#define CTX_MPAMVPM0_EL2 U(0x138)
#define CTX_MPAMVPM1_EL2 U(0x140)
#define CTX_MPAMVPM2_EL2 U(0x148)
#define CTX_MPAMVPM3_EL2 U(0x150)
#define CTX_MPAMVPM4_EL2 U(0x158)
#define CTX_MPAMVPM5_EL2 U(0x160)
#define CTX_MPAMVPM6_EL2 U(0x168)
#define CTX_MPAMVPM7_EL2 U(0x170)
#define CTX_MPAMVPMV_EL2 U(0x178)
// Starting with Armv8.6
#define CTX_HAFGRTR_EL2 U(0x180)
#define CTX_HDFGRTR_EL2 U(0x188)
#define CTX_HDFGWTR_EL2 U(0x190)
#define CTX_HFGITR_EL2 U(0x198)
#define CTX_HFGRTR_EL2 U(0x1a0)
#define CTX_HFGWTR_EL2 U(0x1a8)
#define CTX_CNTPOFF_EL2 U(0x1b0)
// Starting with Armv8.4
#define CTX_CNTHPS_CTL_EL2 U(0x1b8)
#define CTX_CNTHPS_CVAL_EL2 U(0x1c0)
#define CTX_CNTHPS_TVAL_EL2 U(0x1c8)
#define CTX_CNTHVS_CTL_EL2 U(0x1d0)
#define CTX_CNTHVS_CVAL_EL2 U(0x1d8)
#define CTX_CNTHVS_TVAL_EL2 U(0x1e0)
#define CTX_CNTHV_CTL_EL2 U(0x1e8)
#define CTX_CNTHV_CVAL_EL2 U(0x1f0)
#define CTX_CNTHV_TVAL_EL2 U(0x1f8)
#define CTX_CONTEXTIDR_EL2 U(0x200)
#define CTX_SDER32_EL2 U(0x208)
#define CTX_TTBR1_EL2 U(0x210)
#define CTX_VDISR_EL2 U(0x218)
#define CTX_VNCR_EL2 U(0x220)
#define CTX_VSESR_EL2 U(0x228)
#define CTX_VSTCR_EL2 U(0x230)
#define CTX_VSTTBR_EL2 U(0x238)
// Starting with Armv8.5
#define CTX_SCXTNUM_EL2 U(0x240)
/* Align to the next 16 byte boundary */
#define CTX_EL2_SYSREGS_END U(0x250)
#endif /* CTX_INCLUDE_EL2_REGS */
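Each slot in this bank is one 64-bit double word, so adjacent offsets differ by U(0x8), and CTX_EL2_SYSREGS_END is rounded up so the bank keeps 16-byte alignment: the last slot, CTX_SCXTNUM_EL2 at 0x240, ends at 0x248, hence the 0x250 end marker. A sketch of compile-time spot checks in the CASSERT style this header already uses (illustrative only; the change itself relies on the CASSERTs against __builtin_offsetof further down):

#include <lib/cassert.h>

/* Sketch: verify the double-word slot spacing and the 16-byte end alignment. */
CASSERT(CTX_AFSR0_EL2 == (CTX_ACTLR_EL2 + U(0x8)),
	assert_ctx_el2_slot_spacing);
CASSERT((CTX_EL2_SYSREGS_END & U(0xf)) == U(0),
	assert_ctx_el2_sysregs_end_misaligned);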
/*******************************************************************************
* Constants that allow assembler code to access members of and the 'fp_regs'
* structure at their correct offsets.
******************************************************************************/
#define CTX_FPREGS_OFFSET (CTX_SYSREGS_OFFSET + CTX_SYSREGS_END)
#if CTX_INCLUDE_EL2_REGS
# define CTX_FPREGS_OFFSET (CTX_EL2_SYSREGS_OFFSET + CTX_EL2_SYSREGS_END)
#else
# define CTX_FPREGS_OFFSET (CTX_EL1_SYSREGS_OFFSET + CTX_EL1_SYSREGS_END)
#endif
#if CTX_INCLUDE_FPREGS
#define CTX_FP_Q0 U(0x0)
#define CTX_FP_Q1 U(0x10)
@@ -235,7 +340,10 @@
/* Constants to determine the size of individual context structures */
#define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT)
#define CTX_SYSREG_ALL (CTX_SYSREGS_END >> DWORD_SHIFT)
#define CTX_EL1_SYSREGS_ALL (CTX_EL1_SYSREGS_END >> DWORD_SHIFT)
#if CTX_INCLUDE_EL2_REGS
# define CTX_EL2_SYSREGS_ALL (CTX_EL2_SYSREGS_END >> DWORD_SHIFT)
#endif
#if CTX_INCLUDE_FPREGS
# define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT)
#endif
@@ -256,10 +364,18 @@ DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
/*
* AArch64 EL1 system register context structure for preserving the
* architectural state during switches from one security state to
* another in EL1.
* architectural state during world switches.
*/
DEFINE_REG_STRUCT(el1_sys_regs, CTX_SYSREG_ALL);
DEFINE_REG_STRUCT(el1_sysregs, CTX_EL1_SYSREGS_ALL);
/*
* AArch64 EL2 system register context structure for preserving the
* architectural state during world switches.
*/
#if CTX_INCLUDE_EL2_REGS
DEFINE_REG_STRUCT(el2_sysregs, CTX_EL2_SYSREGS_ALL);
#endif
/*
* AArch64 floating point register context structure for preserving
@@ -304,7 +420,10 @@ DEFINE_REG_STRUCT(pauth, CTX_PAUTH_REGS_ALL);
typedef struct cpu_context {
gp_regs_t gpregs_ctx;
el3_state_t el3state_ctx;
el1_sys_regs_t sysregs_ctx;
el1_sysregs_t el1_sysregs_ctx;
#if CTX_INCLUDE_EL2_REGS
el2_sysregs_t el2_sysregs_ctx;
#endif
#if CTX_INCLUDE_FPREGS
fp_regs_t fpregs_ctx;
#endif
@@ -319,7 +438,10 @@ typedef struct cpu_context {
#if CTX_INCLUDE_FPREGS
# define get_fpregs_ctx(h) (&((cpu_context_t *) h)->fpregs_ctx)
#endif
#define get_sysregs_ctx(h) (&((cpu_context_t *) h)->sysregs_ctx)
#define get_el1_sysregs_ctx(h) (&((cpu_context_t *) h)->el1_sysregs_ctx)
#if CTX_INCLUDE_EL2_REGS
# define get_el2_sysregs_ctx(h) (&((cpu_context_t *) h)->el2_sysregs_ctx)
#endif
#define get_gpregs_ctx(h) (&((cpu_context_t *) h)->gpregs_ctx)
#define get_cve_2018_3639_ctx(h) (&((cpu_context_t *) h)->cve_2018_3639_ctx)
#if CTX_INCLUDE_PAUTH_REGS
@@ -333,8 +455,12 @@ typedef struct cpu_context {
*/
CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx), \
assert_core_context_gp_offset_mismatch);
CASSERT(CTX_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, sysregs_ctx), \
assert_core_context_sys_offset_mismatch);
CASSERT(CTX_EL1_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, el1_sysregs_ctx), \
assert_core_context_el1_sys_offset_mismatch);
#if CTX_INCLUDE_EL2_REGS
CASSERT(CTX_EL2_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, el2_sysregs_ctx), \
assert_core_context_el2_sys_offset_mismatch);
#endif
#if CTX_INCLUDE_FPREGS
CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx), \
assert_core_context_fp_offset_mismatch);
@@ -387,8 +513,14 @@ CASSERT(CTX_PAUTH_REGS_OFFSET == __builtin_offsetof(cpu_context_t, pauth_ctx), \
/*******************************************************************************
* Function prototypes
******************************************************************************/
void el1_sysregs_context_save(el1_sys_regs_t *regs);
void el1_sysregs_context_restore(el1_sys_regs_t *regs);
void el1_sysregs_context_save(el1_sysregs_t *regs);
void el1_sysregs_context_restore(el1_sysregs_t *regs);
#if CTX_INCLUDE_EL2_REGS
void el2_sysregs_context_save(el2_sysregs_t *regs);
void el2_sysregs_context_restore(el2_sysregs_t *regs);
#endif
#if CTX_INCLUDE_FPREGS
void fpregs_context_save(fp_regs_t *regs);
void fpregs_context_restore(fp_regs_t *regs);
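On the C side, the same CTX_* constants index the saved banks through the read_ctx_reg()/write_ctx_reg() helpers. A minimal sketch, assuming a secure-world context has already been set up (the function is illustrative, not part of this change; include paths follow the TF-A tree):

#include <assert.h>
#include <stdint.h>

#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>

#if CTX_INCLUDE_EL2_REGS
/* Sketch: round-trip one saved EL2 register of the secure-world context,
 * indexing the new bank by its CTX_*_EL2 byte offset. */
static void touch_saved_el2_state(void)
{
	cpu_context_t *ctx = cm_get_context(SECURE);
	uint64_t vttbr;

	assert(ctx != NULL);
	vttbr = read_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_VTTBR_EL2);
	write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_VTTBR_EL2, vttbr);
}
#endif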


@@ -36,6 +36,11 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep);
void cm_prepare_el3_exit(uint32_t security_state);
#ifdef __aarch64__
#if CTX_INCLUDE_EL2_REGS
void cm_el2_sysregs_context_save(uint32_t security_state);
void cm_el2_sysregs_context_restore(uint32_t security_state);
#endif
void cm_el1_sysregs_context_save(uint32_t security_state);
void cm_el1_sysregs_context_restore(uint32_t security_state);
void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint);


@@ -20,13 +20,6 @@ typedef struct spm_core_manifest_sect_attribute {
uint32_t major_version;
uint32_t minor_version;
/*
* Run-Time Exception Level (mandatory):
* - 1: SEL1
* - 2: SEL2
*/
uint32_t runtime_el;
/*
* Run-Time Execution state (optional):
* - 0: AArch64 (default)


@@ -11,7 +11,7 @@
#include <services/spci_svc.h>
#include <stdint.h>
int32_t spmd_setup(void);
int spmd_setup(void);
uint64_t spmd_smc_handler(uint32_t smc_fid,
uint64_t x1,
uint64_t x2,


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,6 +9,11 @@
#include <assert_macros.S>
#include <context.h>
#if CTX_INCLUDE_EL2_REGS
.global el2_sysregs_context_save
.global el2_sysregs_context_restore
#endif
.global el1_sysregs_context_save
.global el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
@@ -19,6 +24,385 @@
.global restore_gp_pmcr_pauth_regs
.global el3_exit
#if CTX_INCLUDE_EL2_REGS
/* -----------------------------------------------------
* The following function strictly follows the AArch64
* PCS to use x9-x17 (temporary caller-saved registers)
* to save EL2 system register context. It assumes that
* 'x0' is pointing to an 'el2_sysregs' structure where
* the register context will be saved.
*
* The following registers are not saved:
* AMEVCNTVOFF0<n>_EL2
* AMEVCNTVOFF1<n>_EL2
* ICH_AP0R<n>_EL2
* ICH_AP1R<n>_EL2
* ICH_LR<n>_EL2
* -----------------------------------------------------
*/
func el2_sysregs_context_save
mrs x9, actlr_el2
mrs x10, afsr0_el2
stp x9, x10, [x0, #CTX_ACTLR_EL2]
mrs x11, afsr1_el2
mrs x12, amair_el2
stp x11, x12, [x0, #CTX_AFSR1_EL2]
mrs x13, cnthctl_el2
mrs x14, cnthp_ctl_el2
stp x13, x14, [x0, #CTX_CNTHCTL_EL2]
mrs x15, cnthp_cval_el2
mrs x16, cnthp_tval_el2
stp x15, x16, [x0, #CTX_CNTHP_CVAL_EL2]
mrs x17, cntvoff_el2
mrs x9, cptr_el2
stp x17, x9, [x0, #CTX_CNTVOFF_EL2]
mrs x10, dbgvcr32_el2
mrs x11, elr_el2
stp x10, x11, [x0, #CTX_DBGVCR32_EL2]
mrs x14, esr_el2
mrs x15, far_el2
stp x14, x15, [x0, #CTX_ESR_EL2]
mrs x16, fpexc32_el2
mrs x17, hacr_el2
stp x16, x17, [x0, #CTX_FPEXC32_EL2]
mrs x9, hcr_el2
mrs x10, hpfar_el2
stp x9, x10, [x0, #CTX_HCR_EL2]
mrs x11, hstr_el2
mrs x12, ICC_SRE_EL2
stp x11, x12, [x0, #CTX_HSTR_EL2]
mrs x13, ICH_HCR_EL2
mrs x14, ICH_VMCR_EL2
stp x13, x14, [x0, #CTX_ICH_HCR_EL2]
mrs x15, mair_el2
mrs x16, mdcr_el2
stp x15, x16, [x0, #CTX_MAIR_EL2]
mrs x17, PMSCR_EL2
mrs x9, sctlr_el2
stp x17, x9, [x0, #CTX_PMSCR_EL2]
mrs x10, spsr_el2
mrs x11, sp_el2
stp x10, x11, [x0, #CTX_SPSR_EL2]
mrs x12, tcr_el2
mrs x13, TRFCR_EL2
stp x12, x13, [x0, #CTX_TCR_EL2]
mrs x14, ttbr0_el2
mrs x15, vbar_el2
stp x14, x15, [x0, #CTX_TTBR0_EL2]
mrs x16, vmpidr_el2
mrs x17, vpidr_el2
stp x16, x17, [x0, #CTX_VMPIDR_EL2]
mrs x9, vtcr_el2
mrs x10, vttbr_el2
stp x9, x10, [x0, #CTX_VTCR_EL2]
#if CTX_INCLUDE_MTE_REGS
mrs x11, TFSR_EL2
str x11, [x0, #CTX_TFSR_EL2]
#endif
#if ENABLE_MPAM_FOR_LOWER_ELS
mrs x9, MPAM2_EL2
mrs x10, MPAMHCR_EL2
stp x9, x10, [x0, #CTX_MPAM2_EL2]
mrs x11, MPAMVPM0_EL2
mrs x12, MPAMVPM1_EL2
stp x11, x12, [x0, #CTX_MPAMVPM0_EL2]
mrs x13, MPAMVPM2_EL2
mrs x14, MPAMVPM3_EL2
stp x13, x14, [x0, #CTX_MPAMVPM2_EL2]
mrs x15, MPAMVPM4_EL2
mrs x16, MPAMVPM5_EL2
stp x15, x16, [x0, #CTX_MPAMVPM4_EL2]
mrs x17, MPAMVPM6_EL2
mrs x9, MPAMVPM7_EL2
stp x17, x9, [x0, #CTX_MPAMVPM6_EL2]
mrs x10, MPAMVPMV_EL2
str x10, [x0, #CTX_MPAMVPMV_EL2]
#endif
#if ARM_ARCH_AT_LEAST(8, 6)
mrs x11, HAFGRTR_EL2
mrs x12, HDFGRTR_EL2
stp x11, x12, [x0, #CTX_HAFGRTR_EL2]
mrs x13, HDFGWTR_EL2
mrs x14, HFGITR_EL2
stp x13, x14, [x0, #CTX_HDFGWTR_EL2]
mrs x15, HFGRTR_EL2
mrs x16, HFGWTR_EL2
stp x15, x16, [x0, #CTX_HFGRTR_EL2]
mrs x17, CNTPOFF_EL2
str x17, [x0, #CTX_CNTPOFF_EL2]
#endif
#if ARM_ARCH_AT_LEAST(8, 4)
mrs x9, cnthps_ctl_el2
mrs x10, cnthps_cval_el2
stp x9, x10, [x0, #CTX_CNTHPS_CTL_EL2]
mrs x11, cnthps_tval_el2
mrs x12, cnthvs_ctl_el2
stp x11, x12, [x0, #CTX_CNTHPS_TVAL_EL2]
mrs x13, cnthvs_cval_el2
mrs x14, cnthvs_tval_el2
stp x13, x14, [x0, #CTX_CNTHVS_CVAL_EL2]
mrs x15, cnthv_ctl_el2
mrs x16, cnthv_cval_el2
stp x15, x16, [x0, #CTX_CNTHV_CTL_EL2]
mrs x17, cnthv_tval_el2
mrs x9, contextidr_el2
stp x17, x9, [x0, #CTX_CNTHV_TVAL_EL2]
mrs x10, sder32_el2
str x10, [x0, #CTX_SDER32_EL2]
mrs x11, ttbr1_el2
str x11, [x0, #CTX_TTBR1_EL2]
mrs x12, vdisr_el2
str x12, [x0, #CTX_VDISR_EL2]
mrs x13, vncr_el2
str x13, [x0, #CTX_VNCR_EL2]
mrs x14, vsesr_el2
str x14, [x0, #CTX_VSESR_EL2]
mrs x15, vstcr_el2
str x15, [x0, #CTX_VSTCR_EL2]
mrs x16, vsttbr_el2
str x16, [x0, #CTX_VSTTBR_EL2]
#endif
#if ARM_ARCH_AT_LEAST(8, 5)
mrs x17, scxtnum_el2
str x17, [x0, #CTX_SCXTNUM_EL2]
#endif
ret
endfunc el2_sysregs_context_save
/* -----------------------------------------------------
* The following function strictly follows the AArch64
* PCS to use x9-x17 (temporary caller-saved registers)
* to restore EL2 system register context. It assumes
* that 'x0' is pointing to an 'el2_sysregs' structure
* from where the register context will be restored.
*
* The following registers are not restored:
* AMEVCNTVOFF0<n>_EL2
* AMEVCNTVOFF1<n>_EL2
* ICH_AP0R<n>_EL2
* ICH_AP1R<n>_EL2
* ICH_LR<n>_EL2
* -----------------------------------------------------
*/
func el2_sysregs_context_restore
ldp x9, x10, [x0, #CTX_ACTLR_EL2]
msr actlr_el2, x9
msr afsr0_el2, x10
ldp x11, x12, [x0, #CTX_AFSR1_EL2]
msr afsr1_el2, x11
msr amair_el2, x12
ldp x13, x14, [x0, #CTX_CNTHCTL_EL2]
msr cnthctl_el2, x13
msr cnthp_ctl_el2, x14
ldp x15, x16, [x0, #CTX_CNTHP_CVAL_EL2]
msr cnthp_cval_el2, x15
msr cnthp_tval_el2, x16
ldp x17, x9, [x0, #CTX_CNTVOFF_EL2]
msr cntvoff_el2, x17
msr cptr_el2, x9
ldp x10, x11, [x0, #CTX_DBGVCR32_EL2]
msr dbgvcr32_el2, x10
msr elr_el2, x11
ldp x14, x15, [x0, #CTX_ESR_EL2]
msr esr_el2, x14
msr far_el2, x15
ldp x16, x17, [x0, #CTX_FPEXC32_EL2]
msr fpexc32_el2, x16
msr hacr_el2, x17
ldp x9, x10, [x0, #CTX_HCR_EL2]
msr hcr_el2, x9
msr hpfar_el2, x10
ldp x11, x12, [x0, #CTX_HSTR_EL2]
msr hstr_el2, x11
msr ICC_SRE_EL2, x12
ldp x13, x14, [x0, #CTX_ICH_HCR_EL2]
msr ICH_HCR_EL2, x13
msr ICH_VMCR_EL2, x14
ldp x15, x16, [x0, #CTX_MAIR_EL2]
msr mair_el2, x15
msr mdcr_el2, x16
ldp x17, x9, [x0, #CTX_PMSCR_EL2]
msr PMSCR_EL2, x17
msr sctlr_el2, x9
ldp x10, x11, [x0, #CTX_SPSR_EL2]
msr spsr_el2, x10
msr sp_el2, x11
ldp x12, x13, [x0, #CTX_TCR_EL2]
msr tcr_el2, x12
msr TRFCR_EL2, x13
ldp x14, x15, [x0, #CTX_TTBR0_EL2]
msr ttbr0_el2, x14
msr vbar_el2, x15
ldp x16, x17, [x0, #CTX_VMPIDR_EL2]
msr vmpidr_el2, x16
msr vpidr_el2, x17
ldp x9, x10, [x0, #CTX_VTCR_EL2]
msr vtcr_el2, x9
msr vttbr_el2, x10
#if CTX_INCLUDE_MTE_REGS
ldr x11, [x0, #CTX_TFSR_EL2]
msr TFSR_EL2, x11
#endif
#if ENABLE_MPAM_FOR_LOWER_ELS
ldp x9, x10, [x0, #CTX_MPAM2_EL2]
msr MPAM2_EL2, x9
msr MPAMHCR_EL2, x10
ldp x11, x12, [x0, #CTX_MPAMVPM0_EL2]
msr MPAMVPM0_EL2, x11
msr MPAMVPM1_EL2, x12
ldp x13, x14, [x0, #CTX_MPAMVPM2_EL2]
msr MPAMVPM2_EL2, x13
msr MPAMVPM3_EL2, x14
ldp x15, x16, [x0, #CTX_MPAMVPM4_EL2]
msr MPAMVPM4_EL2, x15
msr MPAMVPM5_EL2, x16
ldp x17, x9, [x0, #CTX_MPAMVPM6_EL2]
msr MPAMVPM6_EL2, x17
msr MPAMVPM7_EL2, x9
ldr x10, [x0, #CTX_MPAMVPMV_EL2]
msr MPAMVPMV_EL2, x10
#endif
#if ARM_ARCH_AT_LEAST(8, 6)
ldp x11, x12, [x0, #CTX_HAFGRTR_EL2]
msr HAFGRTR_EL2, x11
msr HDFGRTR_EL2, x12
ldp x13, x14, [x0, #CTX_HDFGWTR_EL2]
msr HDFGWTR_EL2, x13
msr HFGITR_EL2, x14
ldp x15, x16, [x0, #CTX_HFGRTR_EL2]
msr HFGRTR_EL2, x15
msr HFGWTR_EL2, x16
ldr x17, [x0, #CTX_CNTPOFF_EL2]
msr CNTPOFF_EL2, x17
#endif
#if ARM_ARCH_AT_LEAST(8, 4)
ldp x9, x10, [x0, #CTX_CNTHPS_CTL_EL2]
msr cnthps_ctl_el2, x9
msr cnthps_cval_el2, x10
ldp x11, x12, [x0, #CTX_CNTHPS_TVAL_EL2]
msr cnthps_tval_el2, x11
msr cnthvs_ctl_el2, x12
ldp x13, x14, [x0, #CTX_CNTHVS_CVAL_EL2]
msr cnthvs_cval_el2, x13
msr cnthvs_tval_el2, x14
ldp x15, x16, [x0, #CTX_CNTHV_CTL_EL2]
msr cnthv_ctl_el2, x15
msr cnthv_cval_el2, x16
ldp x17, x9, [x0, #CTX_CNTHV_TVAL_EL2]
msr cnthv_tval_el2, x17
msr contextidr_el2, x9
ldr x10, [x0, #CTX_SDER32_EL2]
msr sder32_el2, x10
ldr x11, [x0, #CTX_TTBR1_EL2]
msr ttbr1_el2, x11
ldr x12, [x0, #CTX_VDISR_EL2]
msr vdisr_el2, x12
ldr x13, [x0, #CTX_VNCR_EL2]
msr vncr_el2, x13
ldr x14, [x0, #CTX_VSESR_EL2]
msr vsesr_el2, x14
ldr x15, [x0, #CTX_VSTCR_EL2]
msr vstcr_el2, x15
ldr x16, [x0, #CTX_VSTTBR_EL2]
msr vsttbr_el2, x16
#endif
#if ARM_ARCH_AT_LEAST(8, 5)
ldr x17, [x0, #CTX_SCXTNUM_EL2]
msr scxtnum_el2, x17
#endif
ret
endfunc el2_sysregs_context_restore
#endif /* CTX_INCLUDE_EL2_REGS */
/* ------------------------------------------------------------------
* The following function strictly follows the AArch64 PCS to use
* x9-x17 (temporary caller-saved registers) to save EL1 system


@@ -234,7 +234,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
* and other EL2 registers are set up by cm_prepare_ns_entry() as they
* are not part of the stored cpu_context.
*/
write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
/*
* Base the context ACTLR_EL1 on the current value, as it is
@@ -244,7 +244,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
* be zero.
*/
actlr_elx = read_actlr_el1();
write_ctx_reg((get_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
/*
* Populate EL3 state so that we've the right context
@@ -336,7 +336,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
CTX_SCR_EL3);
if ((scr_el3 & SCR_HCE_BIT) != 0U) {
/* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx),
sctlr_elx = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_SCTLR_EL1);
sctlr_elx &= SCTLR_EE_BIT;
sctlr_elx |= SCTLR_EL2_RES1;
@@ -530,6 +530,52 @@ void cm_prepare_el3_exit(uint32_t security_state)
cm_set_next_eret_context(security_state);
}
#if CTX_INCLUDE_EL2_REGS
/*******************************************************************************
* Save EL2 sysreg context
******************************************************************************/
void cm_el2_sysregs_context_save(uint32_t security_state)
{
u_register_t scr_el3 = read_scr();
/*
* Always save the non-secure EL2 context, only save the
* S-EL2 context if S-EL2 is enabled.
*/
if ((security_state == NON_SECURE) ||
((scr_el3 & SCR_EEL2_BIT) != 0U)) {
cpu_context_t *ctx;
ctx = cm_get_context(security_state);
assert(ctx != NULL);
el2_sysregs_context_save(get_el2_sysregs_ctx(ctx));
}
}
/*******************************************************************************
* Restore EL2 sysreg context
******************************************************************************/
void cm_el2_sysregs_context_restore(uint32_t security_state)
{
u_register_t scr_el3 = read_scr();
/*
* Always restore the non-secure EL2 context, only restore the
* S-EL2 context if S-EL2 is enabled.
*/
if ((security_state == NON_SECURE) ||
((scr_el3 & SCR_EEL2_BIT) != 0U)) {
cpu_context_t *ctx;
ctx = cm_get_context(security_state);
assert(ctx != NULL);
el2_sysregs_context_restore(get_el2_sysregs_ctx(ctx));
}
}
#endif /* CTX_INCLUDE_EL2_REGS */
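These hooks are meant to bracket a world switch alongside the existing EL1 pair. A sketch of the pairing as the SPMD applies it, mirroring spmd_smc_forward() later in this change (there the EL2 calls are gated by SPMD_SPM_AT_SEL2):

/* Sketch: leave the normal world and enter the secure world, carrying
 * the EL1 context and, when the SPM core runs at S-EL2, the EL2 context. */
cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
cm_el2_sysregs_context_save(NON_SECURE);
#endif
cm_el1_sysregs_context_restore(SECURE);
#if SPMD_SPM_AT_SEL2
cm_el2_sysregs_context_restore(SECURE);
#endif
cm_set_next_eret_context(SECURE);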
/*******************************************************************************
* The next four functions are used by runtime services to save and restore
* EL1 context on the 'cpu_context' structure for the specified security
@@ -542,7 +588,7 @@ void cm_el1_sysregs_context_save(uint32_t security_state)
ctx = cm_get_context(security_state);
assert(ctx != NULL);
el1_sysregs_context_save(get_sysregs_ctx(ctx));
el1_sysregs_context_save(get_el1_sysregs_ctx(ctx));
#if IMAGE_BL31
if (security_state == SECURE)
@@ -559,7 +605,7 @@ void cm_el1_sysregs_context_restore(uint32_t security_state)
ctx = cm_get_context(security_state);
assert(ctx != NULL);
el1_sysregs_context_restore(get_sysregs_ctx(ctx));
el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx));
#if IMAGE_BL31
if (security_state == SECURE)


@@ -188,6 +188,9 @@ SPD := none
# Enable the Management Mode (MM)-based Secure Partition Manager implementation
SPM_MM := 0
# Use SPM at S-EL2 as a default config for SPMD
SPMD_SPM_AT_SEL2 := 1
# Flag to introduce an infinite loop in BL1 just before it exits into the next
# image. This is meant to help debugging the post-BL2 phase.
SPIN_ON_BL1_EXIT := 0
@@ -262,3 +265,8 @@ USE_SPINLOCK_CAS := 0
# Enable Link Time Optimization
ENABLE_LTO := 0
# Build flag to include EL2 registers in cpu context save and restore during
# S-EL2 firmware entry/exit. This flag is to be used with SPD=spmd option.
# Default is 0.
CTX_INCLUDE_EL2_REGS := 0
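Taken together with the Makefile check above (SPMD with SPM at S-EL2 requires CTX_INCLUDE_EL2_REGS), a build that runs the SPM core at S-EL2 would set both flags; a purely illustrative invocation, where the platform and ARM_ARCH_MINOR settings are assumptions rather than anything this change mandates:

make PLAT=fvp ARM_ARCH_MINOR=4 SPD=spmd SPMD_SPM_AT_SEL2=1 CTX_INCLUDE_EL2_REGS=1 all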


@@ -11,7 +11,6 @@
attribute {
maj_ver = <0x0>;
min_ver = <0x9>;
runtime_el = <0x1>;
exec_state = <0x0>;
load_address = <0x0 0x6000000>;
entrypoint = <0x0 0x6000000>;


@@ -37,12 +37,6 @@ static int manifest_parse_attribute(spmc_manifest_sect_attribute_t *attr,
return -ENOENT;
}
rc = fdtw_read_cells(fdt, node, "runtime_el", 1, &attr->runtime_el);
if (rc) {
ERROR("Missing SPM core runtime EL in manifest.\n");
return -ENOENT;
}
rc = fdtw_read_cells(fdt, node, "exec_state", 1, &attr->exec_state);
if (rc)
NOTICE("Execution state not specified in SPM core manifest.\n");
@@ -61,7 +55,6 @@ static int manifest_parse_attribute(spmc_manifest_sect_attribute_t *attr,
VERBOSE("SPM core manifest attribute section:\n");
VERBOSE(" version: %x.%x\n", attr->major_version, attr->minor_version);
VERBOSE(" runtime_el: 0x%x\n", attr->runtime_el);
VERBOSE(" binary_size: 0x%x\n", attr->binary_size);
VERBOSE(" load_address: 0x%llx\n", attr->load_address);
VERBOSE(" entrypoint: 0x%llx\n", attr->entrypoint);


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -155,7 +155,7 @@ int32_t tegra_fiq_get_intr_context(void)
{
cpu_context_t *ctx = cm_get_context(NON_SECURE);
gp_regs_t *gpregs_ctx = get_gpregs_ctx(ctx);
const el1_sys_regs_t *el1state_ctx = get_sysregs_ctx(ctx);
const el1_sysregs_t *el1state_ctx = get_el1_sysregs_ctx(ctx);
uint32_t cpu = plat_my_core_pos();
uint64_t val;


@@ -150,9 +150,9 @@ static uint64_t trusty_fiq_handler(uint32_t id,
(void)memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
ctx->fiq_pc = SMC_GET_EL3(handle, CTX_ELR_EL3);
ctx->fiq_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
ctx->fiq_sp_el1 = read_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1);
ctx->fiq_sp_el1 = read_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1);
write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
write_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc, (uint32_t)ctx->fiq_handler_cpsr);
SMC_RET0(handle);
@@ -211,7 +211,7 @@ static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t
*/
(void)memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
ctx->fiq_handler_active = 0;
write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
write_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, (uint32_t)ctx->fiq_cpsr);
SMC_RET0(handle);


@@ -116,17 +116,17 @@ void spm_sp_setup(sp_context_t *sp_ctx)
xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
EL1_EL0_REGIME);
write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1,
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_MAIR_EL1,
mmu_cfg_params[MMU_CFG_MAIR]);
write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1,
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TCR_EL1,
mmu_cfg_params[MMU_CFG_TCR]);
write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TTBR0_EL1,
mmu_cfg_params[MMU_CFG_TTBR0]);
/* Setup SCTLR_EL1 */
u_register_t sctlr_el1 = read_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1);
u_register_t sctlr_el1 = read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1);
sctlr_el1 |=
/*SCTLR_EL1_RES1 |*/
@@ -160,7 +160,7 @@ void spm_sp_setup(sp_context_t *sp_ctx)
SCTLR_UMA_BIT
);
write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
/*
* Setup other system registers
@@ -168,10 +168,10 @@ void spm_sp_setup(sp_context_t *sp_ctx)
*/
/* Shim Exception Vector Base Address */
write_ctx_reg(get_sysregs_ctx(ctx), CTX_VBAR_EL1,
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_VBAR_EL1,
SPM_SHIM_EXCEPTIONS_PTR);
write_ctx_reg(get_sysregs_ctx(ctx), CTX_CNTKCTL_EL1,
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CNTKCTL_EL1,
EL0PTEN_BIT | EL0VTEN_BIT | EL0PCTEN_BIT | EL0VCTEN_BIT);
/*
@@ -181,7 +181,7 @@ void spm_sp_setup(sp_context_t *sp_ctx)
* TTA: Enable access to trace registers.
* ZEN (v8.2): Trap SVE instructions and access to SVE registers.
*/
write_ctx_reg(get_sysregs_ctx(ctx), CTX_CPACR_EL1,
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CPACR_EL1,
CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));
/*


@@ -33,7 +33,23 @@ spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];
/*******************************************************************************
* SPM Core attribute information read from its manifest.
******************************************************************************/
spmc_manifest_sect_attribute_t spmc_attrs;
static spmc_manifest_sect_attribute_t spmc_attrs;
/*******************************************************************************
* SPM Core entry point information. Discovered on the primary core and reused
* on secondary cores.
******************************************************************************/
static entry_point_info_t *spmc_ep_info;
/*******************************************************************************
* Static function declaration.
******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *rd_base, size_t rd_size);
static uint64_t spmd_spci_error_return(void *handle, int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid, bool secure_origin,
uint64_t x1, uint64_t x2, uint64_t x3,
uint64_t x4, void *handle);
/*******************************************************************************
* This function takes an SP context pointer and performs a synchronous entry
@@ -49,17 +65,19 @@ uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
/* Restore the context assigned above */
cm_el1_sysregs_context_restore(SECURE);
#if SPMD_SPM_AT_SEL2
cm_el2_sysregs_context_restore(SECURE);
#endif
cm_set_next_eret_context(SECURE);
/* Invalidate TLBs at EL1. */
tlbivmalle1();
dsbish();
/* Enter Secure Partition */
/* Enter SPMC */
rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);
/* Save secure state */
cm_el1_sysregs_context_save(SECURE);
#if SPMD_SPM_AT_SEL2
cm_el2_sysregs_context_save(SECURE);
#endif
return rc;
}
@@ -108,18 +126,130 @@ static int32_t spmd_init(void)
return 1;
}
/*******************************************************************************
* Load SPMC manifest, init SPMC.
******************************************************************************/
static int spmd_spmc_init(void *rd_base, size_t rd_size)
{
int rc;
uint32_t ep_attr;
unsigned int linear_id = plat_my_core_pos();
spmd_spm_core_context_t *spm_ctx = &spm_core_context[linear_id];
/* Load the SPM core manifest */
rc = plat_spm_core_manifest_load(&spmc_attrs, rd_base, rd_size);
if (rc != 0) {
WARN("No or invalid SPM core manifest image provided by BL2 "
"boot loader. ");
return 1;
}
/*
* Ensure that the SPM core version is compatible with the SPM
* dispatcher version
*/
if ((spmc_attrs.major_version != SPCI_VERSION_MAJOR) ||
(spmc_attrs.minor_version > SPCI_VERSION_MINOR)) {
WARN("Unsupported SPCI version (%x.%x) specified in SPM core "
"manifest image provided by BL2 boot loader.\n",
spmc_attrs.major_version, spmc_attrs.minor_version);
return 1;
}
INFO("SPCI version (%x.%x).\n", spmc_attrs.major_version,
spmc_attrs.minor_version);
INFO("SPM core run time EL%x.\n",
SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);
/* Validate the SPM core execution state */
if ((spmc_attrs.exec_state != MODE_RW_64) &&
(spmc_attrs.exec_state != MODE_RW_32)) {
WARN("Unsupported SPM core execution state %x specified in "
"manifest image provided by BL2 boot loader.\n",
spmc_attrs.exec_state);
return 1;
}
INFO("SPM core execution state %x.\n", spmc_attrs.exec_state);
#if SPMD_SPM_AT_SEL2
/* Ensure manifest has not requested AArch32 state in S-EL2 */
if (spmc_attrs.exec_state == MODE_RW_32) {
WARN("AArch32 state at S-EL2 is not supported.\n");
return 1;
}
/*
* Check if S-EL2 is supported on this system if S-EL2
* is required for SPM
*/
uint64_t sel2 = read_id_aa64pfr0_el1();
sel2 >>= ID_AA64PFR0_SEL2_SHIFT;
sel2 &= ID_AA64PFR0_SEL2_MASK;
if (!sel2) {
WARN("SPM core run time S-EL2 is not supported.");
return 1;
}
#endif /* SPMD_SPM_AT_SEL2 */
/* Initialise an entrypoint to set up the CPU context */
ep_attr = SECURE | EP_ST_ENABLE;
if (read_sctlr_el3() & SCTLR_EE_BIT) {
ep_attr |= EP_EE_BIG;
}
SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);
assert(spmc_ep_info->pc == BL32_BASE);
/*
* Populate SPSR for SPM core based upon validated parameters from the
* manifest
*/
if (spmc_attrs.exec_state == MODE_RW_32) {
spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
SPSR_E_LITTLE,
DAIF_FIQ_BIT |
DAIF_IRQ_BIT |
DAIF_ABT_BIT);
} else {
#if SPMD_SPM_AT_SEL2
static const uint32_t runtime_el = MODE_EL2;
#else
static const uint32_t runtime_el = MODE_EL1;
#endif
spmc_ep_info->spsr = SPSR_64(runtime_el,
MODE_SP_ELX,
DISABLE_ALL_EXCEPTIONS);
}
/* Initialise SPM core context with this entry point information */
cm_setup_context(&spm_ctx->cpu_ctx, spmc_ep_info);
/* Reuse PSCI affinity states to mark this SPMC context as off */
spm_ctx->state = AFF_STATE_OFF;
INFO("SPM core setup done.\n");
/* Register init function for deferred init. */
bl31_register_bl32_init(&spmd_init);
return 0;
}
/*******************************************************************************
* Initialize context of SPM core.
******************************************************************************/
int32_t spmd_setup(void)
int spmd_setup(void)
{
int rc;
void *rd_base;
size_t rd_size;
entry_point_info_t *spmc_ep_info;
uintptr_t rd_base_align;
uintptr_t rd_size_align;
uint32_t ep_attr;
spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
if (!spmc_ep_info) {
@@ -155,130 +285,72 @@ int32_t spmd_setup(void)
(uintptr_t) rd_base_align,
rd_size_align,
MT_RO_DATA);
if (rc < 0) {
if (rc != 0) {
ERROR("Error while mapping SPM core manifest (%d).\n", rc);
panic();
}
/* Load the SPM core manifest */
rc = plat_spm_core_manifest_load(&spmc_attrs, rd_base, rd_size);
if (rc < 0) {
WARN("No or invalid SPM core manifest image provided by BL2 "
"boot loader. ");
goto error;
}
/* Load manifest, init SPMC */
rc = spmd_spmc_init(rd_base, rd_size);
if (rc != 0) {
int mmap_rc;
/*
* Ensure that the SPM core version is compatible with the SPM
* dispatcher version
*/
if ((spmc_attrs.major_version != SPCI_VERSION_MAJOR) ||
(spmc_attrs.minor_version > SPCI_VERSION_MINOR)) {
WARN("Unsupported SPCI version (%x.%x) specified in SPM core "
"manifest image provided by BL2 boot loader.\n",
spmc_attrs.major_version, spmc_attrs.minor_version);
goto error;
}
WARN("Booting device without SPM initialization. "
"SPCI SMCs destined for SPM core will return "
"ENOTSUPPORTED\n");
INFO("SPCI version (%x.%x).\n", spmc_attrs.major_version,
spmc_attrs.minor_version);
/* Validate the SPM core runtime EL */
if ((spmc_attrs.runtime_el != MODE_EL1) &&
(spmc_attrs.runtime_el != MODE_EL2)) {
WARN("Unsupported SPM core run time EL%x specified in "
"manifest image provided by BL2 boot loader.\n",
spmc_attrs.runtime_el);
goto error;
}
INFO("SPM core run time EL%x.\n", spmc_attrs.runtime_el);
/* Validate the SPM core execution state */
if ((spmc_attrs.exec_state != MODE_RW_64) &&
(spmc_attrs.exec_state != MODE_RW_32)) {
WARN("Unsupported SPM core execution state %x specified in "
"manifest image provided by BL2 boot loader.\n",
spmc_attrs.exec_state);
goto error;
}
INFO("SPM core execution state %x.\n", spmc_attrs.exec_state);
/* Ensure manifest has not requested S-EL2 in AArch32 state */
if ((spmc_attrs.exec_state == MODE_RW_32) &&
(spmc_attrs.runtime_el == MODE_EL2)) {
WARN("Invalid combination of SPM core execution state (%x) "
"and run time EL (%x).\n", spmc_attrs.exec_state,
spmc_attrs.runtime_el);
goto error;
}
/*
* Check if S-EL2 is supported on this system if S-EL2
* is required for SPM
*/
if (spmc_attrs.runtime_el == MODE_EL2) {
uint64_t sel2 = read_id_aa64pfr0_el1();
sel2 >>= ID_AA64PFR0_SEL2_SHIFT;
sel2 &= ID_AA64PFR0_SEL2_MASK;
if (!sel2) {
WARN("SPM core run time EL: S-EL%x is not supported "
"but specified in manifest image provided by "
"BL2 boot loader.\n", spmc_attrs.runtime_el);
goto error;
mmap_rc = mmap_remove_dynamic_region(rd_base_align,
rd_size_align);
if (mmap_rc != 0) {
ERROR("Error while unmapping SPM core manifest (%d).\n",
mmap_rc);
panic();
}
return rc;
}
/* Initialise an entrypoint to set up the CPU context */
ep_attr = SECURE | EP_ST_ENABLE;
if (read_sctlr_el3() & SCTLR_EE_BIT)
ep_attr |= EP_EE_BIG;
SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);
assert(spmc_ep_info->pc == BL32_BASE);
/*
* Populate SPSR for SPM core based upon validated parameters from the
* manifest
*/
if (spmc_attrs.exec_state == MODE_RW_32) {
spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
SPSR_E_LITTLE,
DAIF_FIQ_BIT |
DAIF_IRQ_BIT |
DAIF_ABT_BIT);
} else {
spmc_ep_info->spsr = SPSR_64(spmc_attrs.runtime_el,
MODE_SP_ELX,
DISABLE_ALL_EXCEPTIONS);
}
/* Initialise SPM core context with this entry point information */
cm_setup_context(&(spm_core_context[plat_my_core_pos()].cpu_ctx),
spmc_ep_info);
INFO("SPM core setup done.\n");
/* Register init function for deferred init. */
bl31_register_bl32_init(&spmd_init);
return 0;
}
error:
WARN("Booting device without SPM initialization. "
"SPCI SMCs destined for SPM core will return "
"ENOTSUPPORTED\n");
/*******************************************************************************
* Forward SMC to the other security state
******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid, bool secure_origin,
uint64_t x1, uint64_t x2, uint64_t x3,
uint64_t x4, void *handle)
{
uint32_t secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
uint32_t secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;
rc = mmap_remove_dynamic_region(rd_base_align, rd_size_align);
if (rc < 0) {
ERROR("Error while unmapping SPM core manifest (%d).\n",
rc);
panic();
}
/* Save incoming security state */
cm_el1_sysregs_context_save(secure_state_in);
#if SPMD_SPM_AT_SEL2
cm_el2_sysregs_context_save(secure_state_in);
#endif
return 1;
/* Restore outgoing security state */
cm_el1_sysregs_context_restore(secure_state_out);
#if SPMD_SPM_AT_SEL2
cm_el2_sysregs_context_restore(secure_state_out);
#endif
cm_set_next_eret_context(secure_state_out);
SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
SMC_GET_GP(handle, CTX_GPREG_X5),
SMC_GET_GP(handle, CTX_GPREG_X6),
SMC_GET_GP(handle, CTX_GPREG_X7));
}
/*******************************************************************************
* Return SPCI_ERROR with specified error code
******************************************************************************/
static uint64_t spmd_spci_error_return(void *handle, int error_code)
{
SMC_RET8(handle, SPCI_ERROR,
SPCI_TARGET_INFO_MBZ, error_code,
SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, SPCI_PARAM_MBZ,
SPCI_PARAM_MBZ, SPCI_PARAM_MBZ);
}
/*******************************************************************************
@@ -289,19 +361,12 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
uint64_t x3, uint64_t x4, void *cookie, void *handle,
uint64_t flags)
{
uint32_t in_sstate;
uint32_t out_sstate;
int32_t ret;
spmd_spm_core_context_t *ctx = &spm_core_context[plat_my_core_pos()];
bool secure_origin;
int32_t ret;
/* Determine which security state this SMC originated from */
if (is_caller_secure(flags)) {
in_sstate = SECURE;
out_sstate = NON_SECURE;
} else {
in_sstate = NON_SECURE;
out_sstate = SECURE;
}
secure_origin = is_caller_secure(flags);
INFO("SPM: 0x%x, 0x%llx, 0x%llx, 0x%llx, 0x%llx, "
"0x%llx, 0x%llx, 0x%llx\n",
@@ -316,20 +381,12 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
* this CPU. If so, then indicate that the SPM core initialised
* unsuccessfully.
*/
if ((in_sstate == SECURE) && (ctx->state == SPMC_STATE_RESET))
if (secure_origin && (ctx->state == SPMC_STATE_RESET)) {
spmd_spm_core_sync_exit(x2);
}
/* Save incoming security state */
cm_el1_sysregs_context_save(in_sstate);
/* Restore outgoing security state */
cm_el1_sysregs_context_restore(out_sstate);
cm_set_next_eret_context(out_sstate);
SMC_RET8(cm_get_context(out_sstate), smc_fid, x1, x2, x3, x4,
SMC_GET_GP(handle, CTX_GPREG_X5),
SMC_GET_GP(handle, CTX_GPREG_X6),
SMC_GET_GP(handle, CTX_GPREG_X7));
return spmd_smc_forward(smc_fid, secure_origin,
x1, x2, x3, x4, handle);
break; /* not reached */
case SPCI_VERSION:
@@ -353,29 +410,18 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
*/
/*
* Check if w1 holds a valid SPCI fid. This is an
* Check if x1 holds a valid SPCI fid. This is an
* optimization.
*/
if (!is_spci_fid(x1))
SMC_RET8(handle, SPCI_ERROR,
SPCI_TARGET_INFO_MBZ, SPCI_ERROR_NOT_SUPPORTED,
SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, SPCI_PARAM_MBZ,
SPCI_PARAM_MBZ, SPCI_PARAM_MBZ);
if (!is_spci_fid(x1)) {
return spmd_spci_error_return(handle,
SPCI_ERROR_NOT_SUPPORTED);
}
/* Forward SMC from Normal world to the SPM core */
if (in_sstate == NON_SECURE) {
/* Save incoming security state */
cm_el1_sysregs_context_save(in_sstate);
/* Restore outgoing security state */
cm_el1_sysregs_context_restore(out_sstate);
cm_set_next_eret_context(out_sstate);
SMC_RET8(cm_get_context(out_sstate), smc_fid,
x1, x2, x3, x4,
SMC_GET_GP(handle, CTX_GPREG_X5),
SMC_GET_GP(handle, CTX_GPREG_X6),
SMC_GET_GP(handle, CTX_GPREG_X7));
if (!secure_origin) {
return spmd_smc_forward(smc_fid, secure_origin,
x1, x2, x3, x4, handle);
} else {
/*
* Return success if call was from secure world i.e. all
@@ -387,6 +433,7 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
SMC_GET_GP(handle, CTX_GPREG_X6),
SMC_GET_GP(handle, CTX_GPREG_X7));
}
break; /* not reached */
case SPCI_RX_RELEASE:
@@ -395,11 +442,9 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
case SPCI_RXTX_UNMAP:
case SPCI_MSG_RUN:
/* This interface must be invoked only by the Normal world */
if (in_sstate == SECURE) {
SMC_RET8(handle, SPCI_ERROR,
SPCI_TARGET_INFO_MBZ, SPCI_ERROR_NOT_SUPPORTED,
SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, SPCI_PARAM_MBZ,
SPCI_PARAM_MBZ, SPCI_PARAM_MBZ);
if (secure_origin) {
return spmd_spci_error_return(handle,
SPCI_ERROR_NOT_SUPPORTED);
}
/* Fall through to forward the call to the other world */
@@ -430,17 +475,8 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
* simply forward the call to the Normal world.
*/
/* Save incoming security state */
cm_el1_sysregs_context_save(in_sstate);
/* Restore outgoing security state */
cm_el1_sysregs_context_restore(out_sstate);
cm_set_next_eret_context(out_sstate);
SMC_RET8(cm_get_context(out_sstate), smc_fid, x1, x2, x3, x4,
SMC_GET_GP(handle, CTX_GPREG_X5),
SMC_GET_GP(handle, CTX_GPREG_X6),
SMC_GET_GP(handle, CTX_GPREG_X7));
return spmd_smc_forward(smc_fid, secure_origin,
x1, x2, x3, x4, handle);
break; /* not reached */
case SPCI_MSG_WAIT:
@@ -449,39 +485,25 @@ uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
* this CPU from the Secure world. If so, then indicate that the
* SPM core initialised successfully.
*/
if ((in_sstate == SECURE) && (ctx->state == SPMC_STATE_RESET)) {
if (secure_origin && (ctx->state == SPMC_STATE_RESET)) {
spmd_spm_core_sync_exit(0);
}
/* Intentional fall-through */
/* Fall through to forward the call to the other world */
case SPCI_MSG_YIELD:
/* This interface must be invoked only by the Secure world */
if (in_sstate == NON_SECURE) {
SMC_RET8(handle, SPCI_ERROR,
SPCI_TARGET_INFO_MBZ, SPCI_ERROR_NOT_SUPPORTED,
SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, SPCI_PARAM_MBZ,
SPCI_PARAM_MBZ, SPCI_PARAM_MBZ);
if (!secure_origin) {
return spmd_spci_error_return(handle,
SPCI_ERROR_NOT_SUPPORTED);
}
/* Save incoming security state */
cm_el1_sysregs_context_save(in_sstate);
/* Restore outgoing security state */
cm_el1_sysregs_context_restore(out_sstate);
cm_set_next_eret_context(out_sstate);
SMC_RET8(cm_get_context(out_sstate), smc_fid, x1, x2, x3, x4,
SMC_GET_GP(handle, CTX_GPREG_X5),
SMC_GET_GP(handle, CTX_GPREG_X6),
SMC_GET_GP(handle, CTX_GPREG_X7));
return spmd_smc_forward(smc_fid, secure_origin,
x1, x2, x3, x4, handle);
break; /* not reached */
default:
WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
SMC_RET8(handle, SPCI_ERROR,
SPCI_TARGET_INFO_MBZ, SPCI_ERROR_NOT_SUPPORTED,
SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, SPCI_PARAM_MBZ,
SPCI_PARAM_MBZ, SPCI_PARAM_MBZ);
return spmd_spci_error_return(handle, SPCI_ERROR_NOT_SUPPORTED);
}
}