feat(sme): enable SME functionality

This patch adds two new compile-time options to enable SME in TF-A:
ENABLE_SME_FOR_NS and ENABLE_SME_FOR_SWD, for use in the non-secure and
secure worlds respectively. Setting ENABLE_SME_FOR_NS=1 enables SME for
the non-secure world and traps SME, SVE, and FPU/SIMD instructions in
secure context. Setting ENABLE_SME_FOR_SWD=1 disables these traps, but
support for SME context management does not yet exist in the SPM, so
building with SPD=spmd will fail.

The existing ENABLE_SVE_FOR_NS and ENABLE_SVE_FOR_SWD options cannot be
combined with the SME options: SME is a superset of SVE, so the SME
options enable SVE and FPU/SIMD along with SME.
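
To make the trapping concrete, here is a minimal sketch of the EL3 bits
involved (illustration only, not code from this patch; the real logic is
in sme_enable()/sme_disable() in lib/extensions/sme/sme.c), using the
CPTR_EL3 definitions added below:

#include <arch.h>
#include <arch_helpers.h>

/* Illustrative only: the trap configuration the secure world's saved
 * CPTR_EL3 ends up with when ENABLE_SME_FOR_NS=1 and ENABLE_SME_FOR_SWD=0. */
static u_register_t cptr_el3_secure_traps(u_register_t cptr)
{
	cptr &= ~ESM_BIT;	/* trap SME */
	cptr &= ~CPTR_EZ_BIT;	/* trap SVE */
	cptr |= TFP_BIT;	/* trap FPU/SIMD */
	return cptr;
}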

Signed-off-by: John Powell <john.powell@arm.com>
Change-Id: Iaaac9d22fe37b4a92315207891da848a8fd0ed73
johpow01 2021-07-08 14:14:00 -05:00
parent 2242773ddb
commit dc78e62d80
13 changed files with 337 additions and 48 deletions


@@ -777,6 +777,52 @@ ifneq (${DECRYPTION_SUPPORT},none)
endif
endif
# SME/SVE only supported on AArch64
ifeq (${ARCH},aarch32)
ifeq (${ENABLE_SME_FOR_NS},1)
$(error "ENABLE_SME_FOR_NS cannot be used with ARCH=aarch32")
endif
ifeq (${ENABLE_SVE_FOR_NS},1)
# Warning instead of error due to CI dependency on this
$(warning "ENABLE_SVE_FOR_NS cannot be used with ARCH=aarch32")
$(warning "Forced ENABLE_SVE_FOR_NS=0")
override ENABLE_SVE_FOR_NS := 0
endif
endif
# Ensure ENABLE_RME is not used with SME
ifeq (${ENABLE_RME},1)
ifeq (${ENABLE_SME_FOR_NS},1)
$(error "ENABLE_SME_FOR_NS cannot be used with ENABLE_RME")
endif
endif
# Secure SME/SVE requires the non-secure component as well
ifeq (${ENABLE_SME_FOR_SWD},1)
ifeq (${ENABLE_SME_FOR_NS},0)
$(error "ENABLE_SME_FOR_SWD requires ENABLE_SME_FOR_NS")
endif
endif
ifeq (${ENABLE_SVE_FOR_SWD},1)
ifeq (${ENABLE_SVE_FOR_NS},0)
$(error "ENABLE_SVE_FOR_SWD requires ENABLE_SVE_FOR_NS")
endif
endif
# SVE and SME cannot be used with CTX_INCLUDE_FPREGS since secure manager does
# its own context management including FPU registers.
ifeq (${CTX_INCLUDE_FPREGS},1)
ifeq (${ENABLE_SME_FOR_NS},1)
$(error "ENABLE_SME_FOR_NS cannot be used with CTX_INCLUDE_FPREGS")
endif
ifeq (${ENABLE_SVE_FOR_NS},1)
# Warning instead of error due to CI dependency on this
$(warning "ENABLE_SVE_FOR_NS cannot be used with CTX_INCLUDE_FPREGS")
$(warning "Forced ENABLE_SVE_FOR_NS=0")
override ENABLE_SVE_FOR_NS := 0
endif
endif
################################################################################
# Process platform overrideable behaviour
################################################################################
@@ -941,6 +987,8 @@ $(eval $(call assert_booleans,\
ENABLE_PSCI_STAT \
ENABLE_RME \
ENABLE_RUNTIME_INSTRUMENTATION \
ENABLE_SME_FOR_NS \
ENABLE_SME_FOR_SWD \
ENABLE_SPE_FOR_LOWER_ELS \
ENABLE_SVE_FOR_NS \
ENABLE_SVE_FOR_SWD \
@@ -1048,6 +1096,8 @@ $(eval $(call add_defines,\
ENABLE_PSCI_STAT \
ENABLE_RME \
ENABLE_RUNTIME_INSTRUMENTATION \
ENABLE_SME_FOR_NS \
ENABLE_SME_FOR_SWD \
ENABLE_SPE_FOR_LOWER_ELS \
ENABLE_SVE_FOR_NS \
ENABLE_SVE_FOR_SWD \


@@ -87,9 +87,14 @@ ifeq (${ENABLE_MPMM},1)
BL31_SOURCES += ${MPMM_SOURCES}
endif
ifeq (${ENABLE_SME_FOR_NS},1)
BL31_SOURCES += lib/extensions/sme/sme.c
BL31_SOURCES += lib/extensions/sve/sve.c
else
ifeq (${ENABLE_SVE_FOR_NS},1)
BL31_SOURCES += lib/extensions/sve/sve.c
endif
endif
ifeq (${ENABLE_MPAM_FOR_LOWER_ELS},1)
BL31_SOURCES += lib/extensions/mpam/mpam.c


@@ -299,6 +299,21 @@ Common build options
instrumented. Enabling this option enables the ``ENABLE_PMF`` build option
as well. Default is 0.
- ``ENABLE_SME_FOR_NS``: Boolean option to enable the Scalable Matrix Extension
(SME), SVE, and FPU/SIMD for the non-secure world only. These features share
registers, so they are enabled together. Using this option without
ENABLE_SME_FOR_SWD=1 will cause SME, SVE, and FPU/SIMD instructions in the
secure world to trap to EL3. SME is an optional architectural feature for
AArch64 and TF-A support is experimental. At this time, this build option
cannot be used together with SPD=spmd or ENABLE_RME, and attempting to build
with those options will fail. Default is 0.
- ``ENABLE_SME_FOR_SWD``: Boolean option to enable the Scalable Matrix
Extension for secure world use along with SVE and FPU/SIMD. ENABLE_SME_FOR_NS
must also be set to use this. If this option is enabled, the secure world MUST
handle context switching for SME, SVE, and FPU/SIMD registers to ensure that
no data is leaked to the non-secure world. This option is experimental.
Default is 0.
- ``ENABLE_SPE_FOR_LOWER_ELS`` : Boolean option to enable Statistical Profiling
extensions. This is an optional architectural feature for AArch64.
The default is 1 but is automatically disabled when the target architecture
@@ -313,8 +328,8 @@ Common build options
which are aliased by the SIMD and FP registers. The build option is not
compatible with the ``CTX_INCLUDE_FPREGS`` build option, and will raise an
assert on platforms where SVE is implemented and ``ENABLE_SVE_FOR_NS`` set to
-1. The default is 1 but is automatically disabled when the target
-architecture is AArch32.
+1. The default is 1 but is automatically disabled when ENABLE_SME_FOR_NS=1
+since SME encompasses SVE.
- ``ENABLE_SVE_FOR_SWD``: Boolean option to enable SVE for the Secure world.
SVE is an optional architectural feature for AArch64. Note that this option


@@ -218,8 +218,8 @@
#define ID_AA64DFR0_MTPMU_SUPPORTED ULL(1)
/* ID_AA64ISAR0_EL1 definitions */
#define ID_AA64ISAR0_RNDR_SHIFT U(60)
#define ID_AA64ISAR0_RNDR_MASK ULL(0xf)
/* ID_AA64ISAR1_EL1 definitions */
#define ID_AA64ISAR1_EL1 S3_0_C0_C6_1
@@ -286,10 +286,10 @@
#define ID_AA64MMFR1_EL1_VHE_SHIFT U(8)
#define ID_AA64MMFR1_EL1_VHE_MASK ULL(0xf)
#define ID_AA64MMFR1_EL1_HCX_SHIFT U(40)
#define ID_AA64MMFR1_EL1_HCX_MASK ULL(0xf)
#define ID_AA64MMFR1_EL1_HCX_SUPPORTED ULL(0x1)
#define ID_AA64MMFR1_EL1_HCX_NOT_SUPPORTED ULL(0x0)
/* ID_AA64MMFR2_EL1 definitions */
#define ID_AA64MMFR2_EL1 S3_0_C0_C7_2
@@ -329,6 +329,9 @@
#define ID_AA64PFR1_MPAM_FRAC_SHIFT ULL(16)
#define ID_AA64PFR1_MPAM_FRAC_MASK ULL(0xf)
#define ID_AA64PFR1_EL1_SME_SHIFT U(24)
#define ID_AA64PFR1_EL1_SME_MASK ULL(0xf)
/* ID_PFR1_EL1 definitions */
#define ID_PFR1_VIRTEXT_SHIFT U(12)
#define ID_PFR1_VIRTEXT_MASK U(0xf)
@@ -388,6 +391,7 @@
#define SCTLR_ITFSB_BIT (ULL(1) << 37)
#define SCTLR_TCF0_SHIFT U(38)
#define SCTLR_TCF0_MASK ULL(3)
#define SCTLR_ENTP2_BIT (ULL(1) << 60)
/* Tag Check Faults in EL0 have no effect on the PE */
#define SCTLR_TCF0_NO_EFFECT U(0)
@@ -442,7 +446,9 @@
#define SCR_GPF_BIT (UL(1) << 48)
#define SCR_TWEDEL_SHIFT U(30)
#define SCR_TWEDEL_MASK ULL(0xf)
#define SCR_HXEn_BIT (UL(1) << 38)
#define SCR_ENTP2_SHIFT U(41)
#define SCR_ENTP2_BIT (UL(1) << SCR_ENTP2_SHIFT)
#define SCR_AMVOFFEN_BIT (UL(1) << 35)
#define SCR_TWEDEn_BIT (UL(1) << 29)
#define SCR_ECVEN_BIT (UL(1) << 28)
@@ -465,7 +471,7 @@
#define SCR_FIQ_BIT (UL(1) << 2)
#define SCR_IRQ_BIT (UL(1) << 1)
#define SCR_NS_BIT (UL(1) << 0)
-#define SCR_VALID_BIT_MASK U(0x2f8f)
+#define SCR_VALID_BIT_MASK U(0x24000002F8F)
#define SCR_RESET_VAL SCR_RES1_BITS
/* MDCR_EL3 definitions */
@@ -574,23 +580,28 @@
#define TAM_SHIFT U(30)
#define TAM_BIT (U(1) << TAM_SHIFT)
#define TTA_BIT (U(1) << 20)
#define ESM_BIT (U(1) << 12)
#define TFP_BIT (U(1) << 10)
#define CPTR_EZ_BIT (U(1) << 8)
-#define CPTR_EL3_RESET_VAL (TCPAC_BIT | TAM_BIT | TTA_BIT | TFP_BIT & ~(CPTR_EZ_BIT))
+#define CPTR_EL3_RESET_VAL ((TCPAC_BIT | TAM_BIT | TTA_BIT | TFP_BIT) & \
+ ~(CPTR_EZ_BIT | ESM_BIT))
/* CPTR_EL2 definitions */
#define CPTR_EL2_RES1 ((U(1) << 13) | (U(1) << 12) | (U(0x3ff)))
#define CPTR_EL2_TCPAC_BIT (U(1) << 31)
#define CPTR_EL2_TAM_SHIFT U(30)
#define CPTR_EL2_TAM_BIT (U(1) << CPTR_EL2_TAM_SHIFT)
#define CPTR_EL2_SMEN_MASK ULL(0x3)
#define CPTR_EL2_SMEN_SHIFT U(24)
#define CPTR_EL2_TTA_BIT (U(1) << 20)
#define CPTR_EL2_TSM_BIT (U(1) << 12)
#define CPTR_EL2_TFP_BIT (U(1) << 10)
#define CPTR_EL2_TZ_BIT (U(1) << 8)
#define CPTR_EL2_RESET_VAL CPTR_EL2_RES1
/* VTCR_EL2 definitions */
#define VTCR_RESET_VAL U(0x0)
#define VTCR_EL2_MSA (U(1) << 31)
/* CPSR/SPSR definitions */
#define DAIF_FIQ_BIT (U(1) << 0)
@@ -917,6 +928,20 @@
/* ZCR_EL2 definitions */
#define ZCR_EL2_LEN_MASK U(0xf)
/*******************************************************************************
* Definitions for system register interface to SME as needed in EL3
******************************************************************************/
#define ID_AA64SMFR0_EL1 S3_0_C0_C4_5
#define SMCR_EL3 S3_6_C1_C2_6
/* ID_AA64SMFR0_EL1 definitions */
#define ID_AA64SMFR0_EL1_FA64_BIT (UL(1) << 63)
/* SMCR_ELx definitions */
#define SMCR_ELX_LEN_SHIFT U(0)
#define SMCR_ELX_LEN_MASK U(0x1ff)
#define SMCR_ELX_FA64_BIT (U(1) << 31)
/*******************************************************************************
* Definitions of MAIR encodings for device and normal memory
******************************************************************************/
@@ -1199,12 +1224,12 @@
/*******************************************************************************
* FEAT_HCX - Extended Hypervisor Configuration Register
******************************************************************************/
#define HCRX_EL2 S3_4_C1_C2_2
#define HCRX_EL2_FGTnXS_BIT (UL(1) << 4)
#define HCRX_EL2_FnXS_BIT (UL(1) << 3)
#define HCRX_EL2_EnASR_BIT (UL(1) << 2)
#define HCRX_EL2_EnALS_BIT (UL(1) << 1)
#define HCRX_EL2_EnAS0_BIT (UL(1) << 0)
/*******************************************************************************
* Definitions for DynamicIQ Shared Unit registers


@@ -509,6 +509,9 @@ DEFINE_RENAME_SYSREG_RW_FUNCS(pmblimitr_el1, PMBLIMITR_EL1)
DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el3, ZCR_EL3)
DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el2, ZCR_EL2)
DEFINE_RENAME_SYSREG_READ_FUNC(id_aa64smfr0_el1, ID_AA64SMFR0_EL1)
DEFINE_RENAME_SYSREG_RW_FUNCS(smcr_el3, SMCR_EL3)
DEFINE_RENAME_SYSREG_READ_FUNC(erridr_el1, ERRIDR_EL1)
DEFINE_RENAME_SYSREG_WRITE_FUNC(errselr_el1, ERRSELR_EL1)


@@ -222,6 +222,9 @@
*
* CPTR_EL3.EZ: Set to zero so that all SVE functionality is trapped
* to EL3 by default.
*
* CPTR_EL3.ESM: Set to zero so that all SME functionality is trapped
* to EL3 by default.
*/
mov_imm x0, (CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))


@@ -0,0 +1,27 @@
/*
* Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef SME_H
#define SME_H
#include <stdbool.h>
#include <context.h>
/*
* Maximum value of the LEN field in SMCR_ELx. This is different from the
* maximum supported value, which is platform dependent. In the first version
* of SME the LEN field is limited to 4 bits, but it will be expanded in future
* iterations. To support different versions, the code that discovers the
* supported vector lengths writes the max value into SMCR_ELx, then reads it
* back to see how many bits are implemented.
*/
#define SME_SMCR_LEN_MAX U(0x1FF)
void sme_enable(cpu_context_t *context);
void sme_disable(cpu_context_t *context);
#endif /* SME_H */
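
As an aside, the discovery scheme described in the comment above can be
sketched as follows (illustration only, not part of this patch; it assumes
it runs at EL3 with CPTR_EL3.ESM already set so SMCR_EL3 is accessible, and
uses the read/write_smcr_el3 helpers this patch adds to arch_helpers.h):

#include <arch.h>
#include <arch_helpers.h>
#include <lib/extensions/sme.h>

/* Write the architectural maximum LEN value, then read back which bits the
 * hardware actually retained; that tells us the implemented LEN width. */
static u_register_t sme_probe_len_bits(void)
{
	u_register_t smcr = read_smcr_el3();

	write_smcr_el3((smcr & ~SMCR_ELX_LEN_MASK) | SME_SMCR_LEN_MAX);

	return read_smcr_el3() & SMCR_ELX_LEN_MASK;
}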


@@ -10,5 +10,6 @@
#include <context.h>
void sve_enable(cpu_context_t *context);
void sve_disable(cpu_context_t *context);
#endif /* SVE_H */


@@ -20,6 +20,7 @@
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/mpam.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/sys_reg_trace.h>
@@ -28,7 +29,7 @@
#include <lib/extensions/twed.h>
#include <lib/utils.h>
-static void enable_extensions_secure(cpu_context_t *ctx);
+static void manage_extensions_secure(cpu_context_t *ctx);
/*******************************************************************************
* Context management library initialisation routine. This library is used by
@@ -219,7 +220,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
/* Save the initialized value of CPTR_EL3 register */
write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, read_cptr_el3());
if (security_state == SECURE) {
-enable_extensions_secure(ctx);
+manage_extensions_secure(ctx);
}
/*
@@ -365,7 +366,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
* When EL2 is implemented but unused `el2_unused` is non-zero, otherwise
* it is zero.
******************************************************************************/
-static void enable_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
+static void manage_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
{
#if IMAGE_BL31
#if ENABLE_SPE_FOR_LOWER_ELS
@@ -376,7 +377,11 @@ static void enable_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
amu_enable(el2_unused, ctx);
#endif
-#if ENABLE_SVE_FOR_NS
+#if ENABLE_SME_FOR_NS
/* Enable SME, SVE, and FPU/SIMD for non-secure world. */
sme_enable(ctx);
#elif ENABLE_SVE_FOR_NS
/* Enable SVE and FPU/SIMD for non-secure world. */
sve_enable(ctx);
#endif
@@ -395,20 +400,45 @@ static void enable_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
#if ENABLE_TRF_FOR_NS
trf_enable();
#endif /* ENABLE_TRF_FOR_NS */
#endif
}
/*******************************************************************************
* Enable architecture extensions on first entry to Secure world.
******************************************************************************/
-static void enable_extensions_secure(cpu_context_t *ctx)
+static void manage_extensions_secure(cpu_context_t *ctx)
{
#if IMAGE_BL31
-#if ENABLE_SVE_FOR_SWD
+#if ENABLE_SME_FOR_NS
#if ENABLE_SME_FOR_SWD
/*
* Enable SME, SVE, FPU/SIMD in secure context, secure manager must
* ensure SME, SVE, and FPU/SIMD context properly managed.
*/
sme_enable(ctx);
#else /* ENABLE_SME_FOR_SWD */
/*
* Disable SME, SVE, FPU/SIMD in secure context so non-secure world can
* safely use the associated registers.
*/
sme_disable(ctx);
#endif /* ENABLE_SME_FOR_SWD */
#elif ENABLE_SVE_FOR_NS
#if ENABLE_SVE_FOR_SWD
/*
* Enable SVE and FPU in secure context, secure manager must ensure that
* the SVE and FPU register contexts are properly managed.
*/
sve_enable(ctx);
-#endif
-#endif
+#else /* ENABLE_SVE_FOR_SWD */
+/*
+* Disable SVE and FPU in secure context so non-secure world can safely
+* use them.
+*/
+sve_disable(ctx);
+#endif /* ENABLE_SVE_FOR_SWD */
+#endif /* ENABLE_SVE_FOR_NS */
+#endif /* IMAGE_BL31 */
}
/*******************************************************************************
@@ -654,7 +684,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
~(CNTHP_CTL_ENABLE_BIT));
}
-enable_extensions_nonsecure(el2_unused, ctx);
+manage_extensions_nonsecure(el2_unused, ctx);
}
cm_el1_sysregs_context_restore(security_state);

lib/extensions/sme/sme.c

@@ -0,0 +1,103 @@
/*
* Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdbool.h>
#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/sve.h>
static bool feat_sme_supported(void)
{
uint64_t features;
features = read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_SME_SHIFT;
return (features & ID_AA64PFR1_EL1_SME_MASK) != 0U;
}
static bool feat_sme_fa64_supported(void)
{
uint64_t features;
features = read_id_aa64smfr0_el1();
return (features & ID_AA64SMFR0_EL1_FA64_BIT) != 0U;
}
void sme_enable(cpu_context_t *context)
{
u_register_t reg;
u_register_t cptr_el3;
el3_state_t *state;
/* Make sure SME is implemented in hardware before continuing. */
if (!feat_sme_supported()) {
return;
}
/* Get the context state. */
state = get_el3state_ctx(context);
/* Enable SME in CPTR_EL3. */
reg = read_ctx_reg(state, CTX_CPTR_EL3);
reg |= ESM_BIT;
write_ctx_reg(state, CTX_CPTR_EL3, reg);
/* Set the ENTP2 bit in SCR_EL3 to enable access to TPIDR2_EL0. */
reg = read_ctx_reg(state, CTX_SCR_EL3);
reg |= SCR_ENTP2_BIT;
write_ctx_reg(state, CTX_SCR_EL3, reg);
/* Set CPTR_EL3.ESM bit so we can write SMCR_EL3 without trapping. */
cptr_el3 = read_cptr_el3();
write_cptr_el3(cptr_el3 | ESM_BIT);
/*
* Set the max LEN value and FA64 bit. This register is set up globally
* to be the least restrictive, then lower ELs can restrict as needed
* using SMCR_EL2 and SMCR_EL1.
*/
reg = SMCR_ELX_LEN_MASK;
if (feat_sme_fa64_supported()) {
VERBOSE("[SME] FA64 enabled\n");
reg |= SMCR_ELX_FA64_BIT;
}
write_smcr_el3(reg);
/* Reset CPTR_EL3 value. */
write_cptr_el3(cptr_el3);
/* Enable SVE/FPU in addition to SME. */
sve_enable(context);
}
void sme_disable(cpu_context_t *context)
{
u_register_t reg;
el3_state_t *state;
/* Make sure SME is implemented in hardware before continuing. */
if (!feat_sme_supported()) {
return;
}
/* Get the context state. */
state = get_el3state_ctx(context);
/* Disable SME, SVE, and FPU since they all share registers. */
reg = read_ctx_reg(state, CTX_CPTR_EL3);
reg &= ~ESM_BIT; /* Trap SME */
reg &= ~CPTR_EZ_BIT; /* Trap SVE */
reg |= TFP_BIT; /* Trap FPU/SIMD */
write_ctx_reg(state, CTX_CPTR_EL3, reg);
/* Disable access to TPIDR2_EL0. */
reg = read_ctx_reg(state, CTX_SCR_EL3);
reg &= ~SCR_ENTP2_BIT;
write_ctx_reg(state, CTX_SCR_EL3, reg);
}
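
The comment in sme_enable() above notes that EL3 programs SMCR_EL3 to be as
permissive as possible and leaves it to lower ELs to restrict the vector
length via SMCR_EL2 and SMCR_EL1. Purely as an illustration of that split
(hypothetical, outside the scope of this patch: the SMCR_EL2 encoding and
accessor below are assumptions, since the patch only defines the EL3
accessor):

#include <arch.h>
#include <arch_helpers.h>

/* Assumed architectural encoding for SMCR_EL2; not defined by this patch. */
#define SMCR_EL2 S3_4_C1_C2_6
DEFINE_RENAME_SYSREG_RW_FUNCS(smcr_el2, SMCR_EL2)

/* Hypothetical EL2-side code: cap the streaming vector length visible to
 * lower ELs by programming a smaller LEN value, leaving FA64 clear. */
static void smcr_el2_restrict_len(u_register_t len)
{
	write_smcr_el2(len & SMCR_ELX_LEN_MASK);
}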


@@ -43,3 +43,23 @@ void sve_enable(cpu_context_t *context)
write_ctx_reg(get_el3state_ctx(context), CTX_ZCR_EL3,
(ZCR_EL3_LEN_MASK & CONVERT_SVE_LENGTH(512)));
}
void sve_disable(cpu_context_t *context)
{
u_register_t reg;
el3_state_t *state;
/* Make sure SVE is implemented in hardware before continuing. */
if (!sve_supported()) {
return;
}
/* Get the context state. */
state = get_el3state_ctx(context);
/* Disable SVE and FPU since they share registers. */
reg = read_ctx_reg(state, CTX_CPTR_EL3);
reg &= ~CPTR_EZ_BIT; /* Trap SVE */
reg |= TFP_BIT; /* Trap FPU/SIMD */
write_ctx_reg(state, CTX_CPTR_EL3, reg);
}


@@ -134,7 +134,7 @@ ENABLE_BTI := 0
ENABLE_PAUTH := 0
# Flag to enable access to the HCRX_EL2 register by setting SCR_EL3.HXEn.
ENABLE_FEAT_HCX := 0
# By default BL31 encryption disabled
ENCRYPT_BL31 := 0
@@ -222,13 +222,13 @@ RESET_TO_BL31 := 0
SAVE_KEYS := 0
# Software Delegated Exception support
SDEI_SUPPORT := 0
# True Random Number firmware Interface
TRNG_SUPPORT := 0
# SMCCC PCI support
SMC_PCI_SUPPORT := 0
# Whether code and read-only data should be put on separate memory pages. The
# platform Makefile is free to override this value.
@@ -303,7 +303,7 @@ ENABLE_SPE_FOR_LOWER_ELS := 1
# SPE is only supported on AArch64 so disable it on AArch32.
ifeq (${ARCH},aarch32)
override ENABLE_SPE_FOR_LOWER_ELS := 0
endif
# Include Memory Tagging Extension registers in cpu context. This must be set
@@ -316,15 +316,18 @@ ENABLE_AMU_AUXILIARY_COUNTERS := 0
ENABLE_AMU_FCONF := 0
AMU_RESTRICT_COUNTERS := 0
-# By default, enable Scalable Vector Extension if implemented only for Non-secure
-# lower ELs
-# Note SVE is only supported on AArch64 - therefore do not enable in AArch32
-ifneq (${ARCH},aarch32)
-ENABLE_SVE_FOR_NS := 1
-ENABLE_SVE_FOR_SWD := 0
-else
-override ENABLE_SVE_FOR_NS := 0
-override ENABLE_SVE_FOR_SWD := 0
-endif
+# Enable SVE for non-secure world by default
+ENABLE_SVE_FOR_NS := 1
+ENABLE_SVE_FOR_SWD := 0
+
+# SME defaults to disabled
+ENABLE_SME_FOR_NS := 0
+ENABLE_SME_FOR_SWD := 0
+
+# If SME is enabled then force SVE off
+ifeq (${ENABLE_SME_FOR_NS},1)
+override ENABLE_SVE_FOR_NS := 0
+override ENABLE_SVE_FOR_SWD := 0
+endif
SANITIZE_UB := off
@@ -348,7 +351,7 @@ CTX_INCLUDE_EL2_REGS := 0
SUPPORT_STACK_MEMTAG := no
# Select workaround for AT speculative behaviour.
ERRATA_SPECULATIVE_AT := 0
# Trap RAS error record access from lower EL
RAS_TRAP_LOWER_EL_ERR_ACCESS := 0
@@ -379,9 +382,9 @@ PSA_FWU_SUPPORT := 0
# Note FEAT_TRBE is only supported on AArch64 - therefore do not enable in
# AArch32.
ifneq (${ARCH},aarch32)
ENABLE_TRBE_FOR_NS := 0
else
override ENABLE_TRBE_FOR_NS := 0
endif
# By default, disable access of trace system registers from NS lower # By default, disable access of trace system registers from NS lower


@@ -1,11 +1,15 @@
#
-# Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
ifneq (${ARCH},aarch64)
$(error "Error: SPMD is only supported on aarch64.")
endif
ifeq (${ENABLE_SME_FOR_NS},1)
$(error "Error: SPMD is not compatible with ENABLE_SME_FOR_NS")
endif
SPMD_SOURCES += $(addprefix services/std_svc/spmd/, \