diff --git a/bl2/bl2.mk b/bl2/bl2.mk index fd8374795..7a973e512 100644 --- a/bl2/bl2.mk +++ b/bl2/bl2.mk @@ -17,7 +17,7 @@ endif ifeq (${ENABLE_RME},1) # Using RME, run BL2 at EL3 -include lib/gpt/gpt.mk +include lib/gpt_rme/gpt_rme.mk BL2_SOURCES += bl2/${ARCH}/bl2_rme_entrypoint.S \ bl2/${ARCH}/bl2_el3_exceptions.S \ diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S index 2e9a39496..ed058648f 100644 --- a/bl31/aarch64/bl31_entrypoint.S +++ b/bl31/aarch64/bl31_entrypoint.S @@ -172,14 +172,6 @@ func bl31_warm_entrypoint _exception_vectors=runtime_exceptions \ _pie_fixup_size=0 -#if ENABLE_RME - /* - * Initialise and enable Granule Protection - * before enabling any stage of translation. - */ - bl gpt_enable -#endif - /* * We're about to enable MMU and participate in PSCI state coordination. * @@ -203,6 +195,19 @@ func bl31_warm_entrypoint #endif bl bl31_plat_enable_mmu +#if ENABLE_RME + /* + * At warm boot GPT data structures have already been initialized in RAM + * but the sysregs for this CPU need to be initialized. Note that the GPT + * accesses are controlled attributes in GPCCR and do not depend on the + * SCR_EL3.C bit. 
+ */ + bl gpt_enable + cbz x0, 1f + no_ret plat_panic_handler +1: +#endif + #if ENABLE_PAUTH /* -------------------------------------------------------------------- * Program APIAKey_EL1 and enable pointer authentication diff --git a/bl31/bl31.mk b/bl31/bl31.mk index 5927fb1c9..106d4109d 100644 --- a/bl31/bl31.mk +++ b/bl31/bl31.mk @@ -112,7 +112,7 @@ BL31_SOURCES += services/std_svc/pci_svc.c endif ifeq (${ENABLE_RME},1) -include lib/gpt/gpt.mk +include lib/gpt_rme/gpt_rme.mk BL31_SOURCES += ${GPT_LIB_SRCS} \ ${RMMD_SOURCES} diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h index 5949370e0..0ad97543b 100644 --- a/include/arch/aarch64/arch.h +++ b/include/arch/aarch64/arch.h @@ -1105,87 +1105,9 @@ /******************************************************************************* * Realm management extension register definitions ******************************************************************************/ - -/* GPCCR_EL3 definitions */ #define GPCCR_EL3 S3_6_C2_C1_6 - -/* Least significant address bits protected by each entry in level 0 GPT */ -#define GPCCR_L0GPTSZ_SHIFT U(20) -#define GPCCR_L0GPTSZ_MASK U(0xF) -#define GPCCR_L0GPTSZ_30BITS U(0x0) -#define GPCCR_L0GPTSZ_34BITS U(0x4) -#define GPCCR_L0GPTSZ_36BITS U(0x6) -#define GPCCR_L0GPTSZ_39BITS U(0x9) -#define SET_GPCCR_L0GPTSZ(x) \ - ((x & GPCCR_L0GPTSZ_MASK) << GPCCR_L0GPTSZ_SHIFT) - -/* Granule protection check priority bit definitions */ -#define GPCCR_GPCP_SHIFT U(17) -#define GPCCR_GPCP_BIT (ULL(1) << GPCCR_EL3_GPCP_SHIFT) - -/* Granule protection check bit definitions */ -#define GPCCR_GPC_SHIFT U(16) -#define GPCCR_GPC_BIT (ULL(1) << GPCCR_GPC_SHIFT) - -/* Physical granule size bit definitions */ -#define GPCCR_PGS_SHIFT U(14) -#define GPCCR_PGS_MASK U(0x3) -#define GPCCR_PGS_4K U(0x0) -#define GPCCR_PGS_16K U(0x2) -#define GPCCR_PGS_64K U(0x1) -#define SET_GPCCR_PGS(x) \ - ((x & GPCCR_PGS_MASK) << GPCCR_PGS_SHIFT) - -/* GPT fetch shareability attribute bit definitions */ -#define 
GPCCR_SH_SHIFT U(12) -#define GPCCR_SH_MASK U(0x3) -#define GPCCR_SH_NS U(0x0) -#define GPCCR_SH_OS U(0x2) -#define GPCCR_SH_IS U(0x3) -#define SET_GPCCR_SH(x) \ - ((x & GPCCR_SH_MASK) << GPCCR_SH_SHIFT) - -/* GPT fetch outer cacheability attribute bit definitions */ -#define GPCCR_ORGN_SHIFT U(10) -#define GPCCR_ORGN_MASK U(0x3) -#define GPCCR_ORGN_NC U(0x0) -#define GPCCR_ORGN_WB_RA_WA U(0x1) -#define GPCCR_ORGN_WT_RA_NWA U(0x2) -#define GPCCR_ORGN_WB_RA_NWA U(0x3) -#define SET_GPCCR_ORGN(x) \ - ((x & GPCCR_ORGN_MASK) << GPCCR_ORGN_SHIFT) - -/* GPT fetch inner cacheability attribute bit definitions */ -#define GPCCR_IRGN_SHIFT U(8) -#define GPCCR_IRGN_MASK U(0x3) -#define GPCCR_IRGN_NC U(0x0) -#define GPCCR_IRGN_WB_RA_WA U(0x1) -#define GPCCR_IRGN_WT_RA_NWA U(0x2) -#define GPCCR_IRGN_WB_RA_NWA U(0x3) -#define SET_GPCCR_IRGN(x) \ - ((x & GPCCR_IRGN_MASK) << GPCCR_IRGN_SHIFT) - -/* Protected physical address size bit definitions */ -#define GPCCR_PPS_SHIFT U(0) -#define GPCCR_PPS_MASK U(0x7) -#define GPCCR_PPS_4GB U(0x0) -#define GPCCR_PPS_64GB U(0x1) -#define GPCCR_PPS_1TB U(0x2) -#define GPCCR_PPS_4TB U(0x3) -#define GPCCR_PPS_16TB U(0x4) -#define GPCCR_PPS_256TB U(0x5) -#define GPCCR_PPS_4PB U(0x6) -#define SET_GPCCR_PPS(x) \ - ((x & GPCCR_PPS_MASK) << GPCCR_PPS_SHIFT) - -/* GPTBR_EL3 definitions */ #define GPTBR_EL3 S3_6_C2_C1_4 -/* Base Address for the GPT bit definitions */ -#define GPTBR_BADDR_SHIFT U(0) -#define GPTBR_BADDR_VAL_SHIFT U(12) -#define GPTBR_BADDR_MASK ULL(0xffffffffff) - /******************************************************************************* * RAS system registers ******************************************************************************/ diff --git a/include/lib/gpt/gpt.h b/include/lib/gpt/gpt.h deleted file mode 100644 index 89d30177d..000000000 --- a/include/lib/gpt/gpt.h +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) 2021, Arm Limited. All rights reserved. 
- * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#ifndef GPT_H -#define GPT_H - -#include - -#include - -#include "gpt_defs.h" - -#define GPT_DESC_ATTRS(_type, _gpi) \ - ((((_type) & PAS_REG_DESC_TYPE_MASK) \ - << PAS_REG_DESC_TYPE_SHIFT) | \ - (((_gpi) & PAS_REG_GPI_MASK) \ - << PAS_REG_GPI_SHIFT)) - -/* - * Macro to create a GPT entry for this PAS range either as a L0 block - * descriptor or L1 table descriptor depending upon the size of the range. - */ -#define MAP_GPT_REGION(_pa, _sz, _gpi) \ - { \ - .base_pa = (_pa), \ - .size = (_sz), \ - .attrs = GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_ANY, (_gpi)), \ - } - -/* - * Special macro to create a L1 table descriptor at L0 for a 1GB region as - * opposed to creating a block mapping by default. - */ -#define MAP_GPT_REGION_TBL(_pa, _sz, _gpi) \ - { \ - .base_pa = (_pa), \ - .size = (_sz), \ - .attrs = GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_TBL, (_gpi)), \ - } - -/* - * Structure for specifying a Granule range and its properties - */ -typedef struct pas_region { - unsigned long long base_pa; /**< Base address for PAS. */ - size_t size; /**< Size of the PAS. */ - unsigned int attrs; /**< PAS GPI and entry type. */ -} pas_region_t; - -/* - * Structure to initialise the Granule Protection Tables. - */ -typedef struct gpt_init_params { - unsigned int pgs; /**< Address Width of Phisical Granule Size. */ - unsigned int pps; /**< Protected Physical Address Size. */ - unsigned int l0gptsz; /**< Granule size on L0 table entry. */ - pas_region_t *pas_regions; /**< PAS regions to protect. */ - unsigned int pas_count; /**< Number of PAS regions to initialise. */ - uintptr_t l0_mem_base; /**< L0 Table base address. */ - size_t l0_mem_size; /**< Size of memory reserved for L0 tables. */ - uintptr_t l1_mem_base; /**< L1 Table base address. */ - size_t l1_mem_size; /**< Size of memory reserved for L1 tables. */ -} gpt_init_params_t; - -/** @brief Initialise the Granule Protection tables. 
- */ -int gpt_init(gpt_init_params_t *params); - -/** @brief Enable the Granule Protection Checks. - */ -void gpt_enable(void); - -/** @brief Disable the Granule Protection Checks. - */ -void gpt_disable(void); - -/** @brief Transition a granule between security states. - */ -int gpt_transition_pas(uint64_t pa, - unsigned int src_sec_state, - unsigned int target_pas); - -#endif /* GPT_H */ diff --git a/include/lib/gpt/gpt_defs.h b/include/lib/gpt/gpt_defs.h deleted file mode 100644 index 6122a126f..000000000 --- a/include/lib/gpt/gpt_defs.h +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright (c) 2021, Arm Limited. All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#ifndef GPT_DEFS_H -#define GPT_DEFS_H - -#include -#include - -#include "gpt.h" - -/* GPI values */ -#define GPI_NO_ACCESS U(0x0) -#define GPI_SECURE U(0x8) -#define GPI_NS U(0x9) -#define GPI_ROOT U(0xa) -#define GPI_REALM U(0xb) -#define GPI_ANY U(0xf) -#define GPI_VAL_MASK ULL(0xf) - -/* GPT descriptor bit definitions */ -#define GPT_L1_INDEX_MASK ULL(0xf) -#define GPT_L1_INDEX_SHIFT ULL(0x0) - -#define GPT_TBL_DESC ULL(0x3) -#define GPT_BLK_DESC ULL(0x1) - -#define GPT_TBL_DESC_ADDR_SHIFT ULL(12) -#define GPT_TBL_DESC_ADDR_MASK (((ULL(1) << \ - (51 - GPT_TBL_DESC_ADDR_SHIFT)) - 1) \ - << GPT_TBL_DESC_ADDR_SHIFT) - -#define GPT_BLOCK_DESC_GPI_VAL_SHIFT ULL(4) - -/* Each descriptor is 8 bytes long. 
*/ -#define GPT_DESC_SIZE ULL(8) - -#define PPS_MAX_VAL PSTCR_EL3_PPS_4PB -#define PPS_NUM_1GB_ENTRIES ULL(1024) -#define PGS_4K_1GB_L1_TABLE_SZ (U(2) << 17) - -/* 2 << LOG2_8K = Bytes in 8K */ -#define LOG2_8K U(13) - -#define GPT_L1_SIZE ULL(0x40000) /* 256K */ -#define SZ_1G (ULL(0x1) << 30) /* 1GB */ - -#define GPT_MIN_PGS_SHIFT U(12) /* 4K */ - -#define L1_GPT_INDEX_MASK U(0x3fffffff) -#define GPT_GRAN_DESC_NUM_GPIS U(4) - -#define PAS_REG_GPI_SHIFT U(0) -#define PAS_REG_GPI_MASK U(0xf) - -/* .attrs field definitions */ -#define PAS_REG_DESC_TYPE_ANY U(0) -#define PAS_REG_DESC_TYPE_BLK U(1) -#define PAS_REG_DESC_TYPE_TBL U(2) -#define PAS_REG_DESC_TYPE_SHIFT U(4) -#define PAS_REG_DESC_TYPE_MASK U(0x3) -#define PAS_REG_DESC_TYPE(_attrs) (((_attrs) \ - >> PAS_REG_DESC_TYPE_SHIFT) \ - & PAS_REG_DESC_TYPE_MASK) - -#define PAS_REG_GPI(_attrs) (((_attrs) \ - >> PAS_REG_GPI_SHIFT) \ - & PAS_REG_GPI_MASK) - -#define SZ_1G_MASK (SZ_1G - U(1)) -#define IS_1GB_ALIGNED(addr) (((addr) & SZ_1G_MASK) == U(0)) - -#endif /* GPT_DEFS */ diff --git a/include/lib/gpt_rme/gpt_rme.h b/include/lib/gpt_rme/gpt_rme.h new file mode 100644 index 000000000..379b91562 --- /dev/null +++ b/include/lib/gpt_rme/gpt_rme.h @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef GPT_RME_H +#define GPT_RME_H + +#include + +#include + +/******************************************************************************/ +/* GPT helper macros and definitions */ +/******************************************************************************/ + +/* + * Structure for specifying a mapping range and it's properties. This should not + * be manually initialized, using the MAP_GPT_REGION_x macros is recommended as + * to avoid potential incompatibilities in the future. + */ +typedef struct pas_region { + uintptr_t base_pa; /* Base address for PAS. */ + size_t size; /* Size of the PAS. 
*/ + unsigned int attrs; /* PAS GPI and entry type. */ +} pas_region_t; + +/* GPT GPI definitions */ +#define GPT_GPI_NO_ACCESS U(0x0) +#define GPT_GPI_SECURE U(0x8) +#define GPT_GPI_NS U(0x9) +#define GPT_GPI_ROOT U(0xA) +#define GPT_GPI_REALM U(0xB) +#define GPT_GPI_ANY U(0xF) +#define GPT_GPI_VAL_MASK UL(0xF) + +/* PAS attribute GPI definitions. */ +#define GPT_PAS_ATTR_GPI_SHIFT U(0) +#define GPT_PAS_ATTR_GPI_MASK U(0xF) +#define GPT_PAS_ATTR_GPI(_attrs) (((_attrs) \ + >> GPT_PAS_ATTR_GPI_SHIFT) \ + & GPT_PAS_ATTR_GPI_MASK) + +/* PAS attribute mapping type definitions */ +#define GPT_PAS_ATTR_MAP_TYPE_BLOCK U(0x0) +#define GPT_PAS_ATTR_MAP_TYPE_GRANULE U(0x1) +#define GPT_PAS_ATTR_MAP_TYPE_SHIFT U(4) +#define GPT_PAS_ATTR_MAP_TYPE_MASK U(0x1) +#define GPT_PAS_ATTR_MAP_TYPE(_attrs) (((_attrs) \ + >> GPT_PAS_ATTR_MAP_TYPE_SHIFT) \ + & GPT_PAS_ATTR_MAP_TYPE_MASK) + +/* + * Macro to initialize the attributes field in the pas_region_t structure. + * [31:5] Reserved + * [4] Mapping type (GPT_PAS_ATTR_MAP_TYPE_x definitions) + * [3:0] PAS GPI type (GPT_GPI_x definitions) + */ +#define GPT_PAS_ATTR(_type, _gpi) \ + ((((_type) & GPT_PAS_ATTR_MAP_TYPE_MASK) \ + << GPT_PAS_ATTR_MAP_TYPE_SHIFT) | \ + (((_gpi) & GPT_PAS_ATTR_GPI_MASK) \ + << GPT_PAS_ATTR_GPI_SHIFT)) + +/* + * Macro to create a GPT entry for this PAS range as a block descriptor. If this + * region does not fit the requirements for a block descriptor then GPT + * initialization will fail. + */ +#define GPT_MAP_REGION_BLOCK(_pa, _sz, _gpi) \ + { \ + .base_pa = (_pa), \ + .size = (_sz), \ + .attrs = GPT_PAS_ATTR(GPT_PAS_ATTR_MAP_TYPE_BLOCK, (_gpi)), \ + } + +/* + * Macro to create a GPT entry for this PAS range as a table descriptor. If this + * region does not fit the requirements for a table descriptor then GPT + * initialization will fail. 
+ */ +#define GPT_MAP_REGION_GRANULE(_pa, _sz, _gpi) \ + { \ + .base_pa = (_pa), \ + .size = (_sz), \ + .attrs = GPT_PAS_ATTR(GPT_PAS_ATTR_MAP_TYPE_GRANULE, (_gpi)), \ + } + +/******************************************************************************/ +/* GPT register field definitions */ +/******************************************************************************/ + +/* + * Least significant address bits protected by each entry in level 0 GPT. This + * field is read-only. + */ +#define GPCCR_L0GPTSZ_SHIFT U(20) +#define GPCCR_L0GPTSZ_MASK U(0xF) + +typedef enum { + GPCCR_L0GPTSZ_30BITS = U(0x0), + GPCCR_L0GPTSZ_34BITS = U(0x4), + GPCCR_L0GPTSZ_36BITS = U(0x6), + GPCCR_L0GPTSZ_39BITS = U(0x9) +} gpccr_l0gptsz_e; + +/* Granule protection check priority bit definitions */ +#define GPCCR_GPCP_SHIFT U(17) +#define GPCCR_GPCP_BIT (ULL(1) << GPCCR_EL3_GPCP_SHIFT) + +/* Granule protection check bit definitions */ +#define GPCCR_GPC_SHIFT U(16) +#define GPCCR_GPC_BIT (ULL(1) << GPCCR_GPC_SHIFT) + +/* Physical granule size bit definitions */ +#define GPCCR_PGS_SHIFT U(14) +#define GPCCR_PGS_MASK U(0x3) +#define SET_GPCCR_PGS(x) (((x) & GPCCR_PGS_MASK) << GPCCR_PGS_SHIFT) + +typedef enum { + GPCCR_PGS_4K = U(0x0), + GPCCR_PGS_64K = U(0x1), + GPCCR_PGS_16K = U(0x2) +} gpccr_pgs_e; + +/* GPT fetch shareability attribute bit definitions */ +#define GPCCR_SH_SHIFT U(12) +#define GPCCR_SH_MASK U(0x3) +#define SET_GPCCR_SH(x) (((x) & GPCCR_SH_MASK) << GPCCR_SH_SHIFT) + +typedef enum { + GPCCR_SH_NS = U(0x0), + GPCCR_SH_OS = U(0x2), + GPCCR_SH_IS = U(0x3) +} gpccr_sh_e; + +/* GPT fetch outer cacheability attribute bit definitions */ +#define GPCCR_ORGN_SHIFT U(10) +#define GPCCR_ORGN_MASK U(0x3) +#define SET_GPCCR_ORGN(x) (((x) & GPCCR_ORGN_MASK) << GPCCR_ORGN_SHIFT) + +typedef enum { + GPCCR_ORGN_NC = U(0x0), + GPCCR_ORGN_WB_RA_WA = U(0x1), + GPCCR_ORGN_WT_RA_NWA = U(0x2), + GPCCR_ORGN_WB_RA_NWA = U(0x3) +} gpccr_orgn_e; + +/* GPT fetch inner cacheability attribute bit 
definitions */ +#define GPCCR_IRGN_SHIFT U(8) +#define GPCCR_IRGN_MASK U(0x3) +#define SET_GPCCR_IRGN(x) (((x) & GPCCR_IRGN_MASK) << GPCCR_IRGN_SHIFT) + +typedef enum { + GPCCR_IRGN_NC = U(0x0), + GPCCR_IRGN_WB_RA_WA = U(0x1), + GPCCR_IRGN_WT_RA_NWA = U(0x2), + GPCCR_IRGN_WB_RA_NWA = U(0x3) +} gpccr_irgn_e; + +/* Protected physical address size bit definitions */ +#define GPCCR_PPS_SHIFT U(0) +#define GPCCR_PPS_MASK U(0x7) +#define SET_GPCCR_PPS(x) (((x) & GPCCR_PPS_MASK) << GPCCR_PPS_SHIFT) + +typedef enum { + GPCCR_PPS_4GB = U(0x0), + GPCCR_PPS_64GB = U(0x1), + GPCCR_PPS_1TB = U(0x2), + GPCCR_PPS_4TB = U(0x3), + GPCCR_PPS_16TB = U(0x4), + GPCCR_PPS_256TB = U(0x5), + GPCCR_PPS_4PB = U(0x6) +} gpccr_pps_e; + +/* Base Address for the GPT bit definitions */ +#define GPTBR_BADDR_SHIFT U(0) +#define GPTBR_BADDR_VAL_SHIFT U(12) +#define GPTBR_BADDR_MASK ULL(0xffffffffff) + +/******************************************************************************/ +/* GPT public APIs */ +/******************************************************************************/ + +/* + * Public API that initializes the entire protected space to GPT_GPI_ANY using + * the L0 tables (block descriptors). Ideally, this function is invoked prior + * to DDR discovery and initialization. The MMU must be initialized before + * calling this function. + * + * Parameters + * pps PPS value to use for table generation + * l0_mem_base Base address of L0 tables in memory. + * l0_mem_size Total size of memory available for L0 tables. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_init_l0_tables(gpccr_pps_e pps, + uintptr_t l0_mem_base, + size_t l0_mem_size); + +/* + * Public API that carves out PAS regions from the L0 tables and builds any L1 + * tables that are needed. This function ideally is run after DDR discovery and + * initialization. The L0 tables must have already been initialized to GPI_ANY + * when this function is called. 
+ * + * Parameters + * pgs PGS value to use for table generation. + * l1_mem_base Base address of memory used for L1 tables. + * l1_mem_size Total size of memory available for L1 tables. + * *pas_regions Pointer to PAS regions structure array. + * pas_count Total number of PAS regions. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, + uintptr_t l1_mem_base, + size_t l1_mem_size, + pas_region_t *pas_regions, + unsigned int pas_count); + +/* + * Public API to initialize the runtime gpt_config structure based on the values + * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization + * typically happens in a bootloader stage prior to setting up the EL3 runtime + * environment for the granule transition service so this function detects the + * initialization from a previous stage. Granule protection checks must be + * enabled already or this function will return an error. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_runtime_init(void); + +/* + * Public API to enable granule protection checks once the tables have all been + * initialized. This function is called at first initialization and then again + * later during warm boots of CPU cores. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_enable(void); + +/* + * Public API to disable granule protection checks. + */ +void gpt_disable(void); + +/* + * This function is the core of the granule transition service. When a granule + * transition request occurs it is routed to this function where the request is + * validated then fulfilled if possible. + * + * TODO: implement support for transitioning multiple granules at once. + * + * Parameters + * base: Base address of the region to transition, must be aligned to granule + * size. + * size: Size of region to transition, must be aligned to granule size. 
+ * src_sec_state: Security state of the caller. + * target_pas: Target PAS of the specified memory region. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_transition_pas(uint64_t base, + size_t size, + unsigned int src_sec_state, + unsigned int target_pas); + +#endif /* GPT_RME_H */ diff --git a/include/plat/arm/common/arm_def.h b/include/plat/arm/common/arm_def.h index a8b5d26df..1993cb401 100644 --- a/include/plat/arm/common/arm_def.h +++ b/include/plat/arm/common/arm_def.h @@ -81,19 +81,19 @@ * - REALM DRAM: Reserved for Realm world if RME is enabled * - AP TZC DRAM: The remaining TZC secured DRAM reserved for AP use * - * RME enabled(64MB) RME not enabled(16MB) - * -------------------- ------------------- - * | | | | - * | AP TZC (~28MB) | | AP TZC (~14MB) | - * -------------------- ------------------- - * | | | | - * | REALM (32MB) | | EL3 TZC (2MB) | - * -------------------- ------------------- - * | | | | - * | EL3 TZC (3MB) | | SCP TZC | - * -------------------- 0xFFFF_FFFF------------------- - * | L1 GPT + SCP TZC | - * | (~1MB) | + * RME enabled(64MB) RME not enabled(16MB) + * -------------------- ------------------- + * | | | | + * | AP TZC (~28MB) | | AP TZC (~14MB) | + * -------------------- ------------------- + * | | | | + * | REALM (32MB) | | EL3 TZC (2MB) | + * -------------------- ------------------- + * | | | | + * | EL3 TZC (3MB) | | SCP TZC | + * -------------------- 0xFFFF_FFFF------------------- + * | L1 GPT + SCP TZC | + * | (~1MB) | * 0xFFFF_FFFF -------------------- */ #if ENABLE_RME @@ -252,56 +252,56 @@ INTR_PROP_DESC(ARM_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, (grp), \ GIC_INTR_CFG_EDGE) -#define ARM_MAP_SHARED_RAM MAP_REGION_FLAT( \ - ARM_SHARED_RAM_BASE, \ - ARM_SHARED_RAM_SIZE, \ - MT_DEVICE | MT_RW | EL3_PAS) +#define ARM_MAP_SHARED_RAM MAP_REGION_FLAT( \ + ARM_SHARED_RAM_BASE, \ + ARM_SHARED_RAM_SIZE, \ + MT_DEVICE | MT_RW | EL3_PAS) -#define ARM_MAP_NS_DRAM1 MAP_REGION_FLAT( \ 
- ARM_NS_DRAM1_BASE, \ - ARM_NS_DRAM1_SIZE, \ - MT_MEMORY | MT_RW | MT_NS) +#define ARM_MAP_NS_DRAM1 MAP_REGION_FLAT( \ + ARM_NS_DRAM1_BASE, \ + ARM_NS_DRAM1_SIZE, \ + MT_MEMORY | MT_RW | MT_NS) -#define ARM_MAP_DRAM2 MAP_REGION_FLAT( \ - ARM_DRAM2_BASE, \ - ARM_DRAM2_SIZE, \ - MT_MEMORY | MT_RW | MT_NS) +#define ARM_MAP_DRAM2 MAP_REGION_FLAT( \ + ARM_DRAM2_BASE, \ + ARM_DRAM2_SIZE, \ + MT_MEMORY | MT_RW | MT_NS) -#define ARM_MAP_TSP_SEC_MEM MAP_REGION_FLAT( \ - TSP_SEC_MEM_BASE, \ - TSP_SEC_MEM_SIZE, \ - MT_MEMORY | MT_RW | MT_SECURE) +#define ARM_MAP_TSP_SEC_MEM MAP_REGION_FLAT( \ + TSP_SEC_MEM_BASE, \ + TSP_SEC_MEM_SIZE, \ + MT_MEMORY | MT_RW | MT_SECURE) #if ARM_BL31_IN_DRAM -#define ARM_MAP_BL31_SEC_DRAM MAP_REGION_FLAT( \ - BL31_BASE, \ - PLAT_ARM_MAX_BL31_SIZE, \ - MT_MEMORY | MT_RW | MT_SECURE) +#define ARM_MAP_BL31_SEC_DRAM MAP_REGION_FLAT( \ + BL31_BASE, \ + PLAT_ARM_MAX_BL31_SIZE, \ + MT_MEMORY | MT_RW | MT_SECURE) #endif -#define ARM_MAP_EL3_TZC_DRAM MAP_REGION_FLAT( \ - ARM_EL3_TZC_DRAM1_BASE, \ - ARM_EL3_TZC_DRAM1_SIZE, \ - MT_MEMORY | MT_RW | EL3_PAS) +#define ARM_MAP_EL3_TZC_DRAM MAP_REGION_FLAT( \ + ARM_EL3_TZC_DRAM1_BASE, \ + ARM_EL3_TZC_DRAM1_SIZE, \ + MT_MEMORY | MT_RW | EL3_PAS) #if defined(SPD_spmd) -#define ARM_MAP_TRUSTED_DRAM MAP_REGION_FLAT( \ - PLAT_ARM_TRUSTED_DRAM_BASE, \ - PLAT_ARM_TRUSTED_DRAM_SIZE, \ - MT_MEMORY | MT_RW | MT_SECURE) +#define ARM_MAP_TRUSTED_DRAM MAP_REGION_FLAT( \ + PLAT_ARM_TRUSTED_DRAM_BASE, \ + PLAT_ARM_TRUSTED_DRAM_SIZE, \ + MT_MEMORY | MT_RW | MT_SECURE) #endif #if ENABLE_RME -#define ARM_MAP_RMM_DRAM MAP_REGION_FLAT( \ - PLAT_ARM_RMM_BASE, \ - PLAT_ARM_RMM_SIZE, \ - MT_MEMORY | MT_RW | MT_REALM) +#define ARM_MAP_RMM_DRAM MAP_REGION_FLAT( \ + PLAT_ARM_RMM_BASE, \ + PLAT_ARM_RMM_SIZE, \ + MT_MEMORY | MT_RW | MT_REALM) -#define ARM_MAP_GPT_L1_DRAM MAP_REGION_FLAT( \ - ARM_L1_GPT_ADDR_BASE, \ - ARM_L1_GPT_SIZE, \ - MT_MEMORY | MT_RW | EL3_PAS) +#define ARM_MAP_GPT_L1_DRAM MAP_REGION_FLAT( \ + ARM_L1_GPT_ADDR_BASE, 
\ + ARM_L1_GPT_SIZE, \ + MT_MEMORY | MT_RW | EL3_PAS) #endif /* ENABLE_RME */ diff --git a/include/plat/arm/common/arm_pas_def.h b/include/plat/arm/common/arm_pas_def.h index d268ce613..4fee41b3f 100644 --- a/include/plat/arm/common/arm_pas_def.h +++ b/include/plat/arm/common/arm_pas_def.h @@ -6,6 +6,7 @@ #ifndef ARM_PAS_DEF_H #define ARM_PAS_DEF_H +#include #include /***************************************************************************** @@ -42,12 +43,12 @@ * * - 4KB of L0 GPT reside in TSRAM, on top of the CONFIG section. * - ~1MB of L1 GPTs reside at the top of DRAM1 (TZC area). - * - The first 1GB region has GPI_ANY and, therefore, is not protected by + * - The first 1GB region has GPT_GPI_ANY and, therefore, is not protected by * the GPT. * - The DRAM TZC area is split into three regions: the L1 GPT region and - * 3MB of region below that are defined as GPI_ROOT, 32MB Realm region - * below that is defined as GPI_REALM and the rest of it is defined as - * GPI_SECURE. + * 3MB of region below that are defined as GPT_GPI_ROOT, 32MB Realm region + * below that is defined as GPT_GPI_REALM and the rest of it is defined as + * GPT_GPI_SECURE. 
*/ /* TODO: This might not be the best way to map the PAS */ @@ -64,32 +65,30 @@ #define ARM_PAS_3_BASE (ARM_AP_TZC_DRAM1_BASE) #define ARM_PAS_3_SIZE (ARM_AP_TZC_DRAM1_SIZE) -#define ARM_PAS_GPI_ANY MAP_GPT_REGION(ARM_PAS_1_BASE, \ - ARM_PAS_1_SIZE, \ - GPI_ANY) -#define ARM_PAS_KERNEL MAP_GPT_REGION_TBL(ARM_PAS_2_BASE, \ - ARM_PAS_2_SIZE, \ - GPI_NS) +#define ARM_PAS_GPI_ANY MAP_GPT_REGION(ARM_PAS_1_BASE, \ + ARM_PAS_1_SIZE, \ + GPT_GPI_ANY) +#define ARM_PAS_KERNEL GPT_MAP_REGION_GRANULE(ARM_PAS_2_BASE, \ + ARM_PAS_2_SIZE, \ + GPT_GPI_NS) -#define ARM_PAS_TZC MAP_GPT_REGION_TBL(ARM_PAS_3_BASE, \ - ARM_PAS_3_SIZE, \ - GPI_SECURE) +#define ARM_PAS_SECURE GPT_MAP_REGION_GRANULE(ARM_PAS_3_BASE, \ + ARM_PAS_3_SIZE, \ + GPT_GPI_SECURE) -#define ARM_PAS_REALM MAP_GPT_REGION_TBL(ARM_REALM_BASE, \ - ARM_REALM_SIZE, \ - GPI_REALM) +#define ARM_PAS_REALM GPT_MAP_REGION_GRANULE(ARM_REALM_BASE, \ + ARM_REALM_SIZE, \ + GPT_GPI_REALM) -#define ARM_PAS_EL3_DRAM MAP_GPT_REGION_TBL(ARM_EL3_TZC_DRAM1_BASE, \ - ARM_EL3_TZC_DRAM1_SIZE, \ - GPI_ROOT) +#define ARM_PAS_EL3_DRAM GPT_MAP_REGION_GRANULE(ARM_EL3_TZC_DRAM1_BASE, \ + ARM_EL3_TZC_DRAM1_SIZE, \ + GPT_GPI_ROOT) -#define ARM_PAS_GPTS MAP_GPT_REGION_TBL(ARM_L1_GPT_ADDR_BASE, \ - ARM_L1_GPT_SIZE, \ - GPI_ROOT) +#define ARM_PAS_GPTS GPT_MAP_REGION_GRANULE(ARM_L1_GPT_ADDR_BASE, \ + ARM_L1_GPT_SIZE, \ + GPT_GPI_ROOT) /* GPT Configuration options */ -#define PLATFORM_PGS GPCCR_PGS_4K -#define PLATFORM_PPS GPCCR_PPS_4GB #define PLATFORM_L0GPTSZ GPCCR_L0GPTSZ_30BITS #endif /* ARM_PAS_DEF_H */ diff --git a/lib/gpt/gpt_core.c b/lib/gpt/gpt_core.c deleted file mode 100644 index 8a3afd2fa..000000000 --- a/lib/gpt/gpt_core.c +++ /dev/null @@ -1,767 +0,0 @@ -/* - * Copyright (c) 2021, Arm Limited. All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#if !ENABLE_RME -#error "ENABLE_RME must be enabled to use the GPT library." 
-#endif - -typedef struct { - uintptr_t plat_gpt_l0_base; - uintptr_t plat_gpt_l1_base; - size_t plat_gpt_l0_size; - size_t plat_gpt_l1_size; - unsigned int plat_gpt_pps; - unsigned int plat_gpt_pgs; - unsigned int plat_gpt_l0gptsz; -} gpt_config_t; - -gpt_config_t gpt_config; - -#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) -/* Helper function that cleans the data cache only if it is enabled. */ -static inline - void gpt_clean_dcache_range(uintptr_t addr, size_t size) -{ - if ((read_sctlr_el3() & SCTLR_C_BIT) != 0U) { - clean_dcache_range(addr, size); - } -} - -/* Helper function that invalidates the data cache only if it is enabled. */ -static inline - void gpt_inv_dcache_range(uintptr_t addr, size_t size) -{ - if ((read_sctlr_el3() & SCTLR_C_BIT) != 0U) { - inv_dcache_range(addr, size); - } -} -#endif - -typedef struct l1_gpt_attr_desc { - size_t t_sz; /** Table size */ - size_t g_sz; /** Granularity size */ - unsigned int p_val; /** Associated P value */ -} l1_gpt_attr_desc_t; - -/* - * Lookup table to find out the size in bytes of the L1 tables as well - * as the index mask, given the Width of Physical Granule Size (PGS). - * L1 tables are indexed by PA[29:p+4], being 'p' the width in bits of the - * aforementioned Physical Granule Size. - */ -static const l1_gpt_attr_desc_t l1_gpt_attr_lookup[] = { - [GPCCR_PGS_4K] = {U(1) << U(17), /* 16384B x 64bit entry = 128KB */ - PAGE_SIZE_4KB, /* 4KB Granularity */ - U(12)}, - [GPCCR_PGS_64K] = {U(1) << U(13), /* Table size = 8KB */ - PAGE_SIZE_64KB, /* 64KB Granularity */ - U(16)}, - [GPCCR_PGS_16K] = {U(1) << U(15), /* Table size = 32KB */ - PAGE_SIZE_16KB, /* 16KB Granularity */ - U(14)} -}; - -typedef struct l0_gpt_attr_desc { - size_t sz; - unsigned int t_val_mask; -} l0_gpt_attr_desc_t; - -/* - * Lookup table to find out the size in bytes of the L0 table as well - * as the index mask, given the Protected Physical Address Size (PPS). 
- * L0 table is indexed by PA[t-1:30], being 't' the size in bits - * of the aforementioned Protected Physical Address Size. - */ -static const l0_gpt_attr_desc_t l0_gpt_attr_lookup[] = { - - [GPCCR_PPS_4GB] = {U(1) << U(5), /* 4 x 64 bit entry = 32 bytes */ - 0x3}, /* Bits[31:30] */ - - [GPCCR_PPS_64GB] = {U(1) << U(9), /* 512 bytes */ - 0x3f}, /* Bits[35:30] */ - - [GPCCR_PPS_1TB] = {U(1) << U(13), /* 8KB */ - 0x3ff}, /* Bits[39:30] */ - - [GPCCR_PPS_4TB] = {U(1) << U(15), /* 32KB */ - 0xfff}, /* Bits[41:30] */ - - [GPCCR_PPS_16TB] = {U(1) << U(17), /* 128KB */ - 0x3fff}, /* Bits[43:30] */ - - [GPCCR_PPS_256TB] = {U(1) << U(21), /* 2MB */ - 0x3ffff}, /* Bits[47:30] */ - - [GPCCR_PPS_4PB] = {U(1) << U(25), /* 32MB */ - 0x3fffff}, /* Bits[51:30] */ - -}; - -static unsigned int get_l1_gpt_index(unsigned int pgs, uintptr_t pa) -{ - unsigned int l1_gpt_arr_idx; - - /* - * Mask top 2 bits to obtain the 30 bits required to - * generate the L1 GPT index - */ - l1_gpt_arr_idx = (unsigned int)(pa & L1_GPT_INDEX_MASK); - - /* Shift by 'p' value + 4 to obtain the index */ - l1_gpt_arr_idx >>= (l1_gpt_attr_lookup[pgs].p_val + 4); - - return l1_gpt_arr_idx; -} - -unsigned int plat_is_my_cpu_primary(void); - -/* The granule partition tables can only be configured on BL2 */ -#ifdef IMAGE_BL2 - -/* Global to keep track of next available index in array of L1 GPTs */ -static unsigned int l1_gpt_mem_avlbl_index; - -static int validate_l0_gpt_params(gpt_init_params_t *params) -{ - /* Only 1GB of address space per L0 entry is allowed */ - if (params->l0gptsz != GPCCR_L0GPTSZ_30BITS) { - WARN("Invalid L0GPTSZ %u.\n", params->l0gptsz); - } - - /* Only 4K granule is supported for now */ - if (params->pgs != GPCCR_PGS_4K) { - WARN("Invalid GPT PGS %u.\n", params->pgs); - return -EINVAL; - } - - /* Only 4GB of protected physical address space is supported for now */ - if (params->pps != GPCCR_PPS_4GB) { - WARN("Invalid GPT PPS %u.\n", params->pps); - return -EINVAL; - } - - /* Check if GPT 
base address is aligned with the system granule */ - if (!IS_PAGE_ALIGNED(params->l0_mem_base)) { - ERROR("Unaligned L0 GPT base address.\n"); - return -EFAULT; - } - - /* Check if there is enough memory for L0 GPTs */ - if (params->l0_mem_size < l0_gpt_attr_lookup[params->pps].sz) { - ERROR("Inadequate memory for L0 GPTs. "); - ERROR("Expected 0x%lx bytes. Got 0x%lx bytes\n", - l0_gpt_attr_lookup[params->pps].sz, - params->l0_mem_size); - return -ENOMEM; - } - - return 0; -} - -/* - * A L1 GPT is required if any one of the following conditions is true: - * - * - The base address is not 1GB aligned - * - The size of the memory region is not a multiple of 1GB - * - A L1 GPT has been explicitly requested (attrs == PAS_REG_DESC_TYPE_TBL) - * - * This function: - * - iterates over all the PAS regions to determine whether they - * will need a 2 stage look up (and therefore a L1 GPT will be required) or - * if it would be enough with a single level lookup table. - * - Updates the attr field of the PAS regions. - * - Returns the total count of L1 tables needed. - * - * In the future wwe should validate that the PAS range does not exceed the - * configured PPS. (and maybe rename this function as it is validating PAS - * regions). - */ -static unsigned int update_gpt_type(pas_region_t *pas_regions, - unsigned int pas_region_cnt) -{ - unsigned int idx, cnt = 0U; - - for (idx = 0U; idx < pas_region_cnt; idx++) { - if (PAS_REG_DESC_TYPE(pas_regions[idx].attrs) == - PAS_REG_DESC_TYPE_TBL) { - cnt++; - continue; - } - if (!(IS_1GB_ALIGNED(pas_regions[idx].base_pa) && - IS_1GB_ALIGNED(pas_regions[idx].size))) { - - /* Current region will need L1 GPTs. 
*/ - assert(PAS_REG_DESC_TYPE(pas_regions[idx].attrs) - == PAS_REG_DESC_TYPE_ANY); - - pas_regions[idx].attrs = - GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_TBL, - PAS_REG_GPI(pas_regions[idx].attrs)); - cnt++; - continue; - } - - /* The PAS can be mapped on a one stage lookup table */ - assert(PAS_REG_DESC_TYPE(pas_regions[idx].attrs) != - PAS_REG_DESC_TYPE_TBL); - - pas_regions[idx].attrs = GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_BLK, - PAS_REG_GPI(pas_regions[idx].attrs)); - } - - return cnt; -} - -static int validate_l1_gpt_params(gpt_init_params_t *params, - unsigned int l1_gpt_cnt) -{ - size_t l1_gpt_sz, l1_gpt_mem_sz; - - /* Check if the granularity is supported */ - assert(xlat_arch_is_granule_size_supported( - l1_gpt_attr_lookup[params->pgs].g_sz)); - - - /* Check if naturally aligned L1 GPTs can be created */ - l1_gpt_sz = l1_gpt_attr_lookup[params->pgs].g_sz; - if (params->l1_mem_base & (l1_gpt_sz - 1)) { - WARN("Unaligned L1 GPT base address.\n"); - return -EFAULT; - } - - /* Check if there is enough memory for L1 GPTs */ - l1_gpt_mem_sz = l1_gpt_cnt * l1_gpt_sz; - if (params->l1_mem_size < l1_gpt_mem_sz) { - WARN("Inadequate memory for L1 GPTs. "); - WARN("Expected 0x%lx bytes. Got 0x%lx bytes\n", - l1_gpt_mem_sz, params->l1_mem_size); - return -ENOMEM; - } - - INFO("Requested 0x%lx bytes for L1 GPTs.\n", l1_gpt_mem_sz); - return 0; -} - -/* - * Helper function to determine if the end physical address lies in the same GB - * as the current physical address. If true, the end physical address is - * returned else, the start address of the next GB is returned. 
- */ -static uintptr_t get_l1_gpt_end_pa(uintptr_t cur_pa, uintptr_t end_pa) -{ - uintptr_t cur_gb, end_gb; - - cur_gb = cur_pa >> ONE_GB_SHIFT; - end_gb = end_pa >> ONE_GB_SHIFT; - - assert(cur_gb <= end_gb); - - if (cur_gb == end_gb) { - return end_pa; - } - - return (cur_gb + 1) << ONE_GB_SHIFT; -} - -static void generate_l0_blk_desc(gpt_init_params_t *params, - unsigned int idx) -{ - uint64_t gpt_desc; - uintptr_t end_addr; - unsigned int end_idx, start_idx; - pas_region_t *pas = params->pas_regions + idx; - uint64_t *l0_gpt_arr = (uint64_t *)params->l0_mem_base; - - /* Create the GPT Block descriptor for this PAS region */ - gpt_desc = GPT_BLK_DESC; - gpt_desc |= PAS_REG_GPI(pas->attrs) - << GPT_BLOCK_DESC_GPI_VAL_SHIFT; - - /* Start index of this region in L0 GPTs */ - start_idx = pas->base_pa >> ONE_GB_SHIFT; - - /* - * Determine number of L0 GPT descriptors covered by - * this PAS region and use the count to populate these - * descriptors. - */ - end_addr = pas->base_pa + pas->size; - assert(end_addr \ - <= (ULL(l0_gpt_attr_lookup[params->pps].t_val_mask + 1)) << 30); - end_idx = end_addr >> ONE_GB_SHIFT; - - for (; start_idx < end_idx; start_idx++) { - l0_gpt_arr[start_idx] = gpt_desc; - INFO("L0 entry (BLOCK) index %u [%p]: GPI = 0x%llx (0x%llx)\n", - start_idx, &l0_gpt_arr[start_idx], - (gpt_desc >> GPT_BLOCK_DESC_GPI_VAL_SHIFT) & - GPT_L1_INDEX_MASK, l0_gpt_arr[start_idx]); - } -} - -static void generate_l0_tbl_desc(gpt_init_params_t *params, - unsigned int idx) -{ - uint64_t gpt_desc = 0U, *l1_gpt_arr; - uintptr_t start_pa, end_pa, cur_pa, next_pa; - unsigned int start_idx, l1_gpt_idx; - unsigned int p_val, gran_sz; - pas_region_t *pas = params->pas_regions + idx; - uint64_t *l0_gpt_base = (uint64_t *)params->l0_mem_base; - uint64_t *l1_gpt_base = (uint64_t *)params->l1_mem_base; - - start_pa = pas->base_pa; - end_pa = start_pa + pas->size; - p_val = l1_gpt_attr_lookup[params->pgs].p_val; - gran_sz = 1 << p_val; - - /* - * end_pa cannot be larger than 
the maximum protected physical memory. - */ - assert(((1ULL<<30) << l0_gpt_attr_lookup[params->pps].t_val_mask) - > end_pa); - - for (cur_pa = start_pa; cur_pa < end_pa;) { - /* - * Determine the PA range that will be covered - * in this loop iteration. - */ - next_pa = get_l1_gpt_end_pa(cur_pa, end_pa); - - INFO("PAS[%u]: start: 0x%lx, end: 0x%lx, next_pa: 0x%lx.\n", - idx, cur_pa, end_pa, next_pa); - - /* Index of this PA in L0 GPTs */ - start_idx = cur_pa >> ONE_GB_SHIFT; - - /* - * If cur_pa is on a 1GB boundary then determine - * the base address of next available L1 GPT - * memory region - */ - if (IS_1GB_ALIGNED(cur_pa)) { - l1_gpt_arr = (uint64_t *)((uint64_t)l1_gpt_base + - (l1_gpt_attr_lookup[params->pgs].t_sz * - l1_gpt_mem_avlbl_index)); - - assert(l1_gpt_arr < - (l1_gpt_base + params->l1_mem_size)); - - /* Create the L0 GPT descriptor for this PAS region */ - gpt_desc = GPT_TBL_DESC | - ((uintptr_t)l1_gpt_arr - & GPT_TBL_DESC_ADDR_MASK); - - l0_gpt_base[start_idx] = gpt_desc; - - /* - * Update index to point to next available L1 - * GPT memory region - */ - l1_gpt_mem_avlbl_index++; - } else { - /* Use the existing L1 GPT */ - l1_gpt_arr = (uint64_t *)(l0_gpt_base[start_idx] - & ~((1U<<12) - 1U)); - } - - INFO("L0 entry (TABLE) index %u [%p] ==> L1 Addr 0x%llx (0x%llx)\n", - start_idx, &l0_gpt_base[start_idx], - (unsigned long long)(l1_gpt_arr), - l0_gpt_base[start_idx]); - - /* - * Fill up L1 GPT entries between these two - * addresses. - */ - for (; cur_pa < next_pa; cur_pa += gran_sz) { - unsigned int gpi_idx, gpi_idx_shift; - - /* Obtain index of L1 GPT entry */ - l1_gpt_idx = get_l1_gpt_index(params->pgs, cur_pa); - - /* - * Obtain index of GPI in L1 GPT entry - * (i = PA[p_val+3:p_val]) - */ - gpi_idx = (cur_pa >> p_val) & GPT_L1_INDEX_MASK; - - /* - * Shift by index * 4 to reach correct - * GPI entry in L1 GPT descriptor. 
- * GPI = gpt_desc[(4*idx)+3:(4*idx)] - */ - gpi_idx_shift = gpi_idx << 2; - - gpt_desc = l1_gpt_arr[l1_gpt_idx]; - - /* Clear existing GPI encoding */ - gpt_desc &= ~(GPT_L1_INDEX_MASK << gpi_idx_shift); - - /* Set the GPI encoding */ - gpt_desc |= ((uint64_t)PAS_REG_GPI(pas->attrs) - << gpi_idx_shift); - - l1_gpt_arr[l1_gpt_idx] = gpt_desc; - - if (gpi_idx == 15U) { - VERBOSE("\tEntry %u [%p] = 0x%llx\n", - l1_gpt_idx, - &l1_gpt_arr[l1_gpt_idx], gpt_desc); - } - } - } -} - -static void create_gpt(gpt_init_params_t *params) -{ - unsigned int idx; - pas_region_t *pas_regions = params->pas_regions; - - INFO("pgs = 0x%x, pps = 0x%x, l0gptsz = 0x%x\n", - params->pgs, params->pps, params->l0gptsz); - INFO("pas_region_cnt = 0x%x L1 base = 0x%lx, L1 sz = 0x%lx\n", - params->pas_count, params->l1_mem_base, params->l1_mem_size); - -#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) - gpt_inv_dcache_range(params->l0_mem_base, params->l0_mem_size); - gpt_inv_dcache_range(params->l1_mem_base, params->l1_mem_size); -#endif - - for (idx = 0U; idx < params->pas_count; idx++) { - - INFO("PAS[%u]: base 0x%llx, sz 0x%lx, GPI 0x%x, type 0x%x\n", - idx, pas_regions[idx].base_pa, pas_regions[idx].size, - PAS_REG_GPI(pas_regions[idx].attrs), - PAS_REG_DESC_TYPE(pas_regions[idx].attrs)); - - /* Check if a block or table descriptor is required */ - if (PAS_REG_DESC_TYPE(pas_regions[idx].attrs) == - PAS_REG_DESC_TYPE_BLK) { - generate_l0_blk_desc(params, idx); - - } else { - generate_l0_tbl_desc(params, idx); - } - } - -#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) - gpt_clean_dcache_range(params->l0_mem_base, params->l0_mem_size); - gpt_clean_dcache_range(params->l1_mem_base, params->l1_mem_size); -#endif - - /* Make sure that all the entries are written to the memory. 
*/ - dsbishst(); -} - -#endif /* IMAGE_BL2 */ - -int gpt_init(gpt_init_params_t *params) -{ -#ifdef IMAGE_BL2 - unsigned int l1_gpt_cnt; - int ret; -#endif - /* Validate arguments */ - assert(params != NULL); - assert(params->pgs <= GPCCR_PGS_16K); - assert(params->pps <= GPCCR_PPS_4PB); - assert(params->l0_mem_base != (uintptr_t)0); - assert(params->l0_mem_size > 0U); - assert(params->l1_mem_base != (uintptr_t)0); - assert(params->l1_mem_size > 0U); - -#ifdef IMAGE_BL2 - /* - * The Granule Protection Tables are initialised only in BL2. - * BL31 is not allowed to initialise them again in case - * these are modified by any other image loaded by BL2. - */ - assert(params->pas_regions != NULL); - assert(params->pas_count > 0U); - - ret = validate_l0_gpt_params(params); - if (ret < 0) { - - return ret; - } - - /* Check if L1 GPTs are required and how many. */ - l1_gpt_cnt = update_gpt_type(params->pas_regions, - params->pas_count); - INFO("%u L1 GPTs requested.\n", l1_gpt_cnt); - - if (l1_gpt_cnt > 0U) { - ret = validate_l1_gpt_params(params, l1_gpt_cnt); - if (ret < 0) { - return ret; - } - } - - create_gpt(params); -#else - /* If running in BL31, only primary CPU can initialise GPTs */ - assert(plat_is_my_cpu_primary() == 1U); - - /* - * If the primary CPU is calling this function from BL31 - * we expect that the tables are aready initialised from - * BL2 and GPCCR_EL3 is already configured with - * Granule Protection Check Enable bit set. - */ - assert((read_gpccr_el3() & GPCCR_GPC_BIT) != 0U); -#endif /* IMAGE_BL2 */ - -#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) - gpt_inv_dcache_range((uintptr_t)&gpt_config, sizeof(gpt_config)); -#endif - gpt_config.plat_gpt_l0_base = params->l0_mem_base; - gpt_config.plat_gpt_l1_base = params->l1_mem_base; - gpt_config.plat_gpt_l0_size = params->l0_mem_size; - gpt_config.plat_gpt_l1_size = params->l1_mem_size; - - /* Backup the parameters used to configure GPCCR_EL3 on every PE. 
*/ - gpt_config.plat_gpt_pgs = params->pgs; - gpt_config.plat_gpt_pps = params->pps; - gpt_config.plat_gpt_l0gptsz = params->l0gptsz; - -#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) - gpt_clean_dcache_range((uintptr_t)&gpt_config, sizeof(gpt_config)); -#endif - - return 0; -} - -void gpt_enable(void) -{ - u_register_t gpccr_el3; - - /* Invalidate any stale TLB entries */ - tlbipaallos(); - -#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) - gpt_inv_dcache_range((uintptr_t)&gpt_config, sizeof(gpt_config)); -#endif - -#ifdef IMAGE_BL2 - /* - * Granule tables must be initialised before enabling - * granule protection. - */ - assert(gpt_config.plat_gpt_l0_base != (uintptr_t)NULL); -#endif - write_gptbr_el3(gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT); - - /* GPCCR_EL3.L0GPTSZ */ - gpccr_el3 = SET_GPCCR_L0GPTSZ(gpt_config.plat_gpt_l0gptsz); - - /* GPCCR_EL3.PPS */ - gpccr_el3 |= SET_GPCCR_PPS(gpt_config.plat_gpt_pps); - - /* GPCCR_EL3.PGS */ - gpccr_el3 |= SET_GPCCR_PGS(gpt_config.plat_gpt_pgs); - - /* Set shareability attribute to Outher Shareable */ - gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_OS); - - /* Outer and Inner cacheability set to Normal memory, WB, RA, WA. */ - gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA); - gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA); - - /* Enable GPT */ - gpccr_el3 |= GPCCR_GPC_BIT; - - write_gpccr_el3(gpccr_el3); - dsbsy(); - - VERBOSE("Granule Protection Checks enabled\n"); -} - -void gpt_disable(void) -{ - u_register_t gpccr_el3 = read_gpccr_el3(); - - write_gpccr_el3(gpccr_el3 &= ~GPCCR_GPC_BIT); - dsbsy(); -} - -#ifdef IMAGE_BL31 - -/* - * Each L1 descriptor is protected by 1 spinlock. The number of descriptors is - * equal to the size of the total protected memory area divided by the size of - * protected memory area covered by each descriptor. - * - * The size of memory covered by each descriptor is the 'size of the granule' x - * 'number of granules' in a descriptor. 
The former is PLAT_ARM_GPT_PGS and - * latter is always 16. - */ -static spinlock_t gpt_lock; - -static unsigned int get_l0_gpt_index(unsigned int pps, uint64_t pa) -{ - unsigned int idx; - - /* Get the index into the L0 table */ - idx = pa >> ONE_GB_SHIFT; - - /* Check if the pa lies within the PPS */ - if (idx & ~(l0_gpt_attr_lookup[pps].t_val_mask)) { - WARN("Invalid address 0x%llx.\n", pa); - return -EINVAL; - } - - return idx; -} - -int gpt_transition_pas(uint64_t pa, - unsigned int src_sec_state, - unsigned int target_pas) -{ - int idx; - unsigned int idx_shift; - unsigned int gpi; - uint64_t gpt_l1_desc; - uint64_t *gpt_l1_addr, *gpt_addr; - - /* - * Check if caller is allowed to transition the granule's PAS. - * - * - Secure world caller can only request S <-> NS transitions on a - * granule that is already in either S or NS PAS. - * - * - Realm world caller can only request R <-> NS transitions on a - * granule that is already in either R or NS PAS. - */ - if (src_sec_state == SMC_FROM_REALM) { - if ((target_pas != GPI_REALM) && (target_pas != GPI_NS)) { - WARN("Invalid caller (%s) and PAS (%d) combination.\n", - "realm world", target_pas); - return -EINVAL; - } - } else if (src_sec_state == SMC_FROM_SECURE) { - if ((target_pas != GPI_SECURE) && (target_pas != GPI_NS)) { - WARN("Invalid caller (%s) and PAS (%d) combination.\n", - "secure world", target_pas); - return -EINVAL; - } - } else { - WARN("Invalid caller security state 0x%x\n", src_sec_state); - return -EINVAL; - } - - /* Obtain the L0 GPT address. */ - gpt_addr = (uint64_t *)gpt_config.plat_gpt_l0_base; - - /* Validate physical address and obtain index into L0 GPT table */ - idx = get_l0_gpt_index(gpt_config.plat_gpt_pps, pa); - if (idx < 0U) { - return idx; - } - - VERBOSE("PA 0x%llx, L0 base addr 0x%llx, L0 index %u\n", - pa, (uint64_t)gpt_addr, idx); - - /* Obtain the L0 descriptor */ - gpt_l1_desc = gpt_addr[idx]; - - /* - * Check if it is a table descriptor. 
Granule transition only applies to - * memory ranges for which L1 tables were created at boot time. So there - * is no possibility of splitting and coalescing tables. - */ - if ((gpt_l1_desc & GPT_L1_INDEX_MASK) != GPT_TBL_DESC) { - WARN("Invalid address 0x%llx.\n", pa); - return -EPERM; - } - - /* Obtain the L1 table address from L0 descriptor. */ - gpt_l1_addr = (uint64_t *)(gpt_l1_desc & ~(0xFFF)); - - /* Obtain the index into the L1 table */ - idx = get_l1_gpt_index(gpt_config.plat_gpt_pgs, pa); - - VERBOSE("L1 table base addr 0x%llx, L1 table index %u\n", (uint64_t)gpt_l1_addr, idx); - - /* Lock access to the granule */ - spin_lock(&gpt_lock); - - /* Obtain the L1 descriptor */ - gpt_l1_desc = gpt_l1_addr[idx]; - - /* Obtain the shift for GPI in L1 GPT entry */ - idx_shift = (pa >> 12) & GPT_L1_INDEX_MASK; - idx_shift <<= 2; - - /* Obtain the current GPI encoding for this PA */ - gpi = (gpt_l1_desc >> idx_shift) & GPT_L1_INDEX_MASK; - - if (src_sec_state == SMC_FROM_REALM) { - /* - * Realm world is only allowed to transition a NS or Realm world - * granule. - */ - if ((gpi != GPI_REALM) && (gpi != GPI_NS)) { - WARN("Invalid transition request from %s.\n", - "realm world"); - spin_unlock(&gpt_lock); - return -EPERM; - } - } else if (src_sec_state == SMC_FROM_SECURE) { - /* - * Secure world is only allowed to transition a NS or Secure world - * granule. - */ - if ((gpi != GPI_SECURE) && (gpi != GPI_NS)) { - WARN("Invalid transition request from %s.\n", - "secure world"); - spin_unlock(&gpt_lock); - return -EPERM; - } - } - /* We don't need an else here since we already handle that above. 
*/ - - VERBOSE("L1 table desc 0x%llx before mod \n", gpt_l1_desc); - - /* Clear existing GPI encoding */ - gpt_l1_desc &= ~(GPT_L1_INDEX_MASK << idx_shift); - - /* Transition the granule to the new PAS */ - gpt_l1_desc |= ((uint64_t)target_pas << idx_shift); - - /* Update the L1 GPT entry */ - gpt_l1_addr[idx] = gpt_l1_desc; - - VERBOSE("L1 table desc 0x%llx after mod \n", gpt_l1_desc); - - /* Make sure change is propagated to other CPUs. */ -#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) - gpt_clean_dcache_range((uintptr_t)&gpt_addr[idx], sizeof(uint64_t)); -#endif - - gpt_tlbi_by_pa(pa, PAGE_SIZE_4KB); - - /* Make sure that all the entries are written to the memory. */ - dsbishst(); - - /* Unlock access to the granule */ - spin_unlock(&gpt_lock); - - return 0; -} - -#endif /* IMAGE_BL31 */ diff --git a/lib/gpt_rme/gpt_rme.c b/lib/gpt_rme/gpt_rme.c new file mode 100644 index 000000000..1f90e64cf --- /dev/null +++ b/lib/gpt_rme/gpt_rme.c @@ -0,0 +1,1112 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include + +#include +#include +#include +#include "gpt_rme_private.h" +#include +#include +#include +#include + +#if !ENABLE_RME +#error "ENABLE_RME must be enabled to use the GPT library." +#endif + +/* + * Lookup T from PPS + * + * PPS Size T + * 0b000 4GB 32 + * 0b001 64GB 36 + * 0b010 1TB 40 + * 0b011 4TB 42 + * 0b100 16TB 44 + * 0b101 256TB 48 + * 0b110 4PB 52 + * + * See section 15.1.27 of the RME specification. + */ +static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T, + PPS_1TB_T, PPS_4TB_T, + PPS_16TB_T, PPS_256TB_T, + PPS_4PB_T}; + +/* + * Lookup P from PGS + * + * PGS Size P + * 0b00 4KB 12 + * 0b10 16KB 14 + * 0b01 64KB 16 + * + * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB, this is not a typo. + * + * See section 15.1.27 of the RME specification. 
+ */ +static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P}; + +/* + * This structure contains GPT configuration data. + */ +typedef struct { + uintptr_t plat_gpt_l0_base; + gpccr_pps_e pps; + gpt_t_val_e t; + gpccr_pgs_e pgs; + gpt_p_val_e p; +} gpt_config_t; + +static gpt_config_t gpt_config; + +/* These variables are used during initialization of the L1 tables. */ +static unsigned int gpt_next_l1_tbl_idx; +static uintptr_t gpt_l1_tbl; + +/* + * This function checks to see if a GPI value is valid. + * + * These are valid GPI values. + * GPT_GPI_NO_ACCESS U(0x0) + * GPT_GPI_SECURE U(0x8) + * GPT_GPI_NS U(0x9) + * GPT_GPI_ROOT U(0xA) + * GPT_GPI_REALM U(0xB) + * GPT_GPI_ANY U(0xF) + * + * Parameters + * gpi GPI to check for validity. + * + * Return + * true for a valid GPI, false for an invalid one. + */ +static bool gpt_is_gpi_valid(unsigned int gpi) +{ + if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) || + ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) { + return true; + } else { + return false; + } +} + +/* + * This function checks to see if two PAS regions overlap. + * + * Parameters + * base_1: base address of first PAS + * size_1: size of first PAS + * base_2: base address of second PAS + * size_2: size of second PAS + * + * Return + * True if PAS regions overlap, false if they do not. + */ +static bool gpt_check_pas_overlap(uintptr_t base_1, size_t size_1, + uintptr_t base_2, size_t size_2) +{ + if (((base_1 + size_1) > base_2) && ((base_2 + size_2) > base_1)) { + return true; + } else { + return false; + } +} + +/* + * This helper function checks to see if a PAS region from index 0 to + * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table. + * + * Parameters + * l0_idx: Index of the L0 entry to check + * pas_regions: PAS region array + * pas_idx: Upper bound of the PAS array index. + * + * Return + * True if a PAS region occupies the L0 region in question, false if not. 
+ */ +static bool gpt_does_previous_pas_exist_here(unsigned int l0_idx, + pas_region_t *pas_regions, + unsigned int pas_idx) +{ + /* Iterate over PAS regions up to pas_idx. */ + for (unsigned int i = 0U; i < pas_idx; i++) { + if (gpt_check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx), + GPT_L0GPTSZ_ACTUAL_SIZE, + pas_regions[i].base_pa, pas_regions[i].size)) { + return true; + } + } + return false; +} + +/* + * This function iterates over all of the PAS regions and checks them to ensure + * proper alignment of base and size, that the GPI is valid, and that no regions + * overlap. As a part of the overlap checks, this function checks existing L0 + * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables + * is called multiple times to place L1 tables in different areas of memory. It + * also counts the number of L1 tables needed and returns it on success. + * + * Parameters + * *pas_regions Pointer to array of PAS region structures. + * pas_region_cnt Total number of PAS regions in the array. + * + * Return + * Negative Linux error code in the event of a failure, number of L1 regions + * required when successful. + */ +static int gpt_validate_pas_mappings(pas_region_t *pas_regions, + unsigned int pas_region_cnt) +{ + unsigned int idx; + unsigned int l1_cnt = 0U; + unsigned int pas_l1_cnt; + uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base; + + assert(pas_regions != NULL); + assert(pas_region_cnt != 0U); + + for (idx = 0U; idx < pas_region_cnt; idx++) { + /* Check for arithmetic overflow in region. */ + if ((ULONG_MAX - pas_regions[idx].base_pa) < + pas_regions[idx].size) { + ERROR("[GPT] Address overflow in PAS[%u]!\n", idx); + return -EOVERFLOW; + } + + /* Initial checks for PAS validity. 
*/ + if (((pas_regions[idx].base_pa + pas_regions[idx].size) > + GPT_PPS_ACTUAL_SIZE(gpt_config.t)) || + !gpt_is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) { + ERROR("[GPT] PAS[%u] is invalid!\n", idx); + return -EFAULT; + } + + /* + * Make sure this PAS does not overlap with another one. We + * start from idx + 1 instead of 0 since prior PAS mappings will + * have already checked themselves against this one. + */ + for (unsigned int i = idx + 1; i < pas_region_cnt; i++) { + if (gpt_check_pas_overlap(pas_regions[idx].base_pa, + pas_regions[idx].size, + pas_regions[i].base_pa, + pas_regions[i].size)) { + ERROR("[GPT] PAS[%u] overlaps with PAS[%u]\n", + i, idx); + return -EFAULT; + } + } + + /* + * Since this function can be called multiple times with + * separate L1 tables we need to check the existing L0 mapping + * to see if this PAS would fall into one that has already been + * initialized. + */ + for (unsigned int i = GPT_L0_IDX(pas_regions[idx].base_pa); + i <= GPT_L0_IDX(pas_regions[idx].base_pa + pas_regions[idx].size - 1); + i++) { + if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) && + (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) { + /* This descriptor is unused so continue. */ + continue; + } + + /* + * This descriptor has been initialized in a previous + * call to this function so cannot be initialized again. + */ + ERROR("[GPT] PAS[%u] overlaps with previous L0[%d]!\n", + idx, i); + return -EFAULT; + } + + /* Check for block mapping (L0) type. */ + if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) == + GPT_PAS_ATTR_MAP_TYPE_BLOCK) { + /* Make sure base and size are block-aligned. */ + if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) || + !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) { + ERROR("[GPT] PAS[%u] is not block-aligned!\n", + idx); + return -EFAULT; + } + + continue; + } + + /* Check for granule mapping (L1) type. 
*/ + if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) == + GPT_PAS_ATTR_MAP_TYPE_GRANULE) { + /* Make sure base and size are granule-aligned. */ + if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) || + !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) { + ERROR("[GPT] PAS[%u] is not granule-aligned!\n", + idx); + return -EFAULT; + } + + /* Find how many L1 tables this PAS occupies. */ + pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa + + pas_regions[idx].size - 1) - + GPT_L0_IDX(pas_regions[idx].base_pa) + 1); + + /* + * This creates a situation where, if multiple PAS + * regions occupy the same table descriptor, we can get + * an artificially high total L1 table count. The way we + * handle this is by checking each PAS against those + * before it in the array, and if they both occupy the + * same PAS we subtract from pas_l1_cnt and only the + * first PAS in the array gets to count it. + */ + + /* + * If L1 count is greater than 1 we know the start and + * end PAs are in different L0 regions so we must check + * both for overlap against other PAS. + */ + if (pas_l1_cnt > 1) { + if (gpt_does_previous_pas_exist_here( + GPT_L0_IDX(pas_regions[idx].base_pa + + pas_regions[idx].size - 1), + pas_regions, idx)) { + pas_l1_cnt = pas_l1_cnt - 1; + } + } + + if (gpt_does_previous_pas_exist_here( + GPT_L0_IDX(pas_regions[idx].base_pa), + pas_regions, idx)) { + pas_l1_cnt = pas_l1_cnt - 1; + } + + l1_cnt += pas_l1_cnt; + continue; + } + + /* If execution reaches this point, mapping type is invalid. */ + ERROR("[GPT] PAS[%u] has invalid mapping type 0x%x.\n", idx, + GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs)); + return -EINVAL; + } + + return l1_cnt; +} + +/* + * This function validates L0 initialization parameters. + * + * Parameters + * l0_mem_base Base address of memory used for L0 tables. + * l1_mem_size Size of memory available for L0 tables. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. 
+ */ +static int gpt_validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base, + size_t l0_mem_size) +{ + size_t l0_alignment; + + /* + * Make sure PPS is valid and then store it since macros need this value + * to work. + */ + if (pps > GPT_PPS_MAX) { + ERROR("[GPT] Invalid PPS: 0x%x\n", pps); + return -EINVAL; + } + gpt_config.pps = pps; + gpt_config.t = gpt_t_lookup[pps]; + + /* Alignment must be the greater of 4k or l0 table size. */ + l0_alignment = PAGE_SIZE_4KB; + if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) { + l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t); + } + + /* Check base address. */ + if ((l0_mem_base == 0U) || ((l0_mem_base & (l0_alignment - 1)) != 0U)) { + ERROR("[GPT] Invalid L0 base address: 0x%lx\n", l0_mem_base); + return -EFAULT; + } + + /* Check size. */ + if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) { + ERROR("[GPT] Inadequate L0 memory: need 0x%lx, have 0x%lx)\n", + GPT_L0_TABLE_SIZE(gpt_config.t), + l0_mem_size); + return -ENOMEM; + } + + return 0; +} + +/* + * In the event that L1 tables are needed, this function validates + * the L1 table generation parameters. + * + * Parameters + * l1_mem_base Base address of memory used for L1 table allocation. + * l1_mem_size Total size of memory available for L1 tables. + * l1_gpt_cnt Number of L1 tables needed. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +static int gpt_validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size, + unsigned int l1_gpt_cnt) +{ + size_t l1_gpt_mem_sz; + + /* Check if the granularity is supported */ + if (!xlat_arch_is_granule_size_supported( + GPT_PGS_ACTUAL_SIZE(gpt_config.p))) { + return -EPERM; + } + + /* Make sure L1 tables are aligned to their size. */ + if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1)) != 0U) { + ERROR("[GPT] Unaligned L1 GPT base address: 0x%lx\n", + l1_mem_base); + return -EFAULT; + } + + /* Get total memory needed for L1 tables. 
*/ + l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p); + + /* Check for overflow. */ + if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) { + ERROR("[GPT] Overflow calculating L1 memory size.\n"); + return -ENOMEM; + } + + /* Make sure enough space was supplied. */ + if (l1_mem_size < l1_gpt_mem_sz) { + ERROR("[GPT] Inadequate memory for L1 GPTs. "); + ERROR(" Expected 0x%lx bytes. Got 0x%lx bytes\n", + l1_gpt_mem_sz, l1_mem_size); + return -ENOMEM; + } + + VERBOSE("[GPT] Requested 0x%lx bytes for L1 GPTs.\n", l1_gpt_mem_sz); + return 0; +} + +/* + * This function initializes L0 block descriptors (regions that cannot be + * transitioned at the granule level) according to the provided PAS. + * + * Parameters + * *pas Pointer to the structure defining the PAS region to + * initialize. + */ +static void gpt_generate_l0_blk_desc(pas_region_t *pas) +{ + uint64_t gpt_desc; + unsigned int end_idx; + unsigned int idx; + uint64_t *l0_gpt_arr; + + assert(gpt_config.plat_gpt_l0_base != 0U); + assert(pas != NULL); + + /* + * Checking of PAS parameters has already been done in + * gpt_validate_pas_mappings so no need to check the same things again. + */ + + l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base; + + /* Create the GPT Block descriptor for this PAS region */ + gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs)); + + /* Start index of this region in L0 GPTs */ + idx = pas->base_pa >> GPT_L0_IDX_SHIFT; + + /* + * Determine number of L0 GPT descriptors covered by + * this PAS region and use the count to populate these + * descriptors. + */ + end_idx = (pas->base_pa + pas->size) >> GPT_L0_IDX_SHIFT; + + /* Generate the needed block descriptors. 
*/ + for (; idx < end_idx; idx++) { + l0_gpt_arr[idx] = gpt_desc; + VERBOSE("[GPT] L0 entry (BLOCK) index %u [%p]: GPI = 0x%llx (0x%llx)\n", + idx, &l0_gpt_arr[idx], + (gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) & + GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]); + } +} + +/* + * Helper function to determine if the end physical address lies in the same L0 + * region as the current physical address. If true, the end physical address is + * returned else, the start address of the next region is returned. + * + * Parameters + * cur_pa Physical address of the current PA in the loop through + * the range. + * end_pa Physical address of the end PA in a PAS range. + * + * Return + * The PA of the end of the current range. + */ +static uintptr_t gpt_get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa) +{ + uintptr_t cur_idx; + uintptr_t end_idx; + + cur_idx = cur_pa >> GPT_L0_IDX_SHIFT; + end_idx = end_pa >> GPT_L0_IDX_SHIFT; + + assert(cur_idx <= end_idx); + + if (cur_idx == end_idx) { + return end_pa; + } + + return (cur_idx + 1U) << GPT_L0_IDX_SHIFT; +} + +/* + * Helper function to fill out GPI entries in a single L1 table. This function + * fills out entire L1 descriptors at a time to save memory writes. + * + * Parameters + * gpi GPI to set this range to + * l1 Pointer to L1 table to fill out + * first Address of first granule in range. + * last Address of last granule in range (inclusive). + */ +static void gpt_fill_l1_tbl(uint64_t gpi, uint64_t *l1, uintptr_t first, + uintptr_t last) +{ + uint64_t gpi_field = GPT_BUILD_L1_DESC(gpi); + uint64_t gpi_mask = 0xFFFFFFFFFFFFFFFF; + + assert(first <= last); + assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U); + assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U); + assert(GPT_L0_IDX(first) == GPT_L0_IDX(last)); + assert(l1 != NULL); + + /* Shift the mask if we're starting in the middle of an L1 entry. 
*/ + gpi_mask = gpi_mask << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2); + + /* Fill out each L1 entry for this region. */ + for (unsigned int i = GPT_L1_IDX(gpt_config.p, first); + i <= GPT_L1_IDX(gpt_config.p, last); i++) { + /* Account for stopping in the middle of an L1 entry. */ + if (i == GPT_L1_IDX(gpt_config.p, last)) { + gpi_mask &= (gpi_mask >> ((15 - + GPT_L1_GPI_IDX(gpt_config.p, last)) << 2)); + } + + /* Write GPI values. */ + assert((l1[i] & gpi_mask) == + (GPT_BUILD_L1_DESC(GPT_GPI_ANY) & gpi_mask)); + l1[i] = (l1[i] & ~gpi_mask) | (gpi_mask & gpi_field); + + /* Reset mask. */ + gpi_mask = 0xFFFFFFFFFFFFFFFF; + } +} + +/* + * This function finds the next available unused L1 table and initializes all + * granules descriptor entries to GPI_ANY. This ensures that there are no chunks + * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the + * event that a PAS region stops midway through an L1 table, thus guaranteeing + * that all memory not explicitly assigned is GPI_ANY. This function does not + * check for overflow conditions, that should be done by the caller. + * + * Return + * Pointer to the next available L1 table. + */ +static uint64_t *gpt_get_new_l1_tbl(void) +{ + /* Retrieve the next L1 table. */ + uint64_t *l1 = (uint64_t *)((uint64_t)(gpt_l1_tbl) + + (GPT_L1_TABLE_SIZE(gpt_config.p) * + gpt_next_l1_tbl_idx)); + + /* Increment L1 counter. */ + gpt_next_l1_tbl_idx++; + + /* Initialize all GPIs to GPT_GPI_ANY */ + for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) { + l1[i] = GPT_BUILD_L1_DESC(GPT_GPI_ANY); + } + + return l1; +} + +/* + * When L1 tables are needed, this function creates the necessary L0 table + * descriptors and fills out the L1 table entries according to the supplied + * PAS range. + * + * Parameters + * *pas Pointer to the structure defining the PAS region. 
+ */ +static void gpt_generate_l0_tbl_desc(pas_region_t *pas) +{ + uintptr_t end_pa; + uintptr_t cur_pa; + uintptr_t last_gran_pa; + uint64_t *l0_gpt_base; + uint64_t *l1_gpt_arr; + unsigned int l0_idx; + + assert(gpt_config.plat_gpt_l0_base != 0U); + assert(pas != NULL); + + /* + * Checking of PAS parameters has already been done in + * gpt_validate_pas_mappings so no need to check the same things again. + */ + + end_pa = pas->base_pa + pas->size; + l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base; + + /* We start working from the granule at base PA */ + cur_pa = pas->base_pa; + + /* Iterate over each L0 region in this memory range. */ + for (l0_idx = GPT_L0_IDX(pas->base_pa); + l0_idx <= GPT_L0_IDX(end_pa - 1U); + l0_idx++) { + + /* + * See if the L0 entry is already a table descriptor or if we + * need to create one. + */ + if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) { + /* Get the L1 array from the L0 entry. */ + l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]); + } else { + /* Get a new L1 table from the L1 memory space. */ + l1_gpt_arr = gpt_get_new_l1_tbl(); + + /* Fill out the L0 descriptor and flush it. */ + l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr); + } + + VERBOSE("[GPT] L0 entry (TABLE) index %u [%p] ==> L1 Addr 0x%llx (0x%llx)\n", + l0_idx, &l0_gpt_base[l0_idx], + (unsigned long long)(l1_gpt_arr), + l0_gpt_base[l0_idx]); + + /* + * Determine the PA of the last granule in this L0 descriptor. + */ + last_gran_pa = gpt_get_l1_end_pa(cur_pa, end_pa) - + GPT_PGS_ACTUAL_SIZE(gpt_config.p); + + /* + * Fill up L1 GPT entries between these two addresses. This + * function needs the addresses of the first granule and last + * granule in the range. + */ + gpt_fill_l1_tbl(GPT_PAS_ATTR_GPI(pas->attrs), l1_gpt_arr, + cur_pa, last_gran_pa); + + /* Advance cur_pa to first granule in next L0 region. 
*/ + cur_pa = gpt_get_l1_end_pa(cur_pa, end_pa); + } +} + +/* + * This function flushes a range of L0 descriptors used by a given PAS region + * array. There is a chance that some unmodified L0 descriptors would be flushed + * in the case that there are "holes" in an array of PAS regions but overall + * this should be faster than individually flushing each modified L0 descriptor + * as they are created. + * + * Parameters + * *pas Pointer to an array of PAS regions. + * pas_count Number of entries in the PAS array. + */ +static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count) +{ + unsigned int idx; + unsigned int start_idx; + unsigned int end_idx; + uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base; + + assert(pas != NULL); + assert(pas_count > 0); + + /* Initial start and end values. */ + start_idx = GPT_L0_IDX(pas[0].base_pa); + end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1); + + /* Find lowest and highest L0 indices used in this PAS array. */ + for (idx = 1; idx < pas_count; idx++) { + if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) { + start_idx = GPT_L0_IDX(pas[idx].base_pa); + } + if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1) > end_idx) { + end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1); + } + } + + /* + * Flush all covered L0 descriptors, add 1 because we need to include + * the end index value. + */ + flush_dcache_range((uintptr_t)&l0[start_idx], + ((end_idx + 1) - start_idx) * sizeof(uint64_t)); +} + +/* + * Public API to enable granule protection checks once the tables have all been + * initialized. This function is called at first initialization and then again + * later during warm boots of CPU cores. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_enable(void) +{ + u_register_t gpccr_el3; + + /* + * Granule tables must be initialised before enabling + * granule protection. 
+ */
+	if (gpt_config.plat_gpt_l0_base == 0U) {
+		ERROR("[GPT] Tables have not been initialized!\n");
+		return -EPERM;
+	}
+
+	/* Invalidate any stale TLB entries */
+	tlbipaallos();
+	dsb();
+
+	/* Write the base address of the L0 tables into GPTBR */
+	write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
+		>> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);
+
+	/* GPCCR_EL3.PPS */
+	gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);
+
+	/* GPCCR_EL3.PGS */
+	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);
+
+	/* Set shareability attribute to Outer Shareable */
+	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_OS);
+
+	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA. */
+	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
+	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);
+
+	/* Enable GPT */
+	gpccr_el3 |= GPCCR_GPC_BIT;
+
+	/* TODO: Configure GPCCR_EL3_GPCP for Fault control. */
+	write_gpccr_el3(gpccr_el3);
+	tlbipaallos();
+	dsb();
+	isb();
+
+	return 0;
+}
+
+/*
+ * Public API to disable granule protection checks.
+ */
+void gpt_disable(void)
+{
+	u_register_t gpccr_el3 = read_gpccr_el3();
+
+	write_gpccr_el3(gpccr_el3 & ~GPCCR_GPC_BIT);
+	dsbsy();
+	isb();
+}
+
+/*
+ * Public API that initializes the entire protected space to GPT_GPI_ANY using
+ * the L0 tables (block descriptors). Ideally, this function is invoked prior
+ * to DDR discovery and initialization. The MMU must be initialized before
+ * calling this function.
+ *
+ * Parameters
+ *   pps		PPS value to use for table generation
+ *   l0_mem_base	Base address of L0 tables in memory.
+ *   l0_mem_size	Total size of memory available for L0 tables.
+ *
+ * Return
+ *   Negative Linux error code in the event of a failure, 0 for success.
+ */
+int gpt_init_l0_tables(unsigned int pps, uintptr_t l0_mem_base,
+		       size_t l0_mem_size)
+{
+	int ret;
+	uint64_t gpt_desc;
+
+	/* Ensure that MMU and caches are enabled. */
+	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
+
+	/* Validate other parameters. 
*/ + ret = gpt_validate_l0_params(pps, l0_mem_base, l0_mem_size); + if (ret < 0) { + return ret; + } + + /* Create the descriptor to initialize L0 entries with. */ + gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY); + + /* Iterate through all L0 entries */ + for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) { + ((uint64_t *)l0_mem_base)[i] = gpt_desc; + } + + /* Flush updated L0 tables to memory. */ + flush_dcache_range((uintptr_t)l0_mem_base, + (size_t)GPT_L0_TABLE_SIZE(gpt_config.t)); + + /* Stash the L0 base address once initial setup is complete. */ + gpt_config.plat_gpt_l0_base = l0_mem_base; + + return 0; +} + +/* + * Public API that carves out PAS regions from the L0 tables and builds any L1 + * tables that are needed. This function ideally is run after DDR discovery and + * initialization. The L0 tables must have already been initialized to GPI_ANY + * when this function is called. + * + * This function can be called multiple times with different L1 memory ranges + * and PAS regions if it is desirable to place L1 tables in different locations + * in memory. (ex: you have multiple DDR banks and want to place the L1 tables + * in the DDR bank that they control) + * + * Parameters + * pgs PGS value to use for table generation. + * l1_mem_base Base address of memory used for L1 tables. + * l1_mem_size Total size of memory available for L1 tables. + * *pas_regions Pointer to PAS regions structure array. + * pas_count Total number of PAS regions. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base, + size_t l1_mem_size, pas_region_t *pas_regions, + unsigned int pas_count) +{ + int ret; + int l1_gpt_cnt; + + /* Ensure that MMU and caches are enabled. */ + assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U); + + /* PGS is needed for gpt_validate_pas_mappings so check it now. 
*/ + if (pgs > GPT_PGS_MAX) { + ERROR("[GPT] Invalid PGS: 0x%x\n", pgs); + return -EINVAL; + } + gpt_config.pgs = pgs; + gpt_config.p = gpt_p_lookup[pgs]; + + /* Make sure L0 tables have been initialized. */ + if (gpt_config.plat_gpt_l0_base == 0U) { + ERROR("[GPT] L0 tables must be initialized first!\n"); + return -EPERM; + } + + /* Check if L1 GPTs are required and how many. */ + l1_gpt_cnt = gpt_validate_pas_mappings(pas_regions, pas_count); + if (l1_gpt_cnt < 0) { + return l1_gpt_cnt; + } + + VERBOSE("[GPT] %u L1 GPTs requested.\n", l1_gpt_cnt); + + /* If L1 tables are needed then validate the L1 parameters. */ + if (l1_gpt_cnt > 0) { + ret = gpt_validate_l1_params(l1_mem_base, l1_mem_size, + l1_gpt_cnt); + if (ret < 0) { + return ret; + } + + /* Set up parameters for L1 table generation. */ + gpt_l1_tbl = l1_mem_base; + gpt_next_l1_tbl_idx = 0U; + } + + INFO("[GPT] Boot Configuration\n"); + INFO(" PPS/T: 0x%x/%u\n", gpt_config.pps, gpt_config.t); + INFO(" PGS/P: 0x%x/%u\n", gpt_config.pgs, gpt_config.p); + INFO(" L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL); + INFO(" PAS count: 0x%x\n", pas_count); + INFO(" L0 base: 0x%lx\n", gpt_config.plat_gpt_l0_base); + + /* Generate the tables in memory. */ + for (unsigned int idx = 0U; idx < pas_count; idx++) { + INFO("[GPT] PAS[%u]: base 0x%lx, size 0x%lx, GPI 0x%x, type 0x%x\n", + idx, pas_regions[idx].base_pa, pas_regions[idx].size, + GPT_PAS_ATTR_GPI(pas_regions[idx].attrs), + GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs)); + + /* Check if a block or table descriptor is required */ + if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) == + GPT_PAS_ATTR_MAP_TYPE_BLOCK) { + gpt_generate_l0_blk_desc(&pas_regions[idx]); + + } else { + gpt_generate_l0_tbl_desc(&pas_regions[idx]); + } + } + + /* Flush modified L0 tables. */ + flush_l0_for_pas_array(pas_regions, pas_count); + + /* Flush L1 tables if needed. 
*/ + if (l1_gpt_cnt > 0) { + flush_dcache_range(l1_mem_base, + GPT_L1_TABLE_SIZE(gpt_config.p) * + l1_gpt_cnt); + } + + /* Make sure that all the entries are written to the memory. */ + dsbishst(); + + return 0; +} + +/* + * Public API to initialize the runtime gpt_config structure based on the values + * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization + * typically happens in a bootloader stage prior to setting up the EL3 runtime + * environment for the granule transition service so this function detects the + * initialization from a previous stage. Granule protection checks must be + * enabled already or this function will return an error. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_runtime_init(void) +{ + u_register_t reg; + + /* Ensure that MMU and caches are enabled. */ + assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U); + + /* Ensure GPC are already enabled. */ + if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0U) { + ERROR("[GPT] Granule protection checks are not enabled!\n"); + return -EPERM; + } + + /* + * Read the L0 table address from GPTBR, we don't need the L1 base + * address since those are included in the L0 tables as needed. + */ + reg = read_gptbr_el3(); + gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) & + GPTBR_BADDR_MASK) << + GPTBR_BADDR_VAL_SHIFT; + + /* Read GPCCR to get PGS and PPS values. 
*/ + reg = read_gpccr_el3(); + gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK; + gpt_config.t = gpt_t_lookup[gpt_config.pps]; + gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK; + gpt_config.p = gpt_p_lookup[gpt_config.pgs]; + + VERBOSE("[GPT] Runtime Configuration\n"); + VERBOSE(" PPS/T: 0x%x/%u\n", gpt_config.pps, gpt_config.t); + VERBOSE(" PGS/P: 0x%x/%u\n", gpt_config.pgs, gpt_config.p); + VERBOSE(" L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL); + VERBOSE(" L0 base: 0x%lx\n", gpt_config.plat_gpt_l0_base); + + return 0; +} + +/* + * The L1 descriptors are protected by a spinlock to ensure that multiple + * CPUs do not attempt to change the descriptors at once. In the future it + * would be better to have separate spinlocks for each L1 descriptor. + */ +static spinlock_t gpt_lock; + +/* + * Check if caller is allowed to transition a PAS. + * + * - Secure world caller can only request S <-> NS transitions on a + * granule that is already in either S or NS PAS. + * + * - Realm world caller can only request R <-> NS transitions on a + * granule that is already in either R or NS PAS. + * + * Parameters + * src_sec_state Security state of the caller. + * current_gpi Current GPI of the granule. + * target_gpi Requested new GPI for the granule. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +static int gpt_check_transition_gpi(unsigned int src_sec_state, + unsigned int current_gpi, + unsigned int target_gpi) +{ + unsigned int check_gpi; + + /* Cannot transition a granule to the state it is already in. */ + if (current_gpi == target_gpi) { + return -EINVAL; + } + + /* Check security state, only secure and realm can transition. */ + if (src_sec_state == SMC_FROM_REALM) { + check_gpi = GPT_GPI_REALM; + } else if (src_sec_state == SMC_FROM_SECURE) { + check_gpi = GPT_GPI_SECURE; + } else { + return -EINVAL; + } + + /* Make sure security state is allowed to make the transition. 
*/ + if ((target_gpi != check_gpi) && (target_gpi != GPT_GPI_NS)) { + return -EINVAL; + } + if ((current_gpi != check_gpi) && (current_gpi != GPT_GPI_NS)) { + return -EINVAL; + } + + return 0; +} + +/* + * This function is the core of the granule transition service. When a granule + * transition request occurs it is routed to this function where the request is + * validated then fulfilled if possible. + * + * TODO: implement support for transitioning multiple granules at once. + * + * Parameters + * base Base address of the region to transition, must be + * aligned to granule size. + * size Size of region to transition, must be aligned to granule + * size. + * src_sec_state Security state of the caller. + * target_pas Target PAS of the specified memory region. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_transition_pas(uint64_t base, size_t size, unsigned int src_sec_state, + unsigned int target_pas) +{ + int idx; + unsigned int gpi_shift; + unsigned int gpi; + uint64_t gpt_l0_desc; + uint64_t gpt_l1_desc; + uint64_t *gpt_l1_addr; + uint64_t *gpt_l0_base; + + /* Ensure that the tables have been set up before taking requests. */ + assert(gpt_config.plat_gpt_l0_base != 0U); + + /* Check for address range overflow. */ + if ((ULONG_MAX - base) < size) { + VERBOSE("[GPT] Transition request address overflow!\n"); + VERBOSE(" Base=0x%llx\n", base); + VERBOSE(" Size=0x%lx\n", size); + return -EINVAL; + } + + /* Make sure base and size are valid. */ + if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0U) || + ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0U) || + (size == 0U) || + ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) { + VERBOSE("[GPT] Invalid granule transition address range!\n"); + VERBOSE(" Base=0x%llx\n", base); + VERBOSE(" Size=0x%lx\n", size); + return -EINVAL; + } + + /* See if this is a single granule transition or a range of granules. 
*/ + if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) { + /* + * TODO: Add support for transitioning multiple granules with a + * single call to this function. + */ + panic(); + } + + /* Get the L0 descriptor and make sure it is for a table. */ + gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base; + gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)]; + if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) { + VERBOSE("[GPT] Granule is not covered by a table descriptor!\n"); + VERBOSE(" Base=0x%llx\n", base); + return -EINVAL; + } + + /* Get the table index and GPI shift from PA. */ + gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc); + idx = GPT_L1_IDX(gpt_config.p, base); + gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2; + + /* + * Access to L1 tables is controlled by a global lock to ensure + * that no more than one CPU is allowed to make changes at any + * given time. + */ + spin_lock(&gpt_lock); + gpt_l1_desc = gpt_l1_addr[idx]; + gpi = (gpt_l1_desc >> gpi_shift) & GPT_L1_GRAN_DESC_GPI_MASK; + + /* Make sure caller state and source/target PAS are allowed. */ + if (gpt_check_transition_gpi(src_sec_state, gpi, target_pas) < 0) { + spin_unlock(&gpt_lock); + VERBOSE("[GPT] Invalid caller state and PAS combo!\n"); + VERBOSE(" Caller: %u, Current GPI: %u, Target GPI: %u\n", + src_sec_state, gpi, target_pas); + return -EPERM; + } + + /* Clear existing GPI encoding and transition granule. */ + gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift); + gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift); + gpt_l1_addr[idx] = gpt_l1_desc; + + /* Ensure that the write operation happens before the unlock. */ + dmbishst(); + + /* Unlock access to the L1 tables. */ + spin_unlock(&gpt_lock); + + /* Cache maintenance. 
*/ + clean_dcache_range((uintptr_t)&gpt_l1_addr[idx], + sizeof(uint64_t)); + gpt_tlbi_by_pa(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p)); + dsbishst(); + + VERBOSE("[GPT] Granule 0x%llx, GPI 0x%x->0x%x\n", base, gpi, + target_pas); + + return 0; +} diff --git a/lib/gpt/gpt.mk b/lib/gpt_rme/gpt_rme.mk similarity index 61% rename from lib/gpt/gpt.mk rename to lib/gpt_rme/gpt_rme.mk index 611e50457..60176f4e1 100644 --- a/lib/gpt/gpt.mk +++ b/lib/gpt_rme/gpt_rme.mk @@ -4,5 +4,5 @@ # SPDX-License-Identifier: BSD-3-Clause # -GPT_LIB_SRCS := $(addprefix lib/gpt/, \ - gpt_core.c) +GPT_LIB_SRCS := $(addprefix lib/gpt_rme/, \ + gpt_rme.c) diff --git a/lib/gpt_rme/gpt_rme_private.h b/lib/gpt_rme/gpt_rme_private.h new file mode 100644 index 000000000..5770bf7d6 --- /dev/null +++ b/lib/gpt_rme/gpt_rme_private.h @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef GPT_RME_PRIVATE_H +#define GPT_RME_PRIVATE_H + +#include +#include +#include + +/******************************************************************************/ +/* GPT descriptor definitions */ +/******************************************************************************/ + +/* GPT level 0 descriptor bit definitions. */ +#define GPT_L0_TYPE_MASK UL(0xF) +#define GPT_L0_TYPE_SHIFT U(0) + +/* For now, we don't support contiguous descriptors, only table and block. */ +#define GPT_L0_TYPE_TBL_DESC UL(0x3) +#define GPT_L0_TYPE_BLK_DESC UL(0x1) + +#define GPT_L0_TBL_DESC_L1ADDR_MASK UL(0xFFFFFFFFFF) +#define GPT_L0_TBL_DESC_L1ADDR_SHIFT U(12) + +#define GPT_L0_BLK_DESC_GPI_MASK UL(0xF) +#define GPT_L0_BLK_DESC_GPI_SHIFT U(4) + +/* GPT level 1 descriptor bit definitions */ +#define GPT_L1_GRAN_DESC_GPI_MASK UL(0xF) + +/* + * This macro fills out every GPI entry in a granules descriptor to the same + * value. 
+ */ +#define GPT_BUILD_L1_DESC(_gpi) (((uint64_t)(_gpi) << 4*0) | \ + ((uint64_t)(_gpi) << 4*1) | \ + ((uint64_t)(_gpi) << 4*2) | \ + ((uint64_t)(_gpi) << 4*3) | \ + ((uint64_t)(_gpi) << 4*4) | \ + ((uint64_t)(_gpi) << 4*5) | \ + ((uint64_t)(_gpi) << 4*6) | \ + ((uint64_t)(_gpi) << 4*7) | \ + ((uint64_t)(_gpi) << 4*8) | \ + ((uint64_t)(_gpi) << 4*9) | \ + ((uint64_t)(_gpi) << 4*10) | \ + ((uint64_t)(_gpi) << 4*11) | \ + ((uint64_t)(_gpi) << 4*12) | \ + ((uint64_t)(_gpi) << 4*13) | \ + ((uint64_t)(_gpi) << 4*14) | \ + ((uint64_t)(_gpi) << 4*15)) + +/******************************************************************************/ +/* GPT platform configuration */ +/******************************************************************************/ + +/* This value comes from GPCCR_EL3 so no externally supplied definition. */ +#define GPT_L0GPTSZ ((unsigned int)((read_gpccr_el3() >> \ + GPCCR_L0GPTSZ_SHIFT) & GPCCR_L0GPTSZ_MASK)) + +/* The "S" value is directly related to L0GPTSZ */ +#define GPT_S_VAL (GPT_L0GPTSZ + 30U) + +/* + * Map PPS values to T values. + * + * PPS Size T + * 0b000 4GB 32 + * 0b001 64GB 36 + * 0b010 1TB 40 + * 0b011 4TB 42 + * 0b100 16TB 44 + * 0b101 256TB 48 + * 0b110 4PB 52 + * + * See section 15.1.27 of the RME specification. + */ +typedef enum { + PPS_4GB_T = 32U, + PPS_64GB_T = 36U, + PPS_1TB_T = 40U, + PPS_4TB_T = 42U, + PPS_16TB_T = 44U, + PPS_256TB_T = 48U, + PPS_4PB_T = 52U +} gpt_t_val_e; + +/* + * Map PGS values to P values. + * + * PGS Size P + * 0b00 4KB 12 + * 0b10 16KB 14 + * 0b01 64KB 16 + * + * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB, this is not a typo. + * + * See section 15.1.27 of the RME specification. + */ +typedef enum { + PGS_4KB_P = 12U, + PGS_16KB_P = 14U, + PGS_64KB_P = 16U +} gpt_p_val_e; + +/* Max valid value for PGS. */ +#define GPT_PGS_MAX (2U) + +/* Max valid value for PPS. 
*/ +#define GPT_PPS_MAX (6U) + +/******************************************************************************/ +/* L0 address attribute macros */ +/******************************************************************************/ + +/* + * If S is greater than or equal to T then there is a single L0 region covering + * the entire protected space so there is no L0 index, so the width (and the + * derivative mask value) are both zero. If we don't specifically handle this + * special case we'll get a negative width value which does not make sense and + * could cause a lot of problems. + */ +#define GPT_L0_IDX_WIDTH(_t) (((_t) > GPT_S_VAL) ? \ + ((_t) - GPT_S_VAL) : (0U)) + +/* Bit shift for the L0 index field in a PA. */ +#define GPT_L0_IDX_SHIFT (GPT_S_VAL) + +/* Mask for the L0 index field, must be shifted. */ +#define GPT_L0_IDX_MASK(_t) (0xFFFFFFFFFFFFFFFFUL >> \ + (64U - (GPT_L0_IDX_WIDTH(_t)))) + +/* Total number of L0 regions. */ +#define GPT_L0_REGION_COUNT(_t) ((GPT_L0_IDX_MASK(_t)) + 1U) + +/* Total size of each GPT L0 region in bytes. */ +#define GPT_L0_REGION_SIZE (1UL << (GPT_L0_IDX_SHIFT)) + +/* Total size in bytes of the whole L0 table. */ +#define GPT_L0_TABLE_SIZE(_t) ((GPT_L0_REGION_COUNT(_t)) << 3U) + +/******************************************************************************/ +/* L1 address attribute macros */ +/******************************************************************************/ + +/* Width of the L1 index field. */ +#define GPT_L1_IDX_WIDTH(_p) ((GPT_S_VAL - 1U) - ((_p) + 3U)) + +/* Bit shift for the L1 index field. */ +#define GPT_L1_IDX_SHIFT(_p) ((_p) + 4U) + +/* Mask for the L1 index field, must be shifted. */ +#define GPT_L1_IDX_MASK(_p) (0xFFFFFFFFFFFFFFFFUL >> \ + (64U - (GPT_L1_IDX_WIDTH(_p)))) + +/* Bit shift for the index of the L1 GPI in a PA. */ +#define GPT_L1_GPI_IDX_SHIFT(_p) (_p) + +/* Mask for the index of the L1 GPI in a PA. */ +#define GPT_L1_GPI_IDX_MASK (0xF) + +/* Total number of entries in each L1 table. 
*/ +#define GPT_L1_ENTRY_COUNT(_p) ((GPT_L1_IDX_MASK(_p)) + 1U) + +/* Total size in bytes of each L1 table. */ +#define GPT_L1_TABLE_SIZE(_p) ((GPT_L1_ENTRY_COUNT(_p)) << 3U) + +/******************************************************************************/ +/* General helper macros */ +/******************************************************************************/ + +/* Protected space actual size in bytes. */ +#define GPT_PPS_ACTUAL_SIZE(_t) (1UL << (_t)) + +/* Granule actual size in bytes. */ +#define GPT_PGS_ACTUAL_SIZE(_p) (1UL << (_p)) + +/* L0 GPT region size in bytes. */ +#define GPT_L0GPTSZ_ACTUAL_SIZE (1UL << GPT_S_VAL) + +/* Get the index of the L0 entry from a physical address. */ +#define GPT_L0_IDX(_pa) ((_pa) >> GPT_L0_IDX_SHIFT) + +/* + * This definition is used to determine if a physical address lies on an L0 + * region boundary. + */ +#define GPT_IS_L0_ALIGNED(_pa) (((_pa) & (GPT_L0_REGION_SIZE - U(1))) == U(0)) + +/* Get the type field from an L0 descriptor. */ +#define GPT_L0_TYPE(_desc) (((_desc) >> GPT_L0_TYPE_SHIFT) & \ + GPT_L0_TYPE_MASK) + +/* Create an L0 block descriptor. */ +#define GPT_L0_BLK_DESC(_gpi) (GPT_L0_TYPE_BLK_DESC | \ + (((_gpi) & GPT_L0_BLK_DESC_GPI_MASK) << \ + GPT_L0_BLK_DESC_GPI_SHIFT)) + +/* Create an L0 table descriptor with an L1 table address. */ +#define GPT_L0_TBL_DESC(_pa) (GPT_L0_TYPE_TBL_DESC | ((uint64_t)(_pa) & \ + (GPT_L0_TBL_DESC_L1ADDR_MASK << \ + GPT_L0_TBL_DESC_L1ADDR_SHIFT))) + +/* Get the GPI from an L0 block descriptor. */ +#define GPT_L0_BLKD_GPI(_desc) (((_desc) >> GPT_L0_BLK_DESC_GPI_SHIFT) & \ + GPT_L0_BLK_DESC_GPI_MASK) + +/* Get the L1 address from an L0 table descriptor. */ +#define GPT_L0_TBLD_ADDR(_desc) ((uint64_t *)(((_desc) & \ + (GPT_L0_TBL_DESC_L1ADDR_MASK << \ + GPT_L0_TBL_DESC_L1ADDR_SHIFT)))) + +/* Get the index into the L1 table from a physical address. 
*/ +#define GPT_L1_IDX(_p, _pa) (((_pa) >> GPT_L1_IDX_SHIFT(_p)) & \ + GPT_L1_IDX_MASK(_p)) + +/* Get the index of the GPI within an L1 table entry from a physical address. */ +#define GPT_L1_GPI_IDX(_p, _pa) (((_pa) >> GPT_L1_GPI_IDX_SHIFT(_p)) & \ + GPT_L1_GPI_IDX_MASK) + +/* Determine if an address is granule-aligned. */ +#define GPT_IS_L1_ALIGNED(_p, _pa) (((_pa) & (GPT_PGS_ACTUAL_SIZE(_p) - U(1))) \ + == U(0)) + +#endif /* GPT_RME_PRIVATE_H */ diff --git a/plat/arm/common/arm_bl2_setup.c b/plat/arm/common/arm_bl2_setup.c index ef372068a..2871b1bf0 100644 --- a/plat/arm/common/arm_bl2_setup.c +++ b/plat/arm/common/arm_bl2_setup.c @@ -18,12 +18,16 @@ #include #include #include -#include +#if ENABLE_RME +#include +#endif /* ENABLE_RME */ #ifdef SPD_opteed #include #endif #include +#if ENABLE_RME #include +#endif /* ENABLE_RME */ #include #include @@ -130,6 +134,7 @@ void bl2_platform_setup(void) } #if ENABLE_RME + static void arm_bl2_plat_gpt_setup(void) { /* @@ -137,32 +142,38 @@ static void arm_bl2_plat_gpt_setup(void) * the layout, so the array cannot be constant. */ pas_region_t pas_regions[] = { - ARM_PAS_GPI_ANY, ARM_PAS_KERNEL, - ARM_PAS_TZC, + ARM_PAS_SECURE, ARM_PAS_REALM, ARM_PAS_EL3_DRAM, ARM_PAS_GPTS }; - gpt_init_params_t gpt_params = { - PLATFORM_PGS, - PLATFORM_PPS, - PLATFORM_L0GPTSZ, - pas_regions, - (unsigned int)(sizeof(pas_regions)/sizeof(pas_region_t)), - ARM_L0_GPT_ADDR_BASE, ARM_L0_GPT_SIZE, - ARM_L1_GPT_ADDR_BASE, ARM_L1_GPT_SIZE - }; - - /* Initialise the global granule tables */ - INFO("Enabling Granule Protection Checks\n"); - if (gpt_init(&gpt_params) < 0) { + /* Initialize entire protected space to GPT_GPI_ANY. */ + if (gpt_init_l0_tables(GPCCR_PPS_4GB, ARM_L0_GPT_ADDR_BASE, + ARM_L0_GPT_SIZE) < 0) { + ERROR("gpt_init_l0_tables() failed!\n"); panic(); } - gpt_enable(); + /* Carve out defined PAS ranges. 
*/ + if (gpt_init_pas_l1_tables(GPCCR_PGS_4K, + ARM_L1_GPT_ADDR_BASE, + ARM_L1_GPT_SIZE, + pas_regions, + (unsigned int)(sizeof(pas_regions) / + sizeof(pas_region_t))) < 0) { + ERROR("gpt_init_pas_l1_tables() failed!\n"); + panic(); + } + + INFO("Enabling Granule Protection Checks\n"); + if (gpt_enable() < 0) { + ERROR("gpt_enable() failed!\n"); + panic(); + } } + #endif /* ENABLE_RME */ /******************************************************************************* @@ -201,9 +212,6 @@ void arm_bl2_plat_arch_setup(void) #if ENABLE_RME /* Initialise the secure environment */ plat_arm_security_setup(); - - /* Initialise and enable Granule Protection */ - arm_bl2_plat_gpt_setup(); #endif setup_page_tables(bl_regions, plat_arm_get_mmap()); @@ -212,6 +220,9 @@ void arm_bl2_plat_arch_setup(void) /* BL2 runs in EL3 when RME enabled. */ assert(get_armv9_2_feat_rme_support() != 0U); enable_mmu_el3(0); + + /* Initialise and enable granule protection after MMU. */ + arm_bl2_plat_gpt_setup(); #else enable_mmu_el1(0); #endif diff --git a/plat/arm/common/arm_bl31_setup.c b/plat/arm/common/arm_bl31_setup.c index d131bb95b..6472590f3 100644 --- a/plat/arm/common/arm_bl31_setup.c +++ b/plat/arm/common/arm_bl31_setup.c @@ -13,10 +13,11 @@ #include #include #include -#include +#if ENABLE_RME +#include +#endif #include #include -#include #include #include #include @@ -235,28 +236,6 @@ void __init arm_bl31_early_platform_setup(void *from_bl2, uintptr_t soc_fw_confi */ bl33_image_ep_info.args.arg0 = (u_register_t)ARM_DRAM1_BASE; #endif - -#if ENABLE_RME - /* - * Initialise Granule Protection library and enable GPC - * for the primary processor. The tables were initialised - * in BL2, so there is no need to provide any PAS here. - */ - gpt_init_params_t gpt_params = { - PLATFORM_PGS, - PLATFORM_PPS, - PLATFORM_L0GPTSZ, - NULL, - 0U, - ARM_L0_GPT_ADDR_BASE, ARM_L0_GPT_SIZE, - ARM_L1_GPT_ADDR_BASE, ARM_L1_GPT_SIZE - }; - - /* Initialise the global granule tables. 
*/ - if (gpt_init(&gpt_params) < 0) { - panic(); - } -#endif /* ENABLE_RME */ } void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, @@ -430,6 +409,19 @@ void __init arm_bl31_plat_arch_setup(void) enable_mmu_el3(0); +#if ENABLE_RME + /* + * Initialise Granule Protection library and enable GPC for the primary + * processor. The tables have already been initialized by a previous BL + * stage, so there is no need to provide any PAS here. This function + * sets up pointers to those tables. + */ + if (gpt_runtime_init() < 0) { + ERROR("gpt_runtime_init() failed!\n"); + panic(); + } +#endif /* ENABLE_RME */ + arm_setup_romlib(); } diff --git a/services/std_svc/rmmd/rmmd_main.c b/services/std_svc/rmmd/rmmd_main.c index 26a5b8464..dacd15087 100644 --- a/services/std_svc/rmmd/rmmd_main.c +++ b/services/std_svc/rmmd/rmmd_main.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include @@ -296,12 +296,18 @@ static int gtsi_transition_granule(uint64_t pa, { int ret; - ret = gpt_transition_pas(pa, src_sec_state, target_pas); + ret = gpt_transition_pas(pa, PAGE_SIZE_4KB, src_sec_state, target_pas); /* Convert TF-A error codes into GTSI error codes */ if (ret == -EINVAL) { + ERROR("[GTSI] Transition failed: invalid %s\n", "address"); + ERROR(" PA: 0x%llx, SRC: %d, PAS: %d\n", pa, + src_sec_state, target_pas); ret = GRAN_TRANS_RET_BAD_ADDR; } else if (ret == -EPERM) { + ERROR("[GTSI] Transition failed: invalid %s\n", "caller/PAS"); + ERROR(" PA: 0x%llx, SRC: %d, PAS: %d\n", pa, + src_sec_state, target_pas); ret = GRAN_TRANS_RET_BAD_PAS; } @@ -328,12 +334,10 @@ uint64_t rmmd_gtsi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, switch (smc_fid) { case SMC_ASC_MARK_REALM: SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM, - GPI_REALM)); - break; + GPT_GPI_REALM)); case SMC_ASC_MARK_NONSECURE: SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM, - GPI_NS)); - break; + GPT_GPI_NS)); default: WARN("RMM: Unsupported GTF call 
0x%08x\n", smc_fid); SMC_RET1(handle, SMC_UNK);