refactor(gpt): productize and refactor GPT library

This patch updates and refactors the GPT library and fixes bugs.

- Support all combinations of PGS, PPS, and L0GPTSZ parameters.
- PPS and PGS are set at runtime, L0GPTSZ is read from GPCCR_EL3.
- Use compiler definitions to simplify code.
- Rename functions to better suit their intended uses.
- Enable the MMU before GPT APIs are called.
- Add comments to make function usage more clear in GPT library.
- Added _rme suffix to file names to differentiate better from the
  GPT file system code.
- Renamed gpt_defs.h to gpt_rme_private.h to better separate private
  and public code.
- Renamed gpt_core.c to gpt_rme.c to better conform to TF-A precedent.

Signed-off-by: John Powell <john.powell@arm.com>
Change-Id: I4cbb23b0f81e697baa9fb23ba458aa3f7d1ed919
This commit is contained in:
johpow01 2021-06-16 17:57:28 -05:00
parent 07e96d1d29
commit f19dc624a1
16 changed files with 1762 additions and 1142 deletions

View File

@ -17,7 +17,7 @@ endif
ifeq (${ENABLE_RME},1)
# Using RME, run BL2 at EL3
include lib/gpt/gpt.mk
include lib/gpt_rme/gpt_rme.mk
BL2_SOURCES += bl2/${ARCH}/bl2_rme_entrypoint.S \
bl2/${ARCH}/bl2_el3_exceptions.S \

View File

@ -172,14 +172,6 @@ func bl31_warm_entrypoint
_exception_vectors=runtime_exceptions \
_pie_fixup_size=0
#if ENABLE_RME
/*
* Initialise and enable Granule Protection
* before enabling any stage of translation.
*/
bl gpt_enable
#endif
/*
* We're about to enable MMU and participate in PSCI state coordination.
*
@ -203,6 +195,19 @@ func bl31_warm_entrypoint
#endif
bl bl31_plat_enable_mmu
#if ENABLE_RME
/*
* At warm boot GPT data structures have already been initialized in RAM
* but the sysregs for this CPU need to be initialized. Note that the GPT
* accesses are controlled attributes in GPCCR and do not depend on the
* SCR_EL3.C bit.
*/
bl gpt_enable
cbz x0, 1f
no_ret plat_panic_handler
1:
#endif
#if ENABLE_PAUTH
/* --------------------------------------------------------------------
* Program APIAKey_EL1 and enable pointer authentication

View File

@ -112,7 +112,7 @@ BL31_SOURCES += services/std_svc/pci_svc.c
endif
ifeq (${ENABLE_RME},1)
include lib/gpt/gpt.mk
include lib/gpt_rme/gpt_rme.mk
BL31_SOURCES += ${GPT_LIB_SRCS} \
${RMMD_SOURCES}

View File

@ -1105,87 +1105,9 @@
/*******************************************************************************
* Realm management extension register definitions
******************************************************************************/
/* GPCCR_EL3 definitions */
#define GPCCR_EL3 S3_6_C2_C1_6
/* Least significant address bits protected by each entry in level 0 GPT */
#define GPCCR_L0GPTSZ_SHIFT U(20)
#define GPCCR_L0GPTSZ_MASK U(0xF)
#define GPCCR_L0GPTSZ_30BITS U(0x0)
#define GPCCR_L0GPTSZ_34BITS U(0x4)
#define GPCCR_L0GPTSZ_36BITS U(0x6)
#define GPCCR_L0GPTSZ_39BITS U(0x9)
#define SET_GPCCR_L0GPTSZ(x) \
((x & GPCCR_L0GPTSZ_MASK) << GPCCR_L0GPTSZ_SHIFT)
/* Granule protection check priority bit definitions */
#define GPCCR_GPCP_SHIFT U(17)
/* Shift by GPCCR_GPCP_SHIFT: GPCCR_EL3_GPCP_SHIFT is not defined anywhere. */
#define GPCCR_GPCP_BIT (ULL(1) << GPCCR_GPCP_SHIFT)
/* Granule protection check bit definitions */
#define GPCCR_GPC_SHIFT U(16)
#define GPCCR_GPC_BIT (ULL(1) << GPCCR_GPC_SHIFT)
/* Physical granule size bit definitions */
#define GPCCR_PGS_SHIFT U(14)
#define GPCCR_PGS_MASK U(0x3)
#define GPCCR_PGS_4K U(0x0)
#define GPCCR_PGS_16K U(0x2)
#define GPCCR_PGS_64K U(0x1)
#define SET_GPCCR_PGS(x) \
((x & GPCCR_PGS_MASK) << GPCCR_PGS_SHIFT)
/* GPT fetch shareability attribute bit definitions */
#define GPCCR_SH_SHIFT U(12)
#define GPCCR_SH_MASK U(0x3)
#define GPCCR_SH_NS U(0x0)
#define GPCCR_SH_OS U(0x2)
#define GPCCR_SH_IS U(0x3)
#define SET_GPCCR_SH(x) \
((x & GPCCR_SH_MASK) << GPCCR_SH_SHIFT)
/* GPT fetch outer cacheability attribute bit definitions */
#define GPCCR_ORGN_SHIFT U(10)
#define GPCCR_ORGN_MASK U(0x3)
#define GPCCR_ORGN_NC U(0x0)
#define GPCCR_ORGN_WB_RA_WA U(0x1)
#define GPCCR_ORGN_WT_RA_NWA U(0x2)
#define GPCCR_ORGN_WB_RA_NWA U(0x3)
#define SET_GPCCR_ORGN(x) \
((x & GPCCR_ORGN_MASK) << GPCCR_ORGN_SHIFT)
/* GPT fetch inner cacheability attribute bit definitions */
#define GPCCR_IRGN_SHIFT U(8)
#define GPCCR_IRGN_MASK U(0x3)
#define GPCCR_IRGN_NC U(0x0)
#define GPCCR_IRGN_WB_RA_WA U(0x1)
#define GPCCR_IRGN_WT_RA_NWA U(0x2)
#define GPCCR_IRGN_WB_RA_NWA U(0x3)
#define SET_GPCCR_IRGN(x) \
((x & GPCCR_IRGN_MASK) << GPCCR_IRGN_SHIFT)
/* Protected physical address size bit definitions */
#define GPCCR_PPS_SHIFT U(0)
#define GPCCR_PPS_MASK U(0x7)
#define GPCCR_PPS_4GB U(0x0)
#define GPCCR_PPS_64GB U(0x1)
#define GPCCR_PPS_1TB U(0x2)
#define GPCCR_PPS_4TB U(0x3)
#define GPCCR_PPS_16TB U(0x4)
#define GPCCR_PPS_256TB U(0x5)
#define GPCCR_PPS_4PB U(0x6)
#define SET_GPCCR_PPS(x) \
((x & GPCCR_PPS_MASK) << GPCCR_PPS_SHIFT)
/* GPTBR_EL3 definitions */
#define GPTBR_EL3 S3_6_C2_C1_4
/* Base Address for the GPT bit definitions */
#define GPTBR_BADDR_SHIFT U(0)
#define GPTBR_BADDR_VAL_SHIFT U(12)
#define GPTBR_BADDR_MASK ULL(0xffffffffff)
/*******************************************************************************
* RAS system registers
******************************************************************************/

View File

@ -1,86 +0,0 @@
/*
* Copyright (c) 2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef GPT_H
#define GPT_H
#include <stdint.h>
#include <arch.h>
#include "gpt_defs.h"
#define GPT_DESC_ATTRS(_type, _gpi) \
((((_type) & PAS_REG_DESC_TYPE_MASK) \
<< PAS_REG_DESC_TYPE_SHIFT) | \
(((_gpi) & PAS_REG_GPI_MASK) \
<< PAS_REG_GPI_SHIFT))
/*
* Macro to create a GPT entry for this PAS range either as a L0 block
* descriptor or L1 table descriptor depending upon the size of the range.
*/
#define MAP_GPT_REGION(_pa, _sz, _gpi) \
{ \
.base_pa = (_pa), \
.size = (_sz), \
.attrs = GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_ANY, (_gpi)), \
}
/*
* Special macro to create a L1 table descriptor at L0 for a 1GB region as
* opposed to creating a block mapping by default.
*/
#define MAP_GPT_REGION_TBL(_pa, _sz, _gpi) \
{ \
.base_pa = (_pa), \
.size = (_sz), \
.attrs = GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_TBL, (_gpi)), \
}
/*
* Structure for specifying a Granule range and its properties
*/
typedef struct pas_region {
unsigned long long base_pa; /**< Base address for PAS. */
size_t size; /**< Size of the PAS. */
unsigned int attrs; /**< PAS GPI and entry type. */
} pas_region_t;
/*
 * Structure to initialise the Granule Protection Tables.
 */
typedef struct gpt_init_params {
	unsigned int pgs; /**< Address Width of Physical Granule Size. */
	unsigned int pps; /**< Protected Physical Address Size. */
	unsigned int l0gptsz; /**< Granule size on L0 table entry. */
	pas_region_t *pas_regions; /**< PAS regions to protect. */
	unsigned int pas_count; /**< Number of PAS regions to initialise. */
	uintptr_t l0_mem_base; /**< L0 Table base address. */
	size_t l0_mem_size; /**< Size of memory reserved for L0 tables. */
	uintptr_t l1_mem_base; /**< L1 Table base address. */
	size_t l1_mem_size; /**< Size of memory reserved for L1 tables. */
} gpt_init_params_t;
/** @brief Initialise the Granule Protection tables.
*/
int gpt_init(gpt_init_params_t *params);
/** @brief Enable the Granule Protection Checks.
*/
void gpt_enable(void);
/** @brief Disable the Granule Protection Checks.
*/
void gpt_disable(void);
/** @brief Transition a granule between security states.
*/
int gpt_transition_pas(uint64_t pa,
unsigned int src_sec_state,
unsigned int target_pas);
#endif /* GPT_H */

View File

@ -1,76 +0,0 @@
/*
* Copyright (c) 2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef GPT_DEFS_H
#define GPT_DEFS_H
#include <arch.h>
#include <lib/utils_def.h>
#include "gpt.h"
/* GPI values */
#define GPI_NO_ACCESS U(0x0)
#define GPI_SECURE U(0x8)
#define GPI_NS U(0x9)
#define GPI_ROOT U(0xa)
#define GPI_REALM U(0xb)
#define GPI_ANY U(0xf)
#define GPI_VAL_MASK ULL(0xf)
/* GPT descriptor bit definitions */
#define GPT_L1_INDEX_MASK ULL(0xf)
#define GPT_L1_INDEX_SHIFT ULL(0x0)
#define GPT_TBL_DESC ULL(0x3)
#define GPT_BLK_DESC ULL(0x1)
#define GPT_TBL_DESC_ADDR_SHIFT ULL(12)
#define GPT_TBL_DESC_ADDR_MASK (((ULL(1) << \
(51 - GPT_TBL_DESC_ADDR_SHIFT)) - 1) \
<< GPT_TBL_DESC_ADDR_SHIFT)
#define GPT_BLOCK_DESC_GPI_VAL_SHIFT ULL(4)
/* Each descriptor is 8 bytes long. */
#define GPT_DESC_SIZE ULL(8)
#define PPS_MAX_VAL PSTCR_EL3_PPS_4PB
#define PPS_NUM_1GB_ENTRIES ULL(1024)
#define PGS_4K_1GB_L1_TABLE_SZ (U(2) << 17)
/* 2 << LOG2_8K = Bytes in 8K */
#define LOG2_8K U(13)
#define GPT_L1_SIZE ULL(0x40000) /* 256K */
#define SZ_1G (ULL(0x1) << 30) /* 1GB */
#define GPT_MIN_PGS_SHIFT U(12) /* 4K */
#define L1_GPT_INDEX_MASK U(0x3fffffff)
#define GPT_GRAN_DESC_NUM_GPIS U(4)
#define PAS_REG_GPI_SHIFT U(0)
#define PAS_REG_GPI_MASK U(0xf)
/* .attrs field definitions */
#define PAS_REG_DESC_TYPE_ANY U(0)
#define PAS_REG_DESC_TYPE_BLK U(1)
#define PAS_REG_DESC_TYPE_TBL U(2)
#define PAS_REG_DESC_TYPE_SHIFT U(4)
#define PAS_REG_DESC_TYPE_MASK U(0x3)
#define PAS_REG_DESC_TYPE(_attrs) (((_attrs) \
>> PAS_REG_DESC_TYPE_SHIFT) \
& PAS_REG_DESC_TYPE_MASK)
#define PAS_REG_GPI(_attrs) (((_attrs) \
>> PAS_REG_GPI_SHIFT) \
& PAS_REG_GPI_MASK)
#define SZ_1G_MASK (SZ_1G - U(1))
#define IS_1GB_ALIGNED(addr) (((addr) & SZ_1G_MASK) == U(0))
#endif /* GPT_DEFS */

View File

@ -0,0 +1,276 @@
/*
* Copyright (c) 2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef GPT_RME_H
#define GPT_RME_H
#include <stdint.h>
#include <arch.h>
/******************************************************************************/
/* GPT helper macros and definitions */
/******************************************************************************/
/*
 * Structure for specifying a mapping range and its properties. This should not
 * be manually initialized; using the GPT_MAP_REGION_x macros is recommended
 * to avoid potential incompatibilities in the future.
 */
typedef struct pas_region {
uintptr_t base_pa; /* Base address for PAS. */
size_t size; /* Size of the PAS. */
unsigned int attrs; /* PAS GPI and entry type. */
} pas_region_t;
/* GPT GPI definitions */
#define GPT_GPI_NO_ACCESS U(0x0)
#define GPT_GPI_SECURE U(0x8)
#define GPT_GPI_NS U(0x9)
#define GPT_GPI_ROOT U(0xA)
#define GPT_GPI_REALM U(0xB)
#define GPT_GPI_ANY U(0xF)
#define GPT_GPI_VAL_MASK UL(0xF)
/* PAS attribute GPI definitions. */
#define GPT_PAS_ATTR_GPI_SHIFT U(0)
#define GPT_PAS_ATTR_GPI_MASK U(0xF)
#define GPT_PAS_ATTR_GPI(_attrs) (((_attrs) \
>> GPT_PAS_ATTR_GPI_SHIFT) \
& GPT_PAS_ATTR_GPI_MASK)
/* PAS attribute mapping type definitions */
#define GPT_PAS_ATTR_MAP_TYPE_BLOCK U(0x0)
#define GPT_PAS_ATTR_MAP_TYPE_GRANULE U(0x1)
#define GPT_PAS_ATTR_MAP_TYPE_SHIFT U(4)
#define GPT_PAS_ATTR_MAP_TYPE_MASK U(0x1)
#define GPT_PAS_ATTR_MAP_TYPE(_attrs) (((_attrs) \
>> GPT_PAS_ATTR_MAP_TYPE_SHIFT) \
& GPT_PAS_ATTR_MAP_TYPE_MASK)
/*
* Macro to initialize the attributes field in the pas_region_t structure.
* [31:5] Reserved
* [4] Mapping type (GPT_PAS_ATTR_MAP_TYPE_x definitions)
* [3:0] PAS GPI type (GPT_GPI_x definitions)
*/
#define GPT_PAS_ATTR(_type, _gpi) \
((((_type) & GPT_PAS_ATTR_MAP_TYPE_MASK) \
<< GPT_PAS_ATTR_MAP_TYPE_SHIFT) | \
(((_gpi) & GPT_PAS_ATTR_GPI_MASK) \
<< GPT_PAS_ATTR_GPI_SHIFT))
/*
* Macro to create a GPT entry for this PAS range as a block descriptor. If this
* region does not fit the requirements for a block descriptor then GPT
* initialization will fail.
*/
#define GPT_MAP_REGION_BLOCK(_pa, _sz, _gpi) \
{ \
.base_pa = (_pa), \
.size = (_sz), \
.attrs = GPT_PAS_ATTR(GPT_PAS_ATTR_MAP_TYPE_BLOCK, (_gpi)), \
}
/*
* Macro to create a GPT entry for this PAS range as a table descriptor. If this
* region does not fit the requirements for a table descriptor then GPT
* initialization will fail.
*/
#define GPT_MAP_REGION_GRANULE(_pa, _sz, _gpi) \
{ \
.base_pa = (_pa), \
.size = (_sz), \
.attrs = GPT_PAS_ATTR(GPT_PAS_ATTR_MAP_TYPE_GRANULE, (_gpi)), \
}
/******************************************************************************/
/* GPT register field definitions */
/******************************************************************************/
/*
* Least significant address bits protected by each entry in level 0 GPT. This
* field is read-only.
*/
#define GPCCR_L0GPTSZ_SHIFT U(20)
#define GPCCR_L0GPTSZ_MASK U(0xF)
typedef enum {
GPCCR_L0GPTSZ_30BITS = U(0x0),
GPCCR_L0GPTSZ_34BITS = U(0x4),
GPCCR_L0GPTSZ_36BITS = U(0x6),
GPCCR_L0GPTSZ_39BITS = U(0x9)
} gpccr_l0gptsz_e;
/* Granule protection check priority bit definitions */
#define GPCCR_GPCP_SHIFT U(17)
/* Shift by GPCCR_GPCP_SHIFT: GPCCR_EL3_GPCP_SHIFT is not defined anywhere. */
#define GPCCR_GPCP_BIT (ULL(1) << GPCCR_GPCP_SHIFT)
/* Granule protection check bit definitions */
#define GPCCR_GPC_SHIFT U(16)
#define GPCCR_GPC_BIT (ULL(1) << GPCCR_GPC_SHIFT)
/* Physical granule size bit definitions */
#define GPCCR_PGS_SHIFT U(14)
#define GPCCR_PGS_MASK U(0x3)
#define SET_GPCCR_PGS(x) (((x) & GPCCR_PGS_MASK) << GPCCR_PGS_SHIFT)
typedef enum {
GPCCR_PGS_4K = U(0x0),
GPCCR_PGS_64K = U(0x1),
GPCCR_PGS_16K = U(0x2)
} gpccr_pgs_e;
/* GPT fetch shareability attribute bit definitions */
#define GPCCR_SH_SHIFT U(12)
#define GPCCR_SH_MASK U(0x3)
#define SET_GPCCR_SH(x) (((x) & GPCCR_SH_MASK) << GPCCR_SH_SHIFT)
typedef enum {
GPCCR_SH_NS = U(0x0),
GPCCR_SH_OS = U(0x2),
GPCCR_SH_IS = U(0x3)
} gpccr_sh_e;
/* GPT fetch outer cacheability attribute bit definitions */
#define GPCCR_ORGN_SHIFT U(10)
#define GPCCR_ORGN_MASK U(0x3)
#define SET_GPCCR_ORGN(x) (((x) & GPCCR_ORGN_MASK) << GPCCR_ORGN_SHIFT)
typedef enum {
GPCCR_ORGN_NC = U(0x0),
GPCCR_ORGN_WB_RA_WA = U(0x1),
GPCCR_ORGN_WT_RA_NWA = U(0x2),
GPCCR_ORGN_WB_RA_NWA = U(0x3)
} gpccr_orgn_e;
/* GPT fetch inner cacheability attribute bit definitions */
#define GPCCR_IRGN_SHIFT U(8)
#define GPCCR_IRGN_MASK U(0x3)
#define SET_GPCCR_IRGN(x) (((x) & GPCCR_IRGN_MASK) << GPCCR_IRGN_SHIFT)
typedef enum {
GPCCR_IRGN_NC = U(0x0),
GPCCR_IRGN_WB_RA_WA = U(0x1),
GPCCR_IRGN_WT_RA_NWA = U(0x2),
GPCCR_IRGN_WB_RA_NWA = U(0x3)
} gpccr_irgn_e;
/* Protected physical address size bit definitions */
#define GPCCR_PPS_SHIFT U(0)
#define GPCCR_PPS_MASK U(0x7)
#define SET_GPCCR_PPS(x) (((x) & GPCCR_PPS_MASK) << GPCCR_PPS_SHIFT)
typedef enum {
GPCCR_PPS_4GB = U(0x0),
GPCCR_PPS_64GB = U(0x1),
GPCCR_PPS_1TB = U(0x2),
GPCCR_PPS_4TB = U(0x3),
GPCCR_PPS_16TB = U(0x4),
GPCCR_PPS_256TB = U(0x5),
GPCCR_PPS_4PB = U(0x6)
} gpccr_pps_e;
/* Base Address for the GPT bit definitions */
#define GPTBR_BADDR_SHIFT U(0)
#define GPTBR_BADDR_VAL_SHIFT U(12)
#define GPTBR_BADDR_MASK ULL(0xffffffffff)
/******************************************************************************/
/* GPT public APIs */
/******************************************************************************/
/*
* Public API that initializes the entire protected space to GPT_GPI_ANY using
* the L0 tables (block descriptors). Ideally, this function is invoked prior
* to DDR discovery and initialization. The MMU must be initialized before
* calling this function.
*
* Parameters
* pps PPS value to use for table generation
* l0_mem_base Base address of L0 tables in memory.
* l0_mem_size Total size of memory available for L0 tables.
*
* Return
* Negative Linux error code in the event of a failure, 0 for success.
*/
int gpt_init_l0_tables(gpccr_pps_e pps,
uintptr_t l0_mem_base,
size_t l0_mem_size);
/*
* Public API that carves out PAS regions from the L0 tables and builds any L1
* tables that are needed. This function ideally is run after DDR discovery and
* initialization. The L0 tables must have already been initialized to GPI_ANY
* when this function is called.
*
* Parameters
* pgs PGS value to use for table generation.
* l1_mem_base Base address of memory used for L1 tables.
* l1_mem_size Total size of memory available for L1 tables.
* *pas_regions Pointer to PAS regions structure array.
* pas_count Total number of PAS regions.
*
* Return
* Negative Linux error code in the event of a failure, 0 for success.
*/
int gpt_init_pas_l1_tables(gpccr_pgs_e pgs,
uintptr_t l1_mem_base,
size_t l1_mem_size,
pas_region_t *pas_regions,
unsigned int pas_count);
/*
* Public API to initialize the runtime gpt_config structure based on the values
* present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
* typically happens in a bootloader stage prior to setting up the EL3 runtime
* environment for the granule transition service so this function detects the
* initialization from a previous stage. Granule protection checks must be
* enabled already or this function will return an error.
*
* Return
* Negative Linux error code in the event of a failure, 0 for success.
*/
int gpt_runtime_init(void);
/*
* Public API to enable granule protection checks once the tables have all been
* initialized. This function is called at first initialization and then again
* later during warm boots of CPU cores.
*
* Return
* Negative Linux error code in the event of a failure, 0 for success.
*/
int gpt_enable(void);
/*
* Public API to disable granule protection checks.
*/
void gpt_disable(void);
/*
* This function is the core of the granule transition service. When a granule
* transition request occurs it is routed to this function where the request is
* validated then fulfilled if possible.
*
* TODO: implement support for transitioning multiple granules at once.
*
* Parameters
* base: Base address of the region to transition, must be aligned to granule
* size.
* size: Size of region to transition, must be aligned to granule size.
* src_sec_state: Security state of the caller.
* target_pas: Target PAS of the specified memory region.
*
* Return
* Negative Linux error code in the event of a failure, 0 for success.
*/
int gpt_transition_pas(uint64_t base,
size_t size,
unsigned int src_sec_state,
unsigned int target_pas);
#endif /* GPT_RME_H */

View File

@ -81,19 +81,19 @@
* - REALM DRAM: Reserved for Realm world if RME is enabled
* - AP TZC DRAM: The remaining TZC secured DRAM reserved for AP use
*
* RME enabled(64MB) RME not enabled(16MB)
* -------------------- -------------------
* | | | |
* | AP TZC (~28MB) | | AP TZC (~14MB) |
* -------------------- -------------------
* | | | |
* | REALM (32MB) | | EL3 TZC (2MB) |
* -------------------- -------------------
* | | | |
* | EL3 TZC (3MB) | | SCP TZC |
* -------------------- 0xFFFF_FFFF-------------------
* | L1 GPT + SCP TZC |
* | (~1MB) |
* RME enabled(64MB) RME not enabled(16MB)
* -------------------- -------------------
* | | | |
* | AP TZC (~28MB) | | AP TZC (~14MB) |
* -------------------- -------------------
* | | | |
* | REALM (32MB) | | EL3 TZC (2MB) |
* -------------------- -------------------
* | | | |
* | EL3 TZC (3MB) | | SCP TZC |
* -------------------- 0xFFFF_FFFF-------------------
* | L1 GPT + SCP TZC |
* | (~1MB) |
* 0xFFFF_FFFF --------------------
*/
#if ENABLE_RME
@ -252,56 +252,56 @@
INTR_PROP_DESC(ARM_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, (grp), \
GIC_INTR_CFG_EDGE)
#define ARM_MAP_SHARED_RAM MAP_REGION_FLAT( \
ARM_SHARED_RAM_BASE, \
ARM_SHARED_RAM_SIZE, \
MT_DEVICE | MT_RW | EL3_PAS)
#define ARM_MAP_SHARED_RAM MAP_REGION_FLAT( \
ARM_SHARED_RAM_BASE, \
ARM_SHARED_RAM_SIZE, \
MT_DEVICE | MT_RW | EL3_PAS)
#define ARM_MAP_NS_DRAM1 MAP_REGION_FLAT( \
ARM_NS_DRAM1_BASE, \
ARM_NS_DRAM1_SIZE, \
MT_MEMORY | MT_RW | MT_NS)
#define ARM_MAP_NS_DRAM1 MAP_REGION_FLAT( \
ARM_NS_DRAM1_BASE, \
ARM_NS_DRAM1_SIZE, \
MT_MEMORY | MT_RW | MT_NS)
#define ARM_MAP_DRAM2 MAP_REGION_FLAT( \
ARM_DRAM2_BASE, \
ARM_DRAM2_SIZE, \
MT_MEMORY | MT_RW | MT_NS)
#define ARM_MAP_DRAM2 MAP_REGION_FLAT( \
ARM_DRAM2_BASE, \
ARM_DRAM2_SIZE, \
MT_MEMORY | MT_RW | MT_NS)
#define ARM_MAP_TSP_SEC_MEM MAP_REGION_FLAT( \
TSP_SEC_MEM_BASE, \
TSP_SEC_MEM_SIZE, \
MT_MEMORY | MT_RW | MT_SECURE)
#define ARM_MAP_TSP_SEC_MEM MAP_REGION_FLAT( \
TSP_SEC_MEM_BASE, \
TSP_SEC_MEM_SIZE, \
MT_MEMORY | MT_RW | MT_SECURE)
#if ARM_BL31_IN_DRAM
#define ARM_MAP_BL31_SEC_DRAM MAP_REGION_FLAT( \
BL31_BASE, \
PLAT_ARM_MAX_BL31_SIZE, \
MT_MEMORY | MT_RW | MT_SECURE)
#define ARM_MAP_BL31_SEC_DRAM MAP_REGION_FLAT( \
BL31_BASE, \
PLAT_ARM_MAX_BL31_SIZE, \
MT_MEMORY | MT_RW | MT_SECURE)
#endif
#define ARM_MAP_EL3_TZC_DRAM MAP_REGION_FLAT( \
ARM_EL3_TZC_DRAM1_BASE, \
ARM_EL3_TZC_DRAM1_SIZE, \
MT_MEMORY | MT_RW | EL3_PAS)
#define ARM_MAP_EL3_TZC_DRAM MAP_REGION_FLAT( \
ARM_EL3_TZC_DRAM1_BASE, \
ARM_EL3_TZC_DRAM1_SIZE, \
MT_MEMORY | MT_RW | EL3_PAS)
#if defined(SPD_spmd)
#define ARM_MAP_TRUSTED_DRAM MAP_REGION_FLAT( \
PLAT_ARM_TRUSTED_DRAM_BASE, \
PLAT_ARM_TRUSTED_DRAM_SIZE, \
MT_MEMORY | MT_RW | MT_SECURE)
#define ARM_MAP_TRUSTED_DRAM MAP_REGION_FLAT( \
PLAT_ARM_TRUSTED_DRAM_BASE, \
PLAT_ARM_TRUSTED_DRAM_SIZE, \
MT_MEMORY | MT_RW | MT_SECURE)
#endif
#if ENABLE_RME
#define ARM_MAP_RMM_DRAM MAP_REGION_FLAT( \
PLAT_ARM_RMM_BASE, \
PLAT_ARM_RMM_SIZE, \
MT_MEMORY | MT_RW | MT_REALM)
#define ARM_MAP_RMM_DRAM MAP_REGION_FLAT( \
PLAT_ARM_RMM_BASE, \
PLAT_ARM_RMM_SIZE, \
MT_MEMORY | MT_RW | MT_REALM)
#define ARM_MAP_GPT_L1_DRAM MAP_REGION_FLAT( \
ARM_L1_GPT_ADDR_BASE, \
ARM_L1_GPT_SIZE, \
MT_MEMORY | MT_RW | EL3_PAS)
#define ARM_MAP_GPT_L1_DRAM MAP_REGION_FLAT( \
ARM_L1_GPT_ADDR_BASE, \
ARM_L1_GPT_SIZE, \
MT_MEMORY | MT_RW | EL3_PAS)
#endif /* ENABLE_RME */

View File

@ -6,6 +6,7 @@
#ifndef ARM_PAS_DEF_H
#define ARM_PAS_DEF_H
#include <lib/gpt_rme/gpt_rme.h>
#include <plat/arm/common/arm_def.h>
/*****************************************************************************
@ -42,12 +43,12 @@
*
* - 4KB of L0 GPT reside in TSRAM, on top of the CONFIG section.
* - ~1MB of L1 GPTs reside at the top of DRAM1 (TZC area).
* - The first 1GB region has GPI_ANY and, therefore, is not protected by
* - The first 1GB region has GPT_GPI_ANY and, therefore, is not protected by
* the GPT.
* - The DRAM TZC area is split into three regions: the L1 GPT region and
* 3MB of region below that are defined as GPI_ROOT, 32MB Realm region
* below that is defined as GPI_REALM and the rest of it is defined as
* GPI_SECURE.
* 3MB of region below that are defined as GPT_GPI_ROOT, 32MB Realm region
* below that is defined as GPT_GPI_REALM and the rest of it is defined as
* GPT_GPI_SECURE.
*/
/* TODO: This might not be the best way to map the PAS */
@ -64,32 +65,30 @@
#define ARM_PAS_3_BASE (ARM_AP_TZC_DRAM1_BASE)
#define ARM_PAS_3_SIZE (ARM_AP_TZC_DRAM1_SIZE)
#define ARM_PAS_GPI_ANY MAP_GPT_REGION(ARM_PAS_1_BASE, \
ARM_PAS_1_SIZE, \
GPI_ANY)
#define ARM_PAS_KERNEL MAP_GPT_REGION_TBL(ARM_PAS_2_BASE, \
ARM_PAS_2_SIZE, \
GPI_NS)
#define ARM_PAS_GPI_ANY MAP_GPT_REGION(ARM_PAS_1_BASE, \
ARM_PAS_1_SIZE, \
GPT_GPI_ANY)
#define ARM_PAS_KERNEL GPT_MAP_REGION_GRANULE(ARM_PAS_2_BASE, \
ARM_PAS_2_SIZE, \
GPT_GPI_NS)
#define ARM_PAS_TZC MAP_GPT_REGION_TBL(ARM_PAS_3_BASE, \
ARM_PAS_3_SIZE, \
GPI_SECURE)
#define ARM_PAS_SECURE GPT_MAP_REGION_GRANULE(ARM_PAS_3_BASE, \
ARM_PAS_3_SIZE, \
GPT_GPI_SECURE)
#define ARM_PAS_REALM MAP_GPT_REGION_TBL(ARM_REALM_BASE, \
ARM_REALM_SIZE, \
GPI_REALM)
#define ARM_PAS_REALM GPT_MAP_REGION_GRANULE(ARM_REALM_BASE, \
ARM_REALM_SIZE, \
GPT_GPI_REALM)
#define ARM_PAS_EL3_DRAM MAP_GPT_REGION_TBL(ARM_EL3_TZC_DRAM1_BASE, \
ARM_EL3_TZC_DRAM1_SIZE, \
GPI_ROOT)
#define ARM_PAS_EL3_DRAM GPT_MAP_REGION_GRANULE(ARM_EL3_TZC_DRAM1_BASE, \
ARM_EL3_TZC_DRAM1_SIZE, \
GPT_GPI_ROOT)
#define ARM_PAS_GPTS MAP_GPT_REGION_TBL(ARM_L1_GPT_ADDR_BASE, \
ARM_L1_GPT_SIZE, \
GPI_ROOT)
#define ARM_PAS_GPTS GPT_MAP_REGION_GRANULE(ARM_L1_GPT_ADDR_BASE, \
ARM_L1_GPT_SIZE, \
GPT_GPI_ROOT)
/* GPT Configuration options */
#define PLATFORM_PGS GPCCR_PGS_4K
#define PLATFORM_PPS GPCCR_PPS_4GB
#define PLATFORM_L0GPTSZ GPCCR_L0GPTSZ_30BITS
#endif /* ARM_PAS_DEF_H */

View File

@ -1,767 +0,0 @@
/*
* Copyright (c) 2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <arch.h>
#include <arch_helpers.h>
#include <lib/gpt/gpt.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#if !ENABLE_RME
#error "ENABLE_RME must be enabled to use the GPT library."
#endif
/*
 * Internal GPT configuration state. Fields mirror the platform-provided
 * gpt_init_params_t values so they remain available after initialization.
 */
typedef struct {
	uintptr_t plat_gpt_l0_base;	/* Base address of the L0 table. */
	uintptr_t plat_gpt_l1_base;	/* Base address of the L1 tables. */
	size_t plat_gpt_l0_size;	/* Memory reserved for L0 tables. */
	size_t plat_gpt_l1_size;	/* Memory reserved for L1 tables. */
	unsigned int plat_gpt_pps;	/* Protected Physical address Size. */
	unsigned int plat_gpt_pgs;	/* Physical Granule Size. */
	unsigned int plat_gpt_l0gptsz;	/* Address bits covered per L0 entry. */
} gpt_config_t;
gpt_config_t gpt_config;
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
/* Helper function that cleans the data cache only if it is enabled. */
static inline
void gpt_clean_dcache_range(uintptr_t addr, size_t size)
{
	/* Skip the maintenance operation while the data cache is off. */
	if ((read_sctlr_el3() & SCTLR_C_BIT) == 0U) {
		return;
	}
	clean_dcache_range(addr, size);
}
/* Helper function that invalidates the data cache only if it is enabled. */
static inline
void gpt_inv_dcache_range(uintptr_t addr, size_t size)
{
	/* Nothing to invalidate while the data cache is off. */
	if ((read_sctlr_el3() & SCTLR_C_BIT) == 0U) {
		return;
	}
	inv_dcache_range(addr, size);
}
#endif
typedef struct l1_gpt_attr_desc {
size_t t_sz; /** Table size */
size_t g_sz; /** Granularity size */
unsigned int p_val; /** Associated P value */
} l1_gpt_attr_desc_t;
/*
* Lookup table to find out the size in bytes of the L1 tables as well
* as the index mask, given the Width of Physical Granule Size (PGS).
* L1 tables are indexed by PA[29:p+4], being 'p' the width in bits of the
* aforementioned Physical Granule Size.
*/
static const l1_gpt_attr_desc_t l1_gpt_attr_lookup[] = {
[GPCCR_PGS_4K] = {U(1) << U(17), /* 16384B x 64bit entry = 128KB */
PAGE_SIZE_4KB, /* 4KB Granularity */
U(12)},
[GPCCR_PGS_64K] = {U(1) << U(13), /* Table size = 8KB */
PAGE_SIZE_64KB, /* 64KB Granularity */
U(16)},
[GPCCR_PGS_16K] = {U(1) << U(15), /* Table size = 32KB */
PAGE_SIZE_16KB, /* 16KB Granularity */
U(14)}
};
typedef struct l0_gpt_attr_desc {
size_t sz;
unsigned int t_val_mask;
} l0_gpt_attr_desc_t;
/*
* Lookup table to find out the size in bytes of the L0 table as well
* as the index mask, given the Protected Physical Address Size (PPS).
* L0 table is indexed by PA[t-1:30], being 't' the size in bits
* of the aforementioned Protected Physical Address Size.
*/
static const l0_gpt_attr_desc_t l0_gpt_attr_lookup[] = {
[GPCCR_PPS_4GB] = {U(1) << U(5), /* 4 x 64 bit entry = 32 bytes */
0x3}, /* Bits[31:30] */
[GPCCR_PPS_64GB] = {U(1) << U(9), /* 512 bytes */
0x3f}, /* Bits[35:30] */
[GPCCR_PPS_1TB] = {U(1) << U(13), /* 8KB */
0x3ff}, /* Bits[39:30] */
[GPCCR_PPS_4TB] = {U(1) << U(15), /* 32KB */
0xfff}, /* Bits[41:30] */
[GPCCR_PPS_16TB] = {U(1) << U(17), /* 128KB */
0x3fff}, /* Bits[43:30] */
[GPCCR_PPS_256TB] = {U(1) << U(21), /* 2MB */
0x3ffff}, /* Bits[47:30] */
[GPCCR_PPS_4PB] = {U(1) << U(25), /* 32MB */
0x3fffff}, /* Bits[51:30] */
};
/*
 * Compute the index of the L1 GPT entry covering the given PA.
 *
 * L1 tables are indexed by PA[29:p+4]: mask off everything above bit 29,
 * then drop the low 'p' + 4 bits, where 'p' is the PGS width in bits.
 */
static unsigned int get_l1_gpt_index(unsigned int pgs, uintptr_t pa)
{
	unsigned int low_bits = l1_gpt_attr_lookup[pgs].p_val + 4U;

	return ((unsigned int)(pa & L1_GPT_INDEX_MASK)) >> low_bits;
}
unsigned int plat_is_my_cpu_primary(void);
/* The granule partition tables can only be configured on BL2 */
#ifdef IMAGE_BL2
/* Global to keep track of next available index in array of L1 GPTs */
static unsigned int l1_gpt_mem_avlbl_index;
/*
 * Validate the L0 table parameters passed by the platform.
 *
 * Checks that the requested L0GPTSZ/PGS/PPS combination is supported and that
 * the memory reserved for L0 tables is suitably aligned and large enough.
 *
 * Parameters
 *   params	GPT initialization parameters provided by the platform.
 *
 * Return
 *   0 on success, negative Linux error code otherwise.
 */
static int validate_l0_gpt_params(gpt_init_params_t *params)
{
	/* Only 1GB of address space per L0 entry is allowed */
	if (params->l0gptsz != GPCCR_L0GPTSZ_30BITS) {
		WARN("Invalid L0GPTSZ %u.\n", params->l0gptsz);
		/*
		 * Reject the configuration. Previously this case only warned
		 * and fell through, unlike the PGS/PPS checks below, letting
		 * an unsupported L0GPTSZ silently misconfigure the tables.
		 */
		return -EINVAL;
	}

	/* Only 4K granule is supported for now */
	if (params->pgs != GPCCR_PGS_4K) {
		WARN("Invalid GPT PGS %u.\n", params->pgs);
		return -EINVAL;
	}

	/* Only 4GB of protected physical address space is supported for now */
	if (params->pps != GPCCR_PPS_4GB) {
		WARN("Invalid GPT PPS %u.\n", params->pps);
		return -EINVAL;
	}

	/* Check if GPT base address is aligned with the system granule */
	if (!IS_PAGE_ALIGNED(params->l0_mem_base)) {
		ERROR("Unaligned L0 GPT base address.\n");
		return -EFAULT;
	}

	/* Check if there is enough memory for L0 GPTs */
	if (params->l0_mem_size < l0_gpt_attr_lookup[params->pps].sz) {
		ERROR("Inadequate memory for L0 GPTs. ");
		ERROR("Expected 0x%lx bytes. Got 0x%lx bytes\n",
		      l0_gpt_attr_lookup[params->pps].sz,
		      params->l0_mem_size);
		return -ENOMEM;
	}

	return 0;
}
/*
* A L1 GPT is required if any one of the following conditions is true:
*
* - The base address is not 1GB aligned
* - The size of the memory region is not a multiple of 1GB
* - A L1 GPT has been explicitly requested (attrs == PAS_REG_DESC_TYPE_TBL)
*
* This function:
* - iterates over all the PAS regions to determine whether they
* will need a 2 stage look up (and therefore a L1 GPT will be required) or
* if it would be enough with a single level lookup table.
* - Updates the attr field of the PAS regions.
* - Returns the total count of L1 tables needed.
*
In the future we should validate that the PAS range does not exceed the
configured PPS (and maybe rename this function, as it is validating PAS
regions).
*/
static unsigned int update_gpt_type(pas_region_t *pas_regions,
				    unsigned int pas_region_cnt)
{
	unsigned int i;
	unsigned int l1_cnt = 0U;

	for (i = 0U; i < pas_region_cnt; i++) {
		unsigned int gpi = PAS_REG_GPI(pas_regions[i].attrs);

		if (PAS_REG_DESC_TYPE(pas_regions[i].attrs) ==
		    PAS_REG_DESC_TYPE_TBL) {
			/* An L1 GPT was explicitly requested. */
			l1_cnt++;
		} else if (!IS_1GB_ALIGNED(pas_regions[i].base_pa) ||
			   !IS_1GB_ALIGNED(pas_regions[i].size)) {
			/*
			 * Base or size not 1GB aligned: a two stage lookup
			 * (and therefore an L1 GPT) is required.
			 */
			assert(PAS_REG_DESC_TYPE(pas_regions[i].attrs) ==
			       PAS_REG_DESC_TYPE_ANY);
			pas_regions[i].attrs =
				GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_TBL, gpi);
			l1_cnt++;
		} else {
			/* The PAS can be mapped on a one stage lookup table */
			assert(PAS_REG_DESC_TYPE(pas_regions[i].attrs) !=
			       PAS_REG_DESC_TYPE_TBL);
			pas_regions[i].attrs =
				GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_BLK, gpi);
		}
	}

	return l1_cnt;
}
/*
 * Validate the L1 table parameters passed by the platform.
 *
 * Checks that the L1 table base is aligned and that enough memory was
 * reserved for the requested number of L1 GPTs.
 *
 * NOTE(review): both the alignment requirement and the per-table size are
 * taken from g_sz (the granule size), not t_sz (the table size) — TODO
 * confirm this is intentional and that cnt * g_sz is the correct memory
 * requirement for l1_gpt_cnt tables.
 */
static int validate_l1_gpt_params(gpt_init_params_t *params,
				  unsigned int l1_gpt_cnt)
{
	size_t l1_gpt_sz, l1_gpt_mem_sz;

	/* Check if the granularity is supported */
	assert(xlat_arch_is_granule_size_supported(
	       l1_gpt_attr_lookup[params->pgs].g_sz));

	/* Check if naturally aligned L1 GPTs can be created */
	l1_gpt_sz = l1_gpt_attr_lookup[params->pgs].g_sz;
	if (params->l1_mem_base & (l1_gpt_sz - 1)) {
		WARN("Unaligned L1 GPT base address.\n");
		return -EFAULT;
	}

	/* Check if there is enough memory for L1 GPTs */
	l1_gpt_mem_sz = l1_gpt_cnt * l1_gpt_sz;
	if (params->l1_mem_size < l1_gpt_mem_sz) {
		WARN("Inadequate memory for L1 GPTs. ");
		WARN("Expected 0x%lx bytes. Got 0x%lx bytes\n",
		     l1_gpt_mem_sz, params->l1_mem_size);
		return -ENOMEM;
	}

	INFO("Requested 0x%lx bytes for L1 GPTs.\n", l1_gpt_mem_sz);
	return 0;
}
/*
 * Helper function to determine if the end physical address lies in the same GB
 * as the current physical address. If true, the end physical address is
 * returned else, the start address of the next GB is returned.
 */
static uintptr_t get_l1_gpt_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
{
	uintptr_t next_gb_base;

	assert((cur_pa >> ONE_GB_SHIFT) <= (end_pa >> ONE_GB_SHIFT));

	/* Base address of the GB following the one containing cur_pa. */
	next_gb_base = ((cur_pa >> ONE_GB_SHIFT) + 1U) << ONE_GB_SHIFT;

	/* Clamp to end_pa when it falls inside the current GB. */
	return (next_gb_base > end_pa) ? end_pa : next_gb_base;
}
/*
 * Populate the L0 block descriptors covering PAS region 'idx'.
 *
 * Builds one GPT block descriptor carrying the region's GPI and writes it
 * into every 1GB L0 slot spanned by the region. The region's base and size
 * must be 1GB aligned (callers establish this via update_gpt_type).
 */
static void generate_l0_blk_desc(gpt_init_params_t *params,
				 unsigned int idx)
{
	uint64_t gpt_desc;
	uintptr_t end_addr;
	unsigned int end_idx, start_idx;
	/* PAS region being mapped and the base of the L0 descriptor array. */
	pas_region_t *pas = params->pas_regions + idx;
	uint64_t *l0_gpt_arr = (uint64_t *)params->l0_mem_base;

	/* Create the GPT Block descriptor for this PAS region */
	gpt_desc = GPT_BLK_DESC;
	gpt_desc |= PAS_REG_GPI(pas->attrs)
		    << GPT_BLOCK_DESC_GPI_VAL_SHIFT;

	/* Start index of this region in L0 GPTs */
	start_idx = pas->base_pa >> ONE_GB_SHIFT;

	/*
	 * Determine number of L0 GPT descriptors covered by
	 * this PAS region and use the count to populate these
	 * descriptors.
	 */
	end_addr = pas->base_pa + pas->size;
	/* The region must not extend beyond the configured PPS. */
	assert(end_addr \
	       <= (ULL(l0_gpt_attr_lookup[params->pps].t_val_mask + 1)) << 30);
	end_idx = end_addr >> ONE_GB_SHIFT;

	/* Write the same block descriptor into every covered L0 slot. */
	for (; start_idx < end_idx; start_idx++) {
		l0_gpt_arr[start_idx] = gpt_desc;
		INFO("L0 entry (BLOCK) index %u [%p]: GPI = 0x%llx (0x%llx)\n",
		     start_idx, &l0_gpt_arr[start_idx],
		     (gpt_desc >> GPT_BLOCK_DESC_GPI_VAL_SHIFT) &
		     GPT_L1_INDEX_MASK, l0_gpt_arr[start_idx]);
	}
}
/*
 * Create L0 table descriptors and fill the backing L1 tables for the PAS
 * region at index idx in params->pas_regions. The region is walked in
 * at-most-1GB chunks; a new L1 table is carved out of the L1 memory pool
 * (tracked by l1_gpt_mem_avlbl_index) each time a 1GB boundary is crossed.
 */
static void generate_l0_tbl_desc(gpt_init_params_t *params,
				 unsigned int idx)
{
	uint64_t gpt_desc = 0U, *l1_gpt_arr;
	uintptr_t start_pa, end_pa, cur_pa, next_pa;
	unsigned int start_idx, l1_gpt_idx;
	unsigned int p_val, gran_sz;
	pas_region_t *pas = params->pas_regions + idx;
	uint64_t *l0_gpt_base = (uint64_t *)params->l0_mem_base;
	uint64_t *l1_gpt_base = (uint64_t *)params->l1_mem_base;

	start_pa = pas->base_pa;
	end_pa = start_pa + pas->size;
	p_val = l1_gpt_attr_lookup[params->pgs].p_val;
	gran_sz = 1U << p_val;

	/*
	 * end_pa cannot be larger than the maximum protected physical memory.
	 * NOTE(review): this bound (2^(30 + t_val_mask)) differs from the
	 * (t_val_mask + 1) << 30 bound used in generate_l0_blk_desc() —
	 * confirm which expression matches the t_val_mask semantics.
	 */
	assert(((1ULL << 30) << l0_gpt_attr_lookup[params->pps].t_val_mask)
		> end_pa);

	for (cur_pa = start_pa; cur_pa < end_pa;) {
		/*
		 * Determine the PA range that will be covered
		 * in this loop iteration.
		 */
		next_pa = get_l1_gpt_end_pa(cur_pa, end_pa);

		INFO("PAS[%u]: start: 0x%lx, end: 0x%lx, next_pa: 0x%lx.\n",
		     idx, cur_pa, end_pa, next_pa);

		/* Index of this PA in L0 GPTs */
		start_idx = cur_pa >> ONE_GB_SHIFT;

		/*
		 * If cur_pa is on a 1GB boundary then determine
		 * the base address of next available L1 GPT
		 * memory region
		 */
		if (IS_1GB_ALIGNED(cur_pa)) {
			l1_gpt_arr = (uint64_t *)((uint64_t)l1_gpt_base +
					(l1_gpt_attr_lookup[params->pgs].t_sz *
					 l1_gpt_mem_avlbl_index));

			/*
			 * The new L1 table must fit in the L1 memory region.
			 * Compare against the byte-based end of the region:
			 * adding l1_mem_size to a uint64_t pointer would scale
			 * the offset by sizeof(uint64_t), making the check 8x
			 * too permissive.
			 */
			assert((uintptr_t)l1_gpt_arr <
			       ((uintptr_t)l1_gpt_base + params->l1_mem_size));

			/* Create the L0 GPT descriptor for this PAS region */
			gpt_desc = GPT_TBL_DESC |
				   ((uintptr_t)l1_gpt_arr
				    & GPT_TBL_DESC_ADDR_MASK);

			l0_gpt_base[start_idx] = gpt_desc;

			/*
			 * Update index to point to next available L1
			 * GPT memory region
			 */
			l1_gpt_mem_avlbl_index++;
		} else {
			/*
			 * Use the existing L1 GPT. Mask with a 64-bit value:
			 * the previous 32-bit mask ~((1U << 12) - 1U) was
			 * zero-extended and stripped L1 address bits above
			 * bit 31.
			 */
			l1_gpt_arr = (uint64_t *)(l0_gpt_base[start_idx]
					& ~(((uint64_t)1U << 12) - 1UL));
		}

		INFO("L0 entry (TABLE) index %u [%p] ==> L1 Addr 0x%llx (0x%llx)\n",
		     start_idx, &l0_gpt_base[start_idx],
		     (unsigned long long)(l1_gpt_arr),
		     l0_gpt_base[start_idx]);

		/*
		 * Fill up L1 GPT entries between these two
		 * addresses.
		 */
		for (; cur_pa < next_pa; cur_pa += gran_sz) {
			unsigned int gpi_idx, gpi_idx_shift;

			/* Obtain index of L1 GPT entry */
			l1_gpt_idx = get_l1_gpt_index(params->pgs, cur_pa);

			/*
			 * Obtain index of GPI in L1 GPT entry
			 * (i = PA[p_val+3:p_val])
			 */
			gpi_idx = (cur_pa >> p_val) & GPT_L1_INDEX_MASK;

			/*
			 * Shift by index * 4 to reach correct
			 * GPI entry in L1 GPT descriptor.
			 * GPI = gpt_desc[(4*idx)+3:(4*idx)]
			 */
			gpi_idx_shift = gpi_idx << 2;

			gpt_desc = l1_gpt_arr[l1_gpt_idx];

			/*
			 * Clear existing GPI encoding. Cast the mask to
			 * 64-bit before shifting (shift can reach 60 bits).
			 */
			gpt_desc &= ~(((uint64_t)GPT_L1_INDEX_MASK)
				      << gpi_idx_shift);

			/* Set the GPI encoding */
			gpt_desc |= ((uint64_t)PAS_REG_GPI(pas->attrs)
				     << gpi_idx_shift);

			l1_gpt_arr[l1_gpt_idx] = gpt_desc;

			/* Log only once per fully populated descriptor. */
			if (gpi_idx == 15U) {
				VERBOSE("\tEntry %u [%p] = 0x%llx\n",
					l1_gpt_idx,
					&l1_gpt_arr[l1_gpt_idx], gpt_desc);
			}
		}
	}
}
/*
 * Walk all requested PAS regions and emit the corresponding L0 block
 * descriptors or L0 table + L1 descriptors, then make the tables visible
 * in memory for the GPT walker.
 */
static void create_gpt(gpt_init_params_t *params)
{
	pas_region_t *pas_regions = params->pas_regions;

	INFO("pgs = 0x%x, pps = 0x%x, l0gptsz = 0x%x\n",
	     params->pgs, params->pps, params->l0gptsz);
	INFO("pas_region_cnt = 0x%x L1 base = 0x%lx, L1 sz = 0x%lx\n",
	     params->pas_count, params->l1_mem_base, params->l1_mem_size);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/* Drop any stale cached copies before writing with caches off. */
	gpt_inv_dcache_range(params->l0_mem_base, params->l0_mem_size);
	gpt_inv_dcache_range(params->l1_mem_base, params->l1_mem_size);
#endif

	for (unsigned int i = 0U; i < params->pas_count; i++) {
		pas_region_t *pas = &pas_regions[i];

		INFO("PAS[%u]: base 0x%llx, sz 0x%lx, GPI 0x%x, type 0x%x\n",
		     i, pas->base_pa, pas->size,
		     PAS_REG_GPI(pas->attrs),
		     PAS_REG_DESC_TYPE(pas->attrs));

		/* Check if a block or table descriptor is required */
		if (PAS_REG_DESC_TYPE(pas->attrs) == PAS_REG_DESC_TYPE_BLK) {
			generate_l0_blk_desc(params, i);
		} else {
			generate_l0_tbl_desc(params, i);
		}
	}

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/* Push the finished tables to the point of coherency. */
	gpt_clean_dcache_range(params->l0_mem_base, params->l0_mem_size);
	gpt_clean_dcache_range(params->l1_mem_base, params->l1_mem_size);
#endif

	/* Make sure that all the entries are written to the memory. */
	dsbishst();
}
#endif /* IMAGE_BL2 */
/*
 * Initialise the Granule Protection Table library.
 *
 * In BL2: validates the parameters, builds the L0 (and, if needed, L1)
 * tables from params->pas_regions, and records the configuration in the
 * global gpt_config so that gpt_enable() can program GPCCR_EL3 later.
 * In BL31: the tables must already have been created by BL2 with GPC
 * enabled; only gpt_config is repopulated (primary CPU only).
 *
 * params  Caller-supplied configuration: PGS/PPS/L0GPTSZ, PAS region list,
 *         and the L0/L1 table memory regions.
 *
 * Returns 0 on success or a negative errno-style value (-EFAULT/-ENOMEM/
 * -EINVAL propagated from the validation helpers) on failure.
 */
int gpt_init(gpt_init_params_t *params)
{
#ifdef IMAGE_BL2
	unsigned int l1_gpt_cnt;
	int ret;
#endif
	/* Validate arguments */
	assert(params != NULL);
	assert(params->pgs <= GPCCR_PGS_16K);
	assert(params->pps <= GPCCR_PPS_4PB);
	assert(params->l0_mem_base != (uintptr_t)0);
	assert(params->l0_mem_size > 0U);
	assert(params->l1_mem_base != (uintptr_t)0);
	assert(params->l1_mem_size > 0U);

#ifdef IMAGE_BL2
	/*
	 * The Granule Protection Tables are initialised only in BL2.
	 * BL31 is not allowed to initialise them again in case
	 * these are modified by any other image loaded by BL2.
	 */
	assert(params->pas_regions != NULL);
	assert(params->pas_count > 0U);

	ret = validate_l0_gpt_params(params);
	if (ret < 0) {
		return ret;
	}

	/* Check if L1 GPTs are required and how many. */
	l1_gpt_cnt = update_gpt_type(params->pas_regions,
				     params->pas_count);
	INFO("%u L1 GPTs requested.\n", l1_gpt_cnt);

	if (l1_gpt_cnt > 0U) {
		ret = validate_l1_gpt_params(params, l1_gpt_cnt);
		if (ret < 0) {
			return ret;
		}
	}

	create_gpt(params);
#else
	/* If running in BL31, only primary CPU can initialise GPTs */
	assert(plat_is_my_cpu_primary() == 1U);

	/*
	 * If the primary CPU is calling this function from BL31
	 * we expect that the tables are already initialised from
	 * BL2 and GPCCR_EL3 is already configured with
	 * Granule Protection Check Enable bit set.
	 */
	assert((read_gpccr_el3() & GPCCR_GPC_BIT) != 0U);
#endif /* IMAGE_BL2 */

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/* gpt_config may be accessed by other PEs before caches are on. */
	gpt_inv_dcache_range((uintptr_t)&gpt_config, sizeof(gpt_config));
#endif
	gpt_config.plat_gpt_l0_base = params->l0_mem_base;
	gpt_config.plat_gpt_l1_base = params->l1_mem_base;
	gpt_config.plat_gpt_l0_size = params->l0_mem_size;
	gpt_config.plat_gpt_l1_size = params->l1_mem_size;

	/* Backup the parameters used to configure GPCCR_EL3 on every PE. */
	gpt_config.plat_gpt_pgs = params->pgs;
	gpt_config.plat_gpt_pps = params->pps;
	gpt_config.plat_gpt_l0gptsz = params->l0gptsz;

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	gpt_clean_dcache_range((uintptr_t)&gpt_config, sizeof(gpt_config));
#endif

	return 0;
}
/*
 * Enable Granule Protection Checks on the calling PE.
 *
 * Programs GPTBR_EL3 with the L0 table base and GPCCR_EL3 with the
 * PPS/PGS/L0GPTSZ values recorded in gpt_config by gpt_init(), then sets
 * the GPC enable bit. Must run on every PE after gpt_init() has completed.
 */
void gpt_enable(void)
{
	u_register_t gpccr_el3;

	/* Invalidate any stale TLB entries */
	tlbipaallos();

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/* gpt_config may have been written by another PE with caches off. */
	gpt_inv_dcache_range((uintptr_t)&gpt_config, sizeof(gpt_config));
#endif

#ifdef IMAGE_BL2
	/*
	 * Granule tables must be initialised before enabling
	 * granule protection.
	 */
	assert(gpt_config.plat_gpt_l0_base != (uintptr_t)NULL);
#endif
	write_gptbr_el3(gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT);

	/* GPCCR_EL3.L0GPTSZ */
	gpccr_el3 = SET_GPCCR_L0GPTSZ(gpt_config.plat_gpt_l0gptsz);

	/* GPCCR_EL3.PPS */
	gpccr_el3 |= SET_GPCCR_PPS(gpt_config.plat_gpt_pps);

	/* GPCCR_EL3.PGS */
	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.plat_gpt_pgs);

	/* Set shareability attribute to Outer Shareable */
	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_OS);

	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA. */
	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);

	/* Enable GPT */
	gpccr_el3 |= GPCCR_GPC_BIT;
	write_gpccr_el3(gpccr_el3);

	/* Ensure the enable is complete before any further memory access. */
	dsbsy();
	VERBOSE("Granule Protection Checks enabled\n");
}
/* Disable Granule Protection Checks by clearing GPCCR_EL3.GPC. */
void gpt_disable(void)
{
	u_register_t reg = read_gpccr_el3();

	reg &= ~GPCCR_GPC_BIT;
	write_gpccr_el3(reg);
	dsbsy();
}
#ifdef IMAGE_BL31
/*
 * A single global spinlock (gpt_lock, below) serialises every granule PAS
 * transition: all updates to L1 GPT descriptors are performed under this
 * one lock, not one lock per descriptor. Each L1 descriptor covers
 * 16 granules ('granule size' x 16 bytes of protected memory).
 */
static spinlock_t gpt_lock;
/*
 * Validate that a physical address lies within the configured protected
 * physical space (PPS) and return its index into the L0 GPT table.
 *
 * pps  PPS encoding used to look up the valid index mask.
 * pa   Physical address to translate.
 *
 * Returns the non-negative L0 index on success or -EINVAL if the address
 * is outside the PPS. The return type is int (the original returned
 * unsigned int, so -EINVAL could never be seen as negative by callers).
 * The index is computed in 64 bits so that high invalid addresses are not
 * silently truncated before the range check.
 */
static int get_l0_gpt_index(unsigned int pps, uint64_t pa)
{
	uint64_t idx;

	/* Get the index into the L0 table */
	idx = pa >> ONE_GB_SHIFT;

	/* Check if the pa lies within the PPS */
	if ((idx & ~((uint64_t)l0_gpt_attr_lookup[pps].t_val_mask)) != 0U) {
		WARN("Invalid address 0x%llx.\n", pa);
		return -EINVAL;
	}

	return (int)idx;
}
/*
 * Transition a single granule between physical address spaces (PAS).
 *
 * pa             Physical address of the granule to transition.
 * src_sec_state  Security state of the caller (SMC_FROM_REALM or
 *                SMC_FROM_SECURE); restricts the legal transitions.
 * target_pas     Requested new GPI encoding for the granule.
 *
 * Returns 0 on success, -EINVAL for an invalid caller/PAS combination or
 * out-of-range address, -EPERM if the granule's current PAS does not permit
 * the requested transition.
 */
int gpt_transition_pas(uint64_t pa,
		       unsigned int src_sec_state,
		       unsigned int target_pas)
{
	int idx;
	unsigned int idx_shift;
	unsigned int gpi;
	uint64_t gpt_l1_desc;
	uint64_t *gpt_l1_addr, *gpt_addr;

	/*
	 * Check if caller is allowed to transition the granule's PAS.
	 *
	 * - Secure world caller can only request S <-> NS transitions on a
	 *   granule that is already in either S or NS PAS.
	 *
	 * - Realm world caller can only request R <-> NS transitions on a
	 *   granule that is already in either R or NS PAS.
	 */
	if (src_sec_state == SMC_FROM_REALM) {
		if ((target_pas != GPI_REALM) && (target_pas != GPI_NS)) {
			WARN("Invalid caller (%s) and PAS (%d) combination.\n",
			     "realm world", target_pas);
			return -EINVAL;
		}
	} else if (src_sec_state == SMC_FROM_SECURE) {
		if ((target_pas != GPI_SECURE) && (target_pas != GPI_NS)) {
			WARN("Invalid caller (%s) and PAS (%d) combination.\n",
			     "secure world", target_pas);
			return -EINVAL;
		}
	} else {
		WARN("Invalid caller security state 0x%x\n", src_sec_state);
		return -EINVAL;
	}

	/* Obtain the L0 GPT address. */
	gpt_addr = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* Validate physical address and obtain index into L0 GPT table */
	idx = get_l0_gpt_index(gpt_config.plat_gpt_pps, pa);

	/*
	 * Compare against signed 0: the previous "idx < 0U" comparison
	 * promoted idx to unsigned, so the error path was dead code and
	 * invalid addresses were never rejected here.
	 */
	if (idx < 0) {
		return idx;
	}

	VERBOSE("PA 0x%llx, L0 base addr 0x%llx, L0 index %u\n",
		pa, (uint64_t)gpt_addr, idx);

	/* Obtain the L0 descriptor */
	gpt_l1_desc = gpt_addr[idx];

	/*
	 * Check if it is a table descriptor. Granule transition only applies
	 * to memory ranges for which L1 tables were created at boot time. So
	 * there is no possibility of splitting and coalescing tables.
	 */
	if ((gpt_l1_desc & GPT_L1_INDEX_MASK) != GPT_TBL_DESC) {
		WARN("Invalid address 0x%llx.\n", pa);
		return -EPERM;
	}

	/*
	 * Obtain the L1 table address from the L0 descriptor. Use an explicit
	 * 64-bit mask; ~(0xFFF) as a plain int only produced the right bits
	 * through sign extension.
	 */
	gpt_l1_addr = (uint64_t *)(gpt_l1_desc & ~((uint64_t)0xFFFUL));

	/* Obtain the index into the L1 table */
	idx = get_l1_gpt_index(gpt_config.plat_gpt_pgs, pa);

	VERBOSE("L1 table base addr 0x%llx, L1 table index %u\n",
		(uint64_t)gpt_l1_addr, idx);

	/* Lock access to the granule */
	spin_lock(&gpt_lock);

	/* Obtain the L1 descriptor */
	gpt_l1_desc = gpt_l1_addr[idx];

	/* Obtain the shift for GPI in L1 GPT entry (PA[15:12] * 4). */
	idx_shift = (pa >> 12) & GPT_L1_INDEX_MASK;
	idx_shift <<= 2;

	/* Obtain the current GPI encoding for this PA */
	gpi = (gpt_l1_desc >> idx_shift) & GPT_L1_INDEX_MASK;

	if (src_sec_state == SMC_FROM_REALM) {
		/*
		 * Realm world is only allowed to transition a NS or Realm
		 * world granule.
		 */
		if ((gpi != GPI_REALM) && (gpi != GPI_NS)) {
			WARN("Invalid transition request from %s.\n",
			     "realm world");
			spin_unlock(&gpt_lock);
			return -EPERM;
		}
	} else if (src_sec_state == SMC_FROM_SECURE) {
		/*
		 * Secure world is only allowed to transition a NS or Secure
		 * world granule.
		 */
		if ((gpi != GPI_SECURE) && (gpi != GPI_NS)) {
			WARN("Invalid transition request from %s.\n",
			     "secure world");
			spin_unlock(&gpt_lock);
			return -EPERM;
		}
	}
	/* We don't need an else here since we already handle that above. */

	VERBOSE("L1 table desc 0x%llx before mod \n", gpt_l1_desc);

	/*
	 * Clear existing GPI encoding. Cast the mask to 64-bit before
	 * shifting: idx_shift can reach 60, which would overflow a 32-bit
	 * mask.
	 */
	gpt_l1_desc &= ~(((uint64_t)GPT_L1_INDEX_MASK) << idx_shift);

	/* Transition the granule to the new PAS */
	gpt_l1_desc |= ((uint64_t)target_pas << idx_shift);

	/* Update the L1 GPT entry */
	gpt_l1_addr[idx] = gpt_l1_desc;

	VERBOSE("L1 table desc 0x%llx after mod \n", gpt_l1_desc);

	/* Make sure change is propagated to other CPUs. */
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/*
	 * Clean the modified L1 entry, not the L0 table: at this point idx
	 * indexes the L1 table, so the previous &gpt_addr[idx] cleaned an
	 * unrelated L0 location and left the updated descriptor dirty.
	 */
	gpt_clean_dcache_range((uintptr_t)&gpt_l1_addr[idx], sizeof(uint64_t));
#endif

	gpt_tlbi_by_pa(pa, PAGE_SIZE_4KB);

	/* Make sure that all the entries are written to the memory. */
	dsbishst();

	/* Unlock access to the granule */
	spin_unlock(&gpt_lock);

	return 0;
}
#endif /* IMAGE_BL31 */

1112
lib/gpt_rme/gpt_rme.c Normal file

File diff suppressed because it is too large Load Diff

View File

@ -4,5 +4,5 @@
# SPDX-License-Identifier: BSD-3-Clause
#
GPT_LIB_SRCS := $(addprefix lib/gpt/, \
gpt_core.c)
GPT_LIB_SRCS := $(addprefix lib/gpt_rme/, \
gpt_rme.c)

View File

@ -0,0 +1,228 @@
/*
* Copyright (c) 2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef GPT_RME_PRIVATE_H
#define GPT_RME_PRIVATE_H
#include <arch.h>
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/utils_def.h>
/******************************************************************************/
/* GPT descriptor definitions */
/******************************************************************************/
/* GPT level 0 descriptor bit definitions. */
#define GPT_L0_TYPE_MASK UL(0xF)
#define GPT_L0_TYPE_SHIFT U(0)
/* For now, we don't support contiguous descriptors, only table and block. */
#define GPT_L0_TYPE_TBL_DESC UL(0x3)
#define GPT_L0_TYPE_BLK_DESC UL(0x1)
#define GPT_L0_TBL_DESC_L1ADDR_MASK UL(0xFFFFFFFFFF)
#define GPT_L0_TBL_DESC_L1ADDR_SHIFT U(12)
#define GPT_L0_BLK_DESC_GPI_MASK UL(0xF)
#define GPT_L0_BLK_DESC_GPI_SHIFT U(4)
/* GPT level 1 descriptor bit definitions */
#define GPT_L1_GRAN_DESC_GPI_MASK UL(0xF)
/*
 * This macro fills out every GPI entry in a granule's L1 descriptor with the
 * same value.
 */
#define GPT_BUILD_L1_DESC(_gpi) (((uint64_t)(_gpi) << 4*0) | \
((uint64_t)(_gpi) << 4*1) | \
((uint64_t)(_gpi) << 4*2) | \
((uint64_t)(_gpi) << 4*3) | \
((uint64_t)(_gpi) << 4*4) | \
((uint64_t)(_gpi) << 4*5) | \
((uint64_t)(_gpi) << 4*6) | \
((uint64_t)(_gpi) << 4*7) | \
((uint64_t)(_gpi) << 4*8) | \
((uint64_t)(_gpi) << 4*9) | \
((uint64_t)(_gpi) << 4*10) | \
((uint64_t)(_gpi) << 4*11) | \
((uint64_t)(_gpi) << 4*12) | \
((uint64_t)(_gpi) << 4*13) | \
((uint64_t)(_gpi) << 4*14) | \
((uint64_t)(_gpi) << 4*15))
/******************************************************************************/
/* GPT platform configuration */
/******************************************************************************/
/* This value comes from GPCCR_EL3 so no externally supplied definition. */
#define GPT_L0GPTSZ ((unsigned int)((read_gpccr_el3() >> \
GPCCR_L0GPTSZ_SHIFT) & GPCCR_L0GPTSZ_MASK))
/* The "S" value is directly related to L0GPTSZ */
#define GPT_S_VAL (GPT_L0GPTSZ + 30U)
/*
* Map PPS values to T values.
*
* PPS Size T
* 0b000 4GB 32
* 0b001 64GB 36
* 0b010 1TB 40
* 0b011 4TB 42
* 0b100 16TB 44
* 0b101 256TB 48
* 0b110 4PB 52
*
* See section 15.1.27 of the RME specification.
*/
typedef enum {
PPS_4GB_T = 32U,
PPS_64GB_T = 36U,
PPS_1TB_T = 40U,
PPS_4TB_T = 42U,
PPS_16TB_T = 44U,
PPS_256TB_T = 48U,
PPS_4PB_T = 52U
} gpt_t_val_e;
/*
* Map PGS values to P values.
*
* PGS Size P
* 0b00 4KB 12
* 0b10 16KB 14
* 0b01 64KB 16
*
* Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB, this is not a typo.
*
* See section 15.1.27 of the RME specification.
*/
typedef enum {
PGS_4KB_P = 12U,
PGS_16KB_P = 14U,
PGS_64KB_P = 16U
} gpt_p_val_e;
/* Max valid value for PGS. */
#define GPT_PGS_MAX (2U)
/* Max valid value for PPS. */
#define GPT_PPS_MAX (6U)
/******************************************************************************/
/* L0 address attribute macros */
/******************************************************************************/
/*
* If S is greater than or equal to T then there is a single L0 region covering
* the entire protected space so there is no L0 index, so the width (and the
* derivative mask value) are both zero. If we don't specifically handle this
* special case we'll get a negative width value which does not make sense and
* could cause a lot of problems.
*/
#define GPT_L0_IDX_WIDTH(_t) (((_t) > GPT_S_VAL) ? \
((_t) - GPT_S_VAL) : (0U))
/* Bit shift for the L0 index field in a PA. */
#define GPT_L0_IDX_SHIFT (GPT_S_VAL)
/* Mask for the L0 index field, must be shifted. */
#define GPT_L0_IDX_MASK(_t) (0xFFFFFFFFFFFFFFFFUL >> \
(64U - (GPT_L0_IDX_WIDTH(_t))))
/* Total number of L0 regions. */
#define GPT_L0_REGION_COUNT(_t) ((GPT_L0_IDX_MASK(_t)) + 1U)
/* Total size of each GPT L0 region in bytes. */
#define GPT_L0_REGION_SIZE (1UL << (GPT_L0_IDX_SHIFT))
/* Total size in bytes of the whole L0 table. */
#define GPT_L0_TABLE_SIZE(_t) ((GPT_L0_REGION_COUNT(_t)) << 3U)
/******************************************************************************/
/* L1 address attribute macros */
/******************************************************************************/
/* Width of the L1 index field. */
#define GPT_L1_IDX_WIDTH(_p) ((GPT_S_VAL - 1U) - ((_p) + 3U))
/* Bit shift for the L1 index field. */
#define GPT_L1_IDX_SHIFT(_p) ((_p) + 4U)
/* Mask for the L1 index field, must be shifted. */
#define GPT_L1_IDX_MASK(_p) (0xFFFFFFFFFFFFFFFFUL >> \
(64U - (GPT_L1_IDX_WIDTH(_p))))
/* Bit shift for the index of the L1 GPI in a PA. */
#define GPT_L1_GPI_IDX_SHIFT(_p) (_p)
/* Mask for the index of the L1 GPI in a PA. */
#define GPT_L1_GPI_IDX_MASK (0xF)
/* Total number of entries in each L1 table. */
#define GPT_L1_ENTRY_COUNT(_p) ((GPT_L1_IDX_MASK(_p)) + 1U)
/* Total size in bytes of each L1 table. */
#define GPT_L1_TABLE_SIZE(_p) ((GPT_L1_ENTRY_COUNT(_p)) << 3U)
/******************************************************************************/
/* General helper macros */
/******************************************************************************/
/* Protected space actual size in bytes. */
#define GPT_PPS_ACTUAL_SIZE(_t) (1UL << (_t))
/* Granule actual size in bytes. */
#define GPT_PGS_ACTUAL_SIZE(_p) (1UL << (_p))
/* L0 GPT region size in bytes. */
#define GPT_L0GPTSZ_ACTUAL_SIZE (1UL << GPT_S_VAL)
/* Get the index of the L0 entry from a physical address. */
#define GPT_L0_IDX(_pa) ((_pa) >> GPT_L0_IDX_SHIFT)
/*
* This definition is used to determine if a physical address lies on an L0
* region boundary.
*/
#define GPT_IS_L0_ALIGNED(_pa) (((_pa) & (GPT_L0_REGION_SIZE - U(1))) == U(0))
/* Get the type field from an L0 descriptor. */
#define GPT_L0_TYPE(_desc) (((_desc) >> GPT_L0_TYPE_SHIFT) & \
GPT_L0_TYPE_MASK)
/* Create an L0 block descriptor. */
#define GPT_L0_BLK_DESC(_gpi) (GPT_L0_TYPE_BLK_DESC | \
(((_gpi) & GPT_L0_BLK_DESC_GPI_MASK) << \
GPT_L0_BLK_DESC_GPI_SHIFT))
/* Create an L0 table descriptor with an L1 table address. */
#define GPT_L0_TBL_DESC(_pa) (GPT_L0_TYPE_TBL_DESC | ((uint64_t)(_pa) & \
(GPT_L0_TBL_DESC_L1ADDR_MASK << \
GPT_L0_TBL_DESC_L1ADDR_SHIFT)))
/* Get the GPI from an L0 block descriptor. */
#define GPT_L0_BLKD_GPI(_desc) (((_desc) >> GPT_L0_BLK_DESC_GPI_SHIFT) & \
GPT_L0_BLK_DESC_GPI_MASK)
/* Get the L1 address from an L0 table descriptor. */
#define GPT_L0_TBLD_ADDR(_desc) ((uint64_t *)(((_desc) & \
(GPT_L0_TBL_DESC_L1ADDR_MASK << \
GPT_L0_TBL_DESC_L1ADDR_SHIFT))))
/* Get the index into the L1 table from a physical address. */
#define GPT_L1_IDX(_p, _pa) (((_pa) >> GPT_L1_IDX_SHIFT(_p)) & \
GPT_L1_IDX_MASK(_p))
/* Get the index of the GPI within an L1 table entry from a physical address. */
#define GPT_L1_GPI_IDX(_p, _pa) (((_pa) >> GPT_L1_GPI_IDX_SHIFT(_p)) & \
GPT_L1_GPI_IDX_MASK)
/* Determine if an address is granule-aligned. */
#define GPT_IS_L1_ALIGNED(_p, _pa) (((_pa) & (GPT_PGS_ACTUAL_SIZE(_p) - U(1))) \
== U(0))
#endif /* GPT_RME_PRIVATE_H */

View File

@ -18,12 +18,16 @@
#include <drivers/partition/partition.h>
#include <lib/fconf/fconf.h>
#include <lib/fconf/fconf_dyn_cfg_getter.h>
#include <lib/gpt/gpt.h>
#if ENABLE_RME
#include <lib/gpt_rme/gpt_rme.h>
#endif /* ENABLE_RME */
#ifdef SPD_opteed
#include <lib/optee_utils.h>
#endif
#include <lib/utils.h>
#if ENABLE_RME
#include <plat/arm/common/arm_pas_def.h>
#endif /* ENABLE_RME */
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>
@ -130,6 +134,7 @@ void bl2_platform_setup(void)
}
#if ENABLE_RME
static void arm_bl2_plat_gpt_setup(void)
{
/*
@ -137,32 +142,38 @@ static void arm_bl2_plat_gpt_setup(void)
* the layout, so the array cannot be constant.
*/
pas_region_t pas_regions[] = {
ARM_PAS_GPI_ANY,
ARM_PAS_KERNEL,
ARM_PAS_TZC,
ARM_PAS_SECURE,
ARM_PAS_REALM,
ARM_PAS_EL3_DRAM,
ARM_PAS_GPTS
};
gpt_init_params_t gpt_params = {
PLATFORM_PGS,
PLATFORM_PPS,
PLATFORM_L0GPTSZ,
pas_regions,
(unsigned int)(sizeof(pas_regions)/sizeof(pas_region_t)),
ARM_L0_GPT_ADDR_BASE, ARM_L0_GPT_SIZE,
ARM_L1_GPT_ADDR_BASE, ARM_L1_GPT_SIZE
};
/* Initialise the global granule tables */
INFO("Enabling Granule Protection Checks\n");
if (gpt_init(&gpt_params) < 0) {
/* Initialize entire protected space to GPT_GPI_ANY. */
if (gpt_init_l0_tables(GPCCR_PPS_4GB, ARM_L0_GPT_ADDR_BASE,
ARM_L0_GPT_SIZE) < 0) {
ERROR("gpt_init_l0_tables() failed!\n");
panic();
}
gpt_enable();
/* Carve out defined PAS ranges. */
if (gpt_init_pas_l1_tables(GPCCR_PGS_4K,
ARM_L1_GPT_ADDR_BASE,
ARM_L1_GPT_SIZE,
pas_regions,
(unsigned int)(sizeof(pas_regions) /
sizeof(pas_region_t))) < 0) {
ERROR("gpt_init_pas_l1_tables() failed!\n");
panic();
}
INFO("Enabling Granule Protection Checks\n");
if (gpt_enable() < 0) {
ERROR("gpt_enable() failed!\n");
panic();
}
}
#endif /* ENABLE_RME */
/*******************************************************************************
@ -201,9 +212,6 @@ void arm_bl2_plat_arch_setup(void)
#if ENABLE_RME
/* Initialise the secure environment */
plat_arm_security_setup();
/* Initialise and enable Granule Protection */
arm_bl2_plat_gpt_setup();
#endif
setup_page_tables(bl_regions, plat_arm_get_mmap());
@ -212,6 +220,9 @@ void arm_bl2_plat_arch_setup(void)
/* BL2 runs in EL3 when RME enabled. */
assert(get_armv9_2_feat_rme_support() != 0U);
enable_mmu_el3(0);
/* Initialise and enable granule protection after MMU. */
arm_bl2_plat_gpt_setup();
#else
enable_mmu_el1(0);
#endif

View File

@ -13,10 +13,11 @@
#include <drivers/console.h>
#include <lib/debugfs.h>
#include <lib/extensions/ras.h>
#include <lib/gpt/gpt.h>
#if ENABLE_RME
#include <lib/gpt_rme/gpt_rme.h>
#endif
#include <lib/mmio.h>
#include <lib/xlat_tables/xlat_tables_compat.h>
#include <plat/arm/common/arm_pas_def.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>
#include <platform_def.h>
@ -235,28 +236,6 @@ void __init arm_bl31_early_platform_setup(void *from_bl2, uintptr_t soc_fw_confi
*/
bl33_image_ep_info.args.arg0 = (u_register_t)ARM_DRAM1_BASE;
#endif
#if ENABLE_RME
/*
* Initialise Granule Protection library and enable GPC
* for the primary processor. The tables were initialised
* in BL2, so there is no need to provide any PAS here.
*/
gpt_init_params_t gpt_params = {
PLATFORM_PGS,
PLATFORM_PPS,
PLATFORM_L0GPTSZ,
NULL,
0U,
ARM_L0_GPT_ADDR_BASE, ARM_L0_GPT_SIZE,
ARM_L1_GPT_ADDR_BASE, ARM_L1_GPT_SIZE
};
/* Initialise the global granule tables. */
if (gpt_init(&gpt_params) < 0) {
panic();
}
#endif /* ENABLE_RME */
}
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
@ -430,6 +409,19 @@ void __init arm_bl31_plat_arch_setup(void)
enable_mmu_el3(0);
#if ENABLE_RME
/*
* Initialise Granule Protection library and enable GPC for the primary
* processor. The tables have already been initialized by a previous BL
* stage, so there is no need to provide any PAS here. This function
* sets up pointers to those tables.
*/
if (gpt_runtime_init() < 0) {
ERROR("gpt_runtime_init() failed!\n");
panic();
}
#endif /* ENABLE_RME */
arm_setup_romlib();
}

View File

@ -16,7 +16,7 @@
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/gpt/gpt_defs.h>
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
@ -296,12 +296,18 @@ static int gtsi_transition_granule(uint64_t pa,
{
int ret;
ret = gpt_transition_pas(pa, src_sec_state, target_pas);
ret = gpt_transition_pas(pa, PAGE_SIZE_4KB, src_sec_state, target_pas);
/* Convert TF-A error codes into GTSI error codes */
if (ret == -EINVAL) {
ERROR("[GTSI] Transition failed: invalid %s\n", "address");
ERROR(" PA: 0x%llx, SRC: %d, PAS: %d\n", pa,
src_sec_state, target_pas);
ret = GRAN_TRANS_RET_BAD_ADDR;
} else if (ret == -EPERM) {
ERROR("[GTSI] Transition failed: invalid %s\n", "caller/PAS");
ERROR(" PA: 0x%llx, SRC: %d, PAS: %d\n", pa,
src_sec_state, target_pas);
ret = GRAN_TRANS_RET_BAD_PAS;
}
@ -328,12 +334,10 @@ uint64_t rmmd_gtsi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
switch (smc_fid) {
case SMC_ASC_MARK_REALM:
SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM,
GPI_REALM));
break;
GPT_GPI_REALM));
case SMC_ASC_MARK_NONSECURE:
SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM,
GPI_NS));
break;
GPT_GPI_NS));
default:
WARN("RMM: Unsupported GTF call 0x%08x\n", smc_fid);
SMC_RET1(handle, SMC_UNK);