nxp: platform files for bl2 and bl31 setup

For NXP platforms:
- Setup files for BL2 and BL31
- Other supporting files.

Signed-off-by: Pankaj Gupta <pankaj.gupta@nxp.com>
Change-Id: I36a1183a0652701bdede9e02d41eb976accbb017
This commit is contained in:
Pankaj Gupta 2020-12-09 14:02:40 +05:30
parent 0f33f50e21
commit b53c2c5f2d
16 changed files with 1960 additions and 0 deletions

View File

@ -0,0 +1,103 @@
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <common/bl_common.h>
#include <common/desc_image_load.h>
#ifdef CSF_HEADER_PREPENDED
#include <csf_hdr.h>
#endif
#include <plat/common/platform.h>
#include <platform_def.h>
/*******************************************************************************
* Following descriptor provides BL image/ep information that gets used
* by BL2 to load the images and also subset of this information is
* passed to next BL image. The image loading sequence is managed by
* populating the images in required loading order. The image execution
* sequence is managed by populating the `next_handoff_image_id` with
* the next executable image id.
******************************************************************************/
/*
 * BL2 image descriptor table for NXP Layerscape platforms.
 *
 * Load order follows array order; execution order is chained through
 * .next_handoff_image_id: BL31 -> (BL32 if an SPD is built) -> BL33.
 */
static bl_mem_params_node_t bl2_mem_params_descs[] = {
	/* Fill BL31 related information */
	{
		.image_id = BL31_IMAGE_ID,

		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
			VERSION_2, entry_point_info_t,
			SECURE | EXECUTABLE | EP_FIRST_EXE),
		.ep_info.pc = BL31_BASE,
		/* BL31 is entered at EL3 with all exceptions masked. */
		.ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
					DISABLE_ALL_EXCEPTIONS),
#if DEBUG
		/* Debug builds hand a platform magic value to BL31 in arg1. */
		.ep_info.args.arg1 = LS_BL31_PLAT_PARAM_VAL,
#endif

		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
			VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP),
#ifdef CSF_HEADER_PREPENDED
		/*
		 * With a CSF (secure boot) header prepended, load the image
		 * CSF_HDR_SZ below BL31_BASE so the payload itself lands at
		 * BL31_BASE; the max size grows by the same amount.
		 */
		.image_info.image_base = BL31_BASE - CSF_HDR_SZ,
		.image_info.image_max_size = (BL31_LIMIT - BL31_BASE) +
						CSF_HDR_SZ,
#else
		.image_info.image_base = BL31_BASE,
		.image_info.image_max_size = (BL31_LIMIT - BL31_BASE),
#endif

		/* Hand off to BL32 when an SPD is configured, else BL33. */
# ifdef NXP_LOAD_BL32
		.next_handoff_image_id = BL32_IMAGE_ID,
# else
		.next_handoff_image_id = BL33_IMAGE_ID,
# endif
	},
# ifdef NXP_LOAD_BL32
	/* Fill BL32 related information */
	{
		.image_id = BL32_IMAGE_ID,

		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
			VERSION_2, entry_point_info_t, SECURE | EXECUTABLE),
		.ep_info.pc = BL32_BASE,

		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
			VERSION_2, image_info_t, 0),
#ifdef CSF_HEADER_PREPENDED
		/* Same CSF header adjustment as for BL31 above. */
		.image_info.image_base = BL32_BASE - CSF_HDR_SZ,
		.image_info.image_max_size = (BL32_LIMIT - BL32_BASE) +
						CSF_HDR_SZ,
#else
		.image_info.image_base = BL32_BASE,
		.image_info.image_max_size = (BL32_LIMIT - BL32_BASE),
#endif
		.next_handoff_image_id = BL33_IMAGE_ID,
	},
# endif /* BL32_BASE */

	/* Fill BL33 related information */
	{
		.image_id = BL33_IMAGE_ID,

		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
			VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE),
		.ep_info.pc = BL33_BASE,

		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
			VERSION_2, image_info_t, 0),
#ifdef CSF_HEADER_PREPENDED
		.image_info.image_base = BL33_BASE - CSF_HDR_SZ,
		.image_info.image_max_size = (BL33_LIMIT - BL33_BASE) +
						CSF_HDR_SZ,
#else
		.image_info.image_base = BL33_BASE,
		.image_info.image_max_size = BL33_LIMIT - BL33_BASE,
#endif
		/* SPSR for the non-secure world; EL1 entry by default here. */
		.ep_info.spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
					DISABLE_ALL_EXCEPTIONS),

		/* Last image in the chain. */
		.next_handoff_image_id = INVALID_IMAGE_ID,
	}
};

REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)

View File

@ -0,0 +1,105 @@
#
# Copyright 2018-2021 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#
#
###############################################################################
# Common build configuration for NXP Layerscape platforms.

# Flow begins in BL2 at EL3 mode
BL2_AT_EL3			:= 1

# Though one core is powered up by default, there are
# platform specific ways to release more than one core
COLD_BOOT_SINGLE_CPU		:= 0

PROGRAMMABLE_RESET_ADDRESS	:= 1

USE_COHERENT_MEM		:= 0

# Use generic OID definition (tbbr_oid.h)
USE_TBBR_DEFS			:= 1

PLAT_XLAT_TABLES_DYNAMIC	:= 0

ENABLE_SVE_FOR_NS		:= 0

ENABLE_STACK_PROTECTOR		:= 0

ERROR_DEPRECATED		:= 0

LS_DISABLE_TRUSTED_WDOG		:= 1

# On ARM platforms, separate the code and read-only data sections to allow
# mapping the former as executable and the latter as execute-never.
SEPARATE_CODE_AND_RODATA	:= 1

# Enable new version of image loading on ARM platforms
LOAD_IMAGE_V2			:= 1

RCW				:= ""

# BL32 (secure payload) is loaded only when an SPD is selected.
ifneq (${SPD},none)
$(eval $(call add_define, NXP_LOAD_BL32))
endif

###############################################################################
PLAT_TOOL_PATH		:= tools/nxp
CREATE_PBL_TOOL_PATH	:= ${PLAT_TOOL_PATH}/create_pbl
PLAT_SETUP_PATH		:= ${PLAT_PATH}/common/setup

PLAT_INCLUDES		+= -I${PLAT_SETUP_PATH}/include \
			   -Iinclude/plat/arm/common \
			   -Iinclude/drivers/arm \
			   -Iinclude/lib \
			   -Iinclude/drivers/io \
			   -Ilib/psci

# Required without TBBR.
# To include the defines for DDR PHY Images.
PLAT_INCLUDES		+= -Iinclude/common/tbbr

include ${PLAT_SETUP_PATH}/core.mk

PLAT_BL_COMMON_SOURCES	+= ${CPU_LIBS} \
			   plat/nxp/common/setup/ls_err.c \
			   plat/nxp/common/setup/ls_common.c

ifneq (${ENABLE_STACK_PROTECTOR},0)
PLAT_BL_COMMON_SOURCES	+= ${PLAT_SETUP_PATH}/ls_stack_protector.c
endif

include lib/xlat_tables_v2/xlat_tables.mk

PLAT_BL_COMMON_SOURCES	+= ${XLAT_TABLES_LIB_SRCS}

BL2_SOURCES		+= drivers/io/io_fip.c \
			   drivers/io/io_memmap.c \
			   drivers/io/io_storage.c \
			   common/desc_image_load.c \
			   plat/nxp/common/setup/ls_image_load.c \
			   plat/nxp/common/setup/ls_io_storage.c \
			   plat/nxp/common/setup/ls_bl2_el3_setup.c \
			   plat/nxp/common/setup/${ARCH}/ls_bl2_mem_params_desc.c

# NOTE: no trailing backslash here. A line continuation would splice the
# following "ifeq" line into BL31_SOURCES and corrupt the source list.
BL31_SOURCES		+= plat/nxp/common/setup/ls_bl31_setup.c

ifeq (${LS_EL3_INTERRUPT_HANDLER}, yes)
$(eval $(call add_define, LS_EL3_INTERRUPT_HANDLER))
BL31_SOURCES		+= plat/nxp/common/setup/ls_interrupt_mgmt.c
endif

ifeq (${TEST_BL31}, 1)
BL31_SOURCES		+= ${TEST_SOURCES}
endif

# Verify build config
# -------------------

ifneq (${LOAD_IMAGE_V2}, 1)
$(error Error: Layerscape needs LOAD_IMAGE_V2=1)
else
$(eval $(call add_define,LOAD_IMAGE_V2))
endif

include $(CREATE_PBL_TOOL_PATH)/create_pbl.mk

View File

@ -0,0 +1,20 @@
# Copyright 2018-2020 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#
#
#------------------------------------------------------------------------------
#
# Select the CORE files
#
# -----------------------------------------------------------------------------
# CPU support libraries: the AEM (generic model) support is always built,
# and the Cortex core file matching CORE_TYPE is added on top.
CPU_LIBS := lib/cpus/${ARCH}/aem_generic.S

# CORE_TYPE must be one of the supported Cortex cores.
ifeq (,$(filter $(CORE_TYPE),a53 a55 a57 a72 a75))
$(error "CORE_TYPE not specified or incorrect")
else
CPU_LIBS += lib/cpus/${ARCH}/cortex_$(CORE_TYPE).S
endif
# -----------------------------------------------------------------------------

View File

@ -0,0 +1,61 @@
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef BL31_DATA_H
#define BL31_DATA_H

/*
 * Layout of the secure (OCRAM) data area used by the BL31 PSCI/SMC
 * assembly support code. Carved from the top of OCRAM downwards:
 * SMC global data, then boot-core PSCI data, then per-secondary data.
 */
#define SECURE_DATA_BASE     NXP_OCRAM_ADDR
#define SECURE_DATA_SIZE     NXP_OCRAM_SIZE
#define SECURE_DATA_TOP      (SECURE_DATA_BASE + SECURE_DATA_SIZE)
#define SMC_REGION_SIZE      0x80
#define SMC_GLBL_BASE        (SECURE_DATA_TOP - SMC_REGION_SIZE)
#define BC_PSCI_DATA_SIZE    0xC0
#define BC_PSCI_BASE         (SMC_GLBL_BASE - BC_PSCI_DATA_SIZE)
#define SECONDARY_TOP        BC_PSCI_BASE

/* Per-secondary-core PSCI data region size. */
#define SEC_PSCI_DATA_SIZE   0xC0
#define SEC_REGION_SIZE      SEC_PSCI_DATA_SIZE

/* SMC global data */
#define BOOTLOC_OFFSET       0x0
#define BOOT_SVCS_OSET       0x8

/* offset to prefetch disable mask */
#define PREFETCH_DIS_OFFSET  0x10

/* must reference last smc global entry */
#define LAST_SMC_GLBL_OFFSET 0x18

/* SMC task bookkeeping: per-task record size and field offsets. */
#define SMC_TASK_OFFSET      0xC
#define TSK_START_OFFSET     0x0
#define TSK_DONE_OFFSET      0x4
#define TSK_CORE_OFFSET      0x8
#define SMC_TASK1_BASE       (SMC_GLBL_BASE + 32)
#define SMC_TASK2_BASE       (SMC_TASK1_BASE + SMC_TASK_OFFSET)
#define SMC_TASK3_BASE       (SMC_TASK2_BASE + SMC_TASK_OFFSET)
#define SMC_TASK4_BASE       (SMC_TASK3_BASE + SMC_TASK_OFFSET)

/* psci data area offsets */
#define CORE_STATE_DATA      0x0
#define SPSR_EL3_DATA        0x8
#define CNTXT_ID_DATA        0x10
#define START_ADDR_DATA      0x18
#define LINK_REG_DATA        0x20
#define GICC_CTLR_DATA       0x28
#define ABORT_FLAG_DATA      0x30
#define SCTLR_DATA           0x38
#define CPUECTLR_DATA        0x40
#define AUX_01_DATA          0x48	/* usage defined per SoC */
#define AUX_02_DATA          0x50	/* usage defined per SoC */
#define AUX_03_DATA          0x58	/* usage defined per SoC */
#define AUX_04_DATA          0x60	/* usage defined per SoC */
#define AUX_05_DATA          0x68	/* usage defined per SoC */
#define AUX_06_DATA          0x70	/* usage defined per SoC */
#define AUX_07_DATA          0x78	/* usage defined per SoC */
#define SCR_EL3_DATA         0x80
#define HCR_EL2_DATA         0x88

#endif /* BL31_DATA_H */

View File

@ -0,0 +1,23 @@
/*
* Copyright 2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef LS_EL3_INTRPT_MGMT_H
#define LS_EL3_INTRPT_MGMT_H

#include <bl31/interrupt_mgmt.h>

/* Highest EL3 interrupt ID (exclusive) the handler table supports. */
#define MAX_INTR_EL3		128

/*
 * Register handler to specific GIC entrance
 * for INTR_TYPE_EL3 type of interrupt
 */
int request_intr_type_el3(uint32_t id, interrupt_type_handler_t handler);

/* Route INTR_TYPE_EL3 interrupts to the registered dispatch table. */
void ls_el3_interrupt_config(void);

#endif	/* LS_EL3_INTRPT_MGMT_H */

View File

@ -0,0 +1,34 @@
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef MMU_MAP_DEF_H
#define MMU_MAP_DEF_H

#include <lib/xlat_tables/xlat_tables_defs.h>

#include <platform_def.h>

/* Identity (flat) MMU mappings for the common NXP device regions. */

/* CCSR configuration space: device memory, secure read/write. */
#define LS_MAP_CCSR		MAP_REGION_FLAT(NXP_CCSR_ADDR, \
					NXP_CCSR_SIZE, \
					MT_DEVICE | MT_RW | MT_SECURE)

#ifdef NXP_DCSR_ADDR
/* Debug configuration space (only on SoCs that define it). */
#define LS_MAP_DCSR		MAP_REGION_FLAT(NXP_DCSR_ADDR, \
					NXP_DCSR_SIZE, \
					MT_DEVICE | MT_RW | MT_SECURE)
#endif

/*
 * DUART console region.
 * NOTE(review): mapped MT_NS unlike the other device regions, presumably so
 * the console stays usable from the non-secure side -- confirm intent.
 */
#define LS_MAP_CONSOLE		MAP_REGION_FLAT(NXP_DUART1_ADDR, \
					NXP_DUART_SIZE, \
					MT_DEVICE | MT_RW | MT_NS)

/* On-chip RAM holding the secure data area (see bl31_data.h). */
#define LS_MAP_OCRAM		MAP_REGION_FLAT(NXP_OCRAM_ADDR, \
					NXP_OCRAM_SIZE, \
					MT_DEVICE | MT_RW | MT_SECURE)

#endif /* MMU_MAP_DEF_H */

View File

@ -0,0 +1,147 @@
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef PLAT_COMMON_H
#define PLAT_COMMON_H

#include <stdbool.h>

#include <lib/el3_runtime/cpu_data.h>

#include <platform_def.h>

#ifdef IMAGE_BL31

/* End of the BL31 image, provided by the linker script. */
#define BL31_END (uintptr_t)(&__BL31_END__)

/*******************************************************************************
 * This structure represents the superset of information that can be passed to
 * BL31 e.g. while passing control to it from BL2. The BL32 parameters will be
 * populated only if BL2 detects its presence. A pointer to a structure of this
 * type should be passed in X0 to BL31's cold boot entrypoint.
 *
 * Use of this structure and the X0 parameter is not mandatory: the BL31
 * platform code can use other mechanisms to provide the necessary information
 * about BL32 and BL33 to the common and SPD code.
 *
 * BL31 image information is mandatory if this structure is used. If either of
 * the optional BL32 and BL33 image information is not provided, this is
 * indicated by the respective image_info pointers being zero.
 ******************************************************************************/
typedef struct bl31_params {
	param_header_t h;
	image_info_t *bl31_image_info;
	entry_point_info_t *bl32_ep_info;
	image_info_t *bl32_image_info;
	entry_point_info_t *bl33_ep_info;
	image_info_t *bl33_image_info;
} bl31_params_t;

/* BL3 utility functions */
void ls_bl31_early_platform_setup(void *from_bl2,
				void *plat_params_from_bl2);

/* LS Helper functions */
unsigned int plat_my_core_mask(void);
unsigned int plat_core_mask(u_register_t mpidr);
unsigned int plat_core_pos(u_register_t mpidr);

/* BL31 Data API(s) — implemented by the SoC PSCI support code. */
void _init_global_data(void);
void _initialize_psci(void);
uint32_t _getCoreState(u_register_t core_mask);
void _setCoreState(u_register_t core_mask, u_register_t core_state);

/* SoC defined structure and API(s) */
void soc_runtime_setup(void);
void soc_init(void);
void soc_platform_setup(void);
void soc_early_platform_setup2(void);
#endif /* IMAGE_BL31 */

#ifdef IMAGE_BL2
/* SoC hooks called at the corresponding BL2 boot stages. */
void soc_early_init(void);
void soc_mem_access(void);
void soc_preload_setup(void);
void soc_bl2_prepare_exit(void);

/* IO storage utility functions */
int plat_io_setup(void);
int open_backend(const uintptr_t spec);

void ls_bl2_plat_arch_setup(void);
void ls_bl2_el3_plat_arch_setup(void);

/* Boot source the SoC was strapped/configured to use. */
enum boot_device {
	BOOT_DEVICE_IFC_NOR,
	BOOT_DEVICE_IFC_NAND,
	BOOT_DEVICE_QSPI,
	BOOT_DEVICE_EMMC,
	BOOT_DEVICE_SDHC2_EMMC,
	BOOT_DEVICE_FLEXSPI_NOR,
	BOOT_DEVICE_FLEXSPI_NAND,
	BOOT_DEVICE_NONE
};

enum boot_device get_boot_dev(void);

/* DDR Related functions */
#if DDR_INIT
#ifdef NXP_WARM_BOOT
long long init_ddr(uint32_t wrm_bt_flg);
#else
long long init_ddr(void);
#endif
#endif

/* Board specific weak functions */
bool board_enable_povdd(void);
bool board_disable_povdd(void);

void mmap_add_ddr_region_dynamically(void);
#endif /* IMAGE_BL2 */

/* One contiguous DRAM region: base address and size in bytes. */
typedef struct {
	uint64_t addr;
	uint64_t size;
} region_info_t;

/* DRAM layout: populated by BL2 after DDR init, consumed by BL31. */
typedef struct {
	uint64_t num_dram_regions;
	uint64_t total_dram_size;
	region_info_t region[NUM_DRAM_REGIONS];
} dram_regions_info_t;

dram_regions_info_t *get_dram_regions_info(void);

/* Build the MMU page tables for the generic + platform memory regions. */
void ls_setup_page_tables(uintptr_t total_base,
			size_t total_size,
			uintptr_t code_start,
			uintptr_t code_limit,
			uintptr_t rodata_start,
			uintptr_t rodata_limit
#if USE_COHERENT_MEM
			, uintptr_t coh_start,
			uintptr_t coh_limit
#endif
);

/* Structure to define SoC personality */
struct soc_type {
	char name[10];
	uint32_t personality;
	uint32_t num_clusters;
	uint32_t cores_per_cluster;
};

/* Convenience initializer for a soc_type entry keyed by SVR value. */
#define SOC_ENTRY(n, v, ncl, nc) {	\
		.name = #n,		\
		.personality = SVR_##v,	\
		.num_clusters = (ncl),	\
		.cores_per_cluster = (nc)}

#endif /* PLAT_COMMON_H */

View File

@ -0,0 +1,22 @@
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef PLAT_MACROS_S
#define PLAT_MACROS_S

/* ---------------------------------------------
 * The below required platform porting macro
 * prints out relevant GIC and CCI registers
 * whenever an unhandled exception is taken in
 * BL31.
 * Clobbers: x0 - x10, x16, x17, sp
 * ---------------------------------------------
 */
	.macro plat_crash_print_regs
	/* Intentionally empty: no extra registers dumped on these SoCs. */
	.endm

#endif /* PLAT_MACROS_S */

View File

@ -0,0 +1,300 @@
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <assert.h>
#include <common/desc_image_load.h>
#include <dcfg.h>
#ifdef POLICY_FUSE_PROVISION
#include <fuse_io.h>
#endif
#include <mmu_def.h>
#include <plat_common.h>
#ifdef NXP_NV_SW_MAINT_LAST_EXEC_DATA
#include <plat_nv_storage.h>
#endif
/* Allow SoC-specific code to override these BL2 hooks. */
#pragma weak bl2_el3_early_platform_setup
#pragma weak bl2_el3_plat_arch_setup
#pragma weak bl2_el3_plat_prepare_exit

/* DRAM layout discovered at boot; filled in by populate_dram_regions_info(). */
static dram_regions_info_t dram_regions_info = {0};

/*******************************************************************************
 * Return the pointer to the 'dram_regions_info structure of the DRAM.
 * This structure is populated after init_ddr().
 ******************************************************************************/
dram_regions_info_t *get_dram_regions_info(void)
{
	return &dram_regions_info;
}
#ifdef DDR_INIT
/*
 * Split the detected total DRAM size (set by init_ddr()) across the
 * per-controller regions of dram_regions_info, clipping each region to
 * its platform maximum. The secure + shared carve-outs are subtracted
 * from DRAM0 and from the total.
 */
static void populate_dram_regions_info(void)
{
	long long dram_remain_size = dram_regions_info.total_dram_size;
	uint8_t reg_id = 0U;

	dram_regions_info.region[reg_id].addr = NXP_DRAM0_ADDR;
	dram_regions_info.region[reg_id].size =
			dram_remain_size > NXP_DRAM0_MAX_SIZE ?
				NXP_DRAM0_MAX_SIZE : dram_remain_size;

	if (dram_regions_info.region[reg_id].size != NXP_DRAM0_SIZE) {
		ERROR("Incorrect DRAM0 size is defined in platform_def.h\n");
	}

	dram_remain_size -= dram_regions_info.region[reg_id].size;
	/* Carve the secure and SP-shared windows out of DRAM0. */
	dram_regions_info.region[reg_id].size -= (NXP_SECURE_DRAM_SIZE
						+ NXP_SP_SHRD_DRAM_SIZE);

	assert(dram_regions_info.region[reg_id].size > 0);

	/* Reduce total DRAM size by the secure + shared carve-out. */
	dram_regions_info.total_dram_size -= (NXP_SECURE_DRAM_SIZE
						+ NXP_SP_SHRD_DRAM_SIZE);

#if defined(NXP_DRAM1_ADDR) && defined(NXP_DRAM1_MAX_SIZE)
	if (dram_remain_size > 0) {
		reg_id++;
		dram_regions_info.region[reg_id].addr = NXP_DRAM1_ADDR;
		dram_regions_info.region[reg_id].size =
				dram_remain_size > NXP_DRAM1_MAX_SIZE ?
					NXP_DRAM1_MAX_SIZE : dram_remain_size;
		dram_remain_size -= dram_regions_info.region[reg_id].size;
	}
#endif
#if defined(NXP_DRAM2_ADDR) && defined(NXP_DRAM2_MAX_SIZE)
	/*
	 * BUGFIX: this block previously reused the DRAM1 address/size macros
	 * (copy-paste error); it must use the DRAM2 macros it is guarded by.
	 */
	if (dram_remain_size > 0) {
		reg_id++;
		dram_regions_info.region[reg_id].addr = NXP_DRAM2_ADDR;
		dram_regions_info.region[reg_id].size =
				dram_remain_size > NXP_DRAM2_MAX_SIZE ?
					NXP_DRAM2_MAX_SIZE : dram_remain_size;
		dram_remain_size -= dram_regions_info.region[reg_id].size;
	}
#endif
	reg_id++;
	dram_regions_info.num_dram_regions = reg_id;
}
#endif
#ifdef IMAGE_BL32
/*
 * SPSR value to use when entering BL32.
 *
 * Always 0: the Secure Payload Dispatcher programs the SPSR itself
 * immediately before handing control to the BL32 image, so BL2 does
 * not provide one here.
 */
static uint32_t ls_get_spsr_for_bl32_entry(void)
{
	return 0U;
}
#endif
/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
#ifndef AARCH32
static uint32_t ls_get_spsr_for_bl33_entry(void)
{
	unsigned int mode;
	uint32_t spsr;

	/* Figure out what mode we enter the non-secure world in */
	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	return spsr;
}
#else
/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
static uint32_t ls_get_spsr_for_bl33_entry(void)
{
	unsigned int hyp_status, mode, spsr;

	/* Enter hyp mode when virtualization extensions are implemented. */
	hyp_status = GET_VIRT_EXT(read_id_pfr1());

	mode = (hyp_status) ? MODE32_hyp : MODE32_svc;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	/* Bit 0 of the entrypoint selects the Thumb (T32) state. */
	spsr = SPSR_MODE32(mode, plat_get_ns_image_entrypoint() & 0x1,
			SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
	return spsr;
}
#endif /* AARCH32 */
/*
 * Early BL2-at-EL3 platform setup: SoC init, IO layer registration and,
 * when DDR is already known, DRAM region bookkeeping. The four boot
 * arguments are unused on these platforms.
 */
void bl2_el3_early_platform_setup(u_register_t arg0 __unused,
				  u_register_t arg1 __unused,
				  u_register_t arg2 __unused,
				  u_register_t arg3 __unused)
{
	/*
	 * SoC specific early init
	 * Any errata handling or SoC specific early initialization can
	 * be done here
	 * Set Counter Base Frequency in CNTFID0 and in cntfrq_el0.
	 * Initialize the interconnect.
	 * Enable coherency for primary CPU cluster
	 */
	soc_early_init();

	/* Initialise the IO layer and register platform IO devices */
	plat_io_setup();

	/* Split the total DRAM size into regions if it is already known. */
	if (dram_regions_info.total_dram_size > 0) {
		populate_dram_regions_info();
	}

#ifdef NXP_NV_SW_MAINT_LAST_EXEC_DATA
	/* Pull the warm-reset / watchdog flags from non-volatile storage. */
	read_nv_app_data();
#if DEBUG
	const nv_app_data_t *nv_app_data = get_nv_data();

	INFO("Value of warm_reset flag = 0x%x\n", nv_app_data->warm_rst_flag);
	INFO("Value of WDT flag = 0x%x\n", nv_app_data->wdt_rst_flag);
#endif
#endif
}
/*******************************************************************************
 * Perform the very early platform specific architectural setup here. At the
 * moment this is only initializes the mmu in a quick and dirty way.
 ******************************************************************************/
void ls_bl2_el3_plat_arch_setup(void)
{
	unsigned int flags = 0U;

	/* Initialise the IO layer and register platform IO devices */
	ls_setup_page_tables(
#if SEPARATE_RW_AND_NOLOAD
			      BL2_START,
			      BL2_LIMIT - BL2_START,
#else
			      BL2_BASE,
			      (unsigned long)(&__BL2_END__) - BL2_BASE,
#endif
			      BL_CODE_BASE,
			      BL_CODE_END,
			      BL_RO_DATA_BASE,
			      BL_RO_DATA_END
#if USE_COHERENT_MEM
			      , BL_COHERENT_RAM_BASE,
			      BL_COHERENT_RAM_END
#endif
			      );

	/*
	 * Until the DRAM layout is known, run with the caches disabled for
	 * normal memory (XLAT_TABLE_NC) so stale mappings cannot be cached.
	 */
	if ((dram_regions_info.region[0].addr == 0)
		&& (dram_regions_info.total_dram_size == 0)) {
		flags = XLAT_TABLE_NC;
	}

#ifdef AARCH32
	enable_mmu_secure(0);
#else
	enable_mmu_el3(flags);
#endif
}
/* Generic BL2-at-EL3 arch-setup hook: delegate to the LS implementation. */
void bl2_el3_plat_arch_setup(void)
{
	ls_bl2_el3_plat_arch_setup();
}
/* No BL2 platform setup is required before image loading on these SoCs. */
void bl2_platform_setup(void)
{
	/*
	 * Perform platform setup before loading the image.
	 */
}
/* Handling image information by platform. */
int ls_bl2_handle_post_image_load(unsigned int image_id)
{
int err = 0;
bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
assert(bl_mem_params);
switch (image_id) {
case BL31_IMAGE_ID:
bl_mem_params->ep_info.args.arg3 =
(u_register_t) &dram_regions_info;
/* Pass the value of PORSR1 register in Argument 4 */
bl_mem_params->ep_info.args.arg4 =
(u_register_t)read_reg_porsr1();
flush_dcache_range((uintptr_t)&dram_regions_info,
sizeof(dram_regions_info));
break;
#if defined(AARCH64) && defined(IMAGE_BL32)
case BL32_IMAGE_ID:
bl_mem_params->ep_info.spsr = ls_get_spsr_for_bl32_entry();
break;
#endif
case BL33_IMAGE_ID:
/* BL33 expects to receive the primary CPU MPID (through r0) */
bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
bl_mem_params->ep_info.spsr = ls_get_spsr_for_bl33_entry();
break;
}
return err;
}
/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for given `image_id`.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return ls_bl2_handle_post_image_load(image_id);
}
/*
 * Last platform hook before BL2 exits EL3 and jumps to the next image.
 *
 * Fix: the original wrote "return soc_bl2_prepare_exit();" — returning a
 * void expression from a void function is a C constraint violation
 * (C11 6.8.6.4p1); call the SoC hook as a plain statement instead.
 */
void bl2_el3_plat_prepare_exit(void)
{
	soc_bl2_prepare_exit();
}
/* Called to do the dynamic initialization required
 * before loading the next image.
 */
void bl2_plat_preload_setup(void)
{
	/* SoC hook: typically runs DDR init and fills total_dram_size. */
	soc_preload_setup();

	/* NOTE(review): message lacks '\n' and uses NOTICE for an error. */
	if (dram_regions_info.total_dram_size < NXP_DRAM0_SIZE) {
		NOTICE("ERROR: DRAM0 Size is not correctly configured.");
		assert(false);
	}

	/* DRAM became known only now: build regions and map them in. */
	if ((dram_regions_info.region[0].addr == 0)
		&& (dram_regions_info.total_dram_size > 0)) {
		populate_dram_regions_info();

		mmap_add_ddr_region_dynamically();
	}

	/* setup the memory region access permissions */
	soc_mem_access();

#ifdef POLICY_FUSE_PROVISION
	/* Burn fuses from the FIP-provided fuse image, if provisioned. */
	fip_fuse_provisioning((uintptr_t)FUSE_BUF, FUSE_SZ);
#endif
}

View File

@ -0,0 +1,210 @@
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <assert.h>
#ifdef LS_EL3_INTERRUPT_HANDLER
#include <ls_interrupt_mgmt.h>
#endif
#include <mmu_def.h>
#include <plat_common.h>
/*
 * Placeholder variables for copying the arguments that have been passed to
 * BL31 from BL2.
 */
#ifdef TEST_BL31
/* Canned SPSR values used when BL31 is built as a standalone test. */
#define SPSR_FOR_EL2H	0x3C9
#define SPSR_FOR_EL1H	0x3C5
#else
static entry_point_info_t bl31_image_ep_info;
#endif

static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;

/* Local copy of the DRAM layout handed over by BL2 (via arg3). */
static dram_regions_info_t dram_regions_info = {0};

/* RCW.PORSR1 value handed over by BL2 (via arg4). */
static uint64_t rcw_porsr1;

/* Return the pointer to the 'dram_regions_info structure of the DRAM.
 * This structure is populated after init_ddr().
 */
dram_regions_info_t *get_dram_regions_info(void)
{
	return &dram_regions_info;
}

/* Return the RCW.PORSR1 value which was passed in from BL2
 */
uint64_t bl31_get_porsr1(void)
{
	return rcw_porsr1;
}
/*
 * Return the entry-point information of the next image for the requested
 * security state:
 * - NON_SECURE selects the BL33 entry point;
 * - SECURE selects the BL32 entry point;
 * - NULL is returned when the selected image was never populated
 *   (its pc is still zero).
 */
entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
{
	entry_point_info_t *ep;

	assert(sec_state_is_valid(type));

	if (type == NON_SECURE) {
		ep = &bl33_image_ep_info;
	} else {
		ep = &bl32_image_ep_info;
	}

#ifdef TEST_BL31
	/* Standalone-test build: fabricate a non-secure test entry point. */
	ep->pc = _get_test_entry();
	ep->spsr = SPSR_FOR_EL2H;
	ep->h.attr = NON_SECURE;
#endif

	return (ep->pc != 0U) ? ep : NULL;
}
/*
 * Perform any BL31 early platform setup common to NXP platforms.
 * - Here is an opportunity to copy parameters passed by the calling EL (S-EL1
 *   in BL2 & S-EL3 in BL1) before they are lost (potentially).
 * - This needs to be done before the MMU is initialized so that the
 *   memory layout can be used while creating page tables.
 * - BL2 has flushed this information to memory, in order to fetch latest data.
 */
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
				u_register_t arg2, u_register_t arg3)
{
#ifndef TEST_BL31
	int i = 0;
	/* arg0 carries the bl_params_t list built by BL2. */
	void *from_bl2 = (void *)arg0;
#endif

	soc_early_platform_setup2();

#ifdef TEST_BL31
	/* Standalone-test build: hard-code a two-region DRAM layout. */
	dram_regions_info.num_dram_regions  = 2;
	dram_regions_info.total_dram_size   = 0x100000000;
	dram_regions_info.region[0].addr    = 0x80000000;
	dram_regions_info.region[0].size    = 0x80000000;
	dram_regions_info.region[1].addr    = 0x880000000;
	dram_regions_info.region[1].size    = 0x80000000;
	bl33_image_ep_info.pc = _get_test_entry();
#else
	/*
	 * Check params passed from BL2 should not be NULL,
	 */
	bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;

	assert(params_from_bl2 != NULL);
	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
	assert(params_from_bl2->h.version >= VERSION_2);

	bl_params_node_t *bl_params = params_from_bl2->head;

	/*
	 * Copy BL33 and BL32 (if present), entry point information.
	 * They are stored in Secure RAM, in BL2's address space.
	 */
	while (bl_params != NULL) {
		if (bl_params->image_id == BL31_IMAGE_ID) {
			bl31_image_ep_info = *bl_params->ep_info;
			/* Deep-copy the DRAM layout BL2 placed behind arg3. */
			dram_regions_info_t *loc_dram_regions_info =
			(dram_regions_info_t *) bl31_image_ep_info.args.arg3;

			dram_regions_info.num_dram_regions =
					loc_dram_regions_info->num_dram_regions;
			dram_regions_info.total_dram_size =
					loc_dram_regions_info->total_dram_size;
			VERBOSE("Number of DRAM Regions = %llx\n",
					dram_regions_info.num_dram_regions);

			for (i = 0; i < dram_regions_info.num_dram_regions;
								i++) {
				dram_regions_info.region[i].addr =
					loc_dram_regions_info->region[i].addr;
				dram_regions_info.region[i].size =
					loc_dram_regions_info->region[i].size;
				VERBOSE("DRAM%d Size = %llx\n", i,
					dram_regions_info.region[i].size);
			}
			/* PORSR1 travels in arg4 (see BL2 post-image-load). */
			rcw_porsr1 = bl31_image_ep_info.args.arg4;
		}

		if (bl_params->image_id == BL32_IMAGE_ID) {
			bl32_image_ep_info = *bl_params->ep_info;
		}

		if (bl_params->image_id == BL33_IMAGE_ID) {
			bl33_image_ep_info = *bl_params->ep_info;
		}

		bl_params = bl_params->next_params_info;
	}
#endif /* TEST_BL31 */

	/* A bootable BL33 is mandatory; nothing to hand over to otherwise. */
	if (bl33_image_ep_info.pc == 0) {
		panic();
	}

	/*
	 * perform basic initialization on the soc
	 */
	soc_init();
}
/*******************************************************************************
 * Perform any BL31 platform setup common to ARM standard platforms
 ******************************************************************************/
void bl31_platform_setup(void)
{
	NOTICE("Welcome to %s BL31 Phase\n", BOARD);
	soc_platform_setup();

	/* Console logs gone missing as part going to
	 * EL1 for initializing BL32 if present.
	 * console flush is necessary to avoid it.
	 */
	(void)console_flush();
}

/* Runtime setup: install the EL3 interrupt handlers (if enabled) and let
 * the SoC hook do its final runtime configuration.
 */
void bl31_plat_runtime_setup(void)
{
#ifdef LS_EL3_INTERRUPT_HANDLER
	ls_el3_interrupt_config();
#endif
	soc_runtime_setup();
}
/*******************************************************************************
 * Perform the very early platform specific architectural setup shared between
 * ARM standard platforms. This only does basic initialization. Later
 * architectural setup (bl31_arch_setup()) does not do anything platform
 * specific.
 ******************************************************************************/
void bl31_plat_arch_setup(void)
{
	/* Map the whole BL31 image, then refine code/rodata attributes. */
	ls_setup_page_tables(BL31_BASE,
			      BL31_END - BL31_BASE,
			      BL_CODE_BASE,
			      BL_CODE_END,
			      BL_RO_DATA_BASE,
			      BL_RO_DATA_END
#if USE_COHERENT_MEM
			      , BL_COHERENT_RAM_BASE,
			      BL_COHERENT_RAM_END
#endif
			      );
	enable_mmu_el3(0);
}

View File

@ -0,0 +1,240 @@
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <assert.h>
#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/mmio.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <mmu_def.h>
#include <plat/common/platform.h>
#include "plat_common.h"
#include "platform_def.h"
const mmap_region_t *plat_ls_get_mmap(void);

/*
 * Table of memory regions for various BL stages to map using the MMU.
 * This doesn't include Trusted SRAM as arm_setup_page_tables() already
 * takes care of mapping it.
 *
 * The flash needs to be mapped as writable in order to erase the FIP's Table of
 * Contents in case of unrecoverable error (see plat_error_handler()).
 */
#ifdef IMAGE_BL2
/* BL2 only needs the CCSR configuration space. */
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
	{0}
};
#endif

#ifdef IMAGE_BL31
/* BL31 additionally maps the debug space (if present) and OCRAM. */
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
#ifdef NXP_DCSR_ADDR
	LS_MAP_DCSR,
#endif
	LS_MAP_OCRAM,
	{0}
};
#endif

#ifdef IMAGE_BL32
/* BL32 maps CCSR plus its own secure memory window. */
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
	LS_MAP_BL32_SEC_MEM,
	{0}
};
#endif
/* Weak definitions may be overridden in specific NXP SoC */
#pragma weak plat_get_ns_image_entrypoint
#pragma weak plat_ls_get_mmap

#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
/*
 * Add static (boot-time) MMU mappings for every DRAM region recorded in
 * dram_regions_info: DRAM0 as non-secure, the secure + SP-shared carve-out
 * just above DRAM0's non-secure part as secure, and (in BL31 only) any
 * further DRAM regions as non-secure.
 *
 * Fix: the `if ... break;` in the loop body is now braced, matching this
 * file's house style (cf. mmap_add_ddr_region_dynamically()).
 */
static void mmap_add_ddr_regions_statically(void)
{
	int i = 0;
	dram_regions_info_t *info_dram_regions = get_dram_regions_info();

	/* MMU map for Non-Secure DRAM Regions */
	VERBOSE("DRAM Region %d: %p - %p\n", i,
		(void *) info_dram_regions->region[i].addr,
		(void *) (info_dram_regions->region[i].addr
			+ info_dram_regions->region[i].size
			- 1));
	mmap_add_region(info_dram_regions->region[i].addr,
			info_dram_regions->region[i].addr,
			info_dram_regions->region[i].size,
			MT_MEMORY | MT_RW | MT_NS);

	/* MMU map for Secure DDR Region on DRAM-0 */
	if (info_dram_regions->region[i].size >
		(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
		VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				+ NXP_SECURE_DRAM_SIZE
				+ NXP_SP_SHRD_DRAM_SIZE
				- 1));
		mmap_add_region((info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
				MT_MEMORY | MT_RW | MT_SECURE);
	}

#ifdef IMAGE_BL31
	/* Remaining DRAM regions (regions past index 0) are non-secure. */
	for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
		if (info_dram_regions->region[i].size == 0) {
			break;
		}
		VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
		mmap_add_region(info_dram_regions->region[i].addr,
				info_dram_regions->region[i].addr,
				info_dram_regions->region[i].size,
				MT_MEMORY | MT_RW | MT_NS);
	}
#endif
}
#endif
#if defined(PLAT_XLAT_TABLES_DYNAMIC)
/*
 * Dynamic-mapping counterpart of mmap_add_ddr_regions_statically(): called
 * after the MMU is already live (DDR sized late in BL2), so it must use
 * mmap_add_dynamic_region() instead of the static mmap table.
 * NOTE(review): mmap_add_dynamic_region()'s return value is ignored here.
 */
void mmap_add_ddr_region_dynamically(void)
{
	int i = 0;
	dram_regions_info_t *info_dram_regions = get_dram_regions_info();

	/* MMU map for Non-Secure DRAM Regions */
	VERBOSE("DRAM Region %d: %p - %p\n", i,
		(void *) info_dram_regions->region[i].addr,
		(void *) (info_dram_regions->region[i].addr
			+ info_dram_regions->region[i].size
			- 1));
	mmap_add_dynamic_region(info_dram_regions->region[i].addr,
			info_dram_regions->region[i].addr,
			info_dram_regions->region[i].size,
			MT_MEMORY | MT_RW | MT_NS);

	/* MMU map for Secure DDR Region on DRAM-0 */
	if (info_dram_regions->region[i].size >
		(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
		VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				+ NXP_SECURE_DRAM_SIZE
				+ NXP_SP_SHRD_DRAM_SIZE
				- 1));
		mmap_add_dynamic_region((info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
				MT_MEMORY | MT_RW | MT_SECURE);
	}

#ifdef IMAGE_BL31
	/* Remaining DRAM regions (regions past index 0) are non-secure. */
	for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
		if (info_dram_regions->region[i].size == 0) {
			break;
		}
		VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
		mmap_add_dynamic_region(info_dram_regions->region[i].addr,
				info_dram_regions->region[i].addr,
				info_dram_regions->region[i].size,
				MT_MEMORY | MT_RW | MT_NS);
	}
#endif
}
#endif
/*
 * Set up the page tables for the generic and platform-specific memory regions.
 * The extents of the generic memory regions are specified by the function
 * arguments and consist of:
 * - Trusted SRAM seen by the BL image;
 * - Code section;
 * - Read-only data section;
 * - Coherent memory region, if applicable.
 */
void ls_setup_page_tables(uintptr_t total_base,
			  size_t total_size,
			  uintptr_t code_start,
			  uintptr_t code_limit,
			  uintptr_t rodata_start,
			  uintptr_t rodata_limit
#if USE_COHERENT_MEM
			  ,
			  uintptr_t coh_start,
			  uintptr_t coh_limit
#endif
			  )
{
	/*
	 * Map the Trusted SRAM with appropriate memory attributes.
	 * Subsequent mappings will adjust the attributes for specific regions.
	 */
	VERBOSE("Memory seen by this BL image: %p - %p\n",
		(void *) total_base, (void *) (total_base + total_size));
	mmap_add_region(total_base, total_base,
			total_size,
			MT_MEMORY | MT_RW | MT_SECURE);

	/* Re-map the code section */
	VERBOSE("Code region: %p - %p\n",
		(void *) code_start, (void *) code_limit);
	mmap_add_region(code_start, code_start,
			code_limit - code_start,
			MT_CODE | MT_SECURE);

	/* Re-map the read-only data section */
	VERBOSE("Read-only data region: %p - %p\n",
		(void *) rodata_start, (void *) rodata_limit);
	mmap_add_region(rodata_start, rodata_start,
			rodata_limit - rodata_start,
			MT_RO_DATA | MT_SECURE);

#if USE_COHERENT_MEM
	/* Re-map the coherent memory region */
	VERBOSE("Coherent region: %p - %p\n",
		(void *) coh_start, (void *) coh_limit);
	mmap_add_region(coh_start, coh_start,
			coh_limit - coh_start,
			MT_DEVICE | MT_RW | MT_SECURE);
#endif

	/* Now (re-)map the platform-specific memory regions */
	mmap_add(plat_ls_get_mmap());

#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
	/* DRAM layout is known at this point; map it statically. */
	mmap_add_ddr_regions_statically();
#endif

	/* Create the page tables to reflect the above mappings */
	init_xlat_tables();
}
/*******************************************************************************
* Returns NXP platform specific memory map regions.
******************************************************************************/
const mmap_region_t *plat_ls_get_mmap(void)
{
	/* plat_ls_mmap is the SoC-specific static region table defined elsewhere */
	return plat_ls_mmap;
}

View File

@ -0,0 +1,55 @@
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <arch_helpers.h>
#include <common/debug.h>
#if TRUSTED_BOARD_BOOT
#include <dcfg.h>
#include <snvs.h>
#endif
#include "plat_common.h"
/*
* Error handler
*/
/*
 * Platform error handler, invoked by generic BL code on a fatal error.
 *
 * err: negative errno-style code describing the failure.
 *
 * On an authentication failure with secure boot enabled, drive the SNVS
 * state machine (soft-fail for production/ITS mode, non-secure otherwise)
 * so the failure is recorded before the platform is parked.
 * This function never returns: it spins in WFI until the watchdog resets
 * the system.
 */
void plat_error_handler(int err)
{
#if TRUSTED_BOARD_BOOT
	uint32_t mode;
	bool sb = check_boot_mode_secure(&mode);
#endif

	switch (err) {
	case -ENOENT:
		/* fallthrough: image-not-found is treated as an auth failure */
	case -EAUTH:
		printf("Authentication failure\n");
#if TRUSTED_BOARD_BOOT
		/* For SB production mode i.e ITS = 1 */
		if (sb == true) {
			if (mode == 1U) {
				transition_snvs_soft_fail();
			} else {
				transition_snvs_non_secure();
			}
		}
#endif
		break;
	default:
		/* Unexpected error */
		break;
	}

	/* Loop until the watchdog resets the system */
	for (;;) {
		wfi();
	}
}

View File

@ -0,0 +1,33 @@
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <common/desc_image_load.h>
/*******************************************************************************
* This function flushes the data structures so that they are visible
* in memory for the next BL image.
******************************************************************************/
void plat_flush_next_bl_params(void)
{
	/* Flush the image descriptor list so the next BL sees it in memory */
	flush_bl_params_desc();
}
/*******************************************************************************
* This function returns the list of loadable images.
******************************************************************************/
bl_load_info_t *plat_get_bl_image_load_info(void)
{
	/* Delegate to the generic descriptor-based image loading framework */
	return get_bl_load_info_from_mem_params_desc();
}
/*******************************************************************************
* This function returns the list of executable images.
******************************************************************************/
bl_params_t *plat_get_next_bl_params(void)
{
	/* Delegate to the generic descriptor-based image loading framework */
	return get_next_bl_params_from_mem_params_desc();
}

View File

@ -0,0 +1,66 @@
/*
* Copyright 2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <ls_interrupt_mgmt.h>
#include <plat/common/platform.h>
/* EL3 interrupt handlers, indexed by interrupt ID; NULL when unregistered */
static interrupt_type_handler_t type_el3_interrupt_table[MAX_INTR_EL3];
/*
 * Register 'handler' for EL3 interrupt 'id'.
 *
 * Returns 0 on success, -EINVAL for a NULL handler or out-of-range ID,
 * -EALREADY if a handler is already installed for that ID.
 */
int request_intr_type_el3(uint32_t id, interrupt_type_handler_t handler)
{
	/* Reject NULL handlers and IDs beyond the table */
	if (handler == NULL) {
		return -EINVAL;
	}
	if (id >= MAX_INTR_EL3) {
		return -EINVAL;
	}

	/* Refuse to overwrite a previously registered handler */
	if (type_el3_interrupt_table[id] != NULL) {
		return -EALREADY;
	}

	type_el3_interrupt_table[id] = handler;

	return 0;
}
static uint64_t ls_el3_interrupt_handler(uint32_t id, uint32_t flags,
void *handle, void *cookie)
{
uint32_t intr_id;
interrupt_type_handler_t handler;
intr_id = plat_ic_get_pending_interrupt_id();
INFO("Interrupt recvd is %d\n", intr_id);
handler = type_el3_interrupt_table[intr_id];
if (handler != NULL) {
handler(intr_id, flags, handle, cookie);
}
/*
* Mark this interrupt as complete to avoid a interrupt storm.
*/
plat_ic_end_of_interrupt(intr_id);
return 0U;
}
/*
 * Hook ls_el3_interrupt_handler() into the framework for EL3 interrupts,
 * routing them from the non-secure world. Panics on registration failure,
 * since EL3 interrupt delivery would otherwise be silently broken.
 */
void ls_el3_interrupt_config(void)
{
	uint64_t flags = 0U;
	/* register_interrupt_type_handler() returns a signed error code */
	int32_t rc;

	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_EL3,
					     ls_el3_interrupt_handler,
					     flags);
	if (rc != 0) {
		panic();
	}
}

View File

@ -0,0 +1,519 @@
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <assert.h>
#include <endian.h>
#include <string.h>
#include <common/debug.h>
#include <common/tbbr/tbbr_img_def.h>
#include <drivers/io/io_block.h>
#include <drivers/io/io_driver.h>
#include <drivers/io/io_fip.h>
#include <drivers/io/io_memmap.h>
#include <drivers/io/io_storage.h>
#ifdef FLEXSPI_NOR_BOOT
#include <flexspi_nor.h>
#endif
#if defined(QSPI_BOOT)
#include <qspi.h>
#endif
#if defined(SD_BOOT) || defined(EMMC_BOOT)
#include <sd_mmc.h>
#endif
#include <tools_share/firmware_image_package.h>
#ifdef CONFIG_DDR_FIP_IMAGE
#include <ddr_io_storage.h>
#endif
#ifdef POLICY_FUSE_PROVISION
#include <fuse_io.h>
#endif
#include "plat_common.h"
#include "platform_def.h"
/* Opaque value whose address serves as the FIP device spec (see io_dev_open
 * call in ls_io_fip_setup) */
uint32_t fip_device;
/* IO devices */
uintptr_t backend_dev_handle;

static const io_dev_connector_t *fip_dev_con;
static uintptr_t fip_dev_handle;
static const io_dev_connector_t *backend_dev_con;

/* Location of the FIP inside the boot medium; the offset is overwritten by
 * the per-device setup routines (plat_io_block_setup/plat_io_memmap_setup) */
static io_block_spec_t fip_block_spec = {
	.offset = PLAT_FIP_OFFSET,
	.length = PLAT_FIP_MAX_SIZE
};

/* UUIDs identifying each image inside the FIP */
static const io_uuid_spec_t bl2_uuid_spec = {
	.uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
};

/* NOTE(review): fuse-provisioning image reuses the SCP_BL2 UUID slot */
static const io_uuid_spec_t fuse_bl2_uuid_spec = {
	.uuid = UUID_SCP_FIRMWARE_SCP_BL2,
};

static const io_uuid_spec_t bl31_uuid_spec = {
	.uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
};

static const io_uuid_spec_t bl32_uuid_spec = {
	.uuid = UUID_SECURE_PAYLOAD_BL32,
};

static const io_uuid_spec_t bl33_uuid_spec = {
	.uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
};

static const io_uuid_spec_t tb_fw_config_uuid_spec = {
	.uuid = UUID_TB_FW_CONFIG,
};

static const io_uuid_spec_t hw_config_uuid_spec = {
	.uuid = UUID_HW_CONFIG,
};

#if TRUSTED_BOARD_BOOT
/* UUIDs of the TBBR key/content certificates carried in the FIP */
static const io_uuid_spec_t tb_fw_cert_uuid_spec = {
	.uuid = UUID_TRUSTED_BOOT_FW_CERT,
};

static const io_uuid_spec_t trusted_key_cert_uuid_spec = {
	.uuid = UUID_TRUSTED_KEY_CERT,
};

static const io_uuid_spec_t fuse_key_cert_uuid_spec = {
	.uuid = UUID_SCP_FW_KEY_CERT,
};

static const io_uuid_spec_t soc_fw_key_cert_uuid_spec = {
	.uuid = UUID_SOC_FW_KEY_CERT,
};

static const io_uuid_spec_t tos_fw_key_cert_uuid_spec = {
	.uuid = UUID_TRUSTED_OS_FW_KEY_CERT,
};

static const io_uuid_spec_t nt_fw_key_cert_uuid_spec = {
	.uuid = UUID_NON_TRUSTED_FW_KEY_CERT,
};

static const io_uuid_spec_t fuse_cert_uuid_spec = {
	.uuid = UUID_SCP_FW_CONTENT_CERT,
};

static const io_uuid_spec_t soc_fw_cert_uuid_spec = {
	.uuid = UUID_SOC_FW_CONTENT_CERT,
};

static const io_uuid_spec_t tos_fw_cert_uuid_spec = {
	.uuid = UUID_TRUSTED_OS_FW_CONTENT_CERT,
};

static const io_uuid_spec_t nt_fw_cert_uuid_spec = {
	.uuid = UUID_NON_TRUSTED_FW_CONTENT_CERT,
};
#endif /* TRUSTED_BOARD_BOOT */

static int open_fip(const uintptr_t spec);

/* IO policy: where a given image lives and how to probe for it */
struct plat_io_policy {
	uintptr_t *dev_handle;			/* backing device handle */
	uintptr_t image_spec;			/* spec passed to io_open() */
	int (*check)(const uintptr_t spec);	/* availability probe */
};
/*
 * By default, ARM platforms load images from the FIP.
 * Indexed by image ID; IDs outside this table are handled by the optional
 * DDR/FUSE FIP lookups in plat_get_image_source().
 */
static const struct plat_io_policy policies[] = {
	/* The FIP itself is read from the raw backend device */
	[FIP_IMAGE_ID] = {
		&backend_dev_handle,
		(uintptr_t)&fip_block_spec,
		open_backend
	},
	[BL2_IMAGE_ID] = {
		&fip_dev_handle,
		(uintptr_t)&bl2_uuid_spec,
		open_fip
	},
	/* Fuse provisioning image, carried in the SCP_BL2 slot */
	[SCP_BL2_IMAGE_ID] = {
		&fip_dev_handle,
		(uintptr_t)&fuse_bl2_uuid_spec,
		open_fip
	},
	[BL31_IMAGE_ID] = {
		&fip_dev_handle,
		(uintptr_t)&bl31_uuid_spec,
		open_fip
	},
	[BL32_IMAGE_ID] = {
		&fip_dev_handle,
		(uintptr_t)&bl32_uuid_spec,
		open_fip
	},
	[BL33_IMAGE_ID] = {
		&fip_dev_handle,
		(uintptr_t)&bl33_uuid_spec,
		open_fip
	},
	[TB_FW_CONFIG_ID] = {
		&fip_dev_handle,
		(uintptr_t)&tb_fw_config_uuid_spec,
		open_fip
	},
	[HW_CONFIG_ID] = {
		&fip_dev_handle,
		(uintptr_t)&hw_config_uuid_spec,
		open_fip
	},
#if TRUSTED_BOARD_BOOT
	/* TBBR key/content certificates, all sourced from the FIP */
	[TRUSTED_BOOT_FW_CERT_ID] = {
		&fip_dev_handle,
		(uintptr_t)&tb_fw_cert_uuid_spec,
		open_fip
	},
	[TRUSTED_KEY_CERT_ID] = {
		&fip_dev_handle,
		(uintptr_t)&trusted_key_cert_uuid_spec,
		open_fip
	},
	[SCP_FW_KEY_CERT_ID] = {
		&fip_dev_handle,
		(uintptr_t)&fuse_key_cert_uuid_spec,
		open_fip
	},
	[SOC_FW_KEY_CERT_ID] = {
		&fip_dev_handle,
		(uintptr_t)&soc_fw_key_cert_uuid_spec,
		open_fip
	},
	[TRUSTED_OS_FW_KEY_CERT_ID] = {
		&fip_dev_handle,
		(uintptr_t)&tos_fw_key_cert_uuid_spec,
		open_fip
	},
	[NON_TRUSTED_FW_KEY_CERT_ID] = {
		&fip_dev_handle,
		(uintptr_t)&nt_fw_key_cert_uuid_spec,
		open_fip
	},
	[SCP_FW_CONTENT_CERT_ID] = {
		&fip_dev_handle,
		(uintptr_t)&fuse_cert_uuid_spec,
		open_fip
	},
	[SOC_FW_CONTENT_CERT_ID] = {
		&fip_dev_handle,
		(uintptr_t)&soc_fw_cert_uuid_spec,
		open_fip
	},
	[TRUSTED_OS_FW_CONTENT_CERT_ID] = {
		&fip_dev_handle,
		(uintptr_t)&tos_fw_cert_uuid_spec,
		open_fip
	},
	[NON_TRUSTED_FW_CONTENT_CERT_ID] = {
		&fip_dev_handle,
		(uintptr_t)&nt_fw_cert_uuid_spec,
		open_fip
	},
#endif /* TRUSTED_BOARD_BOOT */
};
/* Weak definitions may be overridden in specific ARM standard platform */
#pragma weak plat_io_setup
/*
 * Check that a Firmware Image Package is accessible through the FIP
 * device and that the image described by 'spec' is present in it.
 */
/* Probe the FIP for the image described by 'spec'; returns 0 when found */
static int open_fip(const uintptr_t spec)
{
	uintptr_t img_handle;
	int rc;

	/* See if a Firmware Image Package is available */
	rc = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
	if (rc != 0) {
		return rc;
	}

	rc = io_open(fip_dev_handle, spec, &img_handle);
	if (rc != 0) {
		return rc;
	}

	/* The image exists in the FIP; the probe only opens and closes it */
	VERBOSE("Using FIP\n");
	io_close(img_handle);

	return rc;
}
/* Probe the raw backend boot device for the object described by 'spec' */
int open_backend(const uintptr_t spec)
{
	uintptr_t img_handle;
	int rc;

	rc = io_dev_init(backend_dev_handle, (uintptr_t)NULL);
	if (rc != 0) {
		return rc;
	}

	rc = io_open(backend_dev_handle, spec, &img_handle);
	if (rc == 0) {
		/* Probe only: close immediately after a successful open */
		io_close(img_handle);
	}

	return rc;
}
#if defined(SD_BOOT) || defined(EMMC_BOOT)
/*
 * Register the block IO driver for an SD/eMMC backend and open the device.
 * NOTE(review): failures are only trapped by assert() in debug builds; the
 * raw io_result is returned either way.
 */
static int plat_io_block_setup(size_t fip_offset, uintptr_t block_dev_spec)
{
	int io_result;

	/* The FIP lives at 'fip_offset' within the block device */
	fip_block_spec.offset = fip_offset;

	io_result = register_io_dev_block(&backend_dev_con);
	assert(io_result == 0);

	/* Open connections to devices and cache the handles */
	io_result = io_dev_open(backend_dev_con, block_dev_spec,
				&backend_dev_handle);
	assert(io_result == 0);

	return io_result;
}
#endif
#if defined(FLEXSPI_NOR_BOOT) || defined(QSPI_BOOT)
/*
 * Register the memory-mapped IO driver for NOR-style backends and open it.
 * NOTE(review): failures are only trapped by assert() in debug builds; the
 * raw io_result is returned either way.
 */
static int plat_io_memmap_setup(size_t fip_offset)
{
	int io_result;

	/* The FIP is directly addressable at 'fip_offset' */
	fip_block_spec.offset = fip_offset;

	io_result = register_io_dev_memmap(&backend_dev_con);
	assert(io_result == 0);

	/* Open connections to devices and cache the handles */
	io_result = io_dev_open(backend_dev_con, (uintptr_t)NULL,
				&backend_dev_handle);
	assert(io_result == 0);

	return io_result;
}
#endif
/*
 * Register the FIP driver on top of the backend device and open it.
 * 'boot_dev' is only consumed by the optional DDR/FUSE FIP setup paths
 * compiled in via CONFIG_DDR_FIP_IMAGE / POLICY_FUSE_PROVISION.
 */
static int ls_io_fip_setup(unsigned int boot_dev)
{
	int io_result;

	io_result = register_io_dev_fip(&fip_dev_con);
	assert(io_result == 0);

	/* Open connections to devices and cache the handles */
	io_result = io_dev_open(fip_dev_con, (uintptr_t)&fip_device,
				&fip_dev_handle);
	assert(io_result == 0);

#ifdef CONFIG_DDR_FIP_IMAGE
	/* Open connection to DDR FIP image if available */
	io_result = ddr_fip_setup(fip_dev_con, boot_dev);
	assert(io_result == 0);
#endif

#ifdef POLICY_FUSE_PROVISION
	/* Open connection to FUSE FIP image if available */
	io_result = fuse_fip_setup(fip_dev_con, boot_dev);
	assert(io_result == 0);
#endif

	return io_result;
}
/*
 * IO setup for QSPI boot: configure the QSPI flash window, then access the
 * FIP through the memory-mapped IO driver. Compiled as a loud failure stub
 * when QSPI_BOOT is not part of the build.
 */
int ls_qspi_io_setup(void)
{
#ifdef QSPI_BOOT
	qspi_io_setup(NXP_QSPI_FLASH_ADDR,
			NXP_QSPI_FLASH_SIZE,
			PLAT_FIP_OFFSET);
	/* FIP is directly addressable at flash base + offset */
	return plat_io_memmap_setup(NXP_QSPI_FLASH_ADDR + PLAT_FIP_OFFSET);
#else
	ERROR("QSPI driver not present. Check your BUILD\n");

	/* Should never reach here */
	assert(false);
	return -1;
#endif
}
/*
 * IO setup for eMMC boot on the second ESDHC controller (NXP_ESDHC2_ADDR).
 * Initializes the SD/MMC driver and registers the block IO backend.
 * Compiled as a loud failure stub when EMMC_BOOT/NXP_ESDHC2_ADDR are absent.
 */
int emmc_sdhc2_io_setup(void)
{
#if defined(EMMC_BOOT) && defined(NXP_ESDHC2_ADDR)
	uintptr_t block_dev_spec;
	int ret;

	/* 'false' selects the second controller — TODO confirm flag meaning */
	ret = sd_emmc_init(&block_dev_spec,
			   NXP_ESDHC2_ADDR,
			   NXP_SD_BLOCK_BUF_ADDR,
			   NXP_SD_BLOCK_BUF_SIZE,
			   false);
	if (ret != 0) {
		return ret;
	}

	return plat_io_block_setup(PLAT_FIP_OFFSET, block_dev_spec);
#else
	ERROR("EMMC driver not present. Check your BUILD\n");

	/* Should never reach here */
	assert(false);
	return -1;
#endif
}
/*
 * IO setup for SD/eMMC boot on the first ESDHC controller.
 * Initializes the SD/MMC driver and registers the block IO backend.
 */
int emmc_io_setup(void)
{
/* On the platforms which only has one ESDHC controller,
 * eMMC-boot will use the first ESDHC controller.
 */
#if defined(SD_BOOT) || defined(EMMC_BOOT)
	uintptr_t block_dev_spec;
	int ret;

	/* 'true' selects the first controller — TODO confirm flag meaning */
	ret = sd_emmc_init(&block_dev_spec,
			   NXP_ESDHC_ADDR,
			   NXP_SD_BLOCK_BUF_ADDR,
			   NXP_SD_BLOCK_BUF_SIZE,
			   true);
	if (ret != 0) {
		return ret;
	}

	return plat_io_block_setup(PLAT_FIP_OFFSET, block_dev_spec);
#else
	ERROR("SD driver not present. Check your BUILD\n");

	/* Should never reach here */
	assert(false);
	return -1;
#endif
}
/* IFC NOR boot is not supported by this code base; fail loudly */
int ifc_nor_io_setup(void)
{
	ERROR("NOR driver not present. Check your BUILD\n");

	/* Should never reach here */
	assert(false);
	return -1;
}
/* IFC NAND boot is not supported by this code base; fail loudly */
int ifc_nand_io_setup(void)
{
	ERROR("NAND driver not present. Check your BUILD\n");

	/* Should never reach here */
	assert(false);
	return -1;
}
/*
 * IO setup for FlexSPI NOR boot (also used for FlexSPI NAND in the setup
 * table): initialize the FlexSPI controller, then access the FIP through
 * the memory-mapped IO driver.
 */
int ls_flexspi_nor_io_setup(void)
{
#ifdef FLEXSPI_NOR_BOOT
	int ret = 0;

	ret = flexspi_nor_io_setup(NXP_FLEXSPI_FLASH_ADDR,
				   NXP_FLEXSPI_FLASH_SIZE,
				   NXP_FLEXSPI_ADDR);
	if (ret != 0) {
		ERROR("FlexSPI NOR driver initialization error.\n");
		/* Should never reach here */
		assert(0);
		panic();
		/* panic() is expected not to return; this placates the compiler */
		return -1;
	}

	/* FIP is directly addressable at flash base + offset */
	return plat_io_memmap_setup(NXP_FLEXSPI_FLASH_ADDR + PLAT_FIP_OFFSET);
#else
	ERROR("FlexSPI NOR driver not present. Check your BUILD\n");

	/* Should never reach here */
	assert(false);
	return -1;
#endif
}
/* Per-boot-source IO setup routines, indexed by BOOT_DEVICE_* value */
static int (* const ls_io_setup_table[])(void) = {
	[BOOT_DEVICE_IFC_NOR] = ifc_nor_io_setup,
	[BOOT_DEVICE_IFC_NAND] = ifc_nand_io_setup,
	[BOOT_DEVICE_QSPI] = ls_qspi_io_setup,
	[BOOT_DEVICE_EMMC] = emmc_io_setup,
	[BOOT_DEVICE_SDHC2_EMMC] = emmc_sdhc2_io_setup,
	[BOOT_DEVICE_FLEXSPI_NOR] = ls_flexspi_nor_io_setup,
	/* FlexSPI NAND shares the FlexSPI NOR setup path */
	[BOOT_DEVICE_FLEXSPI_NAND] = ls_flexspi_nor_io_setup,
};
/*
 * Top-level IO setup: detect the boot source, run its backend setup routine,
 * then layer the FIP driver on top.
 * Returns 0 on success or a negative error code.
 */
int plat_io_setup(void)
{
	int (*io_setup)(void);
	unsigned int boot_dev = BOOT_DEVICE_NONE;
	int ret;

	boot_dev = get_boot_dev();
	if (boot_dev == BOOT_DEVICE_NONE) {
		ERROR("Boot Device detection failed, Check RCW_SRC\n");
		return -EINVAL;
	}

	/*
	 * Bounds/NULL-check the table lookup: an unexpected boot source must
	 * not index past the end of ls_io_setup_table or call a NULL entry.
	 */
	if (boot_dev >= ARRAY_SIZE(ls_io_setup_table) ||
	    ls_io_setup_table[boot_dev] == NULL) {
		ERROR("Unsupported boot device %u\n", boot_dev);
		return -EINVAL;
	}

	io_setup = ls_io_setup_table[boot_dev];
	ret = io_setup();
	if (ret != 0) {
		return ret;
	}

	ret = ls_io_fip_setup(boot_dev);
	if (ret != 0) {
		return ret;
	}

	return 0;
}
/* Return an IO device handle and specification which can be used to access
* an image. Use this to enforce platform load policy
*/
/*
 * Resolve 'image_id' to a device handle and image spec per the platform
 * load policy. Returns 0 on success; on failure, a non-zero value from the
 * policy's probe (or -1 if no path handled the ID).
 */
int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
			  uintptr_t *image_spec)
{
	int result = -1;
	const struct plat_io_policy *policy;

	/* IDs inside the table go through the standard FIP policy */
	if (image_id < ARRAY_SIZE(policies)) {
		policy = &policies[image_id];
		result = policy->check(policy->image_spec);
		if (result == 0) {
			*image_spec = policy->image_spec;
			*dev_handle = *(policy->dev_handle);
		}
	}
#ifdef CONFIG_DDR_FIP_IMAGE
	/* IDs beyond the table may belong to the separate DDR FIP */
	else {
		VERBOSE("Trying alternative IO\n");
		result = plat_get_ddr_fip_image_source(image_id, dev_handle,
						image_spec, open_backend);
	}
#endif
#ifdef POLICY_FUSE_PROVISION
	/* Fall back to the FUSE FIP when the lookup failed so far */
	if (result != 0) {
		VERBOSE("Trying FUSE IO\n");
		result = plat_get_fuse_image_source(image_id, dev_handle,
						image_spec, open_backend);
	}
#endif

	return result;
}

View File

@ -0,0 +1,22 @@
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <stdint.h>
#include <arch_helpers.h>
#include <plat/common/platform.h>
#define RANDOM_CANARY_VALUE ((u_register_t) 3288484550995823360ULL)
/*
 * Return the stack-protector canary: a fixed seed mixed with the current
 * physical counter value for per-boot variation.
 */
u_register_t plat_get_stack_protector_canary(void)
{
	/*
	 * TBD: Generate Random Number from NXP CAAM Block.
	 */
	u_register_t seed = RANDOM_CANARY_VALUE;

	return read_cntpct_el0() ^ seed;
}