Merge pull request #1587 from antonio-nino-diaz-arm/an/deprecated

Remove deprecated interfaces for all platforms
This commit is contained in:
Soby Mathew 2018-10-02 10:12:32 +01:00 committed by GitHub
commit 3ccfcd6e3d
210 changed files with 422 additions and 6574 deletions

.gitignore vendored
View File

@ -11,7 +11,6 @@ build/
# Ignore build products from tools
tools/**/*.o
tools/fip_create/
tools/fiptool/fiptool
tools/fiptool/fiptool.exe
tools/cert_create/src/*.o

View File

@ -327,19 +327,6 @@ ifeq (${ARM_ARCH_MAJOR},7)
include make_helpers/armv7-a-cpus.mk
endif
# Platform compatibility is not supported in AArch32
ifneq (${ARCH},aarch32)
# If the platform has not defined ENABLE_PLAT_COMPAT, then enable it by default
ifndef ENABLE_PLAT_COMPAT
ENABLE_PLAT_COMPAT := 1
endif
# Include the platform compatibility helpers for PSCI
ifneq (${ENABLE_PLAT_COMPAT}, 0)
include plat/compat/plat_compat.mk
endif
endif
# Include the CPU specific operations makefile, which provides default
# values for all CPU errata workarounds and CPU specific optimisations.
# This can be overridden by the platform.
@ -403,13 +390,6 @@ ifeq (${NEED_BL33},yes)
endif
endif
# For AArch32, LOAD_IMAGE_V2 must be enabled.
ifeq (${ARCH},aarch32)
ifeq (${LOAD_IMAGE_V2}, 0)
$(error "For AArch32, LOAD_IMAGE_V2 must be enabled.")
endif
endif
# When building for systems with hardware-assisted coherency, there's no need to
# use USE_COHERENT_MEM. Require that USE_COHERENT_MEM must be set to 0 too.
ifeq ($(HW_ASSISTED_COHERENCY)-$(USE_COHERENT_MEM),1-1)
@ -451,14 +431,11 @@ ifeq ($(FAULT_INJECTION_SUPPORT),1)
endif
endif
# DYN_DISABLE_AUTH can be set only when TRUSTED_BOARD_BOOT=1 and LOAD_IMAGE_V2=1
# DYN_DISABLE_AUTH can be set only when TRUSTED_BOARD_BOOT=1
ifeq ($(DYN_DISABLE_AUTH), 1)
ifeq (${TRUSTED_BOARD_BOOT}, 0)
$(error "TRUSTED_BOARD_BOOT must be enabled for DYN_DISABLE_AUTH to be set.")
endif
ifeq (${LOAD_IMAGE_V2}, 0)
$(error "DYN_DISABLE_AUTH is only supported for LOAD_IMAGE_V2.")
endif
endif
################################################################################
@ -586,7 +563,6 @@ $(eval $(call assert_boolean,ENABLE_AMU))
$(eval $(call assert_boolean,ENABLE_ASSERTIONS))
$(eval $(call assert_boolean,ENABLE_BACKTRACE))
$(eval $(call assert_boolean,ENABLE_MPAM_FOR_LOWER_ELS))
$(eval $(call assert_boolean,ENABLE_PLAT_COMPAT))
$(eval $(call assert_boolean,ENABLE_PMF))
$(eval $(call assert_boolean,ENABLE_PSCI_STAT))
$(eval $(call assert_boolean,ENABLE_RUNTIME_INSTRUMENTATION))
@ -599,7 +575,6 @@ $(eval $(call assert_boolean,GENERATE_COT))
$(eval $(call assert_boolean,GICV2_G0_FOR_EL3))
$(eval $(call assert_boolean,HANDLE_EA_EL3_FIRST))
$(eval $(call assert_boolean,HW_ASSISTED_COHERENCY))
$(eval $(call assert_boolean,LOAD_IMAGE_V2))
$(eval $(call assert_boolean,MULTI_CONSOLE_API))
$(eval $(call assert_boolean,NS_TIMER_SWITCH))
$(eval $(call assert_boolean,PL011_GENERIC_UART))
@ -630,7 +605,6 @@ $(eval $(call assert_numeric,SMCCC_MAJOR_VERSION))
$(eval $(call add_define,ARM_ARCH_MAJOR))
$(eval $(call add_define,ARM_ARCH_MINOR))
$(eval $(call add_define,ARM_GIC_ARCH))
$(eval $(call add_define,COLD_BOOT_SINGLE_CPU))
$(eval $(call add_define,CTX_INCLUDE_AARCH32_REGS))
$(eval $(call add_define,CTX_INCLUDE_FPREGS))
@ -639,7 +613,6 @@ $(eval $(call add_define,ENABLE_AMU))
$(eval $(call add_define,ENABLE_ASSERTIONS))
$(eval $(call add_define,ENABLE_BACKTRACE))
$(eval $(call add_define,ENABLE_MPAM_FOR_LOWER_ELS))
$(eval $(call add_define,ENABLE_PLAT_COMPAT))
$(eval $(call add_define,ENABLE_PMF))
$(eval $(call add_define,ENABLE_PSCI_STAT))
$(eval $(call add_define,ENABLE_RUNTIME_INSTRUMENTATION))
@ -651,7 +624,6 @@ $(eval $(call add_define,FAULT_INJECTION_SUPPORT))
$(eval $(call add_define,GICV2_G0_FOR_EL3))
$(eval $(call add_define,HANDLE_EA_EL3_FIRST))
$(eval $(call add_define,HW_ASSISTED_COHERENCY))
$(eval $(call add_define,LOAD_IMAGE_V2))
$(eval $(call add_define,LOG_LEVEL))
$(eval $(call add_define,MULTI_CONSOLE_API))
$(eval $(call add_define,NS_TIMER_SWITCH))

View File

@ -291,26 +291,11 @@ static int bl1_fwu_image_copy(unsigned int image_id,
return -ENOMEM;
}
#if LOAD_IMAGE_V2
/* Check that the image size to load is within limit */
if (image_size > image_desc->image_info.image_max_size) {
WARN("BL1-FWU: Image size out of bounds\n");
return -ENOMEM;
}
#else
/*
* Check the image will fit into the free trusted RAM after BL1
* load.
*/
const meminfo_t *mem_layout = bl1_plat_sec_mem_layout();
if (!is_mem_free(mem_layout->free_base, mem_layout->free_size,
image_desc->image_info.image_base,
image_size)) {
WARN("BL1-FWU: Copy not allowed due to insufficient"
" resources.\n");
return -ENOMEM;
}
#endif
/* Save the given image size. */
image_desc->image_info.image_size = image_size;

View File

@ -37,7 +37,6 @@ void bl1_calc_bl2_mem_layout(const meminfo_t *bl1_mem_layout,
assert(bl1_mem_layout != NULL);
assert(bl2_mem_layout != NULL);
#if LOAD_IMAGE_V2
/*
* Remove BL1 RW data from the scope of memory visible to BL2.
* This is assuming BL1 RW data is at the top of bl1_mem_layout.
@ -45,42 +44,10 @@ void bl1_calc_bl2_mem_layout(const meminfo_t *bl1_mem_layout,
assert(BL1_RW_BASE > bl1_mem_layout->total_base);
bl2_mem_layout->total_base = bl1_mem_layout->total_base;
bl2_mem_layout->total_size = BL1_RW_BASE - bl1_mem_layout->total_base;
#else
/* Check that BL1's memory is lying outside of the free memory */
assert((BL1_RAM_LIMIT <= bl1_mem_layout->free_base) ||
(BL1_RAM_BASE >= bl1_mem_layout->free_base +
bl1_mem_layout->free_size));
/* Remove BL1 RW data from the scope of memory visible to BL2 */
*bl2_mem_layout = *bl1_mem_layout;
reserve_mem(&bl2_mem_layout->total_base,
&bl2_mem_layout->total_size,
BL1_RAM_BASE,
BL1_RAM_LIMIT - BL1_RAM_BASE);
#endif /* LOAD_IMAGE_V2 */
flush_dcache_range((unsigned long)bl2_mem_layout, sizeof(meminfo_t));
}
#if !ERROR_DEPRECATED
/*******************************************************************************
* Compatibility default implementation for deprecated API. This has a weak
* definition. Platform specific code can override it if it wishes to.
******************************************************************************/
#pragma weak bl1_init_bl2_mem_layout
/*******************************************************************************
* Function that takes a memory layout into which BL2 has been loaded and
* populates a new memory layout for BL2 that ensures that BL1's data sections
* resident in secure RAM are not visible to BL2.
******************************************************************************/
void bl1_init_bl2_mem_layout(const struct meminfo *bl1_mem_layout,
struct meminfo *bl2_mem_layout)
{
bl1_calc_bl2_mem_layout(bl1_mem_layout, bl2_mem_layout);
}
#endif
/*******************************************************************************
* Function to perform late architectural and platform specific initialization.
* It also queries the platform to load and run next BL image. Only called
@ -183,27 +150,7 @@ static void bl1_load_bl2(void)
plat_error_handler(err);
}
#if LOAD_IMAGE_V2
err = load_auth_image(BL2_IMAGE_ID, image_info);
#else
entry_point_info_t *ep_info;
meminfo_t *bl1_tzram_layout;
/* Get the entry point info */
ep_info = &image_desc->ep_info;
/* Find out how much free trusted ram remains after BL1 load */
bl1_tzram_layout = bl1_plat_sec_mem_layout();
/* Load the BL2 image */
err = load_auth_image(bl1_tzram_layout,
BL2_IMAGE_ID,
image_info->image_base,
image_info,
ep_info);
#endif /* LOAD_IMAGE_V2 */
if (err) {
ERROR("Failed to load BL2 firmware.\n");
plat_error_handler(err);

View File

@ -15,9 +15,7 @@ image_desc_t bl1_tbbr_image_descs[] = {
SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
VERSION_1, image_info_t, 0),
.image_info.image_base = BL2_BASE,
#if LOAD_IMAGE_V2
.image_info.image_max_size = BL2_LIMIT - BL2_BASE,
#endif
SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
VERSION_1, entry_point_info_t, SECURE),
},
@ -35,9 +33,7 @@ image_desc_t bl1_tbbr_image_descs[] = {
SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
VERSION_1, image_info_t, 0),
.image_info.image_base = SCP_BL2U_BASE,
#if LOAD_IMAGE_V2
.image_info.image_max_size = SCP_BL2U_LIMIT - SCP_BL2U_BASE,
#endif
SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
VERSION_1, entry_point_info_t, SECURE),
},
@ -48,9 +44,7 @@ image_desc_t bl1_tbbr_image_descs[] = {
SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
VERSION_1, image_info_t, 0),
.image_info.image_base = BL2U_BASE,
#if LOAD_IMAGE_V2
.image_info.image_max_size = BL2U_LIMIT - BL2U_BASE,
#endif
SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
VERSION_1, entry_point_info_t, SECURE | EXECUTABLE),
.ep_info.pc = BL2U_BASE,

View File

@ -14,11 +14,7 @@ ifeq (${ARCH},aarch64)
BL2_SOURCES += common/aarch64/early_exceptions.S
endif
ifeq (${LOAD_IMAGE_V2},1)
BL2_SOURCES += bl2/bl2_image_load_v2.c
else
BL2_SOURCES += bl2/bl2_image_load.c
endif
ifeq (${BL2_AT_EL3},0)
BL2_SOURCES += bl2/${ARCH}/bl2_entrypoint.S

View File

@ -1,261 +0,0 @@
/*
* Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <auth_mod.h>
#include <bl_common.h>
#include <debug.h>
#include <errno.h>
#include <platform.h>
#include <platform_def.h>
#include <stdint.h>
/*
* Check for platforms that use obsolete image terminology
*/
#ifdef BL30_BASE
# error "BL30_BASE platform define no longer used - please use SCP_BL2_BASE"
#endif
/*******************************************************************************
* Load the SCP_BL2 image if there's one.
* If a platform does not want to attempt to load SCP_BL2 image it must leave
* SCP_BL2_BASE undefined.
* Return 0 on success or if there's no SCP_BL2 image to load, a negative error
* code otherwise.
******************************************************************************/
static int load_scp_bl2(void)
{
int e = 0;
#ifdef SCP_BL2_BASE
meminfo_t scp_bl2_mem_info;
image_info_t scp_bl2_image_info;
/*
* It is up to the platform to specify where SCP_BL2 should be loaded if
* it exists. It could create space in the secure sram or point to a
* completely different memory.
*
* The entry point information is not relevant in this case as the AP
* won't execute the SCP_BL2 image.
*/
INFO("BL2: Loading SCP_BL2\n");
bl2_plat_get_scp_bl2_meminfo(&scp_bl2_mem_info);
scp_bl2_image_info.h.version = VERSION_1;
e = load_auth_image(&scp_bl2_mem_info,
SCP_BL2_IMAGE_ID,
SCP_BL2_BASE,
&scp_bl2_image_info,
NULL);
if (e == 0) {
/* The subsequent handling of SCP_BL2 is platform specific */
e = bl2_plat_handle_scp_bl2(&scp_bl2_image_info);
if (e) {
ERROR("Failure in platform-specific handling of SCP_BL2 image.\n");
}
}
#endif /* SCP_BL2_BASE */
return e;
}
#ifndef EL3_PAYLOAD_BASE
/*******************************************************************************
* Load the BL31 image.
* The bl2_to_bl31_params and bl31_ep_info params will be updated with the
* relevant BL31 information.
* Return 0 on success, a negative error code otherwise.
******************************************************************************/
static int load_bl31(bl31_params_t *bl2_to_bl31_params,
entry_point_info_t *bl31_ep_info)
{
meminfo_t *bl2_tzram_layout;
int e;
INFO("BL2: Loading BL31\n");
assert(bl2_to_bl31_params != NULL);
assert(bl31_ep_info != NULL);
/* Find out how much free trusted ram remains after BL2 load */
bl2_tzram_layout = bl2_plat_sec_mem_layout();
/* Set the X0 parameter to BL31 */
bl31_ep_info->args.arg0 = (unsigned long)bl2_to_bl31_params;
/* Load the BL31 image */
e = load_auth_image(bl2_tzram_layout,
BL31_IMAGE_ID,
BL31_BASE,
bl2_to_bl31_params->bl31_image_info,
bl31_ep_info);
if (e == 0) {
bl2_plat_set_bl31_ep_info(bl2_to_bl31_params->bl31_image_info,
bl31_ep_info);
}
return e;
}
/*******************************************************************************
* Load the BL32 image if there's one.
* The bl2_to_bl31_params param will be updated with the relevant BL32
* information.
* If a platform does not want to attempt to load BL32 image it must leave
* BL32_BASE undefined.
* Return 0 on success or if there's no BL32 image to load, a negative error
* code otherwise.
******************************************************************************/
static int load_bl32(bl31_params_t *bl2_to_bl31_params)
{
int e = 0;
#ifdef BL32_BASE
meminfo_t bl32_mem_info;
INFO("BL2: Loading BL32\n");
assert(bl2_to_bl31_params != NULL);
/*
* It is up to the platform to specify where BL32 should be loaded if
* it exists. It could create space in the secure sram or point to a
* completely different memory.
*/
bl2_plat_get_bl32_meminfo(&bl32_mem_info);
e = load_auth_image(&bl32_mem_info,
BL32_IMAGE_ID,
BL32_BASE,
bl2_to_bl31_params->bl32_image_info,
bl2_to_bl31_params->bl32_ep_info);
if (e == 0) {
bl2_plat_set_bl32_ep_info(
bl2_to_bl31_params->bl32_image_info,
bl2_to_bl31_params->bl32_ep_info);
}
#endif /* BL32_BASE */
return e;
}
#ifndef PRELOADED_BL33_BASE
/*******************************************************************************
* Load the BL33 image.
* The bl2_to_bl31_params param will be updated with the relevant BL33
* information.
* Return 0 on success, a negative error code otherwise.
******************************************************************************/
static int load_bl33(bl31_params_t *bl2_to_bl31_params)
{
meminfo_t bl33_mem_info;
int e;
INFO("BL2: Loading BL33\n");
assert(bl2_to_bl31_params != NULL);
bl2_plat_get_bl33_meminfo(&bl33_mem_info);
/* Load the BL33 image in non-secure memory provided by the platform */
e = load_auth_image(&bl33_mem_info,
BL33_IMAGE_ID,
plat_get_ns_image_entrypoint(),
bl2_to_bl31_params->bl33_image_info,
bl2_to_bl31_params->bl33_ep_info);
if (e == 0) {
bl2_plat_set_bl33_ep_info(bl2_to_bl31_params->bl33_image_info,
bl2_to_bl31_params->bl33_ep_info);
}
return e;
}
#endif /* PRELOADED_BL33_BASE */
#endif /* EL3_PAYLOAD_BASE */
/*******************************************************************************
* This function loads SCP_BL2/BL3x images and returns the ep_info for
* the next executable image.
******************************************************************************/
struct entry_point_info *bl2_load_images(void)
{
bl31_params_t *bl2_to_bl31_params;
entry_point_info_t *bl31_ep_info;
int e;
e = load_scp_bl2();
if (e) {
ERROR("Failed to load SCP_BL2 (%i)\n", e);
plat_error_handler(e);
}
/* Perform platform setup in BL2 after loading SCP_BL2 */
bl2_platform_setup();
/*
* Get a pointer to the memory the platform has set aside to pass
* information to BL31.
*/
bl2_to_bl31_params = bl2_plat_get_bl31_params();
bl31_ep_info = bl2_plat_get_bl31_ep_info();
#ifdef EL3_PAYLOAD_BASE
/*
* In the case of an EL3 payload, we don't need to load any further
* images. Just update the BL31 entrypoint info structure to make BL1
* jump to the EL3 payload.
* The pointer to the memory the platform has set aside to pass
* information to BL31 in the normal boot flow is reused here, even
* though only a fraction of the information contained in the
* bl31_params_t structure makes sense in the context of EL3 payloads.
* This will be refined in the future.
*/
INFO("BL2: Populating the entrypoint info for the EL3 payload\n");
bl31_ep_info->pc = EL3_PAYLOAD_BASE;
bl31_ep_info->args.arg0 = (unsigned long) bl2_to_bl31_params;
bl2_plat_set_bl31_ep_info(NULL, bl31_ep_info);
#else
e = load_bl31(bl2_to_bl31_params, bl31_ep_info);
if (e) {
ERROR("Failed to load BL31 (%i)\n", e);
plat_error_handler(e);
}
e = load_bl32(bl2_to_bl31_params);
if (e) {
if (e == -EAUTH) {
ERROR("Failed to authenticate BL32\n");
plat_error_handler(e);
} else {
WARN("Failed to load BL32 (%i)\n", e);
}
}
#ifdef PRELOADED_BL33_BASE
/*
* In this case, don't load the BL33 image as it's already loaded in
* memory. Update BL33 entrypoint information.
*/
INFO("BL2: Populating the entrypoint info for the preloaded BL33\n");
bl2_to_bl31_params->bl33_ep_info->pc = PRELOADED_BL33_BASE;
bl2_plat_set_bl33_ep_info(NULL, bl2_to_bl31_params->bl33_ep_info);
#else
e = load_bl33(bl2_to_bl31_params);
if (e) {
ERROR("Failed to load BL33 (%i)\n", e);
plat_error_handler(e);
}
#endif /* PRELOADED_BL33_BASE */
#endif /* EL3_PAYLOAD_BASE */
/* Flush the params to be passed to memory */
bl2_plat_flush_bl31_params();
return bl31_ep_info;
}

View File

@ -61,70 +61,3 @@ void cm_set_context_by_index(unsigned int cpu_idx, void *context,
set_cpu_data_by_index(cpu_idx, cpu_context[security_state], context);
}
#if !ERROR_DEPRECATED
/*
* These context management helpers are deprecated but are maintained for use
* by SPDs which have not migrated to the new API. If ERROR_DEPRECATED
* is enabled, these are excluded from the build so as to force users to
* migrate to the new API.
*/
/*******************************************************************************
* This function returns a pointer to the most recent 'cpu_context' structure
* for the CPU identified by MPIDR that was set as the context for the specified
* security state. NULL is returned if no such structure has been specified.
******************************************************************************/
void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state)
{
assert(sec_state_is_valid(security_state));
/*
* Suppress deprecated declaration warning in compatibility function
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
return cm_get_context_by_index(platform_get_core_pos(mpidr), security_state);
#pragma GCC diagnostic pop
}
/*******************************************************************************
* This function sets the pointer to the current 'cpu_context' structure for the
* specified security state for the CPU identified by MPIDR
******************************************************************************/
void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_state)
{
assert(sec_state_is_valid(security_state));
/*
* Suppress deprecated declaration warning in compatibility function
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
cm_set_context_by_index(platform_get_core_pos(mpidr),
context, security_state);
#pragma GCC diagnostic pop
}
/*******************************************************************************
* The following function provides a compatibility function for SPDs using the
* existing cm library routines. This function is expected to be invoked for
* initializing the cpu_context for the CPU specified by MPIDR for first use.
******************************************************************************/
void cm_init_context(uint64_t mpidr, const entry_point_info_t *ep)
{
if ((mpidr & MPIDR_AFFINITY_MASK) ==
(read_mpidr_el1() & MPIDR_AFFINITY_MASK))
cm_init_my_context(ep);
else {
/*
* Suppress deprecated declaration warning in compatibility
* function
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
cm_init_context_by_index(platform_get_core_pos(mpidr), ep);
#pragma GCC diagnostic pop
}
}
#endif /* ERROR_DEPRECATED */

View File

@ -23,8 +23,7 @@ static int disable_auth;
/******************************************************************************
* API to dynamically disable authentication. Only meant for development
* systems. This is only invoked if DYN_DISABLE_AUTH is defined. This
* capability is restricted to LOAD_IMAGE_V2.
* systems. This is only invoked if DYN_DISABLE_AUTH is defined.
*****************************************************************************/
void dyn_disable_auth(void)
{
@ -101,88 +100,6 @@ int is_mem_free(uintptr_t free_base, size_t free_size,
return (addr >= free_base) && (requested_end <= free_end);
}
#if !LOAD_IMAGE_V2
/******************************************************************************
* Inside a given memory region, determine whether a sub-region of memory is
closer to the top or the bottom of the encompassing region. Return the
* size of the smallest chunk of free memory surrounding the sub-region in
* 'small_chunk_size'.
*****************************************************************************/
static unsigned int choose_mem_pos(uintptr_t mem_start, uintptr_t mem_end,
uintptr_t submem_start, uintptr_t submem_end,
size_t *small_chunk_size)
{
size_t top_chunk_size, bottom_chunk_size;
assert(mem_start <= submem_start);
assert(submem_start <= submem_end);
assert(submem_end <= mem_end);
assert(small_chunk_size != NULL);
top_chunk_size = mem_end - submem_end;
bottom_chunk_size = submem_start - mem_start;
if (top_chunk_size < bottom_chunk_size) {
*small_chunk_size = top_chunk_size;
return TOP;
} else {
*small_chunk_size = bottom_chunk_size;
return BOTTOM;
}
}
/******************************************************************************
* Reserve the memory region delimited by 'addr' and 'size'. The extents of free
* memory are passed in 'free_base' and 'free_size' and they will be updated to
* reflect the memory usage.
* The caller must ensure the memory to reserve is free and that the addresses
* and sizes passed in arguments are sane.
*****************************************************************************/
void reserve_mem(uintptr_t *free_base, size_t *free_size,
uintptr_t addr, size_t size)
{
size_t discard_size;
size_t reserved_size;
unsigned int pos;
assert(free_base != NULL);
assert(free_size != NULL);
assert(is_mem_free(*free_base, *free_size, addr, size));
if (size == 0) {
WARN("Nothing to allocate, requested size is zero\n");
return;
}
pos = choose_mem_pos(*free_base, *free_base + (*free_size - 1),
addr, addr + (size - 1),
&discard_size);
reserved_size = size + discard_size;
*free_size -= reserved_size;
if (pos == BOTTOM)
*free_base = addr + size;
VERBOSE("Reserved 0x%zx bytes (discarded 0x%zx bytes %s)\n",
reserved_size, discard_size,
pos == TOP ? "above" : "below");
}
static void dump_load_info(uintptr_t image_load_addr,
size_t image_size,
const meminfo_t *mem_layout)
{
INFO("Trying to load image at address %p, size = 0x%zx\n",
(void *)image_load_addr, image_size);
INFO("Current memory layout:\n");
INFO(" total region = [base = %p, size = 0x%zx]\n",
(void *) mem_layout->total_base, mem_layout->total_size);
INFO(" free region = [base = %p, size = 0x%zx]\n",
(void *) mem_layout->free_base, mem_layout->free_size);
}
#endif /* LOAD_IMAGE_V2 */
/* Generic function to return the size of an image */
size_t get_image_size(unsigned int image_id)
{
@ -226,8 +143,6 @@ size_t get_image_size(unsigned int image_id)
return image_size;
}
#if LOAD_IMAGE_V2
/*******************************************************************************
* Internal function to load an image at a specific address given
* an image ID and extents of free memory.
@ -386,214 +301,6 @@ int load_auth_image(unsigned int image_id, image_info_t *image_data)
return err;
}
#else /* LOAD_IMAGE_V2 */
/*******************************************************************************
* Generic function to load an image at a specific address given an image ID and
* extents of free memory.
*
* If the load is successful then the image information is updated.
*
* If the entry_point_info argument is not NULL then this function also updates:
* - the memory layout to mark the memory as reserved;
* - the entry point information.
*
* The caller might pass a NULL pointer for the entry point if they are not
* interested in this information. This is typically the case for non-executable
* images (e.g. certificates) and executable images that won't ever be executed
* on the application processor (e.g. additional microcontroller firmware).
*
* Returns 0 on success, a negative error code otherwise.
******************************************************************************/
int load_image(meminfo_t *mem_layout,
unsigned int image_id,
uintptr_t image_base,
image_info_t *image_data,
entry_point_info_t *entry_point_info)
{
uintptr_t dev_handle;
uintptr_t image_handle;
uintptr_t image_spec;
size_t image_size;
size_t bytes_read;
int io_result;
assert(mem_layout != NULL);
assert(image_data != NULL);
assert(image_data->h.version == VERSION_1);
/* Obtain a reference to the image by querying the platform layer */
io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
if (io_result != 0) {
WARN("Failed to obtain reference to image id=%u (%i)\n",
image_id, io_result);
return io_result;
}
/* Attempt to access the image */
io_result = io_open(dev_handle, image_spec, &image_handle);
if (io_result != 0) {
WARN("Failed to access image id=%u (%i)\n",
image_id, io_result);
return io_result;
}
INFO("Loading image id=%u at address %p\n", image_id,
(void *) image_base);
/* Find the size of the image */
io_result = io_size(image_handle, &image_size);
if ((io_result != 0) || (image_size == 0)) {
WARN("Failed to determine the size of the image id=%u (%i)\n",
image_id, io_result);
goto exit;
}
/* Check that the memory where the image will be loaded is free */
if (!is_mem_free(mem_layout->free_base, mem_layout->free_size,
image_base, image_size)) {
WARN("Failed to reserve region [base = %p, size = 0x%zx]\n",
(void *) image_base, image_size);
dump_load_info(image_base, image_size, mem_layout);
io_result = -ENOMEM;
goto exit;
}
/* We have enough space so load the image now */
/* TODO: Consider whether to try to recover/retry a partially successful read */
io_result = io_read(image_handle, image_base, image_size, &bytes_read);
if ((io_result != 0) || (bytes_read < image_size)) {
WARN("Failed to load image id=%u (%i)\n", image_id, io_result);
goto exit;
}
image_data->image_base = image_base;
image_data->image_size = image_size;
/*
* Update the memory usage info.
* This is done after the actual loading so that it is not updated when
* the load is unsuccessful.
* If the caller does not provide an entry point, bypass the memory
* reservation.
*/
if (entry_point_info != NULL) {
reserve_mem(&mem_layout->free_base, &mem_layout->free_size,
image_base, image_size);
entry_point_info->pc = image_base;
} else {
INFO("Skip reserving region [base = %p, size = 0x%zx]\n",
(void *) image_base, image_size);
}
#if !TRUSTED_BOARD_BOOT
/*
* File has been successfully loaded.
* Flush the image to main memory so that it can be executed later by
* any CPU, regardless of cache and MMU state.
* When TBB is enabled the image is flushed later, after image
* authentication.
*/
flush_dcache_range(image_base, image_size);
#endif /* TRUSTED_BOARD_BOOT */
INFO("Image id=%u loaded at address %p, size = 0x%zx\n", image_id,
(void *) image_base, image_size);
exit:
io_close(image_handle);
/* Ignore improbable/unrecoverable error in 'close' */
/* TODO: Consider maintaining open device connection from this bootloader stage */
io_dev_close(dev_handle);
/* Ignore improbable/unrecoverable error in 'dev_close' */
return io_result;
}
static int load_auth_image_internal(meminfo_t *mem_layout,
unsigned int image_id,
uintptr_t image_base,
image_info_t *image_data,
entry_point_info_t *entry_point_info,
int is_parent_image)
{
int rc;
#if TRUSTED_BOARD_BOOT
unsigned int parent_id;
/* Use recursion to authenticate parent images */
rc = auth_mod_get_parent_id(image_id, &parent_id);
if (rc == 0) {
rc = load_auth_image_internal(mem_layout, parent_id, image_base,
image_data, NULL, 1);
if (rc != 0) {
return rc;
}
}
#endif /* TRUSTED_BOARD_BOOT */
/* Load the image */
rc = load_image(mem_layout, image_id, image_base, image_data,
entry_point_info);
if (rc != 0) {
return rc;
}
#if TRUSTED_BOARD_BOOT
/* Authenticate it */
rc = auth_mod_verify_img(image_id,
(void *)image_data->image_base,
image_data->image_size);
if (rc != 0) {
/* Authentication error, zero memory and flush it right away. */
zero_normalmem((void *)image_data->image_base,
image_data->image_size);
flush_dcache_range(image_data->image_base,
image_data->image_size);
return -EAUTH;
}
/*
* File has been successfully loaded and authenticated.
* Flush the image to main memory so that it can be executed later by
* any CPU, regardless of cache and MMU state.
* Do it only for child images, not for the parents (certificates).
*/
if (!is_parent_image) {
flush_dcache_range(image_data->image_base,
image_data->image_size);
}
#endif /* TRUSTED_BOARD_BOOT */
return 0;
}
/*******************************************************************************
* Generic function to load and authenticate an image. The image is actually
* loaded by calling the 'load_image()' function. Therefore, it returns the
* same error codes if the loading operation failed, or -EAUTH if the
* authentication failed. In addition, this function uses recursion to
* authenticate the parent images up to the root of trust.
******************************************************************************/
int load_auth_image(meminfo_t *mem_layout,
unsigned int image_id,
uintptr_t image_base,
image_info_t *image_data,
entry_point_info_t *entry_point_info)
{
int err;
do {
err = load_auth_image_internal(mem_layout, image_id, image_base,
image_data, entry_point_info, 0);
} while (err != 0 && plat_try_next_boot_source());
return err;
}
#endif /* LOAD_IMAGE_V2 */
/*******************************************************************************
* Print the content of an entry_point_info_t structure.
******************************************************************************/

View File

@ -394,13 +394,9 @@ On Arm platforms, BL2 performs the following platform initializations:
Image loading in BL2
^^^^^^^^^^^^^^^^^^^^
Image loading scheme in BL2 depends on ``LOAD_IMAGE_V2`` build option. If the
flag is disabled, the BLxx images are loaded, by calling the respective
load\_blxx() function from BL2 generic code. If the flag is enabled, the BL2
generic code loads the images based on the list of loadable images provided
by the platform. BL2 passes the list of executable images provided by the
platform to the next handover BL image. By default, this flag is disabled for
AArch64 and the AArch32 build is supported only if this flag is enabled.
BL2 generic code loads the images based on the list of loadable images
provided by the platform. BL2 passes the list of executable images
provided by the platform to the next handover BL image.
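
As a rough illustration of this list-based scheme, the sketch below iterates over a
platform-provided list of loadable images and loads each one through the two-argument
``load_auth_image()`` shown elsewhere in this change. The ``bl_load_list_t`` type and
``plat_get_load_list()`` helper are hypothetical names used only for this example.

.. code:: c

    /* Hypothetical list node type; real platforms describe loadable
     * images through the platform porting API instead. */
    typedef struct bl_load_list {
        unsigned int image_id;
        image_info_t *image_info;
        struct bl_load_list *next;
    } bl_load_list_t;

    static void bl2_load_platform_images(void)
    {
        bl_load_list_t *node = plat_get_load_list(); /* hypothetical */

        while (node != NULL) {
            /* Two-argument form used by the generic BL2 loader. */
            int err = load_auth_image(node->image_id, node->image_info);

            if (err != 0) {
                ERROR("BL2: failed to load image id=%u (%i)\n",
                      node->image_id, err);
                plat_error_handler(err);
            }
            node = node->next;
        }
    }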
The list of loadable images provided by the platform may also contain
dynamic configuration files. The files are loaded and can be parsed as
@ -425,10 +421,7 @@ EL3 Runtime Software image load
BL2 loads the EL3 Runtime Software image from platform storage into a platform-
specific address in trusted SRAM. If there is not enough memory to load the
image or image is missing it leads to an assertion failure. If ``LOAD_IMAGE_V2``
is disabled and if image loads successfully, BL2 updates the amount of trusted
SRAM used and available for use by EL3 Runtime Software. This information is
populated at a platform-specific memory address.
image or image is missing it leads to an assertion failure.
AArch64 BL32 (Secure-EL1 Payload) image load
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@ -1281,47 +1274,22 @@ interrupts on the platform. To this end, the platform is expected to provide the
GIC driver (either GICv2 or GICv3, as selected by the platform) with the
interrupt configuration during the driver initialisation.
There are two ways to specify secure interrupt configuration:
Secure interrupt configurations are specified in an array of secure interrupt
properties. In this scheme, in both GICv2 and GICv3 driver data structures, the
``interrupt_props`` member points to an array of interrupt properties. Each
element of the array specifies the interrupt number and its configuration, viz.
priority, group, configuration. Each element of the array shall be populated by
the macro ``INTR_PROP_DESC()``. The macro takes the following arguments:
#. Array of secure interrupt properties: In this scheme, in both GICv2 and GICv3
driver data structures, the ``interrupt_props`` member points to an array of
interrupt properties. Each element of the array specifies the interrupt
number and its configuration, viz. priority, group, configuration. Each
element of the array shall be populated by the macro ``INTR_PROP_DESC()``.
The macro takes the following arguments:
- 10-bit interrupt number,
- 10-bit interrupt number,
- 8-bit interrupt priority,
- 8-bit interrupt priority,
- Interrupt type (one of ``INTR_TYPE_EL3``, ``INTR_TYPE_S_EL1``,
``INTR_TYPE_NS``),
- Interrupt type (one of ``INTR_TYPE_EL3``, ``INTR_TYPE_S_EL1``,
``INTR_TYPE_NS``),
- Interrupt configuration (either ``GIC_INTR_CFG_LEVEL`` or
``GIC_INTR_CFG_EDGE``).
#. Array of secure interrupts: In this scheme, the GIC driver is provided an
array of secure interrupt numbers. The GIC driver, at the time of
initialisation, iterates through the array and assigns each interrupt
the appropriate group.
- For the GICv2 driver, in the ``gicv2_driver_data`` structure, the
``g0_interrupt_array`` member should point to the array of
interrupts to be assigned to *Group 0*, and the ``g0_interrupt_num``
member should be set to the number of interrupts in the array.
- For the GICv3 driver, in the ``gicv3_driver_data`` structure:
- The ``g0_interrupt_array`` member should point to the array of
interrupts to be assigned to *Group 0*, and the ``g0_interrupt_num``
member should be set to the number of interrupts in the array.
- The ``g1s_interrupt_array`` member should point to the array of
interrupts to be assigned to *Group 1 Secure*, and the
``g1s_interrupt_num`` member should be set to the number of
interrupts in the array.
**Note that this scheme is deprecated.**
- Interrupt configuration (either ``GIC_INTR_CFG_LEVEL`` or
``GIC_INTR_CFG_EDGE``).
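
For reference, a minimal sketch of an ``interrupt_props`` array populated with
``INTR_PROP_DESC()`` is shown below. The interrupt numbers and the
``PLAT_IRQ_PRIORITY`` macro are placeholders, not values from any real platform.

.. code:: c

    /* Illustrative secure interrupt properties. */
    static const interrupt_prop_t plat_interrupt_props[] = {
        INTR_PROP_DESC(29, PLAT_IRQ_PRIORITY, INTR_TYPE_EL3,
                       GIC_INTR_CFG_LEVEL),
        INTR_PROP_DESC(57, PLAT_IRQ_PRIORITY, INTR_TYPE_S_EL1,
                       GIC_INTR_CFG_EDGE),
    };

    static const gicv2_driver_data_t plat_gic_data = {
        .interrupt_props = plat_interrupt_props,
        .interrupt_props_num = ARRAY_SIZE(plat_interrupt_props),
        /* GIC base addresses and other members omitted. */
    };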
CPU specific operations framework
---------------------------------

View File

@ -14,13 +14,13 @@ To build:
.. code:: bash
make ERROR_DEPRECATED=1 CROSS_COMPILE=aarch64-none-elf- PLAT=zynqmp bl31
make CROSS_COMPILE=aarch64-none-elf- PLAT=zynqmp bl31
To build bl32 TSP you have to rebuild bl31 too:
.. code:: bash
make ERROR_DEPRECATED=1 CROSS_COMPILE=aarch64-none-elf- PLAT=zynqmp SPD=tspd bl31 bl32
make CROSS_COMPILE=aarch64-none-elf- PLAT=zynqmp SPD=tspd bl31 bl32
ZynqMP platform specific build options
======================================

View File

@ -1,608 +0,0 @@
Guide to migrate to new Platform porting interface
==================================================
.. section-numbering::
:suffix: .
.. contents::
--------------
Introduction
------------
The PSCI implementation in TF-A has undergone a redesign because of three
requirements that the PSCI 1.0 specification introduced:
- Removing the framework assumption about the structure of the MPIDR, and
its relation to the power topology enables support for deeper and more
complex hierarchies.
- Reworking the power state coordination implementation in the framework
to support the more detailed PSCI 1.0 requirements and reduce platform
port complexity
- Enable the use of the extended power\_state parameter and the larger StateID
field
The PSCI 1.0 implementation introduces new frameworks to fulfill the above
requirements. These framework changes mean that the platform porting API must
also be modified. This document is a guide to assist migration of the existing
platform ports to the new platform API.
This document describes the new platform API and compares it with the
deprecated API. It also describes the compatibility layer that enables the
existing platform ports to work with the PSCI 1.0 implementation. The
deprecated platform API is documented for reference.
Platform API modification due to PSCI framework changes
-------------------------------------------------------
This section describes changes to the platform APIs.
Power domain topology framework platform API modifications
----------------------------------------------------------
This removes the assumption in the PSCI implementation that MPIDR
based affinity instances map directly to power domains. A power domain, as
described in section 4.2 of `PSCI`_, could contain a core or a logical group
of cores (a cluster) which share some state on which power management
operations can be performed. The existing affinity instance based APIs
``plat_get_aff_count()`` and ``plat_get_aff_state()`` are deprecated. The new
platform interfaces that are introduced for this framework are:
- ``plat_core_pos_by_mpidr()``
- ``plat_my_core_pos()``
- ``plat_get_power_domain_tree_desc()``
``plat_my_core_pos()`` and ``plat_core_pos_by_mpidr()`` are mandatory
and are meant to replace the existing ``platform_get_core_pos()`` API.
The description of these APIs can be found in the `Porting Guide`_.
These are used by the power domain topology framework such that:
#. The generic PSCI code does not generate MPIDRs or use them to query the
platform about the number of power domains at a particular power level. The
``plat_get_power_domain_tree_desc()`` provides a description of the power
domain tree on the SoC through a pointer to the byte array containing the
power domain topology tree description data structure.
#. The linear indices returned by ``plat_core_pos_by_mpidr()`` and
``plat_my_core_pos()`` are used to retrieve core power domain nodes from
the power domain tree. These core indices are unique for a core and it is a
number between ``0`` and ``PLATFORM_CORE_COUNT - 1``. The platform can choose
to implement a static mapping between ``MPIDR`` and core index or implement
a dynamic mapping, choosing to skip the unavailable/unused cores to compact
the core indices.
In addition, the platforms must define the macros ``PLAT_NUM_PWR_DOMAINS`` and
``PLAT_MAX_PWR_LVL`` which replace the macros ``PLAT_NUM_AFFS`` and
``PLATFORM_MAX_AFFLVL`` respectively. On platforms where the affinity instances
correspond to power domains, the values of new macros remain the same as the
old ones.
More details on the power domain topology description and its platform
interface can be found in `psci pd tree`_.
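
As a sketch only, a platform with two clusters of four cores each might describe its
topology as the byte array below; the shape is an assumed example, not taken from any
specific platform.

.. code:: c

    /* Assumed topology: one system power domain containing two clusters
     * of four cores each (PLATFORM_CORE_COUNT == 8). */
    static const unsigned char plat_power_domain_tree_desc[] = {
        1,      /* Number of root (system) power domains */
        2,      /* Number of clusters under the root */
        4,      /* Number of cores in cluster 0 */
        4       /* Number of cores in cluster 1 */
    };

    const unsigned char *plat_get_power_domain_tree_desc(void)
    {
        return plat_power_domain_tree_desc;
    }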
Composite power state framework platform API modifications
----------------------------------------------------------
The state-ID field in the power-state parameter of a CPU\_SUSPEND call can be
used to describe the composite power states specific to a platform. The existing
PSCI state coordination had the limitation that it operates on a run/off
granularity of power states and it did not interpret the state-ID field. This
was acceptable as the specification requirement in PSCI 0.2 and the framework's
approach to coordination only required maintaining a reference
count of the number of cores that have requested the cluster to remain powered.
In the PSCI 1.0 specification, this approach is not optimal. If composite
power states are used, the PSCI implementation cannot make the required global
state coordination decisions because it does not understand the
platform-specific states.
The PSCI 1.0 implementation now defines a generic representation of the
power-state parameter :
.. code:: c
typedef struct psci_power_state {
plat_local_state_t pwr_domain_state[PLAT_MAX_PWR_LVL + 1];
} psci_power_state_t;
``pwr_domain_state`` is an array where each index corresponds to a power level.
Each entry in the array contains the local power state the power domain at
that power level could enter. The meaning of the local power state value is
platform defined, and can vary between levels in a single platform. The PSCI
implementation constrains the values only so that it can classify the state
as RUN, RETENTION or OFF as required by the specification:
#. Zero means RUN
#. All OFF state values at all levels must be higher than all
RETENTION state values at all levels
The platform is required to define the macros ``PLAT_MAX_RET_STATE`` and
``PLAT_MAX_OFF_STATE`` to the framework. The requirement for these macros can
be found in the `Porting Guide <porting-guide.rst>`__.
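
To make the ordering rule concrete, a platform supporting one retention and one
power-down state per level might encode its local states as below; the numeric values
are illustrative, and only the ordering matters to the framework.

.. code:: c

    /* Illustrative local power state encoding. */
    #define PLAT_LOCAL_STATE_RUN    0   /* Zero always means RUN */
    #define PLAT_LOCAL_STATE_RET    1   /* Retention */
    #define PLAT_LOCAL_STATE_OFF    2   /* Power down */

    #define PLAT_MAX_RET_STATE      PLAT_LOCAL_STATE_RET
    #define PLAT_MAX_OFF_STATE      PLAT_LOCAL_STATE_OFF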
The PSCI 1.0 implementation adds support to involve the platform in state
coordination. This enables the platform to decide the final target state.
During a request to place a power domain in a low power state, the platform
is passed an array of requested ``plat_local_state_t`` for that power domain by
each core within it through the ``plat_get_target_pwr_state()`` API. This API
coordinates amongst these requested states to determine a target
``plat_local_state_t`` for that power domain. A default weak implementation of
this API is provided in the platform layer which returns the minimum of the
requested local states back to the PSCI state coordination. More details
of ``plat_get_target_pwr_state()`` API can be found in the
`Porting Guide <porting-guide.rst#user-content-function--plat_get_target_pwr_state-optional>`__.
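
A sketch of a coordination handler that mirrors the described default behaviour,
returning the minimum of the requested local states, could look like this (error
checking omitted):

.. code:: c

    plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
                                                 const plat_local_state_t *states,
                                                 unsigned int ncpu)
    {
        plat_local_state_t target = PLAT_MAX_OFF_STATE;
        unsigned int i;

        (void)lvl;      /* This simple policy ignores the power level. */

        /* Pick the shallowest (minimum) state requested by any core. */
        for (i = 0U; i < ncpu; i++) {
            if (states[i] < target)
                target = states[i];
        }

        return target;
    }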
The PSCI Generic implementation expects platform ports to populate the handlers
for the ``plat_psci_ops`` structure which is declared as :
.. code:: c
typedef struct plat_psci_ops {
void (*cpu_standby)(plat_local_state_t cpu_state);
int (*pwr_domain_on)(u_register_t mpidr);
void (*pwr_domain_off)(const psci_power_state_t *target_state);
void (*pwr_domain_suspend_early)(const psci_power_state_t *target_state);
void (*pwr_domain_suspend)(const psci_power_state_t *target_state);
void (*pwr_domain_on_finish)(const psci_power_state_t *target_state);
void (*pwr_domain_suspend_finish)(
const psci_power_state_t *target_state);
void (*system_off)(void) __dead2;
void (*system_reset)(void) __dead2;
int (*validate_power_state)(unsigned int power_state,
psci_power_state_t *req_state);
int (*validate_ns_entrypoint)(unsigned long ns_entrypoint);
void (*get_sys_suspend_power_state)(
psci_power_state_t *req_state);
int (*get_pwr_lvl_state_idx)(plat_local_state_t pwr_domain_state,
int pwrlvl);
int (*translate_power_state_by_mpidr)(u_register_t mpidr,
unsigned int power_state,
psci_power_state_t *output_state);
int (*get_node_hw_state)(u_register_t mpidr, unsigned int power_level);
int (*mem_protect_chk)(uintptr_t base, u_register_t length);
int (*read_mem_protect)(int *val);
int (*write_mem_protect)(int val);
int (*system_reset2)(int is_vendor,
int reset_type, u_register_t cookie);
} plat_psci_ops_t;
The description of these handlers can be found in the `Porting Guide <porting-guide.rst#user-content-function--plat_setup_psci_ops-mandatory>`__.
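
A minimal sketch of a platform exporting these handlers through
``plat_setup_psci_ops()`` follows; the handler body is a stub and
``plat_program_mailbox()`` is a hypothetical helper, so treat this as an outline rather
than a real port.

.. code:: c

    static int plat_pwr_domain_on(u_register_t mpidr)
    {
        /* Platform-specific logic to power on the requested core. */
        return PSCI_E_SUCCESS;
    }

    static const plat_psci_ops_t plat_psci_pm_ops = {
        .pwr_domain_on = plat_pwr_domain_on,
        /* Remaining handlers populated as the platform supports them. */
    };

    int plat_setup_psci_ops(uintptr_t sec_entrypoint,
                            const plat_psci_ops_t **psci_ops)
    {
        /* Program the warm reset entry point once, e.g. into a mailbox. */
        plat_program_mailbox(sec_entrypoint);   /* hypothetical helper */

        *psci_ops = &plat_psci_pm_ops;
        return 0;
    }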
The previous ``plat_pm_ops`` structure is deprecated. Compared with the previous
handlers, the major differences are:
- Difference in parameters
The PSCI 1.0 implementation depends on the ``validate_power_state`` handler to
convert the power-state parameter (possibly encoding a composite power state)
passed in a PSCI ``CPU_SUSPEND`` to the ``psci_power_state`` format. This handler
is now mandatory for PSCI ``CPU_SUSPEND`` support.
The ``plat_psci_ops`` handlers, ``pwr_domain_off``, ``pwr_domain_suspend_early``
and ``pwr_domain_suspend``, are passed the target local state for each affected
power domain. The platform must execute operations specific to these target
states. Similarly, ``pwr_domain_on_finish`` and ``pwr_domain_suspend_finish``
are passed the local states of the affected power domains before wakeup. The
platform must execute actions to restore these power domains from these specific
local states.
- Difference in invocation
Whereas the power management handlers in ``plat_pm_ops`` used to be invoked
for each affinity level till the target affinity level, the new handlers
are only invoked once. The ``target_state`` encodes the target low power
state or the low power state woken up from for each affected power domain.
- Difference in semantics
Although the previous ``suspend`` handlers could be used for power down as well
as retention at different affinity levels, the new handlers make this support
explicit. The ``pwr_domain_suspend`` can be used to specify powerdown and
retention at various power domain levels subject to the conditions mentioned
in section 4.2.1 of `PSCI`_
Unlike the previous ``standby`` handler, the ``cpu_standby()`` handler is only used
as a fast path for placing a core power domain into a standby or retention
state.
The below diagram shows the sequence of a PSCI SUSPEND call and the interaction
with the platform layer depicting the exchange of data between PSCI Generic
layer and the platform layer.
|Image 1|
Refer `plat/arm/board/fvp/fvp\_pm.c`_ for the implementation details of
these handlers for the FVP. The commit `38dce70f51fb83b27958ba3e2ad15f5635cb1061`_
demonstrates the migration of Arm reference platforms to the new platform API.
Miscellaneous modifications
---------------------------
In addition to the framework changes, unification of warm reset entry points on
wakeup from low power modes has led to a change in the platform API. In the
earlier implementation, the warm reset entry used to be programmed into the
mailboxes by the 'ON' and 'SUSPEND' power management hooks. In the PSCI 1.0
implementation, this information is not required, because the implementation can
determine whether to execute the ``suspend_finisher`` or the ``on_finisher`` by
querying the affinity info state.
As a result, the warm reset entry point must be programmed only once. The
``plat_setup_psci_ops()`` API takes the secure entry point as an
additional parameter to enable the platforms to configure their mailbox. The
plat\_psci\_ops handlers ``pwr_domain_on`` and ``pwr_domain_suspend`` no longer take
the warm reset entry point as a parameter.
Also, some platform APIs which took ``MPIDR`` as an argument were only ever
invoked to perform actions specific to the caller core, which makes the argument
redundant. Therefore the platform APIs ``plat_get_my_entrypoint()``,
``plat_is_my_cpu_primary()``, ``plat_set_my_stack()`` and
``plat_get_my_stack()`` are defined; they are meant to be invoked only for
operations on the current caller core, replacing ``platform_get_entrypoint()``,
``platform_is_primary_cpu()``, ``platform_set_stack()`` and ``platform_get_stack()``.
Compatibility layer
-------------------
To ease the migration of the platform ports to the new porting interface,
a compatibility layer is introduced that essentially implements a glue layer
between the old platform API and the new API. The build flag
``ENABLE_PLAT_COMPAT`` (enabled by default), specifies whether to enable this
layer or not. A platform port which has migrated to the new API can disable
this flag within the platform specific makefile.
The compatibility layer works on the assumption that the onus of
state coordination, in case multiple low power states are supported,
is with the platform. The generic PSCI implementation only takes into
account whether the suspend request is power down or not. This corresponds
with the behavior of the PSCI implementation before the introduction of
new frameworks. Also, it assumes that the affinity levels of the platform
correspond directly to the power domain levels.
The compatibility layer dynamically constructs the new topology
description array by querying the platform using ``plat_get_aff_count()``
and ``plat_get_aff_state()`` APIs. The linear index returned by
``platform_get_core_pos()`` is used as the core index for the cores. The
higher-level (non-core) power domain nodes must know the cores contained
within their domains. They do so by storing the core index of the first core
within them and the number of core indices that follow it. This means that core
indices returned by ``platform_get_core_pos()`` for cores within a particular
power domain must be consecutive. We expect that this is the case for most
platform ports including Arm reference platforms.
The old PSCI helpers like ``psci_get_suspend_powerstate()``,
``psci_get_suspend_stateid()``, ``psci_get_suspend_stateid_by_mpidr()``,
``psci_get_max_phys_off_afflvl()`` and ``psci_get_suspend_afflvl()`` are also
implemented for the compatibility layer. This allows the existing
platform ports to work with the new PSCI frameworks without significant
rework.
Deprecated Platform API
-----------------------
This section documents the deprecated platform porting API.
Common mandatory modifications
------------------------------
The mandatory macros to be defined by the platform port in ``platform_def.h`` are:
- **#define : PLATFORM\_NUM\_AFFS**
Defines the total number of nodes in the affinity hierarchy at all affinity
levels used by the platform.
- **#define : PLATFORM\_MAX\_AFFLVL**
Defines the maximum affinity level that the power management operations
should apply to. Armv8-A has support for four affinity levels. It is likely
that hardware will implement fewer affinity levels. This macro allows the
PSCI implementation to consider only those affinity levels in the system
that the platform implements. For example, the Base AEM FVP implements two
clusters with a configurable number of cores. It reports the maximum
affinity level as 1, resulting in PSCI power control up to the cluster
level.
The following functions must be implemented by the platform port to enable
the reset vector code to perform the required tasks.
Function : platform\_get\_entrypoint() [mandatory]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : unsigned long
Return : unsigned long
This function is called with the ``SCTLR.M`` and ``SCTLR.C`` bits disabled. The core
is identified by its ``MPIDR``, which is passed as the argument. The function is
responsible for distinguishing between a warm and cold reset using platform-
specific means. If it is a warm reset, it returns the entrypoint into the
BL31 image that the core must jump to. If it is a cold reset, this function
must return zero.
This function is also responsible for implementing a platform-specific mechanism
to handle the condition where the core has been warm reset but there is no
entrypoint to jump to.
This function does not follow the Procedure Call Standard used by the
Application Binary Interface for the Arm 64-bit architecture. The caller should
not assume that callee saved registers are preserved across a call to this
function.
Function : platform\_is\_primary\_cpu() [mandatory]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : unsigned long
Return : unsigned int
This function identifies a core by its ``MPIDR``, which is passed as the argument,
to determine whether this core is the primary core or a secondary core. A return
value of zero indicates that the core is not the primary core, while a non-zero
return value indicates that the core is the primary core.
Common optional modifications
-----------------------------
Function : platform\_get\_core\_pos()
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : unsigned long
Return : int
A platform may need to convert the ``MPIDR`` of a core to an absolute number, which
can be used as a core-specific linear index into blocks of memory (for example
while allocating per-core stacks). This routine contains a simple mechanism
to perform this conversion, using the assumption that each cluster contains a
maximum of four cores:
::
linear index = cpu_id + (cluster_id * 4)
cpu_id = 8-bit value in MPIDR at affinity level 0
cluster_id = 8-bit value in MPIDR at affinity level 1
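
Expressed in C, the conversion above amounts to the following sketch, which assumes the
8-bit affinity fields at levels 0 and 1 of the ``MPIDR``:

.. code:: c

    /* Deprecated linear-index conversion, assuming at most four cores
     * per cluster as described above. */
    int platform_get_core_pos(unsigned long mpidr)
    {
        unsigned int cpu_id = mpidr & 0xff;             /* Affinity level 0 */
        unsigned int cluster_id = (mpidr >> 8) & 0xff;  /* Affinity level 1 */

        return cpu_id + (cluster_id * 4);
    }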
Function : platform\_set\_stack()
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : unsigned long
Return : void
This function sets the current stack pointer to the normal memory stack that
has been allocated for the core specified by MPIDR. For BL images that only
require a stack for the primary core the parameter is ignored. The size of
the stack allocated to each core is specified by the platform defined constant
``PLATFORM_STACK_SIZE``.
Common implementations of this function for the UP and MP BL images are
provided in `plat/common/aarch64/platform\_up\_stack.S`_ and
`plat/common/aarch64/platform\_mp\_stack.S`_
Function : platform\_get\_stack()
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : unsigned long
Return : unsigned long
This function returns the base address of the normal memory stack that
has been allocated for the core specified by MPIDR. For BL images that only
require a stack for the primary core the parameter is ignored. The size of
the stack allocated to each core is specified by the platform defined constant
``PLATFORM_STACK_SIZE``.
Common implementations of this function for the UP and MP BL images are
provided in `plat/common/aarch64/platform\_up\_stack.S`_ and
`plat/common/aarch64/platform\_mp\_stack.S`_
Modifications for Power State Coordination Interface (in BL31)
--------------------------------------------------------------
The following functions must be implemented to initialize PSCI functionality in
TF-A.
Function : plat\_get\_aff\_count() [mandatory]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : unsigned int, unsigned long
Return : unsigned int
This function may execute with the MMU and data caches enabled if the platform
port does the necessary initializations in ``bl31_plat_arch_setup()``. It is only
called by the primary core.
This function is called by the PSCI initialization code to detect the system
topology. Its purpose is to return the number of affinity instances implemented
at a given ``affinity level`` (specified by the first argument) and a given
``MPIDR`` (specified by the second argument). For example, on a dual-cluster
system where first cluster implements two cores and the second cluster
implements four cores, a call to this function with an ``MPIDR`` corresponding
to the first cluster (``0x0``) and affinity level 0, would return 2. A call
to this function with an ``MPIDR`` corresponding to the second cluster (``0x100``)
and affinity level 0, would return 4.
Function : plat\_get\_aff\_state() [mandatory]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : unsigned int, unsigned long
Return : unsigned int
This function may execute with the MMU and data caches enabled if the platform
port does the necessary initializations in ``bl31_plat_arch_setup()``. It is only
called by the primary core.
This function is called by the PSCI initialization code. Its purpose is to
return the state of an affinity instance. The affinity instance is determined by
the affinity ID at a given ``affinity level`` (specified by the first argument)
and an ``MPIDR`` (specified by the second argument). The state can be one of
``PSCI_AFF_PRESENT`` or ``PSCI_AFF_ABSENT``. The latter state is used to cater for
system topologies where certain affinity instances are unimplemented. For
example, consider a platform that implements a single cluster with four cores and
another core implemented directly on the interconnect with the cluster. The
``MPIDR``\ s of the cluster would range from ``0x0-0x3``. The ``MPIDR`` of the single
core is 0x100 to indicate that it does not belong to cluster 0. Cluster 1
is missing but needs to be accounted for to reach this single core in the
topology tree. Therefore it is marked as ``PSCI_AFF_ABSENT``.
Function : platform\_setup\_pm() [mandatory]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : const plat_pm_ops **
Return : int
This function may execute with the MMU and data caches enabled if the platform
port does the necessary initializations in ``bl31_plat_arch_setup()``. It is only
called by the primary core.
This function is called by PSCI initialization code. Its purpose is to export
handler routines for platform-specific power management actions by populating
the passed pointer with a pointer to the private ``plat_pm_ops`` structure of
BL31.
A description of each member of this structure is given below. A platform port
is expected to implement these handlers if the corresponding PSCI operation
is to be supported and these handlers are expected to succeed if the return
type is ``void``.
plat\_pm\_ops.affinst\_standby()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Perform the platform-specific setup to enter the standby state indicated by the
passed argument. The generic code expects the handler to succeed.
plat\_pm\_ops.affinst\_on()
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Perform the platform specific setup to power on an affinity instance, specified
by the ``MPIDR`` (first argument) and ``affinity level`` (third argument). The
``state`` (fourth argument) contains the current state of that affinity instance
(ON or OFF). This is useful to determine whether any action must be taken. For
example, while powering on a core, the cluster that contains this core might
already be in the ON state. The platform decides what actions must be taken to
transition from the current state to the target state (indicated by the power
management operation). The generic code expects the platform to return
E\_SUCCESS on success or E\_INTERN\_FAIL for any failure.
plat\_pm\_ops.affinst\_off()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Perform the platform specific setup to power off an affinity instance of the
calling core. It is called by the PSCI ``CPU_OFF`` API implementation.
The ``affinity level`` (first argument) and ``state`` (second argument) have
a similar meaning as described in the ``affinst_on()`` operation. They
identify the affinity instance on which the call is made and its
current state. This gives the platform port an indication of the
state transition it must make to perform the requested action. For example, if
the calling core is the last powered on core in the cluster, after powering down
affinity level 0 (the core), the platform port should power down affinity
level 1 (the cluster) as well. The generic code expects the handler to succeed.
plat\_pm\_ops.affinst\_suspend()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Perform the platform-specific setup to power off an affinity instance of the
calling core. It is called by the PSCI ``CPU_SUSPEND`` and ``SYSTEM_SUSPEND``
API implementations.
The ``affinity level`` (second argument) and ``state`` (third argument) have a
similar meaning as described in the ``affinst_on()`` operation. They are used to
identify the affinity instance on which the call is made and its current state.
This gives the platform port an indication of the state transition it must
make to perform the requested action. For example, if the calling core is the
last powered on core in the cluster, after powering down affinity level 0
(the core), the platform port should power down affinity level 1 (the cluster)
as well.
The difference between turning an affinity instance off and suspending it
is that in the former case, the affinity instance is expected to re-initialize
its state when it is next powered on (see ``affinst_on_finish()``). In the latter
case, the affinity instance is expected to save enough state so that it can
resume execution by restoring this state when it is powered on (see
``affinst_suspend_finish()``). The generic code expects the handler to succeed.
plat\_pm\_ops.affinst\_on\_finish()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This function is called by the PSCI implementation after the calling core is
powered on and released from reset in response to an earlier PSCI ``CPU_ON`` call.
It performs the platform-specific setup required to initialize enough state for
this core to enter the Normal world and also provide secure runtime firmware
services.
The ``affinity level`` (first argument) and ``state`` (second argument) have a
similar meaning as described in the previous operations. The generic code
expects the handler to succeed.
plat\_pm\_ops.affinst\_suspend\_finish()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This function is called by the PSCI implementation after the calling core is
powered on and released from reset in response to an asynchronous wakeup
event, for example a timer interrupt that was programmed by the core during the
``CPU_SUSPEND`` call or ``SYSTEM_SUSPEND`` call. It performs the platform-specific
setup required to restore the saved state for this core to resume execution
in the Normal world and also provide secure runtime firmware services.
The ``affinity level`` (first argument) and ``state`` (second argument) have a
similar meaning as described in the previous operations. The generic code
expects the handler to succeed.
plat\_pm\_ops.validate\_power\_state()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This function is called by the PSCI implementation during the ``CPU_SUSPEND``
call to validate the ``power_state`` parameter of the PSCI API. If the
``power_state`` is known to be invalid, the platform must return
PSCI\_E\_INVALID\_PARAMS as an error, which is propagated back to the Normal
world PSCI client.
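A hedged sketch of such a check is shown below. The State-ID mask and the
``PLAT_MAX_STATE_ID`` limit are hypothetical; the sketch only illustrates the
expected return values.

::

    int platform_validate_power_state(unsigned int power_state)
    {
        /* Illustrative decoding: assume the low 16 bits carry the State-ID */
        unsigned int state_id = power_state & 0xffffU;

        /* PLAT_MAX_STATE_ID is a hypothetical platform-defined limit */
        if (state_id > PLAT_MAX_STATE_ID)
            return PSCI_E_INVALID_PARAMS;

        return PSCI_E_SUCCESS;
    }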
plat\_pm\_ops.validate\_ns\_entrypoint()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This function is called by the PSCI implementation during the ``CPU_SUSPEND``,
``SYSTEM_SUSPEND`` and ``CPU_ON`` calls to validate the Non-secure ``entry_point``
parameter passed by the Normal world. If the ``entry_point`` is known to be
invalid, the platform must return PSCI\_E\_INVALID\_PARAMS as an error, which is
propagated back to the Normal world PSCI client.
plat\_pm\_ops.get\_sys\_suspend\_power\_state()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This function is called by the PSCI implementation during the ``SYSTEM_SUSPEND``
call to return the ``power_state`` parameter. This allows the platform to encode
the appropriate State-ID field within the ``power_state`` parameter which can be
utilized in ``affinst_suspend()`` to suspend to system affinity level. The
``power_state`` parameter should be in the same format as specified by the
PSCI specification for the CPU\_SUSPEND API.
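For illustration, one possible sketch of this handler is shown below. The
State-ID value and the field offsets are assumptions made for this example; a
real platform must use whatever encoding its ``affinst_suspend()`` handler
expects.

::

    /* Hypothetical State-ID and field offsets, for illustration only */
    #define PLAT_SYS_SUSPEND_STATE_ID   0x77U
    #define PSTATE_TYPE_POWERDOWN       (1U << 16)
    #define PSTATE_AFF_LVL_SHIFT        24U

    unsigned int platform_get_sys_suspend_power_state(void)
    {
        /* Request a power-down state that targets the system affinity level */
        return PLAT_SYS_SUSPEND_STATE_ID | PSTATE_TYPE_POWERDOWN |
               ((unsigned int)MPIDR_AFFLVL2 << PSTATE_AFF_LVL_SHIFT);
    }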
--------------
*Copyright (c) 2015-2018, Arm Limited and Contributors. All rights reserved.*
.. _PSCI: http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf
.. _Porting Guide: porting-guide.rst#user-content-function--plat_my_core_pos
.. _psci pd tree: psci-pd-tree.rst
.. _plat/arm/board/fvp/fvp\_pm.c: ../plat/arm/board/fvp/fvp_pm.c
.. _38dce70f51fb83b27958ba3e2ad15f5635cb1061: https://github.com/ARM-software/arm-trusted-firmware/commit/38dce70f51fb83b27958ba3e2ad15f5635cb1061
.. _plat/common/aarch64/platform\_up\_stack.S: ../plat/common/aarch64/platform_up_stack.S
.. _plat/common/aarch64/platform\_mp\_stack.S: ../plat/common/aarch64/platform_mp_stack.S
.. |Image 1| image:: diagrams/psci-suspend-sequence.png?raw=true
View File
@ -12,10 +12,6 @@ Trusted Firmware-A Porting Guide
Introduction
------------
Please note that this document has been updated for the new platform API
as required by the PSCI v1.0 implementation. Please refer to the
`Migration Guide`_ for the previous platform API.
Porting Trusted Firmware-A (TF-A) to a new platform involves making some
mandatory and optional modifications for both the cold and warm boot paths.
Modifications consist of:
@ -481,13 +477,6 @@ constants must also be defined:
enabled for a BL image, ``MAX_MMAP_REGIONS`` must be defined to accommodate
the dynamic regions as well.
- **#define : ADDR\_SPACE\_SIZE**
Defines the total size of the address space in bytes. For example, for a 32
bit address space, this value should be ``(1ULL << 32)``. This definition is
now deprecated, platforms should use ``PLAT_PHY_ADDR_SPACE_SIZE`` and
``PLAT_VIRT_ADDR_SPACE_SIZE`` instead.
- **#define : PLAT\_VIRT\_ADDR\_SPACE\_SIZE**
Defines the total size of the virtual address space in bytes. For example,
@ -2976,12 +2965,6 @@ The default implementation of this function calls
Build flags
-----------
- **ENABLE\_PLAT\_COMPAT**
All platform ports conforming to this API specification should define
the build flag ``ENABLE_PLAT_COMPAT`` to 0, as the compatibility layer should
be disabled. For more details on the compatibility layer, refer to the
`Migration Guide`_.
There are some build flags which can be defined by the platform to control
inclusion or exclusion of certain BL stages from the FIP image. These flags
need to be defined in the platform makefile which will get included by the
@ -3067,7 +3050,6 @@ amount of open resources per driver.
*Copyright (c) 2013-2018, Arm Limited and Contributors. All rights reserved.*
.. _Migration Guide: platform-migration-guide.rst
.. _include/plat/common/platform.h: ../include/plat/common/platform.h
.. _include/plat/arm/common/plat\_arm.h: ../include/plat/arm/common/plat_arm.h%5D
.. _User Guide: user-guide.rst
View File
@ -234,11 +234,6 @@ Common build options
compiling TF-A. Its value must be numeric and defaults to 0. See also
*Armv8 Architecture Extensions* in `Firmware Design`_.
- ``ARM_GIC_ARCH``: Choice of Arm GIC architecture version used by the Arm
Legacy GIC driver for implementing the platform GIC API. This API is used
by the interrupt management framework. Default is 2 (that is, version 2.0).
This build option is deprecated.
- ``ARM_PLAT_MT``: This flag determines whether the Arm platform layer has to
cater for the multi-threading ``MT`` bit when accessing MPIDR. When this flag
is set, the functions which deal with MPIDR assume that the ``MT`` bit in
@ -334,8 +329,8 @@ Common build options
- ``DYN_DISABLE_AUTH``: Provides the capability to dynamically disable Trusted
Board Boot authentication at runtime. This option is meant to be enabled only
for development platforms. Both TRUSTED_BOARD_BOOT and LOAD_IMAGE_V2 flags
must be set if this flag has to be enabled. 0 is the default.
for development platforms. ``TRUSTED_BOARD_BOOT`` flag must be set if this
flag has to be enabled. 0 is the default.
- ``EL3_PAYLOAD_BASE``: This option enables booting an EL3 payload instead of
the normal boot flow. It must specify the entry point address of the EL3
@ -514,12 +509,6 @@ Common build options
- ``LDFLAGS``: Extra user options appended to the linkers' command line in
addition to the one set by the build system.
- ``LOAD_IMAGE_V2``: Boolean option to enable support for new version (v2) of
image loading, which provides more flexibility and scalability around what
images are loaded and executed during boot. Default is 0.
Note: this flag must be enabled for AArch32 builds.
- ``LOG_LEVEL``: Chooses the log level, which controls the amount of console log
output compiled into the build. This should be one of the following:
@ -844,9 +833,6 @@ Arm FVP platform specific build options
- ``FVP_GIC600`` : The GIC600 implementation of GICv3 is selected
- ``FVP_GICV2`` : The GICv2 only driver is selected
- ``FVP_GICV3`` : The GICv3 only driver is selected (default option)
- ``FVP_GICV3_LEGACY``: The Legacy GICv3 driver is selected (deprecated)
Note: If TF-A is compiled with this option on FVPs with GICv3 hardware,
then it configures the hardware to run in GICv2 emulation mode
- ``FVP_USE_SP804_TIMER`` : Use the SP804 timer instead of the Generic Timer
for functions that wait for an arbitrary time length (udelay and mdelay).
@ -1085,18 +1071,6 @@ destination. In that case, use -f or --force to continue.
More information about FIP can be found in the `Firmware Design`_ document.
Migrating from fip\_create to fiptool
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The previous version of fiptool was called fip\_create. A compatibility script
that emulates the basic functionality of the previous fip\_create is provided.
However, users are strongly encouraged to migrate to fiptool.
- To create a new FIP file, replace "fip\_create" with "fiptool create".
- To update a FIP file, replace "fip\_create" with "fiptool update".
- To dump the contents of a FIP file, replace "fip\_create --dump"
with "fiptool info".
Building FIP images with support for Trusted Board Boot
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -1215,12 +1189,12 @@ command:
make PLAT=<platform> [DEBUG=1] [V=1] certtool
For platforms that do not require their own IDs in certificate files,
the generic 'cert\_create' tool can be built with the following command:
For platforms that require their own IDs in certificate files, the generic
'cert\_create' tool can be built with the following command:
::
make USE_TBBR_DEFS=1 [DEBUG=1] [V=1] certtool
make USE_TBBR_DEFS=0 [DEBUG=1] [V=1] certtool
``DEBUG=1`` builds the tool in debug mode. ``V=1`` makes the build process more
verbose. The following command should be used to obtain help about the tool:
View File
@ -1,88 +0,0 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <assert.h>
#include <cci400.h>
#include <debug.h>
#include <mmio.h>
#include <stdint.h>
#define MAX_CLUSTERS 2
static uintptr_t cci_base_addr;
static unsigned int cci_cluster_ix_to_iface[MAX_CLUSTERS];
void cci_init(uintptr_t cci_base,
int slave_iface3_cluster_ix,
int slave_iface4_cluster_ix)
{
/*
* Check the passed arguments are valid. The cluster indices must be
* less than MAX_CLUSTERS, not the same as each other and at least one
* of them must refer to a valid cluster index.
*/
assert(cci_base);
assert(slave_iface3_cluster_ix < MAX_CLUSTERS);
assert(slave_iface4_cluster_ix < MAX_CLUSTERS);
assert(slave_iface3_cluster_ix != slave_iface4_cluster_ix);
assert((slave_iface3_cluster_ix >= 0) ||
(slave_iface4_cluster_ix >= 0));
WARN("Please migrate to common cci driver, This driver will be" \
" deprecated in future\n");
cci_base_addr = cci_base;
if (slave_iface3_cluster_ix >= 0)
cci_cluster_ix_to_iface[slave_iface3_cluster_ix] =
SLAVE_IFACE3_OFFSET;
if (slave_iface4_cluster_ix >= 0)
cci_cluster_ix_to_iface[slave_iface4_cluster_ix] =
SLAVE_IFACE4_OFFSET;
}
static inline unsigned long get_slave_iface_base(unsigned long mpidr)
{
/*
* We assume the TF topology code allocates affinity instances
* consecutively from zero.
* It is a programming error if this is called without initializing
* the slave interface to use for this cluster.
*/
unsigned int cluster_id =
(mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
assert(cluster_id < MAX_CLUSTERS);
assert(cci_cluster_ix_to_iface[cluster_id] != 0);
return cci_base_addr + cci_cluster_ix_to_iface[cluster_id];
}
void cci_enable_cluster_coherency(unsigned long mpidr)
{
assert(cci_base_addr);
/* Enable Snoops and DVM messages */
mmio_write_32(get_slave_iface_base(mpidr) + SNOOP_CTRL_REG,
DVM_EN_BIT | SNOOP_EN_BIT);
/* Wait for the dust to settle down */
while (mmio_read_32(cci_base_addr + STATUS_REG) & CHANGE_PENDING_BIT)
;
}
void cci_disable_cluster_coherency(unsigned long mpidr)
{
assert(cci_base_addr);
/* Disable Snoops and DVM messages */
mmio_write_32(get_slave_iface_base(mpidr) + SNOOP_CTRL_REG,
~(DVM_EN_BIT | SNOOP_EN_BIT));
/* Wait for the dust to settle down */
while (mmio_read_32(cci_base_addr + STATUS_REG) & CHANGE_PENDING_BIT)
;
}
View File
@ -1,435 +0,0 @@
/*
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <arch_helpers.h>
#include <arm_gic.h>
#include <assert.h>
#include <bl_common.h>
#include <debug.h>
#include <gic_v2.h>
#include <gic_v3.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <stdint.h>
/* Value used to initialize Non-Secure IRQ priorities four at a time */
#define GICD_IPRIORITYR_DEF_VAL \
(GIC_HIGHEST_NS_PRIORITY | \
(GIC_HIGHEST_NS_PRIORITY << 8) | \
(GIC_HIGHEST_NS_PRIORITY << 16) | \
(GIC_HIGHEST_NS_PRIORITY << 24))
static uintptr_t g_gicc_base;
static uintptr_t g_gicd_base;
static uintptr_t g_gicr_base;
static const unsigned int *g_irq_sec_ptr;
static unsigned int g_num_irqs;
/*******************************************************************************
* This function does some minimal GICv3 configuration. The Firmware itself does
* not fully support GICv3 at this time and relies on GICv2 emulation as
* provided by GICv3. This function allows software (like Linux) in later stages
* to use full GICv3 features.
******************************************************************************/
static void gicv3_cpuif_setup(void)
{
unsigned int val;
uintptr_t base;
/*
* When CPUs come out of reset they have their GICR_WAKER.ProcessorSleep
* bit set. In order to allow interrupts to get routed to the CPU we
* need to clear this bit if set and wait for GICR_WAKER.ChildrenAsleep
* to clear (GICv3 Architecture specification 5.4.23).
* GICR_WAKER is NOT banked per CPU, compute the correct base address
* per CPU.
*/
assert(g_gicr_base);
base = gicv3_get_rdist(g_gicr_base, read_mpidr());
if (base == (uintptr_t)NULL) {
/* No re-distributor base address. This interface cannot be
* configured.
*/
panic();
}
val = gicr_read_waker(base);
val &= ~WAKER_PS;
gicr_write_waker(base, val);
dsb();
/* We need to wait for ChildrenAsleep to clear. */
val = gicr_read_waker(base);
while (val & WAKER_CA)
val = gicr_read_waker(base);
val = read_icc_sre_el3();
write_icc_sre_el3(val | ICC_SRE_EN | ICC_SRE_SRE);
isb();
}
/*******************************************************************************
* This function does some minimal GICv3 configuration when cores go
* down.
******************************************************************************/
static void gicv3_cpuif_deactivate(void)
{
unsigned int val;
uintptr_t base;
/*
* When taking CPUs down we need to set GICR_WAKER.ProcessorSleep and
* wait for GICR_WAKER.ChildrenAsleep to get set.
* (GICv3 Architecture specification 5.4.23).
* GICR_WAKER is NOT banked per CPU, compute the correct base address
* per CPU.
*/
assert(g_gicr_base);
base = gicv3_get_rdist(g_gicr_base, read_mpidr());
if (base == (uintptr_t)NULL) {
/* No re-distributor base address. This interface cannot be
* configured.
*/
panic();
}
val = gicr_read_waker(base);
val |= WAKER_PS;
gicr_write_waker(base, val);
dsb();
/* We need to wait for ChildrenAsleep to set. */
val = gicr_read_waker(base);
while ((val & WAKER_CA) == 0)
val = gicr_read_waker(base);
}
/*******************************************************************************
* Enable secure interrupts and use FIQs to route them. Disable legacy bypass
* and set the priority mask register to allow all interrupts to trickle in.
******************************************************************************/
void arm_gic_cpuif_setup(void)
{
unsigned int val;
assert(g_gicc_base);
val = gicc_read_iidr(g_gicc_base);
/*
* If GICv3 we need to do a bit of additional setup. We want to
* allow default GICv2 behaviour but allow the next stage to
* enable full gicv3 features.
*/
if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3)
gicv3_cpuif_setup();
val = ENABLE_GRP0 | FIQ_EN | FIQ_BYP_DIS_GRP0;
val |= IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP1 | IRQ_BYP_DIS_GRP1;
gicc_write_pmr(g_gicc_base, GIC_PRI_MASK);
gicc_write_ctlr(g_gicc_base, val);
}
/*******************************************************************************
* Place the cpu interface in a state where it can never make a cpu exit wfi as
* a result of an asserted interrupt. This is critical for powering down a cpu
******************************************************************************/
void arm_gic_cpuif_deactivate(void)
{
unsigned int val;
/* Disable secure, non-secure interrupts and disable their bypass */
assert(g_gicc_base);
val = gicc_read_ctlr(g_gicc_base);
val &= ~(ENABLE_GRP0 | ENABLE_GRP1);
val |= FIQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP0;
val |= IRQ_BYP_DIS_GRP0 | IRQ_BYP_DIS_GRP1;
gicc_write_ctlr(g_gicc_base, val);
val = gicc_read_iidr(g_gicc_base);
/*
* If GICv3 we need to do a bit of additional setup. Make sure the
* RDIST is put to sleep.
*/
if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3)
gicv3_cpuif_deactivate();
}
/*******************************************************************************
* Per cpu gic distributor setup which will be done by all cpus after a cold
* boot/hotplug. This marks out the secure interrupts & enables them.
******************************************************************************/
void arm_gic_pcpu_distif_setup(void)
{
unsigned int index, irq_num, sec_ppi_sgi_mask;
assert(g_gicd_base);
/* Setup PPI priorities doing four at a time */
for (index = 0; index < 32; index += 4) {
gicd_write_ipriorityr(g_gicd_base, index,
GICD_IPRIORITYR_DEF_VAL);
}
assert(g_irq_sec_ptr);
sec_ppi_sgi_mask = 0;
/* Ensure all SGIs and PPIs are Group0 to begin with */
gicd_write_igroupr(g_gicd_base, 0, 0);
for (index = 0; index < g_num_irqs; index++) {
irq_num = g_irq_sec_ptr[index];
if (irq_num < MIN_SPI_ID) {
/* We have an SGI or a PPI */
sec_ppi_sgi_mask |= 1U << irq_num;
gicd_set_ipriorityr(g_gicd_base, irq_num,
GIC_HIGHEST_SEC_PRIORITY);
gicd_set_isenabler(g_gicd_base, irq_num);
}
}
/*
* Invert the bitmask to create a mask for non-secure PPIs and
* SGIs. Program the GICD_IGROUPR0 with this bit mask. This write will
* update the GICR_IGROUPR0 as well in case we are running on a GICv3
* system. This is critical if GICD_CTLR.ARE_NS=1.
*/
gicd_write_igroupr(g_gicd_base, 0, ~sec_ppi_sgi_mask);
}
/*******************************************************************************
* Get the current CPU bit mask from GICD_ITARGETSR0
******************************************************************************/
static unsigned int arm_gic_get_cpuif_id(void)
{
unsigned int val;
val = gicd_read_itargetsr(g_gicd_base, 0);
return val & GIC_TARGET_CPU_MASK;
}
/*******************************************************************************
* Global gic distributor setup which will be done by the primary cpu after a
* cold boot. It marks out the secure SPIs, PPIs & SGIs and enables them. It
* then enables the secure GIC distributor interface.
******************************************************************************/
static void arm_gic_distif_setup(void)
{
unsigned int num_ints, ctlr, index, irq_num;
uint8_t target_cpu;
/* Disable the distributor before going further */
assert(g_gicd_base);
ctlr = gicd_read_ctlr(g_gicd_base);
ctlr &= ~(ENABLE_GRP0 | ENABLE_GRP1);
gicd_write_ctlr(g_gicd_base, ctlr);
/*
* Mark out non-secure SPI interrupts. The number of interrupts is
* calculated as 32 * (IT_LINES + 1). We do 32 at a time.
*/
num_ints = gicd_read_typer(g_gicd_base) & IT_LINES_NO_MASK;
num_ints = (num_ints + 1) << 5;
for (index = MIN_SPI_ID; index < num_ints; index += 32)
gicd_write_igroupr(g_gicd_base, index, ~0);
/* Setup SPI priorities doing four at a time */
for (index = MIN_SPI_ID; index < num_ints; index += 4) {
gicd_write_ipriorityr(g_gicd_base, index,
GICD_IPRIORITYR_DEF_VAL);
}
/* Read the target CPU mask */
target_cpu = arm_gic_get_cpuif_id();
/* Configure SPI secure interrupts now */
assert(g_irq_sec_ptr);
for (index = 0; index < g_num_irqs; index++) {
irq_num = g_irq_sec_ptr[index];
if (irq_num >= MIN_SPI_ID) {
/* We have an SPI */
gicd_clr_igroupr(g_gicd_base, irq_num);
gicd_set_ipriorityr(g_gicd_base, irq_num,
GIC_HIGHEST_SEC_PRIORITY);
gicd_set_itargetsr(g_gicd_base, irq_num, target_cpu);
gicd_set_isenabler(g_gicd_base, irq_num);
}
}
/*
* Configure the SGI and PPI. This is done in a separated function
* because each CPU is responsible for initializing its own private
* interrupts.
*/
arm_gic_pcpu_distif_setup();
gicd_write_ctlr(g_gicd_base, ctlr | ENABLE_GRP0);
}
/*******************************************************************************
* Initialize the ARM GIC driver with the provided platform inputs
******************************************************************************/
void arm_gic_init(uintptr_t gicc_base,
uintptr_t gicd_base,
uintptr_t gicr_base,
const unsigned int *irq_sec_ptr,
unsigned int num_irqs)
{
unsigned int val;
assert(gicc_base);
assert(gicd_base);
assert(irq_sec_ptr);
g_gicc_base = gicc_base;
g_gicd_base = gicd_base;
val = gicc_read_iidr(g_gicc_base);
if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3) {
assert(gicr_base);
g_gicr_base = gicr_base;
}
g_irq_sec_ptr = irq_sec_ptr;
g_num_irqs = num_irqs;
}
/*******************************************************************************
* Setup the ARM GIC CPU and distributor interfaces.
******************************************************************************/
void arm_gic_setup(void)
{
arm_gic_cpuif_setup();
arm_gic_distif_setup();
}
/*******************************************************************************
* An ARM processor signals interrupt exceptions through the IRQ and FIQ pins.
* The interrupt controller knows which pin/line it uses to signal a type of
* interrupt. This function provides a common implementation of
* plat_interrupt_type_to_line() in an ARM GIC environment for optional re-use
* across platforms. It lets the interrupt management framework determine
* for a type of interrupt and security state, which line should be used in the
* SCR_EL3 to control its routing to EL3. The interrupt line is represented as
* the bit position of the IRQ or FIQ bit in the SCR_EL3.
******************************************************************************/
uint32_t arm_gic_interrupt_type_to_line(uint32_t type,
uint32_t security_state)
{
assert(type == INTR_TYPE_S_EL1 ||
type == INTR_TYPE_EL3 ||
type == INTR_TYPE_NS);
assert(sec_state_is_valid(security_state));
/*
* We ignore the security state parameter under the assumption that
* both normal and secure worlds are using ARM GICv2. This parameter
* will be used when the secure world starts using GICv3.
*/
#if ARM_GIC_ARCH == 2
return gicv2_interrupt_type_to_line(g_gicc_base, type);
#else
#error "Invalid ARM GIC architecture version specified for platform port"
#endif /* ARM_GIC_ARCH */
}
#if ARM_GIC_ARCH == 2
/*******************************************************************************
* This function returns the type of the highest priority pending interrupt at
* the GIC cpu interface. INTR_TYPE_INVAL is returned when there is no
* interrupt pending.
******************************************************************************/
uint32_t arm_gic_get_pending_interrupt_type(void)
{
uint32_t id;
assert(g_gicc_base);
id = gicc_read_hppir(g_gicc_base) & INT_ID_MASK;
/* Assume that all secure interrupts are S-EL1 interrupts */
if (id < 1022)
return INTR_TYPE_S_EL1;
if (id == GIC_SPURIOUS_INTERRUPT)
return INTR_TYPE_INVAL;
return INTR_TYPE_NS;
}
/*******************************************************************************
* This function returns the id of the highest priority pending interrupt at
* the GIC cpu interface. INTR_ID_UNAVAILABLE is returned when there is no
* interrupt pending.
******************************************************************************/
uint32_t arm_gic_get_pending_interrupt_id(void)
{
uint32_t id;
assert(g_gicc_base);
id = gicc_read_hppir(g_gicc_base) & INT_ID_MASK;
if (id < 1022)
return id;
if (id == 1023)
return INTR_ID_UNAVAILABLE;
/*
* Find out which non-secure interrupt it is under the assumption that
* the GICC_CTLR.AckCtl bit is 0.
*/
return gicc_read_ahppir(g_gicc_base) & INT_ID_MASK;
}
/*******************************************************************************
* This function reads the GIC cpu interface Interrupt Acknowledge register
* to start handling the pending interrupt. It returns the contents of the IAR.
******************************************************************************/
uint32_t arm_gic_acknowledge_interrupt(void)
{
assert(g_gicc_base);
return gicc_read_IAR(g_gicc_base);
}
/*******************************************************************************
* This function writes the GIC cpu interface End Of Interrupt register with
* the passed value to finish handling the active interrupt
******************************************************************************/
void arm_gic_end_of_interrupt(uint32_t id)
{
assert(g_gicc_base);
gicc_write_EOIR(g_gicc_base, id);
}
/*******************************************************************************
* This function returns the type of the interrupt id depending upon the group
* this interrupt has been configured under by the interrupt controller i.e.
* group0 or group1.
******************************************************************************/
uint32_t arm_gic_get_interrupt_type(uint32_t id)
{
uint32_t group;
assert(g_gicd_base);
group = gicd_get_igroupr(g_gicd_base, id);
/* Assume that all secure interrupts are S-EL1 interrupts */
if (group == GRP0)
return INTR_TYPE_S_EL1;
else
return INTR_TYPE_NS;
}
#else
#error "Invalid ARM GIC architecture version specified for platform port"
#endif /* ARM_GIC_ARCH */
View File
@ -1,284 +0,0 @@
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <assert.h>
#include <gic_v2.h>
#include <interrupt_mgmt.h>
#include <mmio.h>
/*******************************************************************************
* GIC Distributor interface accessors for reading entire registers
******************************************************************************/
unsigned int gicd_read_igroupr(uintptr_t base, unsigned int id)
{
unsigned n = id >> IGROUPR_SHIFT;
return mmio_read_32(base + GICD_IGROUPR + (n << 2));
}
unsigned int gicd_read_isenabler(uintptr_t base, unsigned int id)
{
unsigned n = id >> ISENABLER_SHIFT;
return mmio_read_32(base + GICD_ISENABLER + (n << 2));
}
unsigned int gicd_read_icenabler(uintptr_t base, unsigned int id)
{
unsigned n = id >> ICENABLER_SHIFT;
return mmio_read_32(base + GICD_ICENABLER + (n << 2));
}
unsigned int gicd_read_ispendr(uintptr_t base, unsigned int id)
{
unsigned n = id >> ISPENDR_SHIFT;
return mmio_read_32(base + GICD_ISPENDR + (n << 2));
}
unsigned int gicd_read_icpendr(uintptr_t base, unsigned int id)
{
unsigned n = id >> ICPENDR_SHIFT;
return mmio_read_32(base + GICD_ICPENDR + (n << 2));
}
unsigned int gicd_read_isactiver(uintptr_t base, unsigned int id)
{
unsigned n = id >> ISACTIVER_SHIFT;
return mmio_read_32(base + GICD_ISACTIVER + (n << 2));
}
unsigned int gicd_read_icactiver(uintptr_t base, unsigned int id)
{
unsigned n = id >> ICACTIVER_SHIFT;
return mmio_read_32(base + GICD_ICACTIVER + (n << 2));
}
unsigned int gicd_read_ipriorityr(uintptr_t base, unsigned int id)
{
unsigned n = id >> IPRIORITYR_SHIFT;
return mmio_read_32(base + GICD_IPRIORITYR + (n << 2));
}
unsigned int gicd_read_itargetsr(uintptr_t base, unsigned int id)
{
unsigned n = id >> ITARGETSR_SHIFT;
return mmio_read_32(base + GICD_ITARGETSR + (n << 2));
}
unsigned int gicd_read_icfgr(uintptr_t base, unsigned int id)
{
unsigned n = id >> ICFGR_SHIFT;
return mmio_read_32(base + GICD_ICFGR + (n << 2));
}
unsigned int gicd_read_cpendsgir(uintptr_t base, unsigned int id)
{
unsigned n = id >> CPENDSGIR_SHIFT;
return mmio_read_32(base + GICD_CPENDSGIR + (n << 2));
}
unsigned int gicd_read_spendsgir(uintptr_t base, unsigned int id)
{
unsigned n = id >> SPENDSGIR_SHIFT;
return mmio_read_32(base + GICD_SPENDSGIR + (n << 2));
}
/*******************************************************************************
* GIC Distributor interface accessors for writing entire registers
******************************************************************************/
void gicd_write_igroupr(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> IGROUPR_SHIFT;
mmio_write_32(base + GICD_IGROUPR + (n << 2), val);
}
void gicd_write_isenabler(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> ISENABLER_SHIFT;
mmio_write_32(base + GICD_ISENABLER + (n << 2), val);
}
void gicd_write_icenabler(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> ICENABLER_SHIFT;
mmio_write_32(base + GICD_ICENABLER + (n << 2), val);
}
void gicd_write_ispendr(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> ISPENDR_SHIFT;
mmio_write_32(base + GICD_ISPENDR + (n << 2), val);
}
void gicd_write_icpendr(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> ICPENDR_SHIFT;
mmio_write_32(base + GICD_ICPENDR + (n << 2), val);
}
void gicd_write_isactiver(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> ISACTIVER_SHIFT;
mmio_write_32(base + GICD_ISACTIVER + (n << 2), val);
}
void gicd_write_icactiver(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> ICACTIVER_SHIFT;
mmio_write_32(base + GICD_ICACTIVER + (n << 2), val);
}
void gicd_write_ipriorityr(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> IPRIORITYR_SHIFT;
mmio_write_32(base + GICD_IPRIORITYR + (n << 2), val);
}
void gicd_write_itargetsr(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> ITARGETSR_SHIFT;
mmio_write_32(base + GICD_ITARGETSR + (n << 2), val);
}
void gicd_write_icfgr(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> ICFGR_SHIFT;
mmio_write_32(base + GICD_ICFGR + (n << 2), val);
}
void gicd_write_cpendsgir(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> CPENDSGIR_SHIFT;
mmio_write_32(base + GICD_CPENDSGIR + (n << 2), val);
}
void gicd_write_spendsgir(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> SPENDSGIR_SHIFT;
mmio_write_32(base + GICD_SPENDSGIR + (n << 2), val);
}
/*******************************************************************************
* GIC Distributor interface accessors for individual interrupt manipulation
******************************************************************************/
unsigned int gicd_get_igroupr(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
unsigned int reg_val = gicd_read_igroupr(base, id);
return (reg_val >> bit_num) & 0x1;
}
void gicd_set_igroupr(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
unsigned int reg_val = gicd_read_igroupr(base, id);
gicd_write_igroupr(base, id, reg_val | (1 << bit_num));
}
void gicd_clr_igroupr(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
unsigned int reg_val = gicd_read_igroupr(base, id);
gicd_write_igroupr(base, id, reg_val & ~(1 << bit_num));
}
void gicd_set_isenabler(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << ISENABLER_SHIFT) - 1);
gicd_write_isenabler(base, id, (1 << bit_num));
}
void gicd_set_icenabler(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << ICENABLER_SHIFT) - 1);
gicd_write_icenabler(base, id, (1 << bit_num));
}
void gicd_set_ispendr(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << ISPENDR_SHIFT) - 1);
gicd_write_ispendr(base, id, (1 << bit_num));
}
void gicd_set_icpendr(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << ICPENDR_SHIFT) - 1);
gicd_write_icpendr(base, id, (1 << bit_num));
}
void gicd_set_isactiver(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << ISACTIVER_SHIFT) - 1);
gicd_write_isactiver(base, id, (1 << bit_num));
}
void gicd_set_icactiver(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << ICACTIVER_SHIFT) - 1);
gicd_write_icactiver(base, id, (1 << bit_num));
}
/*
* Make sure that the interrupt's group is set before expecting
* this function to do its job correctly.
*/
void gicd_set_ipriorityr(uintptr_t base, unsigned int id, unsigned int pri)
{
/*
* Enforce ARM recommendation to manage priority values such
* that group1 interrupts always have a lower priority than
* group0 interrupts.
* Note, lower numerical values are higher priorities so the comparison
* checks below are reversed from what might be expected.
*/
assert(gicd_get_igroupr(base, id) == GRP1 ?
pri >= GIC_HIGHEST_NS_PRIORITY &&
pri <= GIC_LOWEST_NS_PRIORITY :
pri >= GIC_HIGHEST_SEC_PRIORITY &&
pri <= GIC_LOWEST_SEC_PRIORITY);
mmio_write_8(base + GICD_IPRIORITYR + id, pri & GIC_PRI_MASK);
}
void gicd_set_itargetsr(uintptr_t base, unsigned int id, unsigned int target)
{
mmio_write_8(base + GICD_ITARGETSR + id, target & GIC_TARGET_CPU_MASK);
}
/*******************************************************************************
* This function allows the interrupt management framework to determine (through
* the platform) which interrupt line (IRQ/FIQ) to use for an interrupt type to
* route it to EL3. The interrupt line is represented as the bit position of the
* IRQ or FIQ bit in the SCR_EL3.
******************************************************************************/
uint32_t gicv2_interrupt_type_to_line(uint32_t cpuif_base, uint32_t type)
{
uint32_t gicc_ctlr;
/* Non-secure interrupts are signalled on the IRQ line always */
if (type == INTR_TYPE_NS)
return __builtin_ctz(SCR_IRQ_BIT);
/*
* Secure interrupts are signalled using the IRQ line if the FIQ_EN
* bit is not set else they are signalled using the FIQ line.
*/
gicc_ctlr = gicc_read_ctlr(cpuif_base);
if (gicc_ctlr & FIQ_EN)
return __builtin_ctz(SCR_FIQ_BIT);
else
return __builtin_ctz(SCR_IRQ_BIT);
}
View File
@ -1,56 +0,0 @@
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <debug.h>
#include <gic_v3.h>
uintptr_t gicv3_get_rdist(uintptr_t gicr_base, u_register_t mpidr)
{
uint32_t cpu_aff, gicr_aff;
uint64_t gicr_typer;
uintptr_t addr;
/* Construct the affinity as used by GICv3. MPIDR and GIC affinity level
* mask is the same.
*/
cpu_aff = ((mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK) <<
GICV3_AFF0_SHIFT;
cpu_aff |= ((mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK) <<
GICV3_AFF1_SHIFT;
cpu_aff |= ((mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK) <<
GICV3_AFF2_SHIFT;
cpu_aff |= ((mpidr >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK) <<
GICV3_AFF3_SHIFT;
addr = gicr_base;
do {
gicr_typer = gicr_read_typer(addr);
gicr_aff = (gicr_typer >> GICR_TYPER_AFF_SHIFT) &
GICR_TYPER_AFF_MASK;
if (cpu_aff == gicr_aff) {
/* Disable this print for now as it appears every time
* when using PSCI CPU_SUSPEND.
* TODO: Print this only the first time for each CPU.
* INFO("GICv3 - Found RDIST for MPIDR(0x%lx) at %p\n",
* mpidr, (void *) addr);
*/
return addr;
}
/* TODO:
* For GICv4 we need to adjust the Base address based on
* GICR_TYPER.VLPIS
*/
addr += (1 << GICR_PCPUBASE_SHIFT);
} while (!(gicr_typer & GICR_TYPER_LAST));
/* If we get here we did not find a match. */
ERROR("GICv3 - Did not find RDIST for CPU with MPIDR 0x%lx\n", mpidr);
return (uintptr_t)NULL;
}
View File
@ -114,43 +114,6 @@ void gicv2_spis_configure_defaults(uintptr_t gicd_base)
gicd_write_icfgr(gicd_base, index, 0U);
}
#if !ERROR_DEPRECATED
/*******************************************************************************
* Helper function to configure secure G0 SPIs.
******************************************************************************/
void gicv2_secure_spis_configure(uintptr_t gicd_base,
unsigned int num_ints,
const unsigned int *sec_intr_list)
{
unsigned int index, irq_num;
/* If `num_ints` is not 0, ensure that `sec_intr_list` is not NULL */
if (num_ints != 0U)
assert(sec_intr_list != NULL);
for (index = 0; index < num_ints; index++) {
irq_num = sec_intr_list[index];
if (irq_num >= MIN_SPI_ID) {
/* Configure this interrupt as a secure interrupt */
gicd_clr_igroupr(gicd_base, irq_num);
/* Set the priority of this interrupt */
gicd_set_ipriorityr(gicd_base,
irq_num,
GIC_HIGHEST_SEC_PRIORITY);
/* Target the secure interrupts to primary CPU */
gicd_set_itargetsr(gicd_base, irq_num,
gicv2_get_cpuif_id(gicd_base));
/* Enable this interrupt */
gicd_set_isenabler(gicd_base, irq_num);
}
}
}
#endif
/*******************************************************************************
* Helper function to configure properties of secure G0 SPIs.
******************************************************************************/
@ -192,56 +155,6 @@ void gicv2_secure_spis_configure_props(uintptr_t gicd_base,
}
}
#if !ERROR_DEPRECATED
/*******************************************************************************
* Helper function to configure secure G0 SGIs and PPIs.
******************************************************************************/
void gicv2_secure_ppi_sgi_setup(uintptr_t gicd_base,
unsigned int num_ints,
const unsigned int *sec_intr_list)
{
unsigned int index, irq_num, sec_ppi_sgi_mask = 0;
/* If `num_ints` is not 0, ensure that `sec_intr_list` is not NULL */
assert(num_ints ? (uintptr_t)sec_intr_list : 1);
/*
* Disable all SGIs (imp. def.)/PPIs before configuring them. This is a
* more scalable approach as it avoids clearing the enable bits in the
* GICD_CTLR.
*/
gicd_write_icenabler(gicd_base, 0, ~0);
/* Setup the default PPI/SGI priorities doing four at a time */
for (index = 0; index < MIN_SPI_ID; index += 4)
gicd_write_ipriorityr(gicd_base,
index,
GICD_IPRIORITYR_DEF_VAL);
for (index = 0; index < num_ints; index++) {
irq_num = sec_intr_list[index];
if (irq_num < MIN_SPI_ID) {
/* We have an SGI or a PPI. They are Group0 at reset */
sec_ppi_sgi_mask |= 1U << irq_num;
/* Set the priority of this interrupt */
gicd_set_ipriorityr(gicd_base,
irq_num,
GIC_HIGHEST_SEC_PRIORITY);
}
}
/*
* Invert the bitmask to create a mask for non-secure PPIs and
* SGIs. Program the GICD_IGROUPR0 with this bit mask.
*/
gicd_write_igroupr(gicd_base, 0, ~sec_ppi_sgi_mask);
/* Enable the Group 0 SGIs and PPIs */
gicd_write_isenabler(gicd_base, 0, sec_ppi_sgi_mask);
}
#endif
/*******************************************************************************
* Helper function to configure properties of secure G0 SGIs and PPIs.
******************************************************************************/
View File
@ -79,27 +79,9 @@ void gicv2_pcpu_distif_init(void)
assert(driver_data != NULL);
assert(driver_data->gicd_base != 0U);
#if !ERROR_DEPRECATED
if (driver_data->interrupt_props != NULL) {
#endif
gicv2_secure_ppi_sgi_setup_props(driver_data->gicd_base,
driver_data->interrupt_props,
driver_data->interrupt_props_num);
#if !ERROR_DEPRECATED
} else {
/*
* Suppress deprecated declaration warnings in compatibility
* function
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
assert(driver_data->g0_interrupt_array);
gicv2_secure_ppi_sgi_setup(driver_data->gicd_base,
driver_data->g0_interrupt_num,
driver_data->g0_interrupt_array);
#pragma GCC diagnostic pop
}
#endif
gicv2_secure_ppi_sgi_setup_props(driver_data->gicd_base,
driver_data->interrupt_props,
driver_data->interrupt_props_num);
/* Enable G0 interrupts if not already */
ctlr = gicd_read_ctlr(driver_data->gicd_base);
@ -129,30 +111,10 @@ void gicv2_distif_init(void)
/* Set the default attribute of all SPIs */
gicv2_spis_configure_defaults(driver_data->gicd_base);
#if !ERROR_DEPRECATED
if (driver_data->interrupt_props != NULL) {
#endif
gicv2_secure_spis_configure_props(driver_data->gicd_base,
driver_data->interrupt_props,
driver_data->interrupt_props_num);
#if !ERROR_DEPRECATED
} else {
/*
* Suppress deprecated declaration warnings in compatibility
* function
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
gicv2_secure_spis_configure_props(driver_data->gicd_base,
driver_data->interrupt_props,
driver_data->interrupt_props_num);
assert(driver_data->g0_interrupt_array);
/* Configure the G0 SPIs */
gicv2_secure_spis_configure(driver_data->gicd_base,
driver_data->g0_interrupt_num,
driver_data->g0_interrupt_array);
#pragma GCC diagnostic pop
}
#endif
/* Re-enable the secure SPIs now that they have been configured */
gicd_write_ctlr(driver_data->gicd_base, ctlr | CTLR_ENABLE_G0_BIT);
@ -169,35 +131,8 @@ void gicv2_driver_init(const gicv2_driver_data_t *plat_driver_data)
assert(plat_driver_data->gicd_base != 0U);
assert(plat_driver_data->gicc_base != 0U);
#if !ERROR_DEPRECATED
if (plat_driver_data->interrupt_props == NULL) {
/* Interrupt properties array size must be 0 */
assert(plat_driver_data->interrupt_props_num == 0);
/*
* Suppress deprecated declaration warnings in compatibility
* function
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
/*
* If there are no interrupts of a particular type, then the
* number of interrupts of that type should be 0 and vice-versa.
*/
assert(plat_driver_data->g0_interrupt_array ?
plat_driver_data->g0_interrupt_num :
plat_driver_data->g0_interrupt_num == 0);
#pragma GCC diagnostic pop
WARN("Using deprecated integer interrupt array in "
"gicv2_driver_data_t\n");
WARN("Please migrate to using an interrupt_prop_t array\n");
}
#else
assert(plat_driver_data->interrupt_props_num > 0 ?
plat_driver_data->interrupt_props != NULL : 1);
#endif
/* Ensure that this is a GICv2 system */
gic_version = gicd_read_pidr2(plat_driver_data->gicd_base);
View File
@ -15,14 +15,6 @@
* Private function prototypes
******************************************************************************/
void gicv2_spis_configure_defaults(uintptr_t gicd_base);
#if !ERROR_DEPRECATED
void gicv2_secure_spis_configure(uintptr_t gicd_base,
unsigned int num_ints,
const unsigned int *sec_intr_list);
void gicv2_secure_ppi_sgi_setup(uintptr_t gicd_base,
unsigned int num_ints,
const unsigned int *sec_intr_list);
#endif
void gicv2_secure_spis_configure_props(uintptr_t gicd_base,
const interrupt_prop_t *interrupt_props,
unsigned int interrupt_props_num);
View File
@ -377,56 +377,6 @@ void gicv3_spis_config_defaults(uintptr_t gicd_base)
gicd_write_icfgr(gicd_base, index, 0U);
}
#if !ERROR_DEPRECATED
/*******************************************************************************
* Helper function to configure secure G0 and G1S SPIs.
******************************************************************************/
void gicv3_secure_spis_config(uintptr_t gicd_base,
unsigned int num_ints,
const unsigned int *sec_intr_list,
unsigned int int_grp)
{
unsigned int index, irq_num;
unsigned long long gic_affinity_val;
assert((int_grp == INTR_GROUP1S) || (int_grp == INTR_GROUP0));
/* If `num_ints` is not 0, ensure that `sec_intr_list` is not NULL */
if (num_ints != 0U)
assert(sec_intr_list != NULL);
for (index = 0U; index < num_ints; index++) {
irq_num = sec_intr_list[index];
if (irq_num >= MIN_SPI_ID) {
/* Configure this interrupt as a secure interrupt */
gicd_clr_igroupr(gicd_base, irq_num);
/* Configure this interrupt as G0 or a G1S interrupt */
if (int_grp == INTR_GROUP1S)
gicd_set_igrpmodr(gicd_base, irq_num);
else
gicd_clr_igrpmodr(gicd_base, irq_num);
/* Set the priority of this interrupt */
gicd_set_ipriorityr(gicd_base,
irq_num,
GIC_HIGHEST_SEC_PRIORITY);
/* Target SPIs to the primary CPU */
gic_affinity_val =
gicd_irouter_val_from_mpidr(read_mpidr(), 0U);
gicd_write_irouter(gicd_base,
irq_num,
gic_affinity_val);
/* Enable this interrupt */
gicd_set_isenabler(gicd_base, irq_num);
}
}
}
#endif
/*******************************************************************************
* Helper function to configure properties of secure SPIs
******************************************************************************/
@ -512,47 +462,6 @@ void gicv3_ppi_sgi_config_defaults(uintptr_t gicr_base)
gicr_write_icfgr1(gicr_base, 0U);
}
#if !ERROR_DEPRECATED
/*******************************************************************************
* Helper function to configure secure G0 and G1S SPIs.
******************************************************************************/
void gicv3_secure_ppi_sgi_config(uintptr_t gicr_base,
unsigned int num_ints,
const unsigned int *sec_intr_list,
unsigned int int_grp)
{
unsigned int index, irq_num;
assert((int_grp == INTR_GROUP1S) || (int_grp == INTR_GROUP0));
/* If `num_ints` is not 0, ensure that `sec_intr_list` is not NULL */
if (num_ints != 0U)
assert(sec_intr_list != NULL);
for (index = 0; index < num_ints; index++) {
irq_num = sec_intr_list[index];
if (irq_num < MIN_SPI_ID) {
/* Configure this interrupt as a secure interrupt */
gicr_clr_igroupr0(gicr_base, irq_num);
/* Configure this interrupt as G0 or a G1S interrupt */
if (int_grp == INTR_GROUP1S)
gicr_set_igrpmodr0(gicr_base, irq_num);
else
gicr_clr_igrpmodr0(gicr_base, irq_num);
/* Set the priority of this interrupt */
gicr_set_ipriorityr(gicr_base,
irq_num,
GIC_HIGHEST_SEC_PRIORITY);
/* Enable this interrupt */
gicr_set_isenabler0(gicr_base, irq_num);
}
}
}
#endif
/*******************************************************************************
* Helper function to configure properties of secure G0 and G1S PPIs and SGIs.
******************************************************************************/
View File
@ -67,45 +67,8 @@ void gicv3_driver_init(const gicv3_driver_data_t *plat_driver_data)
assert(IS_IN_EL3());
#if !ERROR_DEPRECATED
if (plat_driver_data->interrupt_props == NULL) {
/* Interrupt properties array size must be 0 */
assert(plat_driver_data->interrupt_props_num == 0);
/*
* Suppress deprecated declaration warnings in compatibility
* function
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
/*
* The platform should provide a list of at least one type of
* interrupt.
*/
assert(plat_driver_data->g0_interrupt_array ||
plat_driver_data->g1s_interrupt_array);
/*
* If there are no interrupts of a particular type, then the
* number of interrupts of that type should be 0 and vice-versa.
*/
assert(plat_driver_data->g0_interrupt_array ?
plat_driver_data->g0_interrupt_num :
plat_driver_data->g0_interrupt_num == 0);
assert(plat_driver_data->g1s_interrupt_array ?
plat_driver_data->g1s_interrupt_num :
plat_driver_data->g1s_interrupt_num == 0);
#pragma GCC diagnostic pop
WARN("Using deprecated integer interrupt arrays in "
"gicv3_driver_data_t\n");
WARN("Please migrate to using interrupt_prop_t arrays\n");
}
#else
assert(plat_driver_data->interrupt_props_num > 0 ?
plat_driver_data->interrupt_props != NULL : 1);
#endif
/* Check for system register support */
#ifdef AARCH32
@ -193,45 +156,10 @@ void gicv3_distif_init(void)
/* Set the default attribute of all SPIs */
gicv3_spis_config_defaults(gicv3_driver_data->gicd_base);
#if !ERROR_DEPRECATED
if (gicv3_driver_data->interrupt_props != NULL) {
#endif
bitmap = gicv3_secure_spis_config_props(
gicv3_driver_data->gicd_base,
gicv3_driver_data->interrupt_props,
gicv3_driver_data->interrupt_props_num);
#if !ERROR_DEPRECATED
} else {
/*
* Suppress deprecated declaration warnings in compatibility
* function
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
assert(gicv3_driver_data->g1s_interrupt_array ||
gicv3_driver_data->g0_interrupt_array);
/* Configure the G1S SPIs */
if (gicv3_driver_data->g1s_interrupt_array) {
gicv3_secure_spis_config(gicv3_driver_data->gicd_base,
gicv3_driver_data->g1s_interrupt_num,
gicv3_driver_data->g1s_interrupt_array,
INTR_GROUP1S);
bitmap |= CTLR_ENABLE_G1S_BIT;
}
/* Configure the G0 SPIs */
if (gicv3_driver_data->g0_interrupt_array) {
gicv3_secure_spis_config(gicv3_driver_data->gicd_base,
gicv3_driver_data->g0_interrupt_num,
gicv3_driver_data->g0_interrupt_array,
INTR_GROUP0);
bitmap |= CTLR_ENABLE_G0_BIT;
}
#pragma GCC diagnostic pop
}
#endif
bitmap = gicv3_secure_spis_config_props(
gicv3_driver_data->gicd_base,
gicv3_driver_data->interrupt_props,
gicv3_driver_data->interrupt_props_num);
/* Enable the secure SPIs now that they have been configured */
gicd_set_ctlr(gicv3_driver_data->gicd_base, bitmap, RWP_TRUE);
@ -266,44 +194,9 @@ void gicv3_rdistif_init(unsigned int proc_num)
/* Set the default attribute of all SGIs and PPIs */
gicv3_ppi_sgi_config_defaults(gicr_base);
#if !ERROR_DEPRECATED
if (gicv3_driver_data->interrupt_props != NULL) {
#endif
bitmap = gicv3_secure_ppi_sgi_config_props(gicr_base,
gicv3_driver_data->interrupt_props,
gicv3_driver_data->interrupt_props_num);
#if !ERROR_DEPRECATED
} else {
/*
* Suppress deprecated declaration warnings in compatibility
* function
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
assert(gicv3_driver_data->g1s_interrupt_array ||
gicv3_driver_data->g0_interrupt_array);
/* Configure the G1S SGIs/PPIs */
if (gicv3_driver_data->g1s_interrupt_array) {
gicv3_secure_ppi_sgi_config(gicr_base,
gicv3_driver_data->g1s_interrupt_num,
gicv3_driver_data->g1s_interrupt_array,
INTR_GROUP1S);
bitmap |= CTLR_ENABLE_G1S_BIT;
}
/* Configure the G0 SGIs/PPIs */
if (gicv3_driver_data->g0_interrupt_array) {
gicv3_secure_ppi_sgi_config(gicr_base,
gicv3_driver_data->g0_interrupt_num,
gicv3_driver_data->g0_interrupt_array,
INTR_GROUP0);
bitmap |= CTLR_ENABLE_G0_BIT;
}
#pragma GCC diagnostic pop
}
#endif
bitmap = gicv3_secure_ppi_sgi_config_props(gicr_base,
gicv3_driver_data->interrupt_props,
gicv3_driver_data->interrupt_props_num);
/* Enable interrupt groups as required, if not already */
if ((ctlr & bitmap) != bitmap)
View File
@ -95,16 +95,6 @@ void gicr_set_icfgr1(uintptr_t base, unsigned int id, unsigned int cfg);
******************************************************************************/
void gicv3_spis_config_defaults(uintptr_t gicd_base);
void gicv3_ppi_sgi_config_defaults(uintptr_t gicr_base);
#if !ERROR_DEPRECATED
void gicv3_secure_spis_config(uintptr_t gicd_base,
unsigned int num_ints,
const unsigned int *sec_intr_list,
unsigned int int_grp);
void gicv3_secure_ppi_sgi_config(uintptr_t gicr_base,
unsigned int num_ints,
const unsigned int *sec_intr_list,
unsigned int int_grp);
#endif
unsigned int gicv3_secure_ppi_sgi_config_props(uintptr_t gicr_base,
const interrupt_prop_t *interrupt_props,
unsigned int interrupt_props_num);
View File
@ -1,9 +0,0 @@
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#if !ERROR_DEPRECATED
#include "./aarch64/pl011_console.S"
#endif
View File
@ -1,11 +0,0 @@
/*
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#if ERROR_DEPRECATED
#error "Using deprecated TZC-400 source file"
#else
#include "../tzc/tzc400.c"
#endif /* ERROR_DEPRECATED */
View File
@ -60,16 +60,6 @@ ifeq (${TF_MBEDTLS_KEY_ALG},)
endif
endif
# If MBEDTLS_KEY_ALG build flag is defined use it to set TF_MBEDTLS_KEY_ALG for
# backward compatibility
ifdef MBEDTLS_KEY_ALG
ifeq (${ERROR_DEPRECATED},1)
$(error "MBEDTLS_KEY_ALG is deprecated. Please use the new build flag TF_MBEDTLS_KEY_ALG")
endif
$(warning "MBEDTLS_KEY_ALG is deprecated. Please use the new build flag TF_MBEDTLS_KEY_ALG")
TF_MBEDTLS_KEY_ALG := ${MBEDTLS_KEY_ALG}
endif
ifeq (${HASH_ALG}, sha384)
TF_MBEDTLS_HASH_ALG_ID := TF_MBEDTLS_SHA384
else ifeq (${HASH_ALG}, sha512)
View File
@ -15,9 +15,11 @@
.globl console_cdns_core_init
.globl console_cdns_core_putc
.globl console_cdns_core_getc
.globl console_cdns_core_flush
.globl console_cdns_putc
.globl console_cdns_getc
.globl console_cdns_flush
/* -----------------------------------------------
* int console_cdns_core_init(uintptr_t base_addr)
@ -87,6 +89,7 @@ endfunc console_cdns_register
.equ console_core_init,console_cdns_core_init
.equ console_core_putc,console_cdns_core_putc
.equ console_core_getc,console_cdns_core_getc
.equ console_core_flush,console_cdns_core_flush
#endif
/* --------------------------------------------------------
@ -188,8 +191,7 @@ func console_cdns_getc
endfunc console_cdns_getc
/* ---------------------------------------------
* int console_core_flush(uintptr_t base_addr)
* DEPRECATED: Not used with MULTI_CONSOLE_API!
* int console_cdns_core_flush(uintptr_t base_addr)
* Function to force a write of all buffered
* data that hasn't been output.
* In : x0 - console base address
@ -197,8 +199,30 @@ endfunc console_cdns_getc
* Clobber list : x0, x1
* ---------------------------------------------
*/
func console_core_flush
func console_cdns_core_flush
#if ENABLE_ASSERTIONS
cmp x0, #0
ASM_ASSERT(ne)
#endif /* ENABLE_ASSERTIONS */
/* Placeholder */
mov w0, #0
ret
endfunc console_core_flush
endfunc console_cdns_core_flush
/* ---------------------------------------------
* int console_cdns_flush(console_cdns_t *console)
* Function to force a write of all buffered
* data that hasn't been output.
* In : x0 - pointer to console_t structure
* Out : return -1 on error else return 0.
* Clobber list : x0, x1
* ---------------------------------------------
*/
func console_cdns_flush
#if ENABLE_ASSERTIONS
cmp x0, #0
ASM_ASSERT(ne)
#endif /* ENABLE_ASSERTIONS */
ldr x0, [x0, #CONSOLE_T_CDNS_BASE]
b console_cdns_core_flush
endfunc console_cdns_flush
View File
@ -1,9 +0,0 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#if !ERROR_DEPRECATED
#include "./aarch64/cdns_console.S"
#endif
View File
@ -1,9 +0,0 @@
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#if !ERROR_DEPRECATED
#include "./aarch64/console.S"
#endif
View File
@ -1,9 +0,0 @@
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#if !ERROR_DEPRECATED
#include "./aarch64/skeleton_console.S"
#endif
View File
@ -1,9 +0,0 @@
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#if !ERROR_DEPRECATED
#include "./aarch64/16550_console.S"
#endif
View File
@ -10,10 +10,6 @@
/*******************************************************************************
* Mandatory SP_MIN functions
******************************************************************************/
#if !ERROR_DEPRECATED
void sp_min_early_platform_setup(void *from_bl2,
void *plat_params_from_bl2);
#endif
void sp_min_early_platform_setup2(u_register_t arg0, u_register_t arg1,
u_register_t arg2, u_register_t arg3);
void sp_min_platform_setup(void);
View File
@ -97,36 +97,6 @@
.fill \label + (32 * 4) - .
.endm
/*
* This macro verifies that the given vector doesn't exceed the
* architectural limit of 32 instructions. This is meant to be placed
* immediately after the last instruction in the vector. It takes the
* vector entry as the parameter
*/
.macro check_vector_size since
#if ERROR_DEPRECATED
.error "check_vector_size must not be used. Use end_vector_entry instead"
#endif
end_vector_entry \since
.endm
#if ENABLE_PLAT_COMPAT
/*
* This macro calculates the base address of an MP stack using the
* platform_get_core_pos() index, the name of the stack storage and
* the size of each stack
* In: X0 = MPIDR of CPU whose stack is wanted
* Out: X0 = physical address of stack base
* Clobber: X30, X1, X2
*/
.macro get_mp_stack _name, _size
bl platform_get_core_pos
ldr x2, =(\_name + \_size)
mov x1, #\_size
madd x0, x0, x1, x2
.endm
#endif
/*
* This macro calculates the base address of the current CPU's MP stack
* using the plat_my_core_pos() index, the name of the stack storage
View File
@ -107,10 +107,6 @@ IMPORT_SYM(unsigned long, __COHERENT_RAM_END__, BL_COHERENT_RAM_END);
typedef struct meminfo {
uintptr_t total_base;
size_t total_size;
#if !LOAD_IMAGE_V2
uintptr_t free_base;
size_t free_size;
#endif
} meminfo_t;
/*****************************************************************************
@ -124,9 +120,7 @@ typedef struct image_info {
param_header_t h;
uintptr_t image_base; /* physical address of base of image */
uint32_t image_size; /* bytes read from image file */
#if LOAD_IMAGE_V2
uint32_t image_max_size;
#endif
} image_info_t;
/*****************************************************************************
@ -145,7 +139,6 @@ typedef struct image_desc {
entry_point_info_t ep_info;
} image_desc_t;
#if LOAD_IMAGE_V2
/* BL image node in the BL image loading sequence */
typedef struct bl_load_info_node {
unsigned int image_id;
@ -176,33 +169,6 @@ typedef struct bl_params {
bl_params_node_t *head;
} bl_params_t;
#else /* LOAD_IMAGE_V2 */
/*******************************************************************************
* This structure represents the superset of information that can be passed to
* BL31 e.g. while passing control to it from BL2. The BL32 parameters will be
* populated only if BL2 detects its presence. A pointer to a structure of this
* type should be passed in X0 to BL31's cold boot entrypoint.
*
* Use of this structure and the X0 parameter is not mandatory: the BL31
* platform code can use other mechanisms to provide the necessary information
* about BL32 and BL33 to the common and SPD code.
*
* BL31 image information is mandatory if this structure is used. If either of
* the optional BL32 and BL33 image information is not provided, this is
* indicated by the respective image_info pointers being zero.
******************************************************************************/
typedef struct bl31_params {
param_header_t h;
image_info_t *bl31_image_info;
entry_point_info_t *bl32_ep_info;
image_info_t *bl32_image_info;
entry_point_info_t *bl33_ep_info;
image_info_t *bl33_image_info;
} bl31_params_t;
#endif /* LOAD_IMAGE_V2 */
/*******************************************************************************
* Function & variable prototypes
******************************************************************************/
@ -211,27 +177,8 @@ size_t get_image_size(unsigned int image_id);
int is_mem_free(uintptr_t free_base, size_t free_size,
uintptr_t addr, size_t size);
#if LOAD_IMAGE_V2
int load_auth_image(unsigned int image_id, image_info_t *image_data);
#else
int load_image(meminfo_t *mem_layout,
unsigned int image_id,
uintptr_t image_base,
image_info_t *image_data,
entry_point_info_t *entry_point_info);
int load_auth_image(meminfo_t *mem_layout,
unsigned int image_id,
uintptr_t image_base,
image_info_t *image_data,
entry_point_info_t *entry_point_info);
void reserve_mem(uintptr_t *free_base, size_t *free_size,
uintptr_t addr, size_t size);
#endif /* LOAD_IMAGE_V2 */
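With the older overloads gone, a caller pre-fills the destination window in image_info_t (image_base plus image_max_size) and lets load_auth_image() do the rest; there is no meminfo free-list bookkeeping any more. A minimal sketch, not part of this diff, with BL32_BASE/BL32_LIMIT standing in for platform-defined values:

#include <bl_common.h>
#include <debug.h>
#include <platform_def.h>

void load_bl32_sketch(void)
{
	image_info_t bl32_info;

	SET_PARAM_HEAD(&bl32_info, PARAM_IMAGE_BINARY, VERSION_2, 0);
	bl32_info.image_base = BL32_BASE;		/* placeholder */
	bl32_info.image_max_size = BL32_LIMIT - BL32_BASE;

	if (load_auth_image(BL32_IMAGE_ID, &bl32_info) != 0) {
		ERROR("Failed to load BL32\n");
		panic();
	}
	INFO("BL32 loaded: %u bytes at 0x%lx\n", bl32_info.image_size,
	     (unsigned long)bl32_info.image_base);
}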
#if TRUSTED_BOARD_BOOT && defined(DYN_DISABLE_AUTH)
/*
* API to dynamically disable authentication. Only meant for development

View File

@ -8,7 +8,6 @@
#include <bl_common.h>
#if LOAD_IMAGE_V2
/* Following structure is used to store BL ep/image info. */
typedef struct bl_mem_params_node {
unsigned int image_id;
@ -38,5 +37,4 @@ bl_load_info_t *get_bl_load_info_from_mem_params_desc(void);
bl_params_t *get_next_bl_params_from_mem_params_desc(void);
void populate_next_bl_params_config(bl_params_t *bl2_to_next_bl_params);
#endif /* LOAD_IMAGE_V2 */
#endif /* __DESC_IMAGE_LOAD_H__ */
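For reference, the descriptor-driven flow these helpers support lets a platform list its images once; the generic BL2 code then derives both the load order and the parameters handed to the next stage. A trimmed sketch, not part of this diff, with addresses and the PLAT_XYZ_NS_IMAGE_* macros as placeholders:

#include <bl_common.h>
#include <desc_image_load.h>
#include <platform_def.h>

static bl_mem_params_node_t plat_xyz_image_descs[] = {
	{
		.image_id = BL31_IMAGE_ID,
		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
			entry_point_info_t, SECURE | EXECUTABLE | EP_FIRST_EXE),
		.ep_info.pc = BL31_BASE,
		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2,
			image_info_t, IMAGE_ATTRIB_PLAT_SETUP),
		.image_info.image_base = BL31_BASE,
		.image_info.image_max_size = BL31_LIMIT - BL31_BASE,
		/* Chaining: BL31 hands control over to BL33. */
		.next_handoff_image_id = BL33_IMAGE_ID,
	},
	{
		.image_id = BL33_IMAGE_ID,
		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
			entry_point_info_t, NON_SECURE | EXECUTABLE),
		.ep_info.pc = PLAT_XYZ_NS_IMAGE_BASE,		/* placeholder */
		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2,
			image_info_t, 0),
		.image_info.image_base = PLAT_XYZ_NS_IMAGE_BASE,
		.image_info.image_max_size = PLAT_XYZ_NS_IMAGE_SIZE,
		.next_handoff_image_id = INVALID_IMAGE_ID,
	},
};

REGISTER_BL_IMAGE_DESCS(plat_xyz_image_descs)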

View File

@ -1,34 +0,0 @@
/*
* Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __ARM_GIC_H__
#define __ARM_GIC_H__
#include <cdefs.h>
#include <stdint.h>
/*******************************************************************************
* Function declarations
******************************************************************************/
void arm_gic_init(uintptr_t gicc_base,
uintptr_t gicd_base,
uintptr_t gicr_base,
const unsigned int *irq_sec_ptr,
unsigned int num_irqs) __deprecated;
void arm_gic_setup(void) __deprecated;
void arm_gic_cpuif_deactivate(void) __deprecated;
void arm_gic_cpuif_setup(void) __deprecated;
void arm_gic_pcpu_distif_setup(void) __deprecated;
uint32_t arm_gic_interrupt_type_to_line(uint32_t type,
uint32_t security_state) __deprecated;
uint32_t arm_gic_get_pending_interrupt_type(void) __deprecated;
uint32_t arm_gic_get_pending_interrupt_id(void) __deprecated;
uint32_t arm_gic_acknowledge_interrupt(void) __deprecated;
void arm_gic_end_of_interrupt(uint32_t id) __deprecated;
uint32_t arm_gic_get_interrupt_type(uint32_t id) __deprecated;
#endif /* __ARM_GIC_H__ */

View File

@ -1,76 +0,0 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __CCI_400_H__
#define __CCI_400_H__
/**************************************************************
* THIS DRIVER IS DEPRECATED. Please use the driver in cci.h
**************************************************************/
#if ERROR_DEPRECATED
#error " The CCI-400 specific driver is deprecated."
#endif
/* Slave interface offsets from PERIPHBASE */
#define SLAVE_IFACE4_OFFSET 0x5000
#define SLAVE_IFACE3_OFFSET 0x4000
#define SLAVE_IFACE2_OFFSET 0x3000
#define SLAVE_IFACE1_OFFSET 0x2000
#define SLAVE_IFACE0_OFFSET 0x1000
#define SLAVE_IFACE_OFFSET(index) SLAVE_IFACE0_OFFSET + \
(0x1000 * (index))
/* Control and ID register offsets */
#define CTRL_OVERRIDE_REG 0x0
#define SPEC_CTRL_REG 0x4
#define SECURE_ACCESS_REG 0x8
#define STATUS_REG 0xc
#define IMPRECISE_ERR_REG 0x10
#define PERFMON_CTRL_REG 0x100
/* Slave interface register offsets */
#define SNOOP_CTRL_REG 0x0
#define SH_OVERRIDE_REG 0x4
#define READ_CHNL_QOS_VAL_OVERRIDE_REG 0x100
#define WRITE_CHNL_QOS_VAL_OVERRIDE_REG 0x104
#define QOS_CTRL_REG 0x10c
#define MAX_OT_REG 0x110
#define TARGET_LATENCY_REG 0x130
#define LATENCY_REGULATION_REG 0x134
#define QOS_RANGE_REG 0x138
/* Snoop Control register bit definitions */
#define DVM_EN_BIT (1 << 1)
#define SNOOP_EN_BIT (1 << 0)
/* Status register bit definitions */
#define CHANGE_PENDING_BIT (1 << 0)
#ifndef __ASSEMBLY__
#include <stdint.h>
/* Function declarations */
/*
* The CCI-400 driver must be initialized with the base address of the
* CCI-400 device in the platform memory map, and the cluster indices for
* the CCI-400 slave interfaces 3 and 4 respectively. These are the fully
* coherent ACE slave interfaces of CCI-400.
* The cluster indices must either be 0 or 1, corresponding to the level 1
* affinity instance of the mpidr representing the cluster. A negative cluster
* index indicates that no cluster is present on that slave interface.
*/
void cci_init(uintptr_t cci_base,
int slave_iface3_cluster_ix,
int slave_iface4_cluster_ix) __deprecated;
void cci_enable_cluster_coherency(unsigned long mpidr) __deprecated;
void cci_disable_cluster_coherency(unsigned long mpidr) __deprecated;
#endif /* __ASSEMBLY__ */
#endif /* __CCI_400_H__ */

View File

@ -1,280 +0,0 @@
/*
* Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __GIC_V2_H__
#define __GIC_V2_H__
/* The macros required here are additional to those in gic_common.h. */
#include <gic_common.h>
/******************************************************************************
* THIS DRIVER IS DEPRECATED. For GICv2 systems, use the driver in gicv2.h
* and for GICv3 systems, use the driver in gicv3.h.
*****************************************************************************/
#if ERROR_DEPRECATED
#error " The legacy ARM GIC driver is deprecated."
#endif
#define GIC400_NUM_SPIS U(480)
#define MAX_PPIS U(14)
#define MAX_SGIS U(16)
#define GRP0 U(0)
#define GRP1 U(1)
#define GIC_TARGET_CPU_MASK U(0xff)
#define ENABLE_GRP0 (U(1) << 0)
#define ENABLE_GRP1 (U(1) << 1)
/* Distributor interface definitions */
#define GICD_ITARGETSR U(0x800)
#define GICD_SGIR U(0xF00)
#define GICD_CPENDSGIR U(0xF10)
#define GICD_SPENDSGIR U(0xF20)
#define CPENDSGIR_SHIFT U(2)
#define SPENDSGIR_SHIFT CPENDSGIR_SHIFT
/* GICD_TYPER bit definitions */
#define IT_LINES_NO_MASK U(0x1f)
/* Physical CPU Interface registers */
#define GICC_CTLR U(0x0)
#define GICC_PMR U(0x4)
#define GICC_BPR U(0x8)
#define GICC_IAR U(0xC)
#define GICC_EOIR U(0x10)
#define GICC_RPR U(0x14)
#define GICC_HPPIR U(0x18)
#define GICC_AHPPIR U(0x28)
#define GICC_IIDR U(0xFC)
#define GICC_DIR U(0x1000)
#define GICC_PRIODROP GICC_EOIR
/* Common CPU Interface definitions */
#define INT_ID_MASK U(0x3ff)
/* GICC_CTLR bit definitions */
#define EOI_MODE_NS (U(1) << 10)
#define EOI_MODE_S (U(1) << 9)
#define IRQ_BYP_DIS_GRP1 (U(1) << 8)
#define FIQ_BYP_DIS_GRP1 (U(1) << 7)
#define IRQ_BYP_DIS_GRP0 (U(1) << 6)
#define FIQ_BYP_DIS_GRP0 (U(1) << 5)
#define CBPR (U(1) << 4)
#define FIQ_EN (U(1) << 3)
#define ACK_CTL (U(1) << 2)
/* GICC_IIDR bit masks and shifts */
#define GICC_IIDR_PID_SHIFT U(20)
#define GICC_IIDR_ARCH_SHIFT U(16)
#define GICC_IIDR_REV_SHIFT U(12)
#define GICC_IIDR_IMP_SHIFT U(0)
#define GICC_IIDR_PID_MASK U(0xfff)
#define GICC_IIDR_ARCH_MASK U(0xf)
#define GICC_IIDR_REV_MASK U(0xf)
#define GICC_IIDR_IMP_MASK U(0xfff)
/* HYP view virtual CPU Interface registers */
#define GICH_CTL U(0x0)
#define GICH_VTR U(0x4)
#define GICH_ELRSR0 U(0x30)
#define GICH_ELRSR1 U(0x34)
#define GICH_APR0 U(0xF0)
#define GICH_LR_BASE U(0x100)
/* Virtual CPU Interface registers */
#define GICV_CTL U(0x0)
#define GICV_PRIMASK U(0x4)
#define GICV_BP U(0x8)
#define GICV_INTACK U(0xC)
#define GICV_EOI U(0x10)
#define GICV_RUNNINGPRI U(0x14)
#define GICV_HIGHESTPEND U(0x18)
#define GICV_DEACTIVATE U(0x1000)
#ifndef __ASSEMBLY__
#include <mmio.h>
#include <stdint.h>
/*******************************************************************************
* GIC Distributor function prototypes
******************************************************************************/
unsigned int gicd_read_igroupr(uintptr_t, unsigned int);
unsigned int gicd_read_isenabler(uintptr_t, unsigned int);
unsigned int gicd_read_icenabler(uintptr_t, unsigned int);
unsigned int gicd_read_ispendr(uintptr_t, unsigned int);
unsigned int gicd_read_icpendr(uintptr_t, unsigned int);
unsigned int gicd_read_isactiver(uintptr_t, unsigned int);
unsigned int gicd_read_icactiver(uintptr_t, unsigned int);
unsigned int gicd_read_ipriorityr(uintptr_t, unsigned int);
unsigned int gicd_read_itargetsr(uintptr_t, unsigned int);
unsigned int gicd_read_icfgr(uintptr_t, unsigned int);
unsigned int gicd_read_cpendsgir(uintptr_t, unsigned int);
unsigned int gicd_read_spendsgir(uintptr_t, unsigned int);
void gicd_write_igroupr(uintptr_t, unsigned int, unsigned int);
void gicd_write_isenabler(uintptr_t, unsigned int, unsigned int);
void gicd_write_icenabler(uintptr_t, unsigned int, unsigned int);
void gicd_write_ispendr(uintptr_t, unsigned int, unsigned int);
void gicd_write_icpendr(uintptr_t, unsigned int, unsigned int);
void gicd_write_isactiver(uintptr_t, unsigned int, unsigned int);
void gicd_write_icactiver(uintptr_t, unsigned int, unsigned int);
void gicd_write_ipriorityr(uintptr_t, unsigned int, unsigned int);
void gicd_write_itargetsr(uintptr_t, unsigned int, unsigned int);
void gicd_write_icfgr(uintptr_t, unsigned int, unsigned int);
void gicd_write_cpendsgir(uintptr_t, unsigned int, unsigned int);
void gicd_write_spendsgir(uintptr_t, unsigned int, unsigned int);
unsigned int gicd_get_igroupr(uintptr_t, unsigned int);
void gicd_set_igroupr(uintptr_t, unsigned int);
void gicd_clr_igroupr(uintptr_t, unsigned int);
void gicd_set_isenabler(uintptr_t, unsigned int);
void gicd_set_icenabler(uintptr_t, unsigned int);
void gicd_set_ispendr(uintptr_t, unsigned int);
void gicd_set_icpendr(uintptr_t, unsigned int);
void gicd_set_isactiver(uintptr_t, unsigned int);
void gicd_set_icactiver(uintptr_t, unsigned int);
void gicd_set_ipriorityr(uintptr_t, unsigned int, unsigned int);
void gicd_set_itargetsr(uintptr_t, unsigned int, unsigned int);
/*******************************************************************************
* GIC Distributor interface accessors for reading entire registers
******************************************************************************/
static inline unsigned int gicd_read_ctlr(uintptr_t base)
{
return mmio_read_32(base + GICD_CTLR);
}
static inline unsigned int gicd_read_typer(uintptr_t base)
{
return mmio_read_32(base + GICD_TYPER);
}
static inline unsigned int gicd_read_sgir(uintptr_t base)
{
return mmio_read_32(base + GICD_SGIR);
}
/*******************************************************************************
* GIC Distributor interface accessors for writing entire registers
******************************************************************************/
static inline void gicd_write_ctlr(uintptr_t base, unsigned int val)
{
mmio_write_32(base + GICD_CTLR, val);
}
static inline void gicd_write_sgir(uintptr_t base, unsigned int val)
{
mmio_write_32(base + GICD_SGIR, val);
}
/*******************************************************************************
* GIC CPU interface accessors for reading entire registers
******************************************************************************/
static inline unsigned int gicc_read_ctlr(uintptr_t base)
{
return mmio_read_32(base + GICC_CTLR);
}
static inline unsigned int gicc_read_pmr(uintptr_t base)
{
return mmio_read_32(base + GICC_PMR);
}
static inline unsigned int gicc_read_BPR(uintptr_t base)
{
return mmio_read_32(base + GICC_BPR);
}
static inline unsigned int gicc_read_IAR(uintptr_t base)
{
return mmio_read_32(base + GICC_IAR);
}
static inline unsigned int gicc_read_EOIR(uintptr_t base)
{
return mmio_read_32(base + GICC_EOIR);
}
static inline unsigned int gicc_read_hppir(uintptr_t base)
{
return mmio_read_32(base + GICC_HPPIR);
}
static inline unsigned int gicc_read_ahppir(uintptr_t base)
{
return mmio_read_32(base + GICC_AHPPIR);
}
static inline unsigned int gicc_read_dir(uintptr_t base)
{
return mmio_read_32(base + GICC_DIR);
}
static inline unsigned int gicc_read_iidr(uintptr_t base)
{
return mmio_read_32(base + GICC_IIDR);
}
/*******************************************************************************
* GIC CPU interface accessors for writing entire registers
******************************************************************************/
static inline void gicc_write_ctlr(uintptr_t base, unsigned int val)
{
mmio_write_32(base + GICC_CTLR, val);
}
static inline void gicc_write_pmr(uintptr_t base, unsigned int val)
{
mmio_write_32(base + GICC_PMR, val);
}
static inline void gicc_write_BPR(uintptr_t base, unsigned int val)
{
mmio_write_32(base + GICC_BPR, val);
}
static inline void gicc_write_IAR(uintptr_t base, unsigned int val)
{
mmio_write_32(base + GICC_IAR, val);
}
static inline void gicc_write_EOIR(uintptr_t base, unsigned int val)
{
mmio_write_32(base + GICC_EOIR, val);
}
static inline void gicc_write_hppir(uintptr_t base, unsigned int val)
{
mmio_write_32(base + GICC_HPPIR, val);
}
static inline void gicc_write_dir(uintptr_t base, unsigned int val)
{
mmio_write_32(base + GICC_DIR, val);
}
/*******************************************************************************
* Prototype of function to map an interrupt type to the interrupt line used to
* signal it.
******************************************************************************/
uint32_t gicv2_interrupt_type_to_line(uint32_t cpuif_base, uint32_t type);
#endif /*__ASSEMBLY__*/
#endif /* __GIC_V2_H__ */

View File

@ -1,73 +0,0 @@
/*
* Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __GIC_V3_H__
#define __GIC_V3_H__
/******************************************************************************
* THIS DRIVER IS DEPRECATED. For GICv2 systems, use the driver in gicv2.h
* and for GICv3 systems, use the driver in gicv3.h.
*****************************************************************************/
#if ERROR_DEPRECATED
#error " The legacy ARM GIC driver is deprecated."
#endif
#include <mmio.h>
#include <stdint.h>
/* GICv3 Re-distributor interface registers & shifts */
#define GICR_PCPUBASE_SHIFT 0x11
#define GICR_TYPER 0x08
#define GICR_WAKER 0x14
/* GICR_WAKER bit definitions */
#define WAKER_CA (U(1) << 2)
#define WAKER_PS (U(1) << 1)
/* GICR_TYPER bit definitions */
#define GICR_TYPER_AFF_SHIFT 32
#define GICR_TYPER_AFF_MASK 0xffffffff
#define GICR_TYPER_LAST (U(1) << 4)
/* GICv3 ICC_SRE register bit definitions*/
#define ICC_SRE_EN (U(1) << 3)
#define ICC_SRE_SRE (U(1) << 0)
/*******************************************************************************
* GICv3 defintions
******************************************************************************/
#define GICV3_AFFLVL_MASK 0xff
#define GICV3_AFF0_SHIFT 0
#define GICV3_AFF1_SHIFT 8
#define GICV3_AFF2_SHIFT 16
#define GICV3_AFF3_SHIFT 24
#define GICV3_AFFINITY_MASK 0xffffffff
/*******************************************************************************
* Function prototypes
******************************************************************************/
uintptr_t gicv3_get_rdist(uintptr_t gicr_base, u_register_t mpidr);
/*******************************************************************************
* GIC Redistributor interface accessors
******************************************************************************/
static inline uint32_t gicr_read_waker(uintptr_t base)
{
return mmio_read_32(base + GICR_WAKER);
}
static inline void gicr_write_waker(uintptr_t base, uint32_t val)
{
mmio_write_32(base + GICR_WAKER, val);
}
static inline uint64_t gicr_read_typer(uintptr_t base)
{
return mmio_read_64(base + GICR_TYPER);
}
#endif /* __GIC_V3_H__ */

View File

@ -7,6 +7,8 @@
#ifndef __GICV2_H__
#define __GICV2_H__
#include <gic_common.h>
/*******************************************************************************
* GICv2 miscellaneous definitions
******************************************************************************/
@ -132,14 +134,6 @@
* The 'gicc_base' field contains the base address of the CPU Interface
* programmer's view.
*
* The 'g0_interrupt_array' field is a pointer to an array in which each entry
* corresponds to an ID of a Group 0 interrupt. This field is ignored when
* 'interrupt_props' field is used. This field is deprecated.
*
* The 'g0_interrupt_num' field contains the number of entries in the
* 'g0_interrupt_array'. This field is ignored when 'interrupt_props' field is
* used. This field is deprecated.
*
* The 'target_masks' is a pointer to an array containing 'target_masks_num'
* elements. The GIC driver will populate the array with per-PE target mask to
* use to when targeting interrupts.
@ -155,10 +149,6 @@
typedef struct gicv2_driver_data {
uintptr_t gicd_base;
uintptr_t gicc_base;
#if !ERROR_DEPRECATED
unsigned int g0_interrupt_num __deprecated;
const unsigned int *g0_interrupt_array __deprecated;
#endif
unsigned int *target_masks;
unsigned int target_masks_num;
const interrupt_prop_t *interrupt_props;
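With the g0_interrupt_array fields gone, a platform describes its secure interrupts once through interrupt_props and passes the same structure to gicv2_driver_init(); the GICv3 driver data below follows the same pattern. A minimal sketch, not part of this diff, with interrupt IDs and base addresses as placeholders:

#include <gicv2.h>
#include <interrupt_props.h>
#include <platform_def.h>
#include <utils_def.h>

static const interrupt_prop_t plat_xyz_interrupt_props[] = {
	INTR_PROP_DESC(8, GIC_HIGHEST_SEC_PRIORITY, GICV2_INTR_GROUP0,
		       GIC_INTR_CFG_LEVEL),
	INTR_PROP_DESC(29, GIC_HIGHEST_SEC_PRIORITY, GICV2_INTR_GROUP0,
		       GIC_INTR_CFG_LEVEL),
};

static unsigned int plat_xyz_target_masks[PLATFORM_CORE_COUNT];

static const gicv2_driver_data_t plat_xyz_gic_data = {
	.gicd_base = PLAT_XYZ_GICD_BASE,	/* placeholder */
	.gicc_base = PLAT_XYZ_GICC_BASE,	/* placeholder */
	.interrupt_props = plat_xyz_interrupt_props,
	.interrupt_props_num = ARRAY_SIZE(plat_xyz_interrupt_props),
	.target_masks = plat_xyz_target_masks,
	.target_masks_num = ARRAY_SIZE(plat_xyz_target_masks),
};

void plat_xyz_gic_driver_init(void)
{
	gicv2_driver_init(&plat_xyz_gic_data);
}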

View File

@ -276,23 +276,6 @@ static inline void gicv3_end_of_interrupt(unsigned int id)
* The 'gicr_base' field contains the base address of the Re-distributor
* interface programmer's view.
*
* The 'g0_interrupt_array' field is a pointer to an array in which each entry
* corresponds to an ID of a Group 0 interrupt. This field is ignored when
* 'interrupt_props' field is used. This field is deprecated.
*
* The 'g0_interrupt_num' field contains the number of entries in the
* 'g0_interrupt_array'. This field is ignored when 'interrupt_props' field is
* used. This field is deprecated.
*
* The 'g1s_interrupt_array' field is a pointer to an array in which each entry
* corresponds to an ID of a Group 1 interrupt. This field is ignored when
* 'interrupt_props' field is used. This field is deprecated.
*
* The 'g1s_interrupt_num' field contains the number of entries in the
* 'g1s_interrupt_array'. This field must be 0 if 'interrupt_props' field is
* used. This field is ignored when 'interrupt_props' field is used. This field
* is deprecated.
*
* The 'interrupt_props' field is a pointer to an array that enumerates secure
* interrupts and their properties. If this field is not NULL, both
* 'g0_interrupt_array' and 'g1s_interrupt_array' fields are ignored.
@ -326,12 +309,6 @@ typedef unsigned int (*mpidr_hash_fn)(u_register_t mpidr);
typedef struct gicv3_driver_data {
uintptr_t gicd_base;
uintptr_t gicr_base;
#if !ERROR_DEPRECATED
unsigned int g0_interrupt_num __deprecated;
unsigned int g1s_interrupt_num __deprecated;
const unsigned int *g0_interrupt_array __deprecated;
const unsigned int *g1s_interrupt_array __deprecated;
#endif
const interrupt_prop_t *interrupt_props;
unsigned int interrupt_props_num;
unsigned int rdistif_num;

View File

@ -85,16 +85,6 @@
(TZC_400_REGION_ATTR_F_EN_MASK << \
TZC_REGION_ATTR_F_EN_SHIFT)
/*
* Define some macros for backward compatibility with existing tzc400 clients.
*/
#if !ERROR_DEPRECATED
#define REG_ATTR_FILTER_BIT(x) ((1 << x) \
<< TZC_REGION_ATTR_F_EN_SHIFT)
#define REG_ATTR_FILTER_BIT_ALL (TZC_400_REGION_ATTR_F_EN_MASK << \
TZC_REGION_ATTR_F_EN_SHIFT)
#endif /* __ERROR_DEPRECATED__ */
/*
* All TZC region configuration registers are placed one after another. It
* depicts size of block of registers for programming each region.
@ -123,24 +113,6 @@ void tzc400_set_action(tzc_action_t action);
void tzc400_enable_filters(void);
void tzc400_disable_filters(void);
/*
* Deprecated APIs
*/
static inline void tzc_init(uintptr_t base) __deprecated;
static inline void tzc_configure_region0(
tzc_region_attributes_t sec_attr,
unsigned int ns_device_access) __deprecated;
static inline void tzc_configure_region(
unsigned int filters,
int region,
unsigned long long region_base,
unsigned long long region_top,
tzc_region_attributes_t sec_attr,
unsigned int ns_device_access) __deprecated;
static inline void tzc_set_action(tzc_action_t action) __deprecated;
static inline void tzc_enable_filters(void) __deprecated;
static inline void tzc_disable_filters(void) __deprecated;
static inline void tzc_init(uintptr_t base)
{
tzc400_init(base);
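Clients now call the tzc400_* functions directly; the inline tzc_* wrappers remain (as the context above shows) but are no longer tagged __deprecated. A minimal programming sequence, not part of this diff, with the base address, region extents and NSAID mask as placeholders:

#include <platform_def.h>
#include <tzc400.h>

void plat_xyz_security_setup(void)
{
	tzc400_init(PLAT_XYZ_TZC400_BASE);		/* placeholder */

	/* Region 0 covers everything not in another region: secure-only. */
	tzc400_configure_region0(TZC_REGION_S_RDWR, 0);

	/* Region 1: open DRAM to both worlds on all filter units. */
	tzc400_configure_region(TZC_400_REGION_ATTR_FILTER_BIT_ALL, 1,
				PLAT_XYZ_DRAM_BASE, PLAT_XYZ_DRAM_END,
				TZC_REGION_S_RDWR,
				PLAT_XYZ_TZC_NS_ACCESS);	/* placeholders */

	tzc400_set_action(TZC_ACTION_ERR);
	tzc400_enable_filters();
}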

View File

@ -30,9 +30,6 @@
/* Prevent mbed TLS from using snprintf so that it can use tf_snprintf. */
#define MBEDTLS_PLATFORM_SNPRINTF_ALT
#if !ERROR_DEPRECATED
#define MBEDTLS_PKCS1_V15
#endif
#define MBEDTLS_PKCS1_V21
#define MBEDTLS_X509_ALLOW_UNSUPPORTED_CRITICAL_EXTENSION

View File

@ -126,10 +126,6 @@
#define SDCR_SPD_ENABLE U(0x3)
#define SDCR_RESET_VAL U(0x0)
#if !ERROR_DEPRECATED
#define SDCR_DEF_VAL SDCR_SPD(SDCR_SPD_DISABLE)
#endif
/* HSCTLR definitions */
#define HSCTLR_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
(U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
@ -220,10 +216,6 @@
#define NSASEDIS_BIT (U(1) << 15)
#define NSTRCDIS_BIT (U(1) << 20)
/* NOTE: correct typo in the definitions */
#if !ERROR_DEPRECATED
#define NASCR_CP11_BIT (U(1) << 11)
#define NASCR_CP10_BIT (U(1) << 10)
#endif
#define NSACR_CP11_BIT (U(1) << 11)
#define NSACR_CP10_BIT (U(1) << 10)
#define NSACR_IMP_DEF_MASK (U(0x7) << 16)

View File

@ -1,14 +0,0 @@
/*
* Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __SMCC_HELPERS_H__
#define __SMCC_HELPERS_H__
#if !ERROR_DEPRECATED
#include <smccc_helpers.h>
#endif
#endif /* __SMCC_HELPERS_H__ */

View File

@ -1,15 +0,0 @@
/*
* Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __SMCC_MACROS_S__
#define __SMCC_MACROS_S__
#if !ERROR_DEPRECATED
#include <smccc_macros.S>
#define smcc_save_gp_mode_regs smccc_save_gp_mode_regs
#endif
#endif /* __SMCC_MACROS_S__ */

View File

@ -245,10 +245,6 @@
#define MDCR_TPM_BIT (U(1) << 6)
#define MDCR_EL3_RESET_VAL U(0x0)
#if !ERROR_DEPRECATED
#define MDCR_DEF_VAL (MDCR_SDD_BIT | MDCR_SPD32(MDCR_SPD32_DISABLE))
#endif
/* MDCR_EL2 definitions */
#define MDCR_EL2_TPMS (U(1) << 14)
#define MDCR_EL2_E2PB(x) ((x) << 12)

View File

@ -1,14 +0,0 @@
/*
* Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __SMCC_HELPERS_H__
#define __SMCC_HELPERS_H__
#if !ERROR_DEPRECATED
#include <smccc_helpers.h>
#endif
#endif /* __SMCC_HELPERS_H__ */

View File

@ -67,16 +67,4 @@
******************************************************************************/
#define CORTEX_A53_L2MERRSR p15, 3, c15
#if !ERROR_DEPRECATED
/*
* These registers were previously wrongly named. Provide previous definitions so
* as not to break platforms that continue using them.
*/
#define CORTEX_A53_ACTLR CORTEX_A53_CPUACTLR
#define CORTEX_A53_ACTLR_ENDCCASCI_SHIFT CORTEX_A53_CPUACTLR_ENDCCASCI_SHIFT
#define CORTEX_A53_ACTLR_ENDCCASCI CORTEX_A53_CPUACTLR_ENDCCASCI
#define CORTEX_A53_ACTLR_DTAH CORTEX_A53_CPUACTLR_DTAH
#endif /* !ERROR_DEPRECATED */
#endif /* __CORTEX_A53_H__ */

View File

@ -79,22 +79,4 @@
******************************************************************************/
#define CORTEX_A57_L2MERRSR p15, 3, c15
#if !ERROR_DEPRECATED
/*
* These registers were previously wrongly named. Provide previous definitions so
* as not to break platforms that continue using them.
*/
#define CORTEX_A57_ACTLR CORTEX_A57_CPUACTLR
#define CORTEX_A57_ACTLR_DIS_LOAD_PASS_DMB CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_DMB
#define CORTEX_A57_ACTLR_GRE_NGRE_AS_NGNRE CORTEX_A57_CPUACTLR_GRE_NGRE_AS_NGNRE
#define CORTEX_A57_ACTLR_DIS_OVERREAD CORTEX_A57_CPUACTLR_DIS_OVERREAD
#define CORTEX_A57_ACTLR_NO_ALLOC_WBWA CORTEX_A57_CPUACTLR_NO_ALLOC_WBWA
#define CORTEX_A57_ACTLR_DCC_AS_DCCI CORTEX_A57_CPUACTLR_DCC_AS_DCCI
#define CORTEX_A57_ACTLR_FORCE_FPSCR_FLUSH CORTEX_A57_CPUACTLR_FORCE_FPSCR_FLUSH
#define CORTEX_A57_ACTLR_DIS_STREAMING CORTEX_A57_CPUACTLR_DIS_STREAMING
#define CORTEX_A57_ACTLR_DIS_L1_STREAMING CORTEX_A57_CPUACTLR_DIS_L1_STREAMING
#define CORTEX_A57_ACTLR_DIS_INDIRECT_PREDICTOR CORTEX_A57_CPUACTLR_DIS_INDIRECT_PREDICTOR
#endif /* !ERROR_DEPRECATED */
#endif /* __CORTEX_A57_H__ */

View File

@ -54,16 +54,4 @@
******************************************************************************/
#define CORTEX_A72_L2MERRSR p15, 3, c15
#if !ERROR_DEPRECATED
/*
* These registers were previously wrongly named. Provide previous definitions so
* as not to break platforms that continue using them.
*/
#define CORTEX_A72_ACTLR CORTEX_A72_CPUACTLR
#define CORTEX_A72_ACTLR_DISABLE_L1_DCACHE_HW_PFTCH CORTEX_A72_CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH
#define CORTEX_A72_ACTLR_NO_ALLOC_WBWA CORTEX_A72_CPUACTLR_NO_ALLOC_WBWA
#define CORTEX_A72_ACTLR_DCC_AS_DCCI CORTEX_A72_CPUACTLR_DCC_AS_DCCI
#endif /* !ERROR_DEPRECATED */
#endif /* __CORTEX_A72_H__ */

View File

@ -71,21 +71,4 @@
******************************************************************************/
#define CORTEX_A53_L2MERRSR_EL1 S3_1_C15_C2_3
#if !ERROR_DEPRECATED
/*
* These registers were previously wrongly named. Provide previous definitions
* so as not to break platforms that continue using them.
*/
#define CORTEX_A53_ACTLR_EL1 CORTEX_A53_CPUACTLR_EL1
#define CORTEX_A53_ACTLR_ENDCCASCI_SHIFT CORTEX_A53_CPUACTLR_EL1_ENDCCASCI_SHIFT
#define CORTEX_A53_ACTLR_ENDCCASCI CORTEX_A53_CPUACTLR_EL1_ENDCCASCI
#define CORTEX_A53_ACTLR_RADIS_SHIFT CORTEX_A53_CPUACTLR_EL1_RADIS_SHIFT
#define CORTEX_A53_ACTLR_RADIS CORTEX_A53_CPUACTLR_EL1_RADIS
#define CORTEX_A53_ACTLR_L1RADIS_SHIFT CORTEX_A53_CPUACTLR_EL1_L1RADIS_SHIFT
#define CORTEX_A53_ACTLR_L1RADIS CORTEX_A53_CPUACTLR_EL1_L1RADIS
#define CORTEX_A53_ACTLR_DTAH_SHIFT CORTEX_A53_CPUACTLR_EL1_DTAH_SHIFT
#define CORTEX_A53_ACTLR_DTAH CORTEX_A53_CPUACTLR_EL1_DTAH
#endif /* !ERROR_DEPRECATED */
#endif /* __CORTEX_A53_H__ */

View File

@ -81,22 +81,4 @@
******************************************************************************/
#define CORTEX_A57_L2MERRSR_EL1 S3_1_C15_C2_3
#if !ERROR_DEPRECATED
/*
* These registers were previously wrongly named. Provide previous definitions so
* as not to break platforms that continue using them.
*/
#define CORTEX_A57_ACTLR_EL1 CORTEX_A57_CPUACTLR_EL1
#define CORTEX_A57_ACTLR_DIS_LOAD_PASS_DMB CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_DMB
#define CORTEX_A57_ACTLR_GRE_NGRE_AS_NGNRE CORTEX_A57_CPUACTLR_EL1_GRE_NGRE_AS_NGNRE
#define CORTEX_A57_ACTLR_DIS_OVERREAD CORTEX_A57_CPUACTLR_EL1_DIS_OVERREAD
#define CORTEX_A57_ACTLR_NO_ALLOC_WBWA CORTEX_A57_CPUACTLR_EL1_NO_ALLOC_WBWA
#define CORTEX_A57_ACTLR_DCC_AS_DCCI CORTEX_A57_CPUACTLR_EL1_DCC_AS_DCCI
#define CORTEX_A57_ACTLR_FORCE_FPSCR_FLUSH CORTEX_A57_CPUACTLR_EL1_FORCE_FPSCR_FLUSH
#define CORTEX_A57_ACTLR_DIS_STREAMING CORTEX_A57_CPUACTLR_EL1_DIS_STREAMING
#define CORTEX_A57_ACTLR_DIS_L1_STREAMING CORTEX_A57_CPUACTLR_EL1_DIS_L1_STREAMING
#define CORTEX_A57_ACTLR_DIS_INDIRECT_PREDICTOR CORTEX_A57_CPUACTLR_EL1_DIS_INDIRECT_PREDICTOR
#endif /* !ERROR_DEPRECATED */
#endif /* __CORTEX_A57_H__ */

View File

@ -61,16 +61,4 @@
******************************************************************************/
#define CORTEX_A72_L2MERRSR_EL1 S3_1_C15_C2_3
#if !ERROR_DEPRECATED
/*
* These registers were previously wrongly named. Provide previous definitions so
* as not to break platforms that continue using them.
*/
#define CORTEX_A72_ACTLR CORTEX_A72_CPUACTLR_EL1
#define CORTEX_A72_ACTLR_DISABLE_L1_DCACHE_HW_PFTCH CORTEX_A72_CPUACTLR_EL1_DISABLE_L1_DCACHE_HW_PFTCH
#define CORTEX_A72_ACTLR_NO_ALLOC_WBWA CORTEX_A72_CPUACTLR_EL1_NO_ALLOC_WBWA
#define CORTEX_A72_ACTLR_DCC_AS_DCCI CORTEX_A72_CPUACTLR_EL1_DCC_AS_DCCI
#endif /* !ERROR_DEPRECATED */
#endif /* __CORTEX_A72_H__ */

View File

@ -47,16 +47,6 @@ void cm_write_scr_el3_bit(uint32_t security_state,
void cm_set_next_eret_context(uint32_t security_state);
uint32_t cm_get_scr_el3(uint32_t security_state);
void cm_init_context(uint64_t mpidr,
const struct entry_point_info *ep) __deprecated;
void *cm_get_context_by_mpidr(uint64_t mpidr,
uint32_t security_state) __deprecated;
void cm_set_context_by_mpidr(uint64_t mpidr,
void *context,
uint32_t security_state) __deprecated;
/* Inline definitions */
/*******************************************************************************

View File

@ -10,9 +10,6 @@
#include <bakery_lock.h>
#include <bl_common.h>
#include <platform_def.h> /* for PLAT_NUM_PWR_DOMAINS */
#if ENABLE_PLAT_COMPAT
#include <psci_compat.h>
#endif
#include <psci_lib.h> /* To maintain compatibility for SPDs */
#include <utils_def.h>
@ -350,12 +347,6 @@ int psci_features(unsigned int psci_fid);
void __dead2 psci_power_down_wfi(void);
void psci_arch_setup(void);
/*
* The below API is deprecated. This is now replaced by bl31_warmboot_entry in
* AArch64.
*/
void psci_entrypoint(void) __deprecated;
#endif /*__ASSEMBLY__*/
#endif /* PSCI_H */

View File

@ -1,93 +0,0 @@
/*
* Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef PSCI_COMPAT_H
#define PSCI_COMPAT_H
#include <arch.h>
#include <platform_def.h>
#include <utils_def.h>
#ifndef __ASSEMBLY__
/*
* The below declarations are to enable compatibility for the platform ports
* using the old platform interface and psci helpers.
*/
#define PLAT_MAX_PWR_LVL PLATFORM_MAX_AFFLVL
#define PLAT_NUM_PWR_DOMAINS PLATFORM_NUM_AFFS
/*******************************************************************************
* PSCI affinity related constants. An affinity instance could
* be present or absent physically to cater for asymmetric topologies.
******************************************************************************/
#define PSCI_AFF_ABSENT 0x0
#define PSCI_AFF_PRESENT 0x1
#define PSCI_STATE_ON U(0x0)
#define PSCI_STATE_OFF U(0x1)
#define PSCI_STATE_ON_PENDING U(0x2)
#define PSCI_STATE_SUSPEND U(0x3)
/*
* Using the compatibility platform interfaces means that the local states
* used in psci_power_state_t need to only convey whether its power down
* or standby state. The onus is on the platform port to do the right thing
* including the state coordination in case multiple power down states are
* involved. Hence if we assume 3 generic states viz, run, standby and
* power down, we can assign 1 and 2 to standby and power down respectively.
*/
#define PLAT_MAX_RET_STATE U(1)
#define PLAT_MAX_OFF_STATE U(2)
/*
* Macro to represent invalid affinity level within PSCI.
*/
#define PSCI_INVALID_DATA -1
#define psci_get_pstate_afflvl(pstate) psci_get_pstate_pwrlvl(pstate)
/*
* This array stores the 'power_state' requests of each CPU during
* CPU_SUSPEND and SYSTEM_SUSPEND which will be populated by the
* compatibility layer when appropriate platform hooks are invoked.
*/
extern unsigned int psci_power_state_compat[PLATFORM_CORE_COUNT];
/*******************************************************************************
* Structure populated by platform specific code to export routines which
* perform common low level pm functions
******************************************************************************/
typedef struct plat_pm_ops {
void (*affinst_standby)(unsigned int power_state);
int (*affinst_on)(unsigned long mpidr,
unsigned long sec_entrypoint,
unsigned int afflvl,
unsigned int state);
void (*affinst_off)(unsigned int afflvl, unsigned int state);
void (*affinst_suspend)(unsigned long sec_entrypoint,
unsigned int afflvl,
unsigned int state);
void (*affinst_on_finish)(unsigned int afflvl, unsigned int state);
void (*affinst_suspend_finish)(unsigned int afflvl,
unsigned int state);
void (*system_off)(void) __dead2;
void (*system_reset)(void) __dead2;
int (*validate_power_state)(unsigned int power_state);
int (*validate_ns_entrypoint)(unsigned long ns_entrypoint);
unsigned int (*get_sys_suspend_power_state)(void);
} plat_pm_ops_t;
/*******************************************************************************
* Function & Data prototypes to enable compatibility for older platform ports
******************************************************************************/
int psci_get_suspend_stateid_by_mpidr(unsigned long);
int psci_get_suspend_stateid(void);
int psci_get_suspend_powerstate(void);
unsigned int psci_get_max_phys_off_afflvl(void);
int psci_get_suspend_afflvl(void);
#endif /* ____ASSEMBLY__ */
#endif /* PSCI_COMPAT_H */
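The replacement for this compatibility layer is the power-domain interface: the platform fills a plat_psci_ops_t and returns it from plat_setup_psci_ops(), which also receives the warm-boot address from the framework. A reduced sketch, not part of this diff; the plat_xyz_* helpers are hypothetical and only a subset of hooks is shown:

#include <arch_helpers.h>
#include <psci.h>

static uintptr_t plat_xyz_sec_entrypoint;

static void plat_xyz_cpu_standby(plat_local_state_t cpu_state)
{
	/* Enter the shallow retention state; details are platform specific. */
	wfi();
}

static int plat_xyz_pwr_domain_on(u_register_t mpidr)
{
	/* Power up and release the requested core here, using
	 * plat_xyz_sec_entrypoint as its warm-boot address. */
	return PSCI_E_SUCCESS;
}

static void plat_xyz_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	/* Re-initialise the per-CPU interrupt controller interface here. */
}

static const plat_psci_ops_t plat_xyz_psci_ops = {
	.cpu_standby		= plat_xyz_cpu_standby,
	.pwr_domain_on		= plat_xyz_pwr_domain_on,
	.pwr_domain_on_finish	= plat_xyz_pwr_domain_on_finish,
};

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	/* sec_entrypoint is the warm-boot address (bl31_warm_entrypoint). */
	plat_xyz_sec_entrypoint = sec_entrypoint;
	*psci_ops = &plat_xyz_psci_ops;
	return 0;
}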

View File

@ -1,14 +0,0 @@
/*
* Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __SMCC_H__
#define __SMCC_H__
#if !ERROR_DEPRECATED
#include <smccc.h>
#endif
#endif /* __SMCC_H__ */

View File

@ -57,17 +57,6 @@
* does not equal SMC_UNK. This is to ensure that the caller won't mistake the
* returned UUID in x0 for an invalid SMC error return
*/
#if !ERROR_DEPRECATED
#define DEFINE_SVC_UUID(_name, _tl, _tm, _th, _cl, _ch, \
_n0, _n1, _n2, _n3, _n4, _n5) \
CASSERT((uint32_t)(_tl) != (uint32_t) SMC_UNK, invalid_svc_uuid);\
static const uuid_t _name = { \
_tl, _tm, _th, _cl, _ch, \
{ _n0, _n1, _n2, _n3, _n4, _n5 } \
}
#endif
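Services now declare their UUID with DEFINE_SVC_UUID2, whose definition continues in the remaining context just below. A usage sketch, not part of this diff, for a hypothetical service with placeholder UUID values:

/* Hypothetical service UUID; the values are placeholders. */
DEFINE_SVC_UUID2(xyz_svc_uid,
	0xd0e3ee24, 0x1d4c, 0x43cb, 0x8f, 0x3a,
	0x8b, 0x3d, 0x8f, 0x8e, 0x1d, 0x2a);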
#define DEFINE_SVC_UUID2(_name, _tl, _tm, _th, _cl, _ch, \
_n0, _n1, _n2, _n3, _n4, _n5) \
CASSERT((uint32_t)(_tl) != (uint32_t) SMC_UNK, invalid_svc_uuid);\

View File

@ -66,9 +66,6 @@
#define SMC_32 U(0)
#define SMC_TYPE_FAST ULL(1)
#if !ERROR_DEPRECATED
#define SMC_TYPE_STD ULL(0)
#endif
#define SMC_TYPE_YIELD ULL(0)
#define SMC_OK ULL(0)

View File

@ -7,10 +7,6 @@
#ifndef __UTILS_H__
#define __UTILS_H__
#if !ERROR_DEPRECATED
#include <utils_def.h>
#endif
/*
* C code should be put in this part of the header to avoid breaking ASM files
* or linker scripts including it.

View File

@ -67,11 +67,6 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
#ifdef AARCH32
/* AArch32 specific translation table API */
#if !ERROR_DEPRECATED
void enable_mmu_secure(unsigned int flags);
void enable_mmu_direct(unsigned int flags);
#endif
void enable_mmu_svc_mon(unsigned int flags);
void enable_mmu_hyp(unsigned int flags);
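AArch32 images therefore enable the MMU through the renamed, mode-explicit entry points (enable_mmu_svc_mon / enable_mmu_hyp). A minimal SP_MIN-style sketch, not part of this diff, with the region extents as placeholders:

#include <platform_def.h>
#include <xlat_tables_v2.h>

void plat_xyz_setup_page_tables(void)
{
	/* Map the image itself, then any device memory it needs. */
	mmap_add_region(BL32_BASE, BL32_BASE, BL32_LIMIT - BL32_BASE,
			MT_MEMORY | MT_RW | MT_SECURE);	/* placeholder extents */
	init_xlat_tables();

	/* Running in Secure Monitor/SVC mode: use the _svc_mon variant. */
	enable_mmu_svc_mon(0);
}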

View File

@ -71,10 +71,6 @@
#define MT_CODE (MT_MEMORY | MT_RO | MT_EXECUTE)
#define MT_RO_DATA (MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
#if !ERROR_DEPRECATED
typedef unsigned int mmap_attr_t;
#endif
/*
* Structure for specifying a single region of memory.
*/

View File

@ -104,10 +104,6 @@
#define MT_RO_DATA (MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
#define MT_RW_DATA (MT_MEMORY | MT_RW | MT_EXECUTE_NEVER)
#if !ERROR_DEPRECATED
typedef unsigned int mmap_attr_t;
#endif
/*
* Structure for specifying a single region of memory.
*/
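Attribute arguments are now plain unsigned flags built from the MT_* composites above; the mmap_attr_t typedef is gone. A small region-table sketch, not part of this diff, with the XYZ_* addresses and sizes as placeholders:

#include <xlat_tables_v2.h>

#define XYZ_DEV_BASE	0x1c000000ULL	/* placeholder */
#define XYZ_DEV_SIZE	0x01000000U	/* placeholder */
#define XYZ_DRAM_BASE	0x80000000ULL	/* placeholder */
#define XYZ_DRAM_SIZE	0x40000000U	/* placeholder */

static const mmap_region_t plat_xyz_mmap[] = {
	MAP_REGION_FLAT(XYZ_DEV_BASE, XYZ_DEV_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
	MAP_REGION_FLAT(XYZ_DRAM_BASE, XYZ_DRAM_SIZE,
			MT_MEMORY | MT_RW | MT_NS),
	{0}
};

void plat_xyz_add_mmap(void)
{
	mmap_add(plat_xyz_mmap);
}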

View File

@ -165,21 +165,6 @@
#define ARM_IRQ_SEC_SGI_6 14
#define ARM_IRQ_SEC_SGI_7 15
/*
* List of secure interrupts are deprecated, but are retained only to support
* legacy configurations.
*/
#define ARM_G1S_IRQS ARM_IRQ_SEC_PHY_TIMER, \
ARM_IRQ_SEC_SGI_1, \
ARM_IRQ_SEC_SGI_2, \
ARM_IRQ_SEC_SGI_3, \
ARM_IRQ_SEC_SGI_4, \
ARM_IRQ_SEC_SGI_5, \
ARM_IRQ_SEC_SGI_7
#define ARM_G0_IRQS ARM_IRQ_SEC_SGI_0, \
ARM_IRQ_SEC_SGI_6
/*
* Define a list of Group 1 Secure and Group 0 interrupt properties as per GICv3
* terminology. On a GICv2 system or mode, the lists will be merged and treated

View File

@ -17,7 +17,6 @@
/*******************************************************************************
* Forward declarations
******************************************************************************/
struct bl31_params;
struct meminfo;
struct image_info;
struct bl_params;
@ -197,13 +196,8 @@ void arm_bl2u_platform_setup(void);
void arm_bl2u_plat_arch_setup(void);
/* BL31 utility functions */
#if LOAD_IMAGE_V2
void arm_bl31_early_platform_setup(void *from_bl2, uintptr_t soc_fw_config,
uintptr_t hw_config, void *plat_params_from_bl2);
#else
void arm_bl31_early_platform_setup(struct bl31_params *from_bl2, uintptr_t soc_fw_config,
uintptr_t hw_config, void *plat_params_from_bl2);
#endif /* LOAD_IMAGE_V2 */
void arm_bl31_platform_setup(void);
void arm_bl31_plat_runtime_setup(void);
void arm_bl31_plat_arch_setup(void);
@ -252,13 +246,11 @@ void plat_arm_error_handler(int err);
unsigned int plat_arm_get_cpu_pe_count(u_register_t mpidr);
#endif
#if LOAD_IMAGE_V2
/*
* This function is called after loading SCP_BL2 image and it is used to perform
* any platform-specific actions required to handle the SCP firmware.
*/
int plat_arm_bl2_handle_scp_bl2(struct image_info *scp_bl2_image_info);
#endif
/*
* Optional functions required in ARM standard platforms

View File

@ -31,7 +31,6 @@
*/
#define FIRMWARE_WELCOME_STR "Booting Trusted Firmware\n"
#if LOAD_IMAGE_V2
#define BL2_IMAGE_DESC { \
.image_id = BL2_IMAGE_ID, \
SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, \
@ -42,17 +41,6 @@
VERSION_2, entry_point_info_t, SECURE | EXECUTABLE),\
.ep_info.pc = BL2_BASE, \
}
#else /* LOAD_IMAGE_V2 */
#define BL2_IMAGE_DESC { \
.image_id = BL2_IMAGE_ID, \
SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, \
VERSION_1, image_info_t, 0), \
.image_info.image_base = BL2_BASE, \
SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, \
VERSION_1, entry_point_info_t, SECURE | EXECUTABLE),\
.ep_info.pc = BL2_BASE, \
}
#endif /* LOAD_IMAGE_V2 */
/*
* The following constants identify the extents of the code & read-only data

View File

@ -17,7 +17,6 @@ struct auth_img_desc_s;
struct meminfo;
struct image_info;
struct entry_point_info;
struct bl31_params;
struct image_desc;
struct bl_load_info;
struct bl_params;
@ -38,7 +37,6 @@ struct secure_partition_boot_info;
/*******************************************************************************
* Mandatory common functions
******************************************************************************/
unsigned long long plat_get_syscnt_freq(void) __deprecated;
unsigned int plat_get_syscnt_freq2(void);
int plat_get_image_source(unsigned int image_id,
@ -136,10 +134,6 @@ int bl1_plat_mem_check(uintptr_t mem_base, unsigned int mem_size,
/*******************************************************************************
* Optional BL1 functions (may be overridden)
******************************************************************************/
#if !ERROR_DEPRECATED
void bl1_init_bl2_mem_layout(const struct meminfo *bl1_mem_layout,
struct meminfo *bl2_mem_layout);
#endif
/*
* The following functions are used for image loading process in BL1.
*/
@ -169,14 +163,10 @@ int bl1_plat_handle_post_image_load(unsigned int image_id);
* Mandatory BL2 functions
******************************************************************************/
void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1, u_register_t arg2, u_register_t arg3);
#if !ERROR_DEPRECATED
void bl2_early_platform_setup(struct meminfo *mem_layout);
#endif
void bl2_plat_arch_setup(void);
void bl2_platform_setup(void);
struct meminfo *bl2_plat_sec_mem_layout(void);
#if LOAD_IMAGE_V2
/*
* This function can be used by the platforms to update/use image
* information for given `image_id`.
@ -184,65 +174,6 @@ struct meminfo *bl2_plat_sec_mem_layout(void);
int bl2_plat_handle_pre_image_load(unsigned int image_id);
int bl2_plat_handle_post_image_load(unsigned int image_id);
#else /* LOAD_IMAGE_V2 */
/*
* This function returns a pointer to the shared memory that the platform has
* kept aside to pass trusted firmware related information that BL31
* could need
*/
struct bl31_params *bl2_plat_get_bl31_params(void);
/*
* This function returns a pointer to the shared memory that the platform
* has kept to point to entry point information of BL31 to BL2
*/
struct entry_point_info *bl2_plat_get_bl31_ep_info(void);
/*
* This function flushes to main memory all the params that are
* passed to BL31
*/
void bl2_plat_flush_bl31_params(void);
/*
* The next 2 functions allow the platform to change the entrypoint information
* for the mandatory 3rd level BL images, BL31 and BL33. This is done after
* BL2 has loaded those images into memory but before BL31 is executed.
*/
void bl2_plat_set_bl31_ep_info(struct image_info *image,
struct entry_point_info *ep);
void bl2_plat_set_bl33_ep_info(struct image_info *image,
struct entry_point_info *ep);
/* Gets the memory layout for BL33 */
void bl2_plat_get_bl33_meminfo(struct meminfo *mem_info);
/*******************************************************************************
* Conditionally mandatory BL2 functions: must be implemented if SCP_BL2 image
* is supported
******************************************************************************/
/* Gets the memory layout for SCP_BL2 */
void bl2_plat_get_scp_bl2_meminfo(struct meminfo *mem_info);
/*
* This function is called after loading SCP_BL2 image and it is used to perform
* any platform-specific actions required to handle the SCP firmware.
*/
int bl2_plat_handle_scp_bl2(struct image_info *scp_bl2_image_info);
/*******************************************************************************
* Conditionally mandatory BL2 functions: must be implemented if BL32 image
* is supported
******************************************************************************/
void bl2_plat_set_bl32_ep_info(struct image_info *image,
struct entry_point_info *ep);
/* Gets the memory layout for BL32 */
void bl2_plat_get_bl32_meminfo(struct meminfo *mem_info);
#endif /* LOAD_IMAGE_V2 */
/*******************************************************************************
* Optional BL2 functions (may be overridden)
@ -283,15 +214,6 @@ int bl2u_plat_handle_scp_bl2u(void);
/*******************************************************************************
* Mandatory BL31 functions
******************************************************************************/
#if !ERROR_DEPRECATED
#if LOAD_IMAGE_V2
void bl31_early_platform_setup(void *from_bl2,
void *plat_params_from_bl2);
#else
void bl31_early_platform_setup(struct bl31_params *from_bl2,
void *plat_params_from_bl2);
#endif
#endif /* ERROR_DEPRECATED */
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
u_register_t arg2, u_register_t arg3);
void bl31_plat_arch_setup(void);
@ -345,7 +267,6 @@ const struct mmap_region *plat_get_secure_partition_mmap(void *cookie);
const struct secure_partition_boot_info *plat_get_secure_partition_boot_info(
void *cookie);
#if LOAD_IMAGE_V2
/*******************************************************************************
* Mandatory BL image load functions(may be overridden).
******************************************************************************/
@ -368,36 +289,11 @@ struct bl_params *plat_get_next_bl_params(void);
*/
void plat_flush_next_bl_params(void);
#endif /* LOAD_IMAGE_V2 */
#if ENABLE_PLAT_COMPAT
/*
* The below declarations are to enable compatibility for the platform ports
* using the old platform interface.
*/
/*******************************************************************************
* Optional common functions (may be overridden)
******************************************************************************/
unsigned int platform_get_core_pos(unsigned long mpidr);
/*******************************************************************************
* Mandatory PSCI Compatibility functions (BL31)
******************************************************************************/
int platform_setup_pm(const plat_pm_ops_t **);
unsigned int plat_get_aff_count(unsigned int, unsigned long);
unsigned int plat_get_aff_state(unsigned int, unsigned long);
#else /* __ENABLE_PLAT_COMPAT__ */
/*
* The below function enable Trusted Firmware components like SPDs which
* haven't migrated to the new platform API to compile on platforms which
* have the compatibility layer disabled.
*/
unsigned int platform_core_pos_helper(unsigned long mpidr);
unsigned int platform_get_core_pos(unsigned long mpidr) __deprecated;
#endif /* __ENABLE_PLAT_COMPAT__ */
#endif /* PLATFORM_H */
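BL31 now receives four opaque arguments as well; ARM platforms typically pass the bl_params_t list in arg0 and the SOC/HW config addresses in arg1/arg2, but the convention is up to each port. A reduced sketch, not part of this diff, of a platform picking out the BL32/BL33 entry points:

#include <assert.h>
#include <bl_common.h>
#include <platform.h>

static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;

void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
				u_register_t arg2, u_register_t arg3)
{
	bl_params_t *params_from_bl2 = (bl_params_t *)arg0;
	bl_params_node_t *node;

	assert(params_from_bl2 != NULL);

	for (node = params_from_bl2->head; node != NULL;
	     node = node->next_params_info) {
		if (node->image_id == BL32_IMAGE_ID)
			bl32_image_ep_info = *node->ep_info;
		if (node->image_id == BL33_IMAGE_ID)
			bl33_image_ep_info = *node->ep_info;
	}
}

entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
{
	return (type == NON_SECURE) ? &bl33_image_ep_info
				    : &bl32_image_ep_info;
}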

View File

@ -14,9 +14,7 @@
*/
/* Size of cacheable stacks */
#if DEBUG_XLAT_TABLE
# define PLATFORM_STACK_SIZE 0x800
#elif IMAGE_BL1
#if IMAGE_BL1
#if TRUSTED_BOARD_BOOT
# define PLATFORM_STACK_SIZE 0x1000
#else

View File

@ -24,6 +24,15 @@ extern const mmap_region_t plat_marvell_mmap[];
<= MAX_MMAP_REGIONS, \
assert_max_mmap_regions)
struct marvell_bl31_params {
param_header_t h;
image_info_t *bl31_image_info;
entry_point_info_t *bl32_ep_info;
image_info_t *bl32_image_info;
entry_point_info_t *bl33_ep_info;
image_info_t *bl33_image_info;
};
/*
* Utility functions common to Marvell standard platforms
*/
@ -67,7 +76,7 @@ uint32_t marvell_get_spsr_for_bl32_entry(void);
uint32_t marvell_get_spsr_for_bl33_entry(void);
/* BL31 utility functions */
void marvell_bl31_early_platform_setup(struct bl31_params *from_bl2,
void marvell_bl31_early_platform_setup(void *from_bl2,
uintptr_t soc_fw_config,
uintptr_t hw_config,
void *plat_params_from_bl2);
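Since the generic bl31_params_t no longer exists, the port keeps its own marvell_bl31_params copy of that layout (added above) and receives it through the now-opaque from_bl2 pointer. A rough sketch, not part of this diff and only an approximation of the implementation the new prototype implies:

#include <plat_marvell.h>

static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;

void marvell_bl31_early_platform_setup(void *from_bl2,
				       uintptr_t soc_fw_config,
				       uintptr_t hw_config,
				       void *plat_params_from_bl2)
{
	struct marvell_bl31_params *params =
			(struct marvell_bl31_params *)from_bl2;

	/* Same handoff contents as before, reached through the void *. */
	bl32_image_ep_info = *params->bl32_ep_info;
	bl33_image_ep_info = *params->bl33_ep_info;
}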

View File

@ -55,16 +55,6 @@ func smc
smc #0
endfunc smc
/* -----------------------------------------------------------------------
* void zeromem16(void *mem, unsigned int length);
*
* Initialise a memory region to 0.
* The memory address must be 16-byte aligned.
* NOTE: This function is deprecated and zeromem should be used instead.
* -----------------------------------------------------------------------
*/
.equ zeromem16, zeromem
/* -----------------------------------------------------------------------
* void zero_normalmem(void *mem, unsigned int length);
*

View File

@ -1,14 +0,0 @@
/*
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* This file is deprecated and is retained here only for compatibility.
* The xlat_tables library can be found in `lib/xlat_tables` directory.
*/
#if !ERROR_DEPRECATED
#include "../xlat_tables/xlat_tables_common.c"
#include "../xlat_tables/aarch64/xlat_tables.c"
#endif

View File

@ -1,9 +0,0 @@
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#if !ERROR_DEPRECATED
#include "./aarch64/spinlock.S"
#endif

View File

@ -12,9 +12,6 @@
.globl psci_do_pwrdown_cache_maintenance
.globl psci_do_pwrup_cache_maintenance
.globl psci_power_down_wfi
#if !ERROR_DEPRECATED
.globl psci_entrypoint
#endif
/* -----------------------------------------------------------------------
* void psci_do_pwrdown_cache_maintenance(unsigned int power level);
@ -131,12 +128,3 @@ func psci_power_down_wfi
wfi
no_ret plat_panic_handler
endfunc psci_power_down_wfi
/* -----------------------------------------------------------------------
* void psci_entrypoint(void);
* The deprecated entry point for PSCI on warm boot for AArch64.
* -----------------------------------------------------------------------
*/
func_deprecated psci_entrypoint
b bl31_warm_entrypoint
endfunc_deprecated psci_entrypoint

View File

@ -938,84 +938,6 @@ int psci_secondaries_brought_up(void)
return (n_valid > 1U) ? 1 : 0;
}
#if ENABLE_PLAT_COMPAT
/*******************************************************************************
* PSCI Compatibility helper function to return the 'power_state' parameter of
* the PSCI CPU SUSPEND request for the current CPU. Returns PSCI_INVALID_DATA
* if not invoked within CPU_SUSPEND for the current CPU.
******************************************************************************/
int psci_get_suspend_powerstate(void)
{
/* Sanity check to verify that CPU is within CPU_SUSPEND */
if (psci_get_aff_info_state() == AFF_STATE_ON &&
!is_local_state_run(psci_get_cpu_local_state()))
return psci_power_state_compat[plat_my_core_pos()];
return PSCI_INVALID_DATA;
}
/*******************************************************************************
* PSCI Compatibility helper function to return the state id of the current
* cpu encoded in the 'power_state' parameter. Returns PSCI_INVALID_DATA
* if not invoked within CPU_SUSPEND for the current CPU.
******************************************************************************/
int psci_get_suspend_stateid(void)
{
unsigned int power_state;
power_state = psci_get_suspend_powerstate();
if (power_state != PSCI_INVALID_DATA)
return psci_get_pstate_id(power_state);
return PSCI_INVALID_DATA;
}
/*******************************************************************************
* PSCI Compatibility helper function to return the state id encoded in the
* 'power_state' parameter of the CPU specified by 'mpidr'. Returns
* PSCI_INVALID_DATA if the CPU is not in CPU_SUSPEND.
******************************************************************************/
int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
{
int cpu_idx = plat_core_pos_by_mpidr(mpidr);
if (cpu_idx == -1)
return PSCI_INVALID_DATA;
/* Sanity check to verify that the CPU is in CPU_SUSPEND */
if ((psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON) &&
(!is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx))))
return psci_get_pstate_id(psci_power_state_compat[cpu_idx]);
return PSCI_INVALID_DATA;
}
/*******************************************************************************
* This function returns highest affinity level which is in OFF
* state. The affinity instance with which the level is associated is
* determined by the caller.
******************************************************************************/
unsigned int psci_get_max_phys_off_afflvl(void)
{
psci_power_state_t state_info;
zeromem(&state_info, sizeof(state_info));
psci_get_target_local_pwr_states(PLAT_MAX_PWR_LVL, &state_info);
return psci_find_target_suspend_lvl(&state_info);
}
/*******************************************************************************
* PSCI Compatibility helper function to return target affinity level requested
* for the CPU_SUSPEND. This function assumes affinity levels correspond to
* power domain levels on the platform.
******************************************************************************/
int psci_get_suspend_afflvl(void)
{
return psci_get_suspend_pwrlvl();
}
#endif
/*******************************************************************************
* Initiate power down sequence, by calling power down operations registered for
* this CPU.

View File

@ -61,22 +61,6 @@ void init_xlat_tables(void)
assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());
}
/*******************************************************************************
* Function for enabling the MMU in Secure PL1, assuming that the
* page-tables have already been created.
******************************************************************************/
#if !ERROR_DEPRECATED
void enable_mmu_secure(unsigned int flags)
{
enable_mmu_svc_mon(flags);
}
void enable_mmu_direct(unsigned int flags)
{
enable_mmu_direct_svc_mon(flags);
}
#endif
void enable_mmu_svc_mon(unsigned int flags)
{
unsigned int mair0, ttbcr, sctlr;

View File

@ -15,23 +15,6 @@
#error xlat tables v2 must be used with HW_ASSISTED_COHERENCY
#endif
/*
* If the platform hasn't defined a physical and a virtual address space size
* default to ADDR_SPACE_SIZE.
*/
#if ERROR_DEPRECATED
# ifdef ADDR_SPACE_SIZE
# error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
# endif
#elif defined(ADDR_SPACE_SIZE)
# ifndef PLAT_PHY_ADDR_SPACE_SIZE
# define PLAT_PHY_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
# endif
# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
# define PLAT_VIRT_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
# endif
#endif
CASSERT(CHECK_VIRT_ADDR_SPACE_SIZE(PLAT_VIRT_ADDR_SPACE_SIZE),
assert_valid_virt_addr_space_size);
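Each platform therefore has to provide both sizes in its platform_def.h; there is no ADDR_SPACE_SIZE fallback any more. For example (the 4 GiB values are only illustrative):

/* platform_def.h (illustrative values) */
#define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 32)
#define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 32)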

View File

@ -18,24 +18,6 @@
*/
uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
/*
* Each platform can define the size of its physical and virtual address spaces.
* If the platform hasn't defined one or both of them, default to
* ADDR_SPACE_SIZE. The latter is deprecated, though.
*/
#if ERROR_DEPRECATED
# ifdef ADDR_SPACE_SIZE
# error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
# endif
#elif defined(ADDR_SPACE_SIZE)
# ifndef PLAT_PHY_ADDR_SPACE_SIZE
# define PLAT_PHY_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
# endif
# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
# define PLAT_VIRT_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
# endif
#endif
/*
* Allocate and initialise the default translation context for the BL image
* currently executing.
@ -121,18 +103,6 @@ int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
#ifdef AARCH32
#if !ERROR_DEPRECATED
void enable_mmu_secure(unsigned int flags)
{
enable_mmu_svc_mon(flags);
}
void enable_mmu_direct(unsigned int flags)
{
enable_mmu_direct_svc_mon(flags);
}
#endif
void enable_mmu_svc_mon(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,

View File

@ -23,10 +23,6 @@ ARCH := aarch64
ARM_ARCH_MAJOR := 8
ARM_ARCH_MINOR := 0
# Determine the version of ARM GIC architecture to use for interrupt management
# in EL3. The platform port can change this value if needed.
ARM_GIC_ARCH := 2
# Base commit to perform code check on
BASE_COMMIT := origin/master
@ -116,9 +112,6 @@ HW_ASSISTED_COHERENCY := 0
# Set the default algorithm for the generation of Trusted Board Boot keys
KEY_ALG := rsa
# Flag to enable new version of image loading
LOAD_IMAGE_V2 := 0
# Enable use of the console API allowing multiple consoles to be registered
# at the same time.
MULTI_CONSOLE_API := 0
@ -169,14 +162,14 @@ SPIN_ON_BL1_EXIT := 0
# Flags to build TF with Trusted Boot support
TRUSTED_BOARD_BOOT := 0
# Build option to choose whether Trusted firmware uses Coherent memory or not.
# Build option to choose whether Trusted Firmware uses Coherent memory or not.
USE_COHERENT_MEM := 1
# Build option to choose wheter Trusted firmware uses library at ROM
USE_ROMLIB := 0
# Build option to choose whether Trusted Firmware uses library at ROM
USE_ROMLIB := 0
# Use tbbr_oid.h instead of platform_oid.h
USE_TBBR_DEFS = $(ERROR_DEPRECATED)
USE_TBBR_DEFS := 1
# Build verbosity
V := 0

View File

@ -42,14 +42,8 @@ ERRATA_A53_835769 := 1
ERRATA_A53_843419 := 1
ERRATA_A53_855873 := 1
# Disable the PSCI platform compatibility layer.
ENABLE_PLAT_COMPAT := 0
MULTI_CONSOLE_API := 1
# Prohibit using deprecated interfaces. We rely on this for this platform.
ERROR_DEPRECATED := 1
# The reset vector can be changed for each CPU.
PROGRAMMABLE_RESET_ADDRESS := 1

View File

@ -44,14 +44,8 @@ ERRATA_A53_835769 := 1
ERRATA_A53_843419 := 1
ERRATA_A53_855873 := 1
# Disable the PSCI platform compatibility layer.
ENABLE_PLAT_COMPAT := 0
MULTI_CONSOLE_API := 1
# Prohibit using deprecated interfaces. We rely on this for this platform.
ERROR_DEPRECATED := 1
# The reset vector can be changed for each CPU.
PROGRAMMABLE_RESET_ADDRESS := 1

View File

@ -24,7 +24,6 @@
/* Defines for GIC Driver build time selection */
#define FVP_GICV2 1
#define FVP_GICV3 2
#define FVP_GICV3_LEGACY 3
/*******************************************************************************
* arm_config holds the characteristics of the differences between the three FVP
@ -92,9 +91,9 @@ const mmap_region_t plat_arm_mmap[] = {
#if TRUSTED_BOARD_BOOT
/* To access the Root of Trust Public Key registers. */
MAP_DEVICE2,
#if LOAD_IMAGE_V2 && !BL2_AT_EL3
#if !BL2_AT_EL3
ARM_MAP_BL1_RW,
#endif /* LOAD_IMAGE_V2 && !BL2_AT_EL3 */
#endif
#endif /* TRUSTED_BOARD_BOOT */
#if ENABLE_SPM
ARM_SP_IMAGE_MMAP,
@ -399,7 +398,7 @@ void fvp_interconnect_disable(void)
#endif
}
#if TRUSTED_BOARD_BOOT && LOAD_IMAGE_V2
#if TRUSTED_BOARD_BOOT
int plat_get_mbedtls_heap(void **heap_addr, size_t *heap_size)
{
assert(heap_addr != NULL);

View File

@ -145,12 +145,6 @@
* terminology. On a GICv2 system or mode, the lists will be merged and treated
* as Group 0 interrupts.
*/
#define PLAT_ARM_G1S_IRQS ARM_G1S_IRQS, \
FVP_IRQ_TZ_WDOG, \
FVP_IRQ_SEC_SYS_TIMER
#define PLAT_ARM_G0_IRQS ARM_G0_IRQS
#define PLAT_ARM_G1S_IRQ_PROPS(grp) \
ARM_G1S_IRQ_PROPS(grp), \
INTR_PROP_DESC(FVP_IRQ_TZ_WDOG, GIC_HIGHEST_SEC_PRIORITY, grp, \

View File

@ -69,19 +69,6 @@ FVP_GIC_SOURCES := drivers/arm/gic/common/gic_common.c \
plat/arm/common/arm_gicv2.c
FVP_DT_PREFIX := fvp-base-gicv2-psci
else ifeq (${FVP_USE_GIC_DRIVER}, FVP_GICV3_LEGACY)
ifeq (${ARCH}, aarch32)
$(error "GICV3 Legacy driver not supported for AArch32 build")
endif
FVP_GIC_SOURCES := drivers/arm/gic/arm_gic.c \
drivers/arm/gic/gic_v2.c \
drivers/arm/gic/gic_v3.c \
plat/common/plat_gic.c \
plat/arm/common/arm_gicv3_legacy.c
FVP_DT_PREFIX := fvp-base-gicv2-psci
else
$(error "Incorrect GIC driver chosen on FVP port")
endif
@ -208,9 +195,6 @@ $(eval FVP_HW_CONFIG := ${BUILD_PLAT}/$(patsubst %.dts,%.dtb,$(FVP_HW_CONFIG_DTS
$(eval $(call TOOL_ADD_PAYLOAD,${FVP_HW_CONFIG},--hw-config))
endif
# Disable the PSCI platform compatibility layer
ENABLE_PLAT_COMPAT := 0
# Enable Activity Monitor Unit extensions by default
ENABLE_AMU := 1
@ -247,9 +231,7 @@ include plat/arm/board/common/board_common.mk
include plat/arm/common/arm_common.mk
# FVP being a development platform, enable capability to disable Authentication
# dynamically if TRUSTED_BOARD_BOOT and LOAD_IMAGE_V2 is set.
# dynamically if TRUSTED_BOARD_BOOT is set.
ifeq (${TRUSTED_BOARD_BOOT}, 1)
ifeq (${LOAD_IMAGE_V2}, 1)
DYN_DISABLE_AUTH := 1
endif
endif

View File

@ -115,9 +115,6 @@ ERRATA_A72_859971 := 0
# power down sequence
SKIP_A57_L1_FLUSH_PWR_DWN := 1
# Disable the PSCI platform compatibility layer
ENABLE_PLAT_COMPAT := 0
# Enable memory map related constants optimisation
ARM_BOARD_OPTIMISE_MEM := 1

View File

@ -76,16 +76,6 @@ void arm_bl1_early_platform_setup(void)
/* Allow BL1 to see the whole Trusted RAM */
bl1_tzram_layout.total_base = ARM_BL_RAM_BASE;
bl1_tzram_layout.total_size = ARM_BL_RAM_SIZE;
#if !LOAD_IMAGE_V2
/* Calculate how much RAM BL1 is using and how much remains free */
bl1_tzram_layout.free_base = ARM_BL_RAM_BASE;
bl1_tzram_layout.free_size = ARM_BL_RAM_SIZE;
reserve_mem(&bl1_tzram_layout.free_base,
&bl1_tzram_layout.free_size,
BL1_RAM_BASE,
BL1_RAM_LIMIT - BL1_RAM_BASE);
#endif /* LOAD_IMAGE_V2 */
}
void bl1_early_platform_setup(void)
@ -155,13 +145,12 @@ void arm_bl1_platform_setup(void)
{
/* Initialise the IO layer and register platform IO devices */
plat_arm_io_setup();
#if LOAD_IMAGE_V2
arm_load_tb_fw_config();
#if TRUSTED_BOARD_BOOT
/* Share the Mbed TLS heap info with other images */
arm_bl1_set_mbedtls_heap();
#endif /* TRUSTED_BOARD_BOOT */
#endif /* LOAD_IMAGE_V2 */
/*
* Allow access to the System counter timer module and program
* counter frequency for non secure images during FWU

View File

@ -40,138 +40,9 @@ CASSERT(BL2_BASE >= ARM_TB_FW_CONFIG_LIMIT, assert_bl2_base_overflows);
bl2_tzram_layout.total_size, \
MT_MEMORY | MT_RW | MT_SECURE)
#if LOAD_IMAGE_V2
#pragma weak arm_bl2_plat_handle_post_image_load
#else /* LOAD_IMAGE_V2 */
/*******************************************************************************
* This structure represents the superset of information that is passed to
* BL31, e.g. while passing control to it from BL2, bl31_params
* and other platform specific params
******************************************************************************/
typedef struct bl2_to_bl31_params_mem {
bl31_params_t bl31_params;
image_info_t bl31_image_info;
image_info_t bl32_image_info;
image_info_t bl33_image_info;
entry_point_info_t bl33_ep_info;
entry_point_info_t bl32_ep_info;
entry_point_info_t bl31_ep_info;
} bl2_to_bl31_params_mem_t;
static bl2_to_bl31_params_mem_t bl31_params_mem;
/* Weak definitions may be overridden in specific ARM standard platform */
#pragma weak bl2_plat_get_bl31_params
#pragma weak bl2_plat_get_bl31_ep_info
#pragma weak bl2_plat_flush_bl31_params
#pragma weak bl2_plat_set_bl31_ep_info
#pragma weak bl2_plat_get_scp_bl2_meminfo
#pragma weak bl2_plat_get_bl32_meminfo
#pragma weak bl2_plat_set_bl32_ep_info
#pragma weak bl2_plat_get_bl33_meminfo
#pragma weak bl2_plat_set_bl33_ep_info
#if ARM_BL31_IN_DRAM
meminfo_t *bl2_plat_sec_mem_layout(void)
{
static meminfo_t bl2_dram_layout
__aligned(CACHE_WRITEBACK_GRANULE) = {
.total_base = BL31_BASE,
.total_size = (ARM_AP_TZC_DRAM1_BASE +
ARM_AP_TZC_DRAM1_SIZE) - BL31_BASE,
.free_base = BL31_BASE,
.free_size = (ARM_AP_TZC_DRAM1_BASE +
ARM_AP_TZC_DRAM1_SIZE) - BL31_BASE
};
return &bl2_dram_layout;
}
#else
meminfo_t *bl2_plat_sec_mem_layout(void)
{
return &bl2_tzram_layout;
}
#endif /* ARM_BL31_IN_DRAM */
/*******************************************************************************
* This function assigns a pointer to the memory that the platform has kept
* aside to pass platform specific and trusted firmware related information
* to BL31. This memory is allocated by allocating memory to
* bl2_to_bl31_params_mem_t structure which is a superset of all the
* structure whose information is passed to BL31
* NOTE: This function should be called only once and should be done
* before generating params to BL31
******************************************************************************/
bl31_params_t *bl2_plat_get_bl31_params(void)
{
bl31_params_t *bl2_to_bl31_params;
/*
* Initialise the memory for all the arguments that needs to
* be passed to BL31
*/
zeromem(&bl31_params_mem, sizeof(bl2_to_bl31_params_mem_t));
/* Assign memory for TF related information */
bl2_to_bl31_params = &bl31_params_mem.bl31_params;
SET_PARAM_HEAD(bl2_to_bl31_params, PARAM_BL31, VERSION_1, 0);
/* Fill BL31 related information */
bl2_to_bl31_params->bl31_image_info = &bl31_params_mem.bl31_image_info;
SET_PARAM_HEAD(bl2_to_bl31_params->bl31_image_info, PARAM_IMAGE_BINARY,
VERSION_1, 0);
/* Fill BL32 related information if it exists */
#ifdef BL32_BASE
bl2_to_bl31_params->bl32_ep_info = &bl31_params_mem.bl32_ep_info;
SET_PARAM_HEAD(bl2_to_bl31_params->bl32_ep_info, PARAM_EP,
VERSION_1, 0);
bl2_to_bl31_params->bl32_image_info = &bl31_params_mem.bl32_image_info;
SET_PARAM_HEAD(bl2_to_bl31_params->bl32_image_info, PARAM_IMAGE_BINARY,
VERSION_1, 0);
#endif /* BL32_BASE */
/* Fill BL33 related information */
bl2_to_bl31_params->bl33_ep_info = &bl31_params_mem.bl33_ep_info;
SET_PARAM_HEAD(bl2_to_bl31_params->bl33_ep_info,
PARAM_EP, VERSION_1, 0);
/* BL33 expects to receive the primary CPU MPID (through x0) */
bl2_to_bl31_params->bl33_ep_info->args.arg0 = 0xffff & read_mpidr();
bl2_to_bl31_params->bl33_image_info = &bl31_params_mem.bl33_image_info;
SET_PARAM_HEAD(bl2_to_bl31_params->bl33_image_info, PARAM_IMAGE_BINARY,
VERSION_1, 0);
return bl2_to_bl31_params;
}
/* Flush the TF params and the TF plat params */
void bl2_plat_flush_bl31_params(void)
{
flush_dcache_range((unsigned long)&bl31_params_mem,
sizeof(bl2_to_bl31_params_mem_t));
}
/*******************************************************************************
* This function returns a pointer to the shared memory that the platform
* has kept to point to entry point information of BL31 to BL2
******************************************************************************/
struct entry_point_info *bl2_plat_get_bl31_ep_info(void)
{
#if DEBUG
bl31_params_mem.bl31_ep_info.args.arg3 = ARM_BL31_PLAT_PARAM_VAL;
#endif
return &bl31_params_mem.bl31_ep_info;
}
#endif /* LOAD_IMAGE_V2 */
/*******************************************************************************
* BL1 has passed the extents of the trusted SRAM that should be visible to BL2
* in x0. This memory layout is sitting at the base of the free trusted SRAM.
@ -189,10 +60,8 @@ void arm_bl2_early_platform_setup(uintptr_t tb_fw_config,
/* Initialise the IO layer and register platform IO devices */
plat_arm_io_setup();
#if LOAD_IMAGE_V2
if (tb_fw_config != 0U)
arm_bl2_set_tb_cfg_addr((void *)tb_fw_config);
#endif
}
void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1, u_register_t arg2, u_register_t arg3)
@ -208,9 +77,7 @@ void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1, u_register_
*/
void bl2_plat_preload_setup(void)
{
#if LOAD_IMAGE_V2
arm_bl2_dyn_cfg_init();
#endif
}
/*
@ -274,7 +141,6 @@ void bl2_plat_arch_setup(void)
arm_bl2_plat_arch_setup();
}
#if LOAD_IMAGE_V2
int arm_bl2_handle_post_image_load(unsigned int image_id)
{
int err = 0;
@ -342,86 +208,3 @@ int bl2_plat_handle_post_image_load(unsigned int image_id)
{
return arm_bl2_plat_handle_post_image_load(image_id);
}
#else /* LOAD_IMAGE_V2 */
/*******************************************************************************
* Populate the extents of memory available for loading SCP_BL2 (if used),
* i.e. anywhere in trusted RAM as long as it doesn't overwrite BL2.
******************************************************************************/
void bl2_plat_get_scp_bl2_meminfo(meminfo_t *scp_bl2_meminfo)
{
*scp_bl2_meminfo = bl2_tzram_layout;
}
/*******************************************************************************
* Before calling this function BL31 is loaded in memory and its entrypoint
* is set by load_image. This is a placeholder for the platform to change
* the entrypoint of BL31 and set SPSR and security state.
* On ARM standard platforms we only set the security state of the entrypoint
******************************************************************************/
void bl2_plat_set_bl31_ep_info(image_info_t *bl31_image_info,
entry_point_info_t *bl31_ep_info)
{
SET_SECURITY_STATE(bl31_ep_info->h.attr, SECURE);
bl31_ep_info->spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
DISABLE_ALL_EXCEPTIONS);
}
/*******************************************************************************
* Before calling this function BL32 is loaded in memory and its entrypoint
* is set by load_image. This is a placeholder for the platform to change
* the entrypoint of BL32 and set SPSR and security state.
* On ARM standard platforms we only set the security state of the entrypoint
******************************************************************************/
#ifdef BL32_BASE
void bl2_plat_set_bl32_ep_info(image_info_t *bl32_image_info,
entry_point_info_t *bl32_ep_info)
{
SET_SECURITY_STATE(bl32_ep_info->h.attr, SECURE);
bl32_ep_info->spsr = arm_get_spsr_for_bl32_entry();
}
/*******************************************************************************
* Populate the extents of memory available for loading BL32
******************************************************************************/
void bl2_plat_get_bl32_meminfo(meminfo_t *bl32_meminfo)
{
/*
* Populate the extents of memory available for loading BL32.
*/
bl32_meminfo->total_base = BL32_BASE;
bl32_meminfo->free_base = BL32_BASE;
bl32_meminfo->total_size =
(TSP_SEC_MEM_BASE + TSP_SEC_MEM_SIZE) - BL32_BASE;
bl32_meminfo->free_size =
(TSP_SEC_MEM_BASE + TSP_SEC_MEM_SIZE) - BL32_BASE;
}
#endif /* BL32_BASE */
/*******************************************************************************
* Before calling this function BL33 is loaded in memory and its entrypoint
* is set by load_image. This is a placeholder for the platform to change
* the entrypoint of BL33 and set SPSR and security state.
* On ARM standard platforms we only set the security state of the entrypoint
******************************************************************************/
void bl2_plat_set_bl33_ep_info(image_info_t *image,
entry_point_info_t *bl33_ep_info)
{
SET_SECURITY_STATE(bl33_ep_info->h.attr, NON_SECURE);
bl33_ep_info->spsr = arm_get_spsr_for_bl33_entry();
}
/*******************************************************************************
* Populate the extents of memory available for loading BL33
******************************************************************************/
void bl2_plat_get_bl33_meminfo(meminfo_t *bl33_meminfo)
{
bl33_meminfo->total_base = ARM_NS_DRAM1_BASE;
bl33_meminfo->total_size = ARM_NS_DRAM1_SIZE;
bl33_meminfo->free_base = ARM_NS_DRAM1_BASE;
bl33_meminfo->free_size = ARM_NS_DRAM1_SIZE;
}
#endif /* LOAD_IMAGE_V2 */
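Editorial note, not part of this change: the bl2_plat_get_bl31_params()/bl2_plat_set_*_ep_info() family removed above is superseded by the descriptor-based post-image-load hook that this file keeps. A hedged sketch of what such a hook typically does with the generic image-loading-v2 interface; the function name is hypothetical and this is not the body of arm_bl2_handle_post_image_load(), which the diff does not show:

#include <arch_helpers.h>
#include <assert.h>
#include <desc_image_load.h>
#include <plat_arm.h>
#include <tbbr_img_def.h>

int plat_post_image_load_sketch(unsigned int image_id)
{
	/* Fetch the per-image descriptor populated by the generic loader. */
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);

	assert(bl_mem_params != NULL);

	switch (image_id) {
	case BL32_IMAGE_ID:
		/* Fix up the Secure Payload entry state. */
		bl_mem_params->ep_info.spsr = arm_get_spsr_for_bl32_entry();
		break;

	case BL33_IMAGE_ID:
		/* BL33 expects the primary CPU MPID in x0, as before. */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		bl_mem_params->ep_info.spsr = arm_get_spsr_for_bl33_entry();
		break;

	default:
		break;
	}

	return 0;
}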

View File

@ -71,13 +71,8 @@ struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type)
* while creating page tables. BL2 has flushed this information to memory, so
* we are guaranteed to pick up good data.
******************************************************************************/
#if LOAD_IMAGE_V2
void arm_bl31_early_platform_setup(void *from_bl2, uintptr_t soc_fw_config,
uintptr_t hw_config, void *plat_params_from_bl2)
#else
void arm_bl31_early_platform_setup(bl31_params_t *from_bl2, uintptr_t soc_fw_config,
uintptr_t hw_config, void *plat_params_from_bl2)
#endif
{
/* Initialize the console to provide early debug support */
arm_console_boot_init();
@ -135,7 +130,6 @@ void arm_bl31_early_platform_setup(bl31_params_t *from_bl2, uintptr_t soc_fw_con
assert(((unsigned long long)plat_params_from_bl2) ==
ARM_BL31_PLAT_PARAM_VAL);
# if LOAD_IMAGE_V2
/*
* Check params passed from BL2 should not be NULL,
*/
@ -162,29 +156,6 @@ void arm_bl31_early_platform_setup(bl31_params_t *from_bl2, uintptr_t soc_fw_con
if (bl33_image_ep_info.pc == 0U)
panic();
# else /* LOAD_IMAGE_V2 */
/*
* Check params passed from BL2 should not be NULL,
*/
assert(from_bl2 != NULL);
assert(from_bl2->h.type == PARAM_BL31);
assert(from_bl2->h.version >= VERSION_1);
/* Dynamic Config is not supported for LOAD_IMAGE_V1 */
assert(soc_fw_config == 0U);
assert(hw_config == 0U);
/*
* Copy BL32 (if populated by BL2) and BL33 entry point information.
* They are stored in Secure RAM, in BL2's address space.
*/
if (from_bl2->bl32_ep_info)
bl32_image_ep_info = *from_bl2->bl32_ep_info;
bl33_image_ep_info = *from_bl2->bl33_ep_info;
# endif /* LOAD_IMAGE_V2 */
#endif /* RESET_TO_BL31 */
}
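Editorial note, not part of this change: with the bl31_params_t path gone, from_bl2 now carries the descriptor-based bl_params_t list produced by BL2. A hedged sketch of how a BL31 port walks that list to pick up the BL32/BL33 entry points; the function name is hypothetical and the exact Arm-platform body is not shown in this hunk:

#include <assert.h>
#include <bl_common.h>
#include <tbbr_img_def.h>

static entry_point_info_t bl32_image_ep_info, bl33_image_ep_info;

void plat_bl31_parse_params_sketch(void *from_bl2)
{
	bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
	bl_params_node_t *bl_params;

	assert(params_from_bl2 != NULL);
	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
	assert(params_from_bl2->h.version >= VERSION_2);

	/* Copy BL32 (if present) and BL33 entry point information. */
	for (bl_params = params_from_bl2->head; bl_params != NULL;
			bl_params = bl_params->next_params_info) {
		if (bl_params->image_id == BL32_IMAGE_ID)
			bl32_image_ep_info = *bl_params->ep_info;
		if (bl_params->image_id == BL33_IMAGE_ID)
			bl33_image_ep_info = *bl_params->ep_info;
	}
}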

View File

@ -21,9 +21,7 @@
/* Conditionally provide a weak definition of plat_get_syscnt_freq2 to avoid
* conflicts with the definition in plat/common. */
#if ERROR_DEPRECATED
#pragma weak plat_get_syscnt_freq2
#endif
void arm_setup_romlib(void)

View File

@ -122,17 +122,11 @@ ENABLE_PMF := 1
# mapping the former as executable and the latter as execute-never.
SEPARATE_CODE_AND_RODATA := 1
# Enable new version of image loading on ARM platforms
LOAD_IMAGE_V2 := 1
# Use the multi console API, which is only available for AArch64 for now
ifeq (${ARCH}, aarch64)
MULTI_CONSOLE_API := 1
endif
# Use generic OID definition (tbbr_oid.h)
USE_TBBR_DEFS := 1
# Disable ARM Cryptocell by default
ARM_CRYPTOCELL_INTEG := 0
$(eval $(call assert_boolean,ARM_CRYPTOCELL_INTEG))
@ -202,7 +196,6 @@ ifeq (${BL2_AT_EL3},1)
BL2_SOURCES += plat/arm/common/arm_bl2_el3_setup.c
endif
ifeq (${LOAD_IMAGE_V2},1)
# Because BL1/BL2 execute in AArch64 mode but BL32 in AArch32 we need to use
# the AArch32 descriptors.
ifeq (${JUNO_AARCH32_EL3_RUNTIME},1)
@ -215,7 +208,6 @@ BL2_SOURCES += plat/arm/common/arm_image_load.c \
ifeq (${SPD},opteed)
BL2_SOURCES += lib/optee/optee_utils.c
endif
endif
BL2U_SOURCES += drivers/delay_timer/delay_timer.c \
drivers/delay_timer/generic_delay_timer.c \

View File

@ -17,9 +17,8 @@
#include <string.h>
#include <tbbr_img_def.h>
#if LOAD_IMAGE_V2
/* Variable to store the address of TB_FW_CONFIG file */
/* Variable to store the address to TB_FW_CONFIG passed from BL1 */
static void *tb_fw_cfg_dtb;
static size_t tb_fw_cfg_dtb_size;
@ -39,9 +38,7 @@ static size_t mbedtls_heap_size;
* - To allocate space for the Mbed TLS heap --only if-- Trusted Board Boot
* is enabled.
* - This implementation requires the DTB to be present so that BL1 has a
* mechanism to pass the pointer to BL2. If LOAD_IMAGE_V2=0 then
* TB_FW_CONFIG is not present, which means that this implementation
* cannot be applied.
* mechanism to pass the pointer to BL2.
*/
int arm_get_mbedtls_heap(void **heap_addr, size_t *heap_size)
{
@ -283,5 +280,3 @@ void arm_bl2_dyn_cfg_init(void)
dyn_disable_auth();
#endif
}
#endif /* LOAD_IMAGE_V2 */
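Editorial note, not part of this change: the comment above describes the mechanism in outline only, so here is a hedged illustration of one way to pass heap details from BL1 to BL2 through a config DTB using libfdt. The node path and property names are placeholders, not the ones used by the Arm platform code:

#include <libfdt.h>
#include <stddef.h>
#include <stdint.h>

/* BL1 side: record the heap it allocated into the config DTB. */
int share_mbedtls_heap_sketch(void *dtb, uintptr_t heap_addr, size_t heap_size)
{
	int node = fdt_path_offset(dtb, "/fw-config");	/* placeholder node */

	if (node < 0)
		return node;

	int err = fdt_setprop_u64(dtb, node, "heap-addr", heap_addr);

	if (err == 0)
		err = fdt_setprop_u64(dtb, node, "heap-size", heap_size);
	return err;
}

/* BL2 side: read the same properties back instead of allocating again. */
int lookup_mbedtls_heap_sketch(const void *dtb, void **heap_addr,
			       size_t *heap_size)
{
	int len;
	int node = fdt_path_offset(dtb, "/fw-config");

	if (node < 0)
		return node;

	const fdt64_t *prop = fdt_getprop(dtb, node, "heap-addr", &len);

	if ((prop == NULL) || (len != sizeof(*prop)))
		return -FDT_ERR_NOTFOUND;
	*heap_addr = (void *)(uintptr_t)fdt64_to_cpu(*prop);

	prop = fdt_getprop(dtb, node, "heap-size", &len);
	if ((prop == NULL) || (len != sizeof(*prop)))
		return -FDT_ERR_NOTFOUND;
	*heap_size = (size_t)fdt64_to_cpu(*prop);

	return 0;
}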

View File

@ -1,99 +0,0 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arm_def.h>
#include <arm_gic.h>
#include <plat_arm.h>
#include <platform.h>
#include <platform_def.h>
/******************************************************************************
* The following function is defined as weak to allow a platform to override
* the way the Legacy GICv3 driver is initialised and used.
*****************************************************************************/
#pragma weak plat_arm_gic_driver_init
#pragma weak plat_arm_gic_init
#pragma weak plat_arm_gic_cpuif_enable
#pragma weak plat_arm_gic_cpuif_disable
#pragma weak plat_arm_gic_pcpu_init
/*
* In the GICv3 Legacy mode, the Group 1 secure interrupts are treated as Group
* 0 interrupts.
*/
static const unsigned int irq_sec_array[] = {
PLAT_ARM_G0_IRQS,
PLAT_ARM_G1S_IRQS
};
void plat_arm_gic_driver_init(void)
{
arm_gic_init(PLAT_ARM_GICC_BASE,
PLAT_ARM_GICD_BASE,
PLAT_ARM_GICR_BASE,
irq_sec_array,
ARRAY_SIZE(irq_sec_array));
}
/******************************************************************************
* ARM common helper to initialize the GIC.
*****************************************************************************/
void plat_arm_gic_init(void)
{
arm_gic_setup();
}
/******************************************************************************
* ARM common helper to enable the GIC CPU interface
*****************************************************************************/
void plat_arm_gic_cpuif_enable(void)
{
arm_gic_cpuif_setup();
}
/******************************************************************************
* ARM common helper to disable the GIC CPU interface
*****************************************************************************/
void plat_arm_gic_cpuif_disable(void)
{
arm_gic_cpuif_deactivate();
}
/******************************************************************************
* ARM common helper to initialize the per-cpu distributor in GICv2 or
* redistributor interface in GICv3.
*****************************************************************************/
void plat_arm_gic_pcpu_init(void)
{
arm_gic_pcpu_distif_setup();
}
/******************************************************************************
* Stubs for Redistributor power management. Although legacy configuration isn't
* supported, these are provided for the sake of uniform GIC API
*****************************************************************************/
void plat_arm_gic_redistif_on(void)
{
return;
}
void plat_arm_gic_redistif_off(void)
{
return;
}
/******************************************************************************
* ARM common helper to save & restore the GICv3 on resume from system suspend.
*****************************************************************************/
void plat_arm_gic_save(void)
{
return;
}
void plat_arm_gic_resume(void)
{
arm_gic_setup();
}
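Editorial note, not part of this change: the legacy wrapper deleted above is replaced by direct use of the native GICv3 driver together with an interrupt_prop_t array. A hedged sketch of that wiring; the base addresses, secure interrupt number, core-position helper, and function names are placeholders rather than values taken from this patch:

#include <gic_common.h>
#include <gicv3.h>
#include <interrupt_props.h>
#include <platform.h>
#include <platform_def.h>
#include <utils_def.h>

/* Placeholder values -- a real port takes these from its platform_def.h. */
#define PLAT_GICD_BASE_SKETCH		0x2f000000UL
#define PLAT_GICR_BASE_SKETCH		0x2f100000UL
#define PLAT_IRQ_SEC_SKETCH		56

static const interrupt_prop_t plat_interrupt_props[] = {
	INTR_PROP_DESC(PLAT_IRQ_SEC_SKETCH, GIC_HIGHEST_SEC_PRIORITY,
		       INTR_GROUP1S, GIC_INTR_CFG_LEVEL),
};

static uintptr_t plat_rdistif_base_addrs[PLATFORM_CORE_COUNT];

/* Placeholder MPIDR-to-linear-index helper with the signature the driver expects. */
static unsigned int plat_calc_core_pos_sketch(u_register_t mpidr)
{
	return (unsigned int)mpidr & 0xffU;	/* placeholder mapping */
}

static const gicv3_driver_data_t plat_gic_data = {
	.gicd_base = PLAT_GICD_BASE_SKETCH,
	.gicr_base = PLAT_GICR_BASE_SKETCH,
	.interrupt_props = plat_interrupt_props,
	.interrupt_props_num = ARRAY_SIZE(plat_interrupt_props),
	.rdistif_num = PLATFORM_CORE_COUNT,
	.rdistif_base_addrs = plat_rdistif_base_addrs,
	.mpidr_to_core_pos = plat_calc_core_pos_sketch,
};

void plat_gic_driver_init_sketch(void)
{
	gicv3_driver_init(&plat_gic_data);
}

void plat_gic_init_sketch(void)
{
	gicv3_distif_init();
	gicv3_rdistif_init(plat_my_core_pos());
	gicv3_cpuif_enable(plat_my_core_pos());
}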

View File

@ -6,7 +6,6 @@
#include <arch_helpers.h>
#include <arm_def.h>
#include <arm_gic.h>
#include <assert.h>
#include <errno.h>
#include <plat_arm.h>

View File

@ -14,21 +14,13 @@
#include "../drivers/scp/css_scp.h"
/* Weak definition may be overridden in specific CSS based platform */
#if LOAD_IMAGE_V2
#pragma weak plat_arm_bl2_handle_scp_bl2
#else
#pragma weak bl2_plat_handle_scp_bl2
#endif
/*******************************************************************************
* Transfer SCP_BL2 from Trusted RAM using the SCP Download protocol.
* Return 0 on success, -1 otherwise.
******************************************************************************/
#if LOAD_IMAGE_V2
int plat_arm_bl2_handle_scp_bl2(image_info_t *scp_bl2_image_info)
#else
int bl2_plat_handle_scp_bl2(image_info_t *scp_bl2_image_info)
#endif
{
int ret;

View File

@ -62,11 +62,6 @@
#define PLAT_MAX_PWR_LVL U(1)
#define PLAT_ARM_G1S_IRQS ARM_G1S_IRQS, \
CSS_IRQ_MHU
#define PLAT_ARM_G0_IRQS ARM_G0_IRQS
#define PLAT_ARM_G1S_IRQ_PROPS(grp) CSS_G1S_IRQ_PROPS(grp)
#define PLAT_ARM_G0_IRQ_PROPS(grp) ARM_G0_IRQ_PROPS(grp)

View File

@ -7,7 +7,6 @@
#ifndef __SGI_PLAT_CONFIG_H__
#define __SGI_PLAT_CONFIG_H__
#include <arm_gic.h>
#include <ccn.h>
#include <gicv3.h>

View File

@ -6,8 +6,6 @@
CSS_USE_SCMI_SDS_DRIVER := 1
ENABLE_PLAT_COMPAT := 0
CSS_ENT_BASE := plat/arm/css/sgi
RAS_EXTENSION := 0

View File

@ -65,7 +65,7 @@ const mmap_region_t plat_arm_mmap[] = {
#if ENABLE_SPM
ARM_SP_IMAGE_MMAP,
#endif
#if TRUSTED_BOARD_BOOT && LOAD_IMAGE_V2 && !BL2_AT_EL3
#if TRUSTED_BOARD_BOOT && !BL2_AT_EL3
ARM_MAP_BL1_RW,
#endif
{0}
@ -148,7 +148,7 @@ const struct secure_partition_boot_info *plat_get_secure_partition_boot_info(
}
#endif /* ENABLE_SPM && defined(IMAGE_BL31) */
#if TRUSTED_BOARD_BOOT && LOAD_IMAGE_V2
#if TRUSTED_BOARD_BOOT
int plat_get_mbedtls_heap(void **heap_addr, size_t *heap_size)
{
assert(heap_addr != NULL);

View File

@ -7,7 +7,6 @@
#ifndef __SGM_PLAT_CONFIG_H__
#define __SGM_PLAT_CONFIG_H__
#include <arm_gic.h>
#include <ccn.h>
#include <gicv3.h>

Some files were not shown because too many files have changed in this diff.