AArch32: Add generic changes in BL1

This patch adds the generic changes needed in BL1 to support the
AArch32 state. New AArch32-specific assembly/C files are introduced,
and some files are moved into AArch32/64-specific folders.
BL1 for AArch64 is refactored but remains functionally identical.
In the AArch32 state, BL1 executes in Secure Monitor mode.

NOTE: In the AArch32 state, BL1 handles ONLY the BL1_RUN_IMAGE SMC.

Change-Id: I6e2296374c7efbf3cf2aa1a0ce8de0732d8c98a5
Author: Yatharth Kochar, 2016-06-28 17:07:09 +01:00
Parent: 1a0a3f0622
Commit: f3b4914be3
10 changed files with 478 additions and 26 deletions
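
For orientation before the diffs: the new AArch32 SMC handler added below expects the RUN_IMAGE function ID in r0 and the address of an entry_point_info_t structure in r1, issued from the Secure world with the MMU and caches already disabled. A minimal caller-side sketch of that contract follows; the next_image_ep_info symbol is only an assumed placeholder for a descriptor the caller has prepared, not something introduced by this patch.

        /*
         * Illustrative only: how a secure AArch32 caller (e.g. BL2) might
         * hand control back to BL1. Assumes the MMU and caches are already
         * off and next_image_ep_info holds a populated entry_point_info_t.
         */
        ldr     r1, =next_image_ep_info  /* entry point descriptor for the next image */
        mov     r0, #BL1_SMC_RUN_IMAGE   /* the only SMC BL1 handles in AArch32 state */
        smc     #0                       /* trap to the Secure Monitor vector in BL1 */

The handler checks r0, loads the PC, SPSR and boot arguments from the descriptor, and erets to the next image.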

@@ -0,0 +1,38 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*******************************************************************************
* TODO: Function that does the first bit of architectural setup.
******************************************************************************/
void bl1_arch_setup(void)
{
}

@@ -0,0 +1,177 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <assert.h>
#include <context.h>
#include <context_mgmt.h>
#include <debug.h>
#include <platform.h>
#include <smcc_helpers.h>

/*
 * Following arrays will be used for context management.
 * There are 2 instances, for the Secure and Non-Secure contexts.
 */
static cpu_context_t bl1_cpu_context[2];
static smc_ctx_t bl1_smc_context[2];

/* Following contains the next cpu context pointer. */
static void *bl1_next_cpu_context_ptr;

/* Following contains the next smc context pointer. */
static void *bl1_next_smc_context_ptr;

/* Following functions are used for SMC context handling */
void *smc_get_ctx(int security_state)
{
        assert(sec_state_is_valid(security_state));
        return &bl1_smc_context[security_state];
}

void smc_set_next_ctx(int security_state)
{
        assert(sec_state_is_valid(security_state));
        bl1_next_smc_context_ptr = &bl1_smc_context[security_state];
}

void *smc_get_next_ctx(void)
{
        return bl1_next_smc_context_ptr;
}

/* Following functions are used for CPU context handling */
void *cm_get_context(uint32_t security_state)
{
        assert(sec_state_is_valid(security_state));
        return &bl1_cpu_context[security_state];
}

void cm_set_next_context(void *cpu_context)
{
        assert(cpu_context);
        bl1_next_cpu_context_ptr = cpu_context;
}

void *cm_get_next_context(void)
{
        return bl1_next_cpu_context_ptr;
}
/*******************************************************************************
 * Following function copies GP regs r0-r4, lr and spsr,
 * from the CPU context to the SMC context structures.
 ******************************************************************************/
static void copy_cpu_ctx_to_smc_ctx(const regs_t *cpu_reg_ctx,
                smc_ctx_t *next_smc_ctx)
{
        next_smc_ctx->r0 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R0);
        next_smc_ctx->r1 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R1);
        next_smc_ctx->r2 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R2);
        next_smc_ctx->r3 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R3);
        next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
        next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
}

/*******************************************************************************
 * Following function flushes the SMC & CPU context pointer and its data.
 ******************************************************************************/
static void flush_smc_and_cpu_ctx(void)
{
        flush_dcache_range((uintptr_t)&bl1_next_smc_context_ptr,
                sizeof(bl1_next_smc_context_ptr));
        flush_dcache_range((uintptr_t)bl1_next_smc_context_ptr,
                sizeof(smc_ctx_t));

        flush_dcache_range((uintptr_t)&bl1_next_cpu_context_ptr,
                sizeof(bl1_next_cpu_context_ptr));
        flush_dcache_range((uintptr_t)bl1_next_cpu_context_ptr,
                sizeof(cpu_context_t));
}
/*******************************************************************************
 * This function prepares the context for Secure/Normal world images.
 * Normal world images are transitioned to HYP(if supported) else SVC.
 ******************************************************************************/
void bl1_prepare_next_image(unsigned int image_id)
{
        unsigned int security_state;
        image_desc_t *image_desc;
        entry_point_info_t *next_bl_ep;

        /* Get the image descriptor. */
        image_desc = bl1_plat_get_image_desc(image_id);
        assert(image_desc);

        /* Get the entry point info. */
        next_bl_ep = &image_desc->ep_info;

        /* Get the image security state. */
        security_state = GET_SECURITY_STATE(next_bl_ep->h.attr);

        /* Prepare the SPSR for the next BL image. */
        if (security_state == SECURE) {
                next_bl_ep->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
                        SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
        } else {
                /* Use HYP mode if supported else use SVC. */
                if (GET_VIRT_EXT(read_id_pfr1()) == MODE32_hyp) {
                        next_bl_ep->spsr = SPSR_MODE32(MODE32_hyp, SPSR_T_ARM,
                                SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
                } else {
                        next_bl_ep->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
                                SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
                }
        }

        /* Allow platform to make change */
        bl1_plat_set_ep_info(image_id, next_bl_ep);

        /* Prepare the cpu context for the next BL image. */
        cm_init_my_context(next_bl_ep);
        cm_prepare_el3_exit(security_state);
        cm_set_next_context(cm_get_context(security_state));

        /* Prepare the smc context for the next BL image. */
        smc_set_next_ctx(security_state);
        copy_cpu_ctx_to_smc_ctx(get_regs_ctx(cm_get_next_context()),
                smc_get_next_ctx());

        /*
         * Flush the SMC & CPU context and the (next)pointers,
         * to access them after caches are disabled.
         */
        flush_smc_and_cpu_ctx();

        /* Indicate that image is in execution state. */
        image_desc->state = IMAGE_STATE_EXECUTED;

        print_entry_point_info(next_bl_ep);
}

@@ -0,0 +1,124 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <el3_common_macros.S>
#include <smcc_helpers.h>
#include <smcc_macros.S>

        .globl  bl1_vector_table
        .globl  bl1_entrypoint

        /* -----------------------------------------------------
         * Setup the vector table to support SVC & MON mode.
         * -----------------------------------------------------
         */
vector_base bl1_vector_table
        b       bl1_entrypoint
        b       report_exception        /* Undef */
        b       bl1_aarch32_smc_handler /* SMC call */
        b       report_exception        /* Prefetch abort */
        b       report_exception        /* Data abort */
        b       report_exception        /* Reserved */
        b       report_exception        /* IRQ */
        b       report_exception        /* FIQ */

        /* -----------------------------------------------------
         * bl1_entrypoint() is the entry point into the trusted
         * firmware code when a cpu is released from warm or
         * cold reset.
         * -----------------------------------------------------
         */
func bl1_entrypoint
        /* ---------------------------------------------------------------------
         * If the reset address is programmable then bl1_entrypoint() is
         * executed only on the cold boot path. Therefore, we can skip the warm
         * boot mailbox mechanism.
         * ---------------------------------------------------------------------
         */
        el3_entrypoint_common                                   \
                _set_endian=1                                   \
                _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS  \
                _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU      \
                _init_memory=1                                  \
                _init_c_runtime=1                               \
                _exception_vectors=bl1_vector_table

        /* -----------------------------------------------------
         * Perform early platform setup & platform
         * specific early arch. setup e.g. mmu setup
         * -----------------------------------------------------
         */
        bl      bl1_early_platform_setup
        bl      bl1_plat_arch_setup

        /* -----------------------------------------------------
         * Jump to main function.
         * -----------------------------------------------------
         */
        bl      bl1_main

        /* -----------------------------------------------------
         * Jump to next image.
         * -----------------------------------------------------
         */

        /*
         * MMU needs to be disabled because both BL1 and BL2 execute
         * in PL1, and therefore share the same address space.
         * BL2 will initialize the address space according to its
         * own requirement.
         */
        bl      disable_mmu_icache_secure
        stcopr  r0, TLBIALL
        dsb     sy
        isb

        /* Get the cpu_context for next BL image */
        bl      cm_get_next_context

        /* Restore the SCR */
        ldr     r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
        stcopr  r2, SCR
        isb

        /*
         * Get the smc_context for next BL image,
         * program the gp/system registers and exit
         * secure monitor mode
         */
        bl      smc_get_next_ctx
        smcc_restore_gp_mode_regs
        eret
endfunc bl1_entrypoint

@@ -0,0 +1,96 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <bl1.h>
#include <bl_common.h>

        .globl  bl1_aarch32_smc_handler

func bl1_aarch32_smc_handler
        /* ------------------------------------------------
         * SMC in BL1 is handled assuming that the MMU is
         * turned off by BL2.
         * ------------------------------------------------
         */

        /* ----------------------------------------------
         * Only RUN_IMAGE SMC is supported.
         * ----------------------------------------------
         */
        mov     r8, #BL1_SMC_RUN_IMAGE
        cmp     r8, r0
        blne    report_exception

        /* ------------------------------------------------
         * Make sure only Secure world reaches here.
         * ------------------------------------------------
         */
        ldcopr  r8, SCR
        tst     r8, #SCR_NS_BIT
        blne    report_exception

        /* ---------------------------------------------------------------------
         * Pass control to next secure image.
         * Here it expects r1 to contain the address of a entry_point_info_t
         * structure describing the BL entrypoint.
         * ---------------------------------------------------------------------
         */
        mov     r8, r1
        mov     r0, r1
        bl      bl1_print_next_bl_ep_info

#if SPIN_ON_BL1_EXIT
        bl      print_debug_loop_message
debug_loop:
        b       debug_loop
#endif

        mov     r0, r8
        bl      bl1_plat_prepare_exit

        stcopr  r0, TLBIALL
        dsb     sy
        isb

        /*
         * Extract PC and SPSR based on struct `entry_point_info_t`
         * and load it in LR and SPSR registers respectively.
         */
        ldr     lr, [r8, #ENTRY_POINT_INFO_PC_OFFSET]
        ldr     r1, [r8, #(ENTRY_POINT_INFO_PC_OFFSET + 4)]
        msr     spsr, r1

        add     r8, r8, #ENTRY_POINT_INFO_ARGS_OFFSET
        ldm     r8, {r0, r1, r2, r3}

        eret
endfunc bl1_aarch32_smc_handler

@@ -192,15 +192,15 @@ func smc_handler64
        mov     sp, x30

        /* ---------------------------------------------------------------------
-        * Pass EL3 control to BL31.
+        * Pass EL3 control to next BL image.
         * Here it expects X1 with the address of a entry_point_info_t
-        * structure describing the BL31 entrypoint.
+        * structure describing the next BL image entrypoint.
         * ---------------------------------------------------------------------
         */
        mov     x20, x1

        mov     x0, x20
-       bl      bl1_print_bl31_ep_info
+       bl      bl1_print_next_bl_ep_info

        ldp     x0, x1, [x20, #ENTRY_POINT_INFO_PC_OFFSET]
        msr     elr_el3, x0

@@ -29,15 +29,19 @@
#
BL1_SOURCES             +=      bl1/bl1_main.c                          \
-                               bl1/aarch64/bl1_arch_setup.c            \
-                               bl1/aarch64/bl1_entrypoint.S            \
-                               bl1/aarch64/bl1_exceptions.S            \
-                               bl1/bl1_context_mgmt.c                  \
-                               lib/cpus/aarch64/cpu_helpers.S          \
-                               lib/el3_runtime/aarch64/context.S       \
-                               lib/el3_runtime/aarch64/context_mgmt.c  \
+                               bl1/${ARCH}/bl1_arch_setup.c            \
+                               bl1/${ARCH}/bl1_context_mgmt.c          \
+                               bl1/${ARCH}/bl1_entrypoint.S            \
+                               bl1/${ARCH}/bl1_exceptions.S            \
+                               lib/cpus/${ARCH}/cpu_helpers.S          \
+                               lib/el3_runtime/${ARCH}/context_mgmt.c  \
                                plat/common/plat_bl1_common.c

+ifeq (${ARCH},aarch64)
+BL1_SOURCES             +=      lib/el3_runtime/aarch64/context.S
+endif
+
ifeq (${TRUSTED_BOARD_BOOT},1)
BL1_SOURCES             +=      bl1/bl1_fwu.c
endif

@@ -107,15 +107,20 @@ void bl1_main(void)
        NOTICE("BL1: %s\n", version_string);
        NOTICE("BL1: %s\n", build_message);

-       INFO("BL1: RAM 0x%lx - 0x%lx\n", BL1_RAM_BASE, BL1_RAM_LIMIT);
+       INFO("BL1: RAM %p - %p\n", (void *)BL1_RAM_BASE,
+                                       (void *)BL1_RAM_LIMIT);

#if DEBUG
-       unsigned long val;
+       u_register_t val;

        /*
         * Ensure that MMU/Caches and coherency are turned on
         */
+#ifdef AARCH32
+       val = read_sctlr();
+#else
        val = read_sctlr_el3();
+#endif
        assert(val & SCTLR_M_BIT);
        assert(val & SCTLR_C_BIT);
        assert(val & SCTLR_I_BIT);
@@ -223,21 +228,25 @@ void bl1_load_bl2(void)
        bl1_init_bl2_mem_layout(bl1_tzram_layout, bl2_tzram_layout);

-       ep_info->args.arg1 = (unsigned long)bl2_tzram_layout;
+       ep_info->args.arg1 = (uintptr_t)bl2_tzram_layout;

        NOTICE("BL1: Booting BL2\n");
-       VERBOSE("BL1: BL2 memory layout address = 0x%llx\n",
-               (unsigned long long) bl2_tzram_layout);
+       VERBOSE("BL1: BL2 memory layout address = %p\n",
+               (void *) bl2_tzram_layout);
}

/*******************************************************************************
- * Function called just before handing over to BL31 to inform the user about
- * the boot progress. In debug mode, also print details about the BL31 image's
- * execution context.
+ * Function called just before handing over to the next BL to inform the user
+ * about the boot progress. In debug mode, also print details about the BL
+ * image's execution context.
 ******************************************************************************/
-void bl1_print_bl31_ep_info(const entry_point_info_t *bl31_ep_info)
+void bl1_print_next_bl_ep_info(const entry_point_info_t *bl_ep_info)
{
+#ifdef AARCH32
+       NOTICE("BL1: Booting BL32\n");
+#else
        NOTICE("BL1: Booting BL31\n");
-       print_entry_point_info(bl31_ep_info);
+#endif /* AARCH32 */
+       print_entry_point_info(bl_ep_info);
}

#if SPIN_ON_BL1_EXIT

@@ -37,13 +37,13 @@
 * Declarations of linker defined symbols which will tell us where BL1 lives
 * in Trusted ROM and RAM
 ******************************************************************************/
-extern uint64_t __BL1_ROM_END__;
-#define BL1_ROM_END     (uint64_t)(&__BL1_ROM_END__)
+extern uintptr_t __BL1_ROM_END__;
+#define BL1_ROM_END     (uintptr_t)(&__BL1_ROM_END__)

-extern uint64_t __BL1_RAM_START__;
-extern uint64_t __BL1_RAM_END__;
-#define BL1_RAM_BASE    (uint64_t)(&__BL1_RAM_START__)
-#define BL1_RAM_LIMIT   (uint64_t)(&__BL1_RAM_END__)
+extern uintptr_t __BL1_RAM_START__;
+extern uintptr_t __BL1_RAM_END__;
+#define BL1_RAM_BASE    (uintptr_t)(&__BL1_RAM_START__)
+#define BL1_RAM_LIMIT   (uintptr_t)(&__BL1_RAM_END__)

/******************************************
 * Function prototypes

@@ -103,5 +103,9 @@ static inline void cm_set_next_context(void *context)
                "msr    spsel, #0\n"
                : : "r" (context));
}
+#else
+void *cm_get_next_context(void);
+#endif /* AARCH32 */

#endif /* __CM_H__ */