AArch32: Refactor SP_MIN to support RESET_TO_SP_MIN

This patch uses the `el3_entrypoint_common` macro to initialize
CPU registers in the SP_MIN entrypoint.S file, on both the cold and
warm boot paths. It also adds conditional compilation to the cold
and warm boot entry paths, based on RESET_TO_SP_MIN.

Change-Id: Id493ca840dc7b9e26948dc78ee928e9fdb76b9e4
Yatharth Kochar 2016-06-30 15:02:31 +01:00
parent 6fe8aa2fa6
commit 3bdf0e5df2
2 changed files with 120 additions and 168 deletions
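
The refactor works because `el3_entrypoint_common` gates each reset-time step behind a macro parameter, so each caller asks for only the work it needs. The sketch below is illustrative only and is not the actual macro body from `el3_common_macros.S`; the helper names `plat_is_my_cpu_primary` and `plat_secondary_cold_boot_setup` are assumed here for illustration, while the other symbols all appear in the diff below.

```asm
	/*
	 * Illustrative sketch only (not the real macro body): the shape of an
	 * el3_entrypoint_common-style macro, showing how each parameter used
	 * in this patch gates one reset-time step.
	 */
	.macro	el3_entrypoint_sketch _set_endian, _warm_boot_mailbox, \
		_secondary_cold_boot, _init_memory, _init_c_runtime, \
		_exception_vectors

	.if \_set_endian
		/* Program the CPU endianness before any memory access. */
		ldcopr	r0, SCTLR
		bic	r0, r0, #SCTLR_EE_BIT
		stcopr	r0, SCTLR
		isb
	.endif

	.if \_warm_boot_mailbox
		/* Jump to the warm boot entrypoint if one has been programmed. */
		bl	plat_get_my_entrypoint
		cmp	r0, #0
		bxne	r0
	.endif

	.if \_secondary_cold_boot
		/* Park secondary CPUs; only the primary continues the cold boot. */
		bl	plat_is_my_cpu_primary
		cmp	r0, #0
		beq	plat_secondary_cold_boot_setup
	.endif

	.if \_init_memory
		bl	platform_mem_init
	.endif

	.if \_init_c_runtime
		/* Zero .bss and give this CPU a stack for the C runtime. */
		ldr	r0, =__BSS_START__
		ldr	r1, =__BSS_SIZE__
		bl	zeromem
		bl	plat_set_my_stack
	.endif

	/* Install the exception vectors supplied by the caller. */
	ldr	r0, =\_exception_vectors
	stcopr	r0, VBAR
	stcopr	r0, MVBAR
	isb
	.endm
```

With this shape, the cold boot entrypoint can request full initialisation while the warm boot entrypoint passes mostly zeros, which is how the two `el3_entrypoint_common` invocations in the diff below differ.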

bl32/sp_min/aarch32/entrypoint.S

@@ -32,6 +32,7 @@
 #include <asm_macros.S>
 #include <bl_common.h>
 #include <context.h>
+#include <el3_common_macros.S>
 #include <runtime_svc.h>
 #include <smcc_helpers.h>
 #include <smcc_macros.S>
@@ -41,7 +42,8 @@
 	.globl	sp_min_entrypoint
 	.globl	sp_min_warm_entrypoint
-func sp_min_vector_table
+vector_base sp_min_vector_table
 	b	sp_min_entrypoint
 	b	plat_panic_handler	/* Undef */
 	b	handle_smc		/* Syscall */
@@ -50,185 +52,70 @@ func sp_min_vector_table
 	b	plat_panic_handler	/* Reserved */
 	b	plat_panic_handler	/* IRQ */
 	b	plat_panic_handler	/* FIQ */
-endfunc sp_min_vector_table
-func handle_smc
-	smcc_save_gp_mode_regs
-	/* r0 points to smc_context */
-	mov	r2, r0				/* handle */
-	ldcopr	r0, SCR
-	/* Save SCR in stack */
-	push	{r0}
-	and	r3, r0, #SCR_NS_BIT		/* flags */
-	/* Switch to Secure Mode*/
-	bic	r0, #SCR_NS_BIT
-	stcopr	r0, SCR
-	isb
-	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
-	/* Check whether an SMC64 is issued */
-	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
-	beq	1f	/* SMC32 is detected */
-	mov	r0, #SMC_UNK
-	str	r0, [r2, #SMC_CTX_GPREG_R0]
-	mov	r0, r2
-	b	2f	/* Skip handling the SMC */
-1:
-	mov	r1, #0				/* cookie */
-	bl	handle_runtime_svc
-2:
-	/* r0 points to smc context */
-	/* Restore SCR from stack */
-	pop	{r1}
-	stcopr	r1, SCR
-	isb
-	b	sp_min_exit
-endfunc handle_smc
 /*
  * The Cold boot/Reset entrypoint for SP_MIN
  */
 func sp_min_entrypoint
-	/*
-	 * The caches and TLBs are disabled at reset. If any implementation
-	 * allows the caches/TLB to be hit while they are disabled, ensure
-	 * that they are invalidated here
-	 */
-	/* Make sure we are in Secure Mode*/
-	ldcopr	r0, SCR
-	bic	r0, #SCR_NS_BIT
-	stcopr	r0, SCR
-	isb
-	/* Switch to monitor mode */
-	cps	#MODE32_mon
-	isb
-	/*
-	 * Set sane values for NS SCTLR as well.
-	 * Switch to non secure mode for this.
-	 */
-	ldr	r0, =(SCTLR_RES1)
-	ldcopr	r1, SCR
-	orr	r2, r1, #SCR_NS_BIT
-	stcopr	r2, SCR
-	isb
-	ldcopr	r2, SCTLR
-	orr	r0, r0, r2
-	stcopr	r0, SCTLR
-	isb
-	stcopr	r1, SCR
-	isb
-	/*
-	 * Set the CPU endianness before doing anything that might involve
-	 * memory reads or writes.
-	 */
-	ldcopr	r0, SCTLR
-	bic	r0, r0, #SCTLR_EE_BIT
-	stcopr	r0, SCTLR
-	isb
-	/* Run the CPU Specific Reset handler */
-	bl	reset_handler
-	/*
-	 * Enable the instruction cache and data access
-	 * alignment checks
-	 */
-	ldcopr	r0, SCTLR
-	ldr	r1, =(SCTLR_RES1 | SCTLR_A_BIT | SCTLR_I_BIT)
-	orr	r0, r0, r1
-	stcopr	r0, SCTLR
-	isb
-	/* Set the vector tables */
-	ldr	r0, =sp_min_vector_table
-	stcopr	r0, VBAR
-	stcopr	r0, MVBAR
-	isb
-	/*
-	 * Enable the SIF bit to disable instruction fetches
-	 * from Non-secure memory.
-	 */
-	ldcopr	r0, SCR
-	orr	r0, r0, #SCR_SIF_BIT
-	stcopr	r0, SCR
-	/*
-	 * Enable the SError interrupt now that the exception vectors have been
-	 * setup.
-	 */
-	cpsie	a
-	isb
-	/* Enable access to Advanced SIMD registers */
-	ldcopr	r0, NSACR
-	bic	r0, r0, #NSASEDIS_BIT
-	orr	r0, r0, #(NASCR_CP10_BIT | NASCR_CP11_BIT)
-	stcopr	r0, NSACR
-	isb
-	/*
-	 * Enable access to Advanced SIMD, Floating point and to the Trace
-	 * functionality as well.
-	 */
-	ldcopr	r0, CPACR
-	bic	r0, r0, #ASEDIS_BIT
-	bic	r0, r0, #TRCDIS_BIT
-	orr	r0, r0, #CPACR_ENABLE_FP_ACCESS
-	stcopr	r0, CPACR
-	isb
-	vmrs	r0, FPEXC
-	orr	r0, r0, #FPEXC_EN_BIT
-	vmsr	FPEXC, r0
-	/* Detect whether Warm or Cold boot */
-	bl	plat_get_my_entrypoint
-	cmp	r0, #0
-	/* If warm boot detected, jump to warm boot entry */
-	bxne	r0
-	/* Setup C runtime stack */
-	bl	plat_set_my_stack
-	/* Perform platform specific memory initialization */
-	bl	platform_mem_init
-	/* Initialize the C Runtime Environment */
-	/*
-	 * Invalidate the RW memory used by SP_MIN image. This includes
-	 * the data and NOBITS sections. This is done to safeguard against
-	 * possible corruption of this memory by dirty cache lines in a system
-	 * cache as a result of use by an earlier boot loader stage.
-	 */
-	ldr	r0, =__RW_START__
-	ldr	r1, =__RW_END__
-	sub	r1, r1, r0
-	bl	inv_dcache_range
-	ldr	r0, =__BSS_START__
-	ldr	r1, =__BSS_SIZE__
-	bl	zeromem
-#if USE_COHERENT_MEM
-	ldr	r0, =__COHERENT_RAM_START__
-	ldr	r1, =__COHERENT_RAM_UNALIGNED_SIZE__
-	bl	zeromem
-#endif
-	/* Perform platform specific early arch. setup */
+#if !RESET_TO_SP_MIN
+	/* ---------------------------------------------------------------
+	 * Preceding bootloader has populated r0 with a pointer to a
+	 * 'bl_params_t' structure & r1 with a pointer to platform
+	 * specific structure
+	 * ---------------------------------------------------------------
+	 */
+	mov	r11, r0
+	mov	r12, r1
+	/* ---------------------------------------------------------------------
+	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
+	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
+	 * and primary/secondary CPU logic should not be executed in this case.
+	 *
+	 * Also, assume that the previous bootloader has already set up the CPU
+	 * endianness and has initialised the memory.
+	 * ---------------------------------------------------------------------
+	 */
+	el3_entrypoint_common \
+		_set_endian=0 \
+		_warm_boot_mailbox=0 \
+		_secondary_cold_boot=0 \
+		_init_memory=0 \
+		_init_c_runtime=1 \
+		_exception_vectors=sp_min_vector_table
+	/* ---------------------------------------------------------------------
+	 * Relay the previous bootloader's arguments to the platform layer
+	 * ---------------------------------------------------------------------
+	 */
+	mov	r0, r11
+	mov	r1, r12
+#else
+	/* ---------------------------------------------------------------------
+	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
+	 * sp_min_entrypoint() is executed only on the cold boot path so we can
+	 * skip the warm boot mailbox mechanism.
+	 * ---------------------------------------------------------------------
+	 */
+	el3_entrypoint_common \
+		_set_endian=1 \
+		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS \
+		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU \
+		_init_memory=1 \
+		_init_c_runtime=1 \
+		_exception_vectors=sp_min_vector_table
+	/* ---------------------------------------------------------------------
+	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
+	 * to run so there's no argument to relay from a previous bootloader.
+	 * Zero the arguments passed to the platform layer to reflect that.
+	 * ---------------------------------------------------------------------
+	 */
+	mov	r0, #0
+	mov	r1, #0
+#endif /* RESET_TO_SP_MIN */
 	bl	sp_min_early_platform_setup
 	bl	sp_min_plat_arch_setup
@@ -270,13 +157,76 @@ func sp_min_entrypoint
 	b	sp_min_exit
 endfunc sp_min_entrypoint
+/*
+ * SMC handling function for SP_MIN.
+ */
+func handle_smc
+	smcc_save_gp_mode_regs
+	/* r0 points to smc_context */
+	mov	r2, r0				/* handle */
+	ldcopr	r0, SCR
+	/* Save SCR in stack */
+	push	{r0}
+	and	r3, r0, #SCR_NS_BIT		/* flags */
+	/* Switch to Secure Mode*/
+	bic	r0, #SCR_NS_BIT
+	stcopr	r0, SCR
+	isb
+	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
+	/* Check whether an SMC64 is issued */
+	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
+	beq	1f	/* SMC32 is detected */
+	mov	r0, #SMC_UNK
+	str	r0, [r2, #SMC_CTX_GPREG_R0]
+	mov	r0, r2
+	b	2f	/* Skip handling the SMC */
+1:
+	mov	r1, #0				/* cookie */
+	bl	handle_runtime_svc
+2:
+	/* r0 points to smc context */
+	/* Restore SCR from stack */
+	pop	{r1}
+	stcopr	r1, SCR
+	isb
+	b	sp_min_exit
+endfunc handle_smc
 /*
  * The Warm boot entrypoint for SP_MIN.
  */
 func sp_min_warm_entrypoint
-	/* Setup C runtime stack */
-	bl	plat_set_my_stack
+	/*
+	 * On the warm boot path, most of the EL3 initialisations performed by
+	 * 'el3_entrypoint_common' must be skipped:
+	 *
+	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
+	 *   programming the reset address do we need to set the CPU endianness.
+	 *   In other cases, we assume this has been taken care by the
+	 *   entrypoint code.
+	 *
+	 * - No need to determine the type of boot, we know it is a warm boot.
+	 *
+	 * - Do not try to distinguish between primary and secondary CPUs, this
+	 *   notion only exists for a cold boot.
+	 *
+	 * - No need to initialise the memory or the C runtime environment,
+	 *   it has been done once and for all on the cold boot path.
+	 */
+	el3_entrypoint_common \
+		_set_endian=PROGRAMMABLE_RESET_ADDRESS \
+		_warm_boot_mailbox=0 \
+		_secondary_cold_boot=0 \
+		_init_memory=0 \
+		_init_c_runtime=0 \
+		_exception_vectors=sp_min_vector_table
 	/* --------------------------------------------
 	 * Enable the MMU with the DCache disabled. It

bl32/sp_min/sp_min.ld.S

@@ -50,6 +50,7 @@ SECTIONS
         __TEXT_START__ = .;
         *entrypoint.o(.text*)
         *(.text*)
+        *(.vectors)
         . = NEXT(4096);
         __TEXT_END__ = .;
     } >RAM
@@ -98,6 +99,7 @@ SECTIONS
         KEEP(*(cpu_ops))
         __CPU_OPS_END__ = .;
+        *(.vectors)
         __RO_END_UNALIGNED__ = .;
         /*