AArch32: Common changes needed for BL1/BL2

This patch adds common changes to support the AArch32 state in
BL1 and BL2. The changes are as follows:

* Added functions for disabling the MMU in the Secure state.
* Added an AArch32-specific SMC function.
* Added semihosting support.
* Added reporting of unhandled exceptions.
* Added uniprocessor stack support.
* Added `el3_entrypoint_common` macro that can be
  shared by the BL1 and BL32 (SP_MIN) BL stages. It is
  similar to its AArch64 counterpart, the main
  difference being the assembly instructions and
  registers relevant to the AArch32 execution state.
* Enabled the `LOAD_IMAGE_V2` flag in the Makefile for
  `ARCH=aarch32` and added a check to ensure that the
  platform has not overridden it to disable it.

Change-Id: I33c6d8dfefb2e5d142fdfd06a0f4a7332962e1a3
Yatharth Kochar 2016-06-28 16:58:26 +01:00
parent a8aa7fec1d
commit 1a0a3f0622
18 changed files with 575 additions and 10 deletions

View File

@@ -188,6 +188,10 @@ ifneq (${GENERATE_COT},0)
FWU_FIP_DEPS += fwu_certificates
endif
# For AArch32, enable new version of image loading.
ifeq (${ARCH},aarch32)
LOAD_IMAGE_V2 := 1
endif
################################################################################
# Toolchain
@@ -364,6 +368,14 @@ ifeq (${LOAD_IMAGE_V2},1)
endif
endif
# For AArch32, LOAD_IMAGE_V2 must be enabled.
ifeq (${ARCH},aarch32)
ifeq (${LOAD_IMAGE_V2}, 0)
$(error "For AArch32, LOAD_IMAGE_V2 must be enabled.")
endif
endif
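# Illustration (not part of this patch): a command line that forces the
# flag off now trips the check above, e.g.
#
#   make ARCH=aarch32 PLAT=<platform> LOAD_IMAGE_V2=0 all
#
# stops with the $(error) message "For AArch32, LOAD_IMAGE_V2 must be enabled."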
################################################################################
# Process platform overrideable behaviour
################################################################################

View File

@@ -32,6 +32,7 @@
#include <asm_macros.S>
.globl do_panic
.globl report_exception
/***********************************************************
* The common implementation of do_panic for all BL stages
@@ -40,3 +41,14 @@ func do_panic
b plat_panic_handler
endfunc do_panic
/***********************************************************
* This function is called from the vector table for
* unhandled exceptions. It reads the current mode and
* passes it to the platform.
***********************************************************/
func report_exception
mrs r0, cpsr
and r0, #MODE32_MASK
bl plat_report_exception
bl plat_panic_handler
endfunc report_exception

View File

@@ -776,11 +776,15 @@ called in the following circumstances:
The default implementation doesn't do anything, to avoid making assumptions
about the way the platform displays its status information.
This function receives the exception type as its argument. Possible values for
exception types are listed in the [include/common/bl_common.h] header file.
Note that these constants are not related to any architectural exception code;
they are just an ARM Trusted Firmware convention.
For AArch64, this function receives the exception type as its argument.
Possible values for exception types are listed in the
[include/common/bl_common.h] header file. Note that these constants are not
related to any architectural exception code; they are just an ARM Trusted
Firmware convention.
For AArch32, this function receives the exception mode as its argument.
Possible values for exception modes are listed in the
[include/lib/aarch32/arch.h] header file.
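As an illustration, an AArch32 platform port might override the function to
decode the mode as follows (a sketch only: the `MODE32_*` constants come from
the referenced header, everything else is hypothetical):

    void plat_report_exception(unsigned int exception_mode)
    {
        switch (exception_mode) {
        case MODE32_und:
            /* Undefined instruction: e.g. drive a platform LED */
            break;
        case MODE32_abt:
            /* Prefetch or data abort */
            break;
        default:
            break;
        }
    }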
### Function : plat_reset_handler()
@@ -2234,6 +2238,7 @@ _Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved._
[plat/common/aarch64/platform_up_stack.S]: ../plat/common/aarch64/platform_up_stack.S
[plat/arm/board/fvp/fvp_pm.c]: ../plat/arm/board/fvp/fvp_pm.c
[include/common/bl_common.h]: ../include/common/bl_common.h
[include/lib/aarch32/arch.h]: ../include/lib/aarch32/arch.h
[include/plat/arm/common/arm_def.h]: ../include/plat/arm/common/arm_def.h
[include/plat/common/common_def.h]: ../include/plat/common/common_def.h
[include/plat/common/platform.h]: ../include/plat/common/platform.h

View File

@@ -69,6 +69,16 @@
lsl \reg, \reg, \tmp
.endm
/*
* Declare the exception vector table, enforcing it is aligned on a
* 32 byte boundary.
*/
.macro vector_base label
.section .vectors, "ax"
.align 5
\label:
.endm
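/*
 * Illustrative usage (not part of this patch; the label and handler
 * names are hypothetical). The eight 4-byte entries follow the
 * standard AArch32 vector layout, filling the 32 bytes exactly:
 *
 *	vector_base stage_exceptions
 *	b	stage_entrypoint	@ Reset
 *	b	report_exception	@ Undefined instruction
 *	b	smc_handler		@ Supervisor/Monitor Call
 *	b	report_exception	@ Prefetch abort
 *	b	report_exception	@ Data abort
 *	b	report_exception	@ Reserved
 *	b	report_exception	@ IRQ
 *	b	report_exception	@ FIQ
 */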
/*
* This macro calculates the base address of the current CPU's multi-
* processor (MP) stack using the plat_my_core_pos() index, the name of

View File

@@ -0,0 +1,278 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __EL3_COMMON_MACROS_S__
#define __EL3_COMMON_MACROS_S__
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
/*
* Helper macro to initialise EL3 registers we care about.
*/
.macro el3_arch_init_common _exception_vectors
/* ---------------------------------------------------------------------
* Enable the instruction cache and alignment checks
* ---------------------------------------------------------------------
*/
ldr r1, =(SCTLR_RES1 | SCTLR_I_BIT | SCTLR_A_BIT)
ldcopr r0, SCTLR
orr r0, r0, r1
stcopr r0, SCTLR
isb
/* ---------------------------------------------------------------------
* Set the exception vectors (VBAR/MVBAR).
* ---------------------------------------------------------------------
*/
ldr r0, =\_exception_vectors
stcopr r0, VBAR
stcopr r0, MVBAR
isb
/* -----------------------------------------------------
* Enable the SIF bit to disable instruction fetches
* from Non-secure memory.
* -----------------------------------------------------
*/
ldcopr r0, SCR
orr r0, r0, #SCR_SIF_BIT
stcopr r0, SCR
/* -----------------------------------------------------
* Enable asynchronous data aborts now that the
* exception vectors have been set up.
* -----------------------------------------------------
*/
cpsie a
isb
/* Enable access to Advanced SIMD registers */
ldcopr r0, NSACR
bic r0, r0, #NSASEDIS_BIT
bic r0, r0, #NSTRCDIS_BIT
orr r0, r0, #(NASCR_CP10_BIT | NASCR_CP11_BIT)
stcopr r0, NSACR
isb
/*
* Enable access to Advanced SIMD, Floating point and to the Trace
* functionality as well.
*/
ldcopr r0, CPACR
bic r0, r0, #ASEDIS_BIT
bic r0, r0, #TRCDIS_BIT
orr r0, r0, #CPACR_ENABLE_FP_ACCESS
stcopr r0, CPACR
isb
vmrs r0, FPEXC
orr r0, r0, #FPEXC_EN_BIT
vmsr FPEXC, r0
isb
.endm
/* -----------------------------------------------------------------------------
* This is the superset of actions that need to be performed during a cold boot
* or a warm boot in EL3. This code is shared by BL1 and BL32 (SP_MIN).
*
* This macro will always perform reset handling, architectural initialisations
* and stack setup. The rest of the actions are optional because they might not
* be needed, depending on the context in which this macro is called. This is
* why this macro is parameterised; each parameter allows an action to be
* enabled or disabled.
*
* _set_endian:
* Whether the macro needs to configure the endianness of data accesses.
*
* _warm_boot_mailbox:
* Whether the macro needs to detect the type of boot (cold/warm). The
* detection is based on the platform entrypoint address: if it is zero
* then it is a cold boot, otherwise it is a warm boot. In the latter case,
* this macro jumps to the platform entrypoint address.
*
* _secondary_cold_boot:
* Whether the macro needs to identify the CPU that is calling it: primary
* CPU or secondary CPU. The primary CPU will be allowed to carry on with
* the platform initialisations, while the secondaries will be put in a
* platform-specific state in the meantime.
*
* If the caller knows this macro will only be called by the primary CPU
* then this parameter can be defined to 0 to skip this step.
*
* _init_memory:
* Whether the macro needs to initialise the memory.
*
* _init_c_runtime:
* Whether the macro needs to initialise the C runtime environment.
*
* _exception_vectors:
* Address of the exception vectors to program in the VBAR and MVBAR registers.
* -----------------------------------------------------------------------------
*/
.macro el3_entrypoint_common \
_set_endian, _warm_boot_mailbox, _secondary_cold_boot, \
_init_memory, _init_c_runtime, _exception_vectors
/* Make sure we are in Secure Mode */
#if ASM_ASSERTION
ldcopr r0, SCR
tst r0, #SCR_NS_BIT
ASM_ASSERT(eq)
#endif
.if \_set_endian
/* -------------------------------------------------------------
* Set the CPU endianness before doing anything that might
* involve memory reads or writes.
* -------------------------------------------------------------
*/
ldcopr r0, SCTLR
bic r0, r0, #SCTLR_EE_BIT
stcopr r0, SCTLR
isb
.endif /* _set_endian */
/* Switch to monitor mode */
cps #MODE32_mon
isb
.if \_warm_boot_mailbox
/* -------------------------------------------------------------
* This code will be executed for both warm and cold resets.
* Now is the time to distinguish between the two.
* Query the platform entrypoint address and if it is not zero
* then it means it is a warm boot so jump to this address.
* -------------------------------------------------------------
*/
bl plat_get_my_entrypoint
cmp r0, #0
bxne r0
.endif /* _warm_boot_mailbox */
/* ---------------------------------------------------------------------
* It is a cold boot.
* Perform any processor specific actions upon reset e.g. cache, TLB
* invalidations etc.
* ---------------------------------------------------------------------
*/
bl reset_handler
el3_arch_init_common \_exception_vectors
.if \_secondary_cold_boot
/* -------------------------------------------------------------
* Check if this is a primary or secondary CPU cold boot.
* The primary CPU will set up the platform while the
* secondaries are placed in a platform-specific state until the
* primary CPU performs the necessary actions to bring them out
* of that state and allows entry into the OS.
* -------------------------------------------------------------
*/
bl plat_is_my_cpu_primary
cmp r0, #0
bne do_primary_cold_boot
/* This is a cold boot on a secondary CPU */
bl plat_secondary_cold_boot_setup
/* plat_secondary_cold_boot_setup() is not supposed to return */
bl plat_panic_handler
do_primary_cold_boot:
.endif /* _secondary_cold_boot */
/* ---------------------------------------------------------------------
* Initialize memory now. Secondary CPU initialization won't get to this
* point.
* ---------------------------------------------------------------------
*/
.if \_init_memory
bl platform_mem_init
.endif /* _init_memory */
/* ---------------------------------------------------------------------
* Init C runtime environment:
* - Zero-initialise the NOBITS sections. There are 2 of them:
* - the .bss section;
* - the coherent memory section (if any).
* - Relocate the data section from ROM to RAM, if required.
* ---------------------------------------------------------------------
*/
.if \_init_c_runtime
#if IMAGE_BL32
/* -----------------------------------------------------------------
* Invalidate the RW memory used by the BL32 (SP_MIN) image. This
* includes the data and NOBITS sections. This is done to
* safeguard against possible corruption of this memory by
* dirty cache lines in a system cache as a result of use by
* an earlier boot loader stage.
* -----------------------------------------------------------------
*/
ldr r0, =__RW_START__
ldr r1, =__RW_END__
sub r1, r1, r0
bl inv_dcache_range
#endif /* IMAGE_BL32 */
ldr r0, =__BSS_START__
ldr r1, =__BSS_SIZE__
bl zeromem
#if USE_COHERENT_MEM
ldr r0, =__COHERENT_RAM_START__
ldr r1, =__COHERENT_RAM_UNALIGNED_SIZE__
bl zeromem
#endif
#if IMAGE_BL1
/* -----------------------------------------------------
* Copy data from ROM to RAM.
* -----------------------------------------------------
*/
ldr r0, =__DATA_RAM_START__
ldr r1, =__DATA_ROM_START__
ldr r2, =__DATA_SIZE__
bl memcpy
#endif
.endif /* _init_c_runtime */
/* ---------------------------------------------------------------------
* Allocate a stack whose memory will be marked as Normal-IS-WBWA when
* the MMU is enabled. There is no risk of reading stale stack memory
* after enabling the MMU as only the primary CPU is running at the
* moment.
* ---------------------------------------------------------------------
*/
bl plat_set_my_stack
.endm
#endif /* __EL3_COMMON_MACROS_S__ */
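As a usage sketch (modelled on how the AArch64 counterpart is invoked; the
vector table label and flag values are assumptions, not part of this file), a
BL1 entrypoint could instantiate the macro as:

	func bl1_entrypoint
		el3_entrypoint_common				\
			_set_endian=1				\
			_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS \
			_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU \
			_init_memory=1				\
			_init_c_runtime=1			\
			_exception_vectors=bl1_exceptions
		/* ... stage-specific setup continues here ... */
	endfunc bl1_entrypoint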

View File

@@ -191,6 +191,7 @@
/* NASCR definitions */
#define NSASEDIS_BIT (1 << 15)
#define NSTRCDIS_BIT (1 << 20)
#define NASCR_CP11_BIT (1 << 11)
#define NASCR_CP10_BIT (1 << 10)

View File

@@ -187,6 +187,9 @@ void flush_dcache_range(uintptr_t addr, size_t size);
void clean_dcache_range(uintptr_t addr, size_t size);
void inv_dcache_range(uintptr_t addr, size_t size);
void disable_mmu_secure(void);
void disable_mmu_icache_secure(void);
DEFINE_SYSOP_FUNC(wfi)
DEFINE_SYSOP_FUNC(wfe)
DEFINE_SYSOP_FUNC(sev)
@@ -196,6 +199,9 @@ DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
DEFINE_SYSOP_FUNC(isb)
void __dead2 smc(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3,
uint32_t r4, uint32_t r5, uint32_t r6, uint32_t r7);
DEFINE_SYSREG_RW_FUNCS(spsr)
DEFINE_SYSREG_RW_FUNCS(cpsr)
@@ -289,4 +295,6 @@ DEFINE_DCOP_PARAM_FUNC(cvac, DCCMVAC)
#define read_cntpct_el0() read64_cntpct()
#define read_ctr_el0() read_ctr()
#endif /* __ARCH_HELPERS_H__ */

View File

@@ -42,12 +42,16 @@
CPU_MIDR: /* cpu_ops midr */
.space 4
/* Reset fn is needed during reset */
#if IMAGE_BL1 || IMAGE_BL32
CPU_RESET_FUNC: /* cpu_ops reset_func */
.space 4
#endif
#if IMAGE_BL32 /* Core and cluster power-down functions are needed only in BL32 */
CPU_PWR_DWN_CORE: /* cpu_ops core_pwr_dwn */
.space 4
CPU_PWR_DWN_CLUSTER: /* cpu_ops cluster_pwr_dwn */
.space 4
#endif
CPU_OPS_SIZE = .
/*
@@ -60,13 +64,17 @@ CPU_OPS_SIZE = .
.align 2
.type cpu_ops_\_name, %object
.word \_midr
#if IMAGE_BL1 || IMAGE_BL32
.if \_noresetfunc
.word 0
.else
.word \_name\()_reset_func
.endif
#endif
#if IMAGE_BL32
.word \_name\()_core_pwr_dwn
.word \_name\()_cluster_pwr_dwn
#endif
.endm
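/*
 * Illustrative use (the CPU name and MIDR macro are hypothetical):
 * a CPU library file declares its ops via the enclosing
 * declare_cpu_ops macro, e.g.
 *
 *	declare_cpu_ops cortex_aNN, CORTEX_ANN_MIDR
 *
 * and the reset/power-down slots are then only emitted for the
 * images that actually use them, as guarded above.
 */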
#endif /* __CPU_MACROS_S__ */

View File

@@ -41,9 +41,13 @@
/*
* Platform binary types for linking
*/
#ifdef AARCH32
#define PLATFORM_LINKER_FORMAT "elf32-littlearm"
#define PLATFORM_LINKER_ARCH arm
#else
#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
#define PLATFORM_LINKER_ARCH aarch64
#endif /* AARCH32 */
/*
* Generic platform constants

View File

@@ -86,7 +86,7 @@ uint32_t plat_interrupt_type_to_line(uint32_t type,
* Optional common functions (may be overridden)
******************************************************************************/
uintptr_t plat_get_my_stack(void);
void plat_report_exception(unsigned long);
void plat_report_exception(unsigned int exception_type);
int plat_crash_console_init(void);
int plat_crash_console_putc(int c);
void plat_error_handler(int err) __dead2;

View File

@@ -32,7 +32,21 @@
#include <asm_macros.S>
#include <assert_macros.S>
.globl smc
.globl zeromem
.globl disable_mmu_icache_secure
.globl disable_mmu_secure
func smc
/*
* In AArch32, only arguments r0-r3 are passed in registers; the
* rest (r4-r6 here) are passed on the stack, so they must be
* loaded from the stack into r4-r6 explicitly.
* Clobbers: r4-r6
*/
ldm sp, {r4, r5, r6}
smc #0
endfunc smc
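/*
 * Illustrative C-side call of the prototype added to arch_helpers.h
 * (function ID and arguments are hypothetical):
 *
 *	smc(fid, arg1, arg2, arg3, 0, 0, 0, 0);
 *
 * The first four arguments arrive in r0-r3 per the AAPCS; the
 * remainder are spilled to the stack, hence the ldm above.
 */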
/* -----------------------------------------------------------------------
* void zeromem(void *mem, unsigned int length);
@@ -58,3 +72,25 @@ z_loop:
z_end:
bx lr
endfunc zeromem
/* ---------------------------------------------------------------------------
* Disable the MMU in Secure State
* ---------------------------------------------------------------------------
*/
func disable_mmu_secure
mov r1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
ldcopr r0, SCTLR
bic r0, r0, r1
stcopr r0, SCTLR
isb // ensure MMU is off
dsb sy
bx lr
endfunc disable_mmu_secure
func disable_mmu_icache_secure
ldr r1, =(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
b do_disable_mmu
endfunc disable_mmu_icache_secure
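/*
 * Both entry points share the do_disable_mmu tail. From C, callers
 * use the prototypes added to arch_helpers.h by this patch:
 *
 *	disable_mmu_secure();		(clears SCTLR.M and SCTLR.C)
 *	disable_mmu_icache_secure();	(additionally clears SCTLR.I)
 */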

View File

@@ -34,6 +34,7 @@
#include <cpu_data.h>
#include <cpu_macros.S>
#if IMAGE_BL1 || IMAGE_BL32
/*
* The reset handler common to all platforms. After a matching
* cpu_ops structure entry is found, the corresponding reset_handler
@@ -65,6 +66,9 @@ func reset_handler
bx lr
endfunc reset_handler
#endif /* IMAGE_BL1 || IMAGE_BL32 */
#if IMAGE_BL32 /* Core and cluster power-down functions are needed only in BL32 */
/*
* The prepare core power down function for all platforms. After
* the cpu_ops pointer is retrieved from cpu_data, the corresponding
@@ -132,6 +136,8 @@ func init_cpu_ops
pop {r4 - r6, pc}
endfunc init_cpu_ops
#endif /* IMAGE_BL32 */
/*
* The below function returns the cpu_ops structure matching the
* midr of the core. It reads the MIDR and finds the matching

View File

@@ -0,0 +1,38 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm_macros.S>
.globl semihosting_call
func semihosting_call
svc #0x123456
bx lr
endfunc semihosting_call
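/*
 * SVC #0x123456 is the architected ARM-mode (A32) semihosting trap.
 * A debugger-hosted call might look roughly like this (the operation
 * number comes from the generic semihosting library; the call site
 * itself is illustrative):
 *
 *	semihosting_call(SEMIHOSTING_SYS_WRITEC, &character);
 */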

View File

@@ -0,0 +1,55 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm_macros.S>
#include <bl_common.h>
#include <v2m_def.h>
.globl plat_report_exception
/* -------------------------------------------------------
* void plat_report_exception(unsigned int type)
* Function to report an unhandled exception
* using platform-specific means.
* On the FVP platform, it updates the LEDs
* to indicate where we are.
* SYS_LED[0] - 0x0
* SYS_LED[2:1] - 0x0
* SYS_LED[7:3] - Exception Mode.
* Clobbers: r0-r1
* -------------------------------------------------------
*/
func plat_report_exception
lsl r0, r0, #V2M_SYS_LED_EC_SHIFT
ldr r1, =V2M_SYSREGS_BASE
add r1, r1, #V2M_SYS_LED
str r0, [r1]
bx lr
endfunc plat_report_exception

View File

@@ -31,10 +31,8 @@
PLAT_INCLUDES += -Iinclude/plat/arm/board/common/ \
-Iinclude/plat/arm/board/common/drivers
PLAT_BL_COMMON_SOURCES += drivers/arm/pl011/${ARCH}/pl011_console.S
ifeq (${ARCH}, aarch64)
PLAT_BL_COMMON_SOURCES += plat/arm/board/common/aarch64/board_arm_helpers.S
endif
PLAT_BL_COMMON_SOURCES += drivers/arm/pl011/${ARCH}/pl011_console.S \
plat/arm/board/common/${ARCH}/board_arm_helpers.S
BL1_SOURCES += plat/arm/board/common/drivers/norflash/norflash.c

View File

@@ -34,9 +34,22 @@
#include "../drivers/pwrc/fvp_pwrc.h"
#include "../fvp_def.h"
.globl plat_secondary_cold_boot_setup
.globl plat_get_my_entrypoint
.globl plat_is_my_cpu_primary
/* --------------------------------------------------------------------
* void plat_secondary_cold_boot_setup (void);
*
* For AArch32, cold-booting secondary CPUs is not yet
* implemented, so they simply panic.
* --------------------------------------------------------------------
*/
func plat_secondary_cold_boot_setup
cb_panic:
b cb_panic
endfunc plat_secondary_cold_boot_setup
/* ---------------------------------------------------------------------
* unsigned long plat_get_my_entrypoint (void);
*

View File

@@ -33,6 +33,7 @@
.weak plat_my_core_pos
.weak plat_reset_handler
.weak plat_disable_acp
.weak platform_mem_init
.weak plat_panic_handler
@@ -59,6 +60,15 @@ func plat_reset_handler
bx lr
endfunc plat_reset_handler
/* -----------------------------------------------------
* Placeholder function which should be redefined by
* each platform.
* -----------------------------------------------------
*/
func plat_disable_acp
bx lr
endfunc plat_disable_acp
/* ---------------------------------------------------------------------
* Placeholder function which should be redefined by
* each platform.

View File

@@ -0,0 +1,71 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <platform_def.h>
.globl plat_get_my_stack
.globl plat_set_my_stack
/* -----------------------------------------------------
* unsigned long plat_get_my_stack ()
*
* For cold-boot BL images, only the primary CPU needs
* a stack. This function returns the stack pointer for
* a stack allocated in normal memory.
* -----------------------------------------------------
*/
func plat_get_my_stack
get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
bx lr
endfunc plat_get_my_stack
/* -----------------------------------------------------
* void plat_set_my_stack ()
*
* For cold-boot BL images, only the primary CPU needs
* a stack. This function sets the stack pointer to a
* stack allocated in normal memory.
* -----------------------------------------------------
*/
func plat_set_my_stack
get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
mov sp, r0
bx lr
endfunc plat_set_my_stack
/* -----------------------------------------------------
* Per-cpu stacks in normal memory. Each cpu gets a
* stack of PLATFORM_STACK_SIZE bytes.
* -----------------------------------------------------
*/
declare_stack platform_normal_stacks, tzfw_normal_stacks, \
PLATFORM_STACK_SIZE, 1, CACHE_WRITEBACK_GRANULE