Rework BL3-1 unhandled exception handling and reporting

This patch implements register reporting when unhandled exceptions are
taken in BL3-1. Unhandled exceptions result in a dump of registers
to the console before execution on that CPU is halted. The Crash Stack,
previously called the Exception Stack, is used for this purpose.
This stack preserves the CPU context and runtime stack
contents for debugging and analysis.

This patch also introduces the per_cpu_ptr_cache, referenced by tpidr_el3,
to provide easy access to some of BL3-1's per-cpu data structures.
Initially, it is used to provide a pointer to the Crash Stack.
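
For illustration, here is a minimal C sketch of how a BL3-1 component could
reach the crash stack through the pointer cache. per_cpu_ptr_cache_t and
cm_get_pcpu_ptr_cache() are introduced by this patch; the helper
crash_report_get_stack() is hypothetical and shown only for clarity.

#include <stdint.h>

/* Mirrors the structure this patch adds to include/context.h */
typedef struct per_cpu_ptr_cache {
        uint64_t crash_stack;
} per_cpu_ptr_cache_t;

/* Added by this patch in context_mgmt.c: returns the value held in tpidr_el3 */
void *cm_get_pcpu_ptr_cache(void);

/*
 * Hypothetical helper: fetch this CPU's crash stack top via the cache.
 * The assembler crash path does the equivalent by reading tpidr_el3 and
 * loading the value at PTR_CACHE_CRASH_STACK_OFFSET.
 */
static uint64_t crash_report_get_stack(void)
{
        per_cpu_ptr_cache_t *cache = cm_get_pcpu_ptr_cache();

        return cache->crash_stack;
}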

panic() now prints the error file and line number in debug builds
and prints the PC value in release builds.

The Exception Stack is renamed to the Crash Stack with this patch.
The original intention behind the exception stack is no longer valid,
since we intend to support several valid exceptions such as IRQ
and FIQ in the trusted firmware context. This stack is now
used for dumping and reporting the system state when a
crash happens, hence the rename.

Fixes ARM-software/tf-issues#79 Improve reporting of unhandled exception

Change-Id: I260791dc05536b78547412d147193cdccae7811a
Soby Mathew 2014-04-07 15:28:55 +01:00
parent c5c9b69c13
commit a43d431b80
23 changed files with 578 additions and 170 deletions


@ -60,6 +60,7 @@ else
endif
BL_COMMON_SOURCES := common/bl_common.c \
common/debug.c \
lib/aarch64/cache_helpers.S \
lib/aarch64/misc_helpers.S \
lib/aarch64/tlb_helpers.S \


@ -0,0 +1,291 @@
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <context.h>
#include <plat_macros.S>
.globl get_crash_stack
.globl dump_state_and_die
.globl dump_intr_state_and_die
/* ------------------------------------------------------
* The below section deals with dumping the system state
* when an unhandled exception is taken in EL3.
* The layout and the names of the registers which will
* be dumped during an unhandled exception are given below.
* ------------------------------------------------------
*/
.section .rodata.dump_reg_name, "aS"
caller_saved_regs: .asciz "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",\
"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16",\
"x17", "x18", ""
callee_saved_regs: .asciz "x19", "x20", "x21", "x22", "x23", "x24",\
"x25", "x26", "x27", "x28", "x29", "x30", ""
el3_sys_regs: .asciz "scr_el3", "sctlr_el3", "cptr_el3", "tcr_el3",\
"daif", "mair_el3", "spsr_el3", "elr_el3", "ttbr0_el3", "esr_el3",\
"sp_el3", "far_el3", ""
non_el3_sys_0_regs: .asciz "spsr_el1", "elr_el1", "spsr_abt", "spsr_und",\
"spsr_irq", "spsr_fiq", "sctlr_el1", "actlr_el1", "cpacr_el1",\
"csselr_el1", "sp_el1", "esr_el1", "ttbr0_el1", "ttbr1_el1",\
"mair_el1", "amair_el1", "tcr_el1", "tpidr_el1", ""
non_el3_sys_1_regs: .asciz "tpidr_el0", "tpidrro_el0", "dacr32_el2",\
"ifsr32_el2", "par_el1", "far_el1", "afsr0_el1", "afsr1_el1",\
"contextidr_el1", "vbar_el1", "cntp_ctl_el0", "cntp_cval_el0",\
"cntv_ctl_el0", "cntv_cval_el0", "cntkctl_el1", "fpexc32_el2",\
"sp_el0", ""
/* -----------------------------------------------------
* Currently we are stack limited. Hence make sure that
* we don't try to dump more than 20 registers using the
* stack.
* -----------------------------------------------------
*/
#define REG_SIZE 0x8
/* The caller saved registers are X0 to X18 */
#define CALLER_SAVED_REG_SIZE (20 * REG_SIZE)
/* The callee saved registers are X19 to X30 */
#define CALLEE_SAVED_REG_SIZE (12 * REG_SIZE)
/* The EL3 sys regs */
#define EL3_SYS_REG_SIZE (12 * REG_SIZE)
/* The non EL3 sys regs set-0 */
#define NON_EL3_SYS_0_REG_SIZE (18 * REG_SIZE)
/* The non EL3 sys regs set-1 */
#define NON_EL3_SYS_1_REG_SIZE (18 * REG_SIZE)
.macro print_caller_saved_regs
sub sp, sp, #CALLER_SAVED_REG_SIZE
stp x0, x1, [sp]
stp x2, x3, [sp, #(REG_SIZE * 2)]
stp x4, x5, [sp, #(REG_SIZE * 4)]
stp x6, x7, [sp, #(REG_SIZE * 6)]
stp x8, x9, [sp, #(REG_SIZE * 8)]
stp x10, x11, [sp, #(REG_SIZE * 10)]
stp x12, x13, [sp, #(REG_SIZE * 12)]
stp x14, x15, [sp, #(REG_SIZE * 14)]
stp x16, x17, [sp, #(REG_SIZE * 16)]
stp x18, xzr, [sp, #(REG_SIZE * 18)]
adr x0, caller_saved_regs
mov x1, sp
bl print_string_value
add sp, sp, #CALLER_SAVED_REG_SIZE
.endm
.macro print_callee_saved_regs
sub sp, sp, CALLEE_SAVED_REG_SIZE
stp x19, x20, [sp]
stp x21, x22, [sp, #(REG_SIZE * 2)]
stp x23, x24, [sp, #(REG_SIZE * 4)]
stp x25, x26, [sp, #(REG_SIZE * 6)]
stp x27, x28, [sp, #(REG_SIZE * 8)]
stp x29, x30, [sp, #(REG_SIZE * 10)]
adr x0, callee_saved_regs
mov x1, sp
bl print_string_value
add sp, sp, #CALLEE_SAVED_REG_SIZE
.endm
.macro print_el3_sys_regs
sub sp, sp, #EL3_SYS_REG_SIZE
mrs x9, scr_el3
mrs x10, sctlr_el3
mrs x11, cptr_el3
mrs x12, tcr_el3
mrs x13, daif
mrs x14, mair_el3
mrs x15, spsr_el3 /* save the elr and spsr regs separately */
mrs x16, elr_el3
mrs x17, ttbr0_el3
mrs x8, esr_el3
mrs x7, far_el3
stp x9, x10, [sp]
stp x11, x12, [sp, #(REG_SIZE * 2)]
stp x13, x14, [sp, #(REG_SIZE * 4)]
stp x15, x16, [sp, #(REG_SIZE * 6)]
stp x17, x8, [sp, #(REG_SIZE * 8)]
stp x0, x7, [sp, #(REG_SIZE * 10)] /* sp_el3 is in x0 */
adr x0, el3_sys_regs
mov x1, sp
bl print_string_value
add sp, sp, #EL3_SYS_REG_SIZE
.endm
.macro print_non_el3_sys_0_regs
sub sp, sp, #NON_EL3_SYS_0_REG_SIZE
mrs x9, spsr_el1
mrs x10, elr_el1
mrs x11, spsr_abt
mrs x12, spsr_und
mrs x13, spsr_irq
mrs x14, spsr_fiq
mrs x15, sctlr_el1
mrs x16, actlr_el1
mrs x17, cpacr_el1
mrs x8, csselr_el1
stp x9, x10, [sp]
stp x11, x12, [sp, #(REG_SIZE * 2)]
stp x13, x14, [sp, #(REG_SIZE * 4)]
stp x15, x16, [sp, #(REG_SIZE * 6)]
stp x17, x8, [sp, #(REG_SIZE * 8)]
mrs x10, sp_el1
mrs x11, esr_el1
mrs x12, ttbr0_el1
mrs x13, ttbr1_el1
mrs x14, mair_el1
mrs x15, amair_el1
mrs x16, tcr_el1
mrs x17, tpidr_el1
stp x10, x11, [sp, #(REG_SIZE * 10)]
stp x12, x13, [sp, #(REG_SIZE * 12)]
stp x14, x15, [sp, #(REG_SIZE * 14)]
stp x16, x17, [sp, #(REG_SIZE * 16)]
adr x0, non_el3_sys_0_regs
mov x1, sp
bl print_string_value
add sp, sp, #NON_EL3_SYS_0_REG_SIZE
.endm
.macro print_non_el3_sys_1_regs
sub sp, sp, #NON_EL3_SYS_1_REG_SIZE
mrs x9, tpidr_el0
mrs x10, tpidrro_el0
mrs x11, dacr32_el2
mrs x12, ifsr32_el2
mrs x13, par_el1
mrs x14, far_el1
mrs x15, afsr0_el1
mrs x16, afsr1_el1
mrs x17, contextidr_el1
mrs x8, vbar_el1
stp x9, x10, [sp]
stp x11, x12, [sp, #(REG_SIZE * 2)]
stp x13, x14, [sp, #(REG_SIZE * 4)]
stp x15, x16, [sp, #(REG_SIZE * 6)]
stp x17, x8, [sp, #(REG_SIZE * 8)]
mrs x10, cntp_ctl_el0
mrs x11, cntp_cval_el0
mrs x12, cntv_ctl_el0
mrs x13, cntv_cval_el0
mrs x14, cntkctl_el1
mrs x15, fpexc32_el2
mrs x8, sp_el0
stp x10, x11, [sp, #(REG_SIZE *10)]
stp x12, x13, [sp, #(REG_SIZE * 12)]
stp x14, x15, [sp, #(REG_SIZE * 14)]
stp x8, xzr, [sp, #(REG_SIZE * 16)]
adr x0, non_el3_sys_1_regs
mov x1, sp
bl print_string_value
add sp, sp, #NON_EL3_SYS_1_REG_SIZE
.endm
.macro init_crash_stack
msr cntfrq_el0, x0 /* we can corrupt this reg to free up x0 */
mrs x0, tpidr_el3
/* Check if tpidr is initialized */
cbz x0, infinite_loop
ldr x0, [x0, #PTR_CACHE_CRASH_STACK_OFFSET]
/* store the x30 and sp to stack */
str x30, [x0, #-(REG_SIZE)]!
mov x30, sp
str x30, [x0, #-(REG_SIZE)]!
mov sp, x0
mrs x0, cntfrq_el0
.endm
/* ---------------------------------------------------
* The below function initializes the crash dump stack
* and prints the system state. This function
* will not return.
* ---------------------------------------------------
*/
func dump_state_and_die
init_crash_stack
print_caller_saved_regs
b print_state
func dump_intr_state_and_die
init_crash_stack
print_caller_saved_regs
plat_print_gic_regs /* fall through to print_state */
print_state:
/* copy the original x30 from stack */
ldr x30, [sp, #REG_SIZE]
print_callee_saved_regs
/* copy the original SP_EL3 from stack to x0 and rewind stack */
ldr x0, [sp], #(REG_SIZE * 2)
print_el3_sys_regs
print_non_el3_sys_0_regs
print_non_el3_sys_1_regs
b infinite_loop
func infinite_loop
b infinite_loop
#define PCPU_CRASH_STACK_SIZE 0x140
/* -----------------------------------------------------
* uint64_t get_crash_stack (uint64_t mpidr) : This
* function returns a small per-cpu stack used for
* reporting unhandled exceptions
* -----------------------------------------------------
*/
func get_crash_stack
mov x10, x30 // lr
get_mp_stack pcpu_crash_stack, PCPU_CRASH_STACK_SIZE
ret x10
/* -----------------------------------------------------
* Per-cpu crash stacks in normal memory.
* -----------------------------------------------------
*/
declare_stack pcpu_crash_stack, tzfw_normal_stacks, \
PCPU_CRASH_STACK_SIZE, PLATFORM_CORE_COUNT


@ -37,7 +37,6 @@
.globl runtime_exceptions
.globl el3_exit
.globl get_exception_stack
.macro save_x18_to_x29_sp_el0
stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
@ -63,8 +62,7 @@ sync_exception_sp_el0:
* We don't expect any synchronous exceptions from EL3
* -----------------------------------------------------
*/
wfi
b sync_exception_sp_el0
bl dump_state_and_die
check_vector_size sync_exception_sp_el0
.align 7
@ -74,20 +72,17 @@ sync_exception_sp_el0:
* -----------------------------------------------------
*/
irq_sp_el0:
handle_async_exception IRQ_SP_EL0
b irq_sp_el0
bl dump_intr_state_and_die
check_vector_size irq_sp_el0
.align 7
fiq_sp_el0:
handle_async_exception FIQ_SP_EL0
b fiq_sp_el0
bl dump_intr_state_and_die
check_vector_size fiq_sp_el0
.align 7
serror_sp_el0:
handle_async_exception SERROR_SP_EL0
b serror_sp_el0
bl dump_state_and_die
check_vector_size serror_sp_el0
/* -----------------------------------------------------
@ -100,36 +95,25 @@ sync_exception_sp_elx:
* This exception will trigger if anything went wrong
* during a previous exception entry or exit or while
* handling an earlier unexpected synchronous exception.
* In any case we cannot rely on SP_EL3. Switching to a
* known safe area of memory will corrupt at least a
* single register. It is best to enter wfi in loop as
* that will preserve the system state for analysis
* through a debugger later.
* There is a high probability that SP_EL3 is corrupted.
* -----------------------------------------------------
*/
wfi
b sync_exception_sp_elx
bl dump_state_and_die
check_vector_size sync_exception_sp_elx
/* -----------------------------------------------------
* As mentioned in the previous comment, all bets are
* off if SP_EL3 cannot be relied upon. Report their
* occurrence.
* -----------------------------------------------------
*/
.align 7
irq_sp_elx:
b irq_sp_elx
bl dump_intr_state_and_die
check_vector_size irq_sp_elx
.align 7
fiq_sp_elx:
b fiq_sp_elx
bl dump_intr_state_and_die
check_vector_size fiq_sp_elx
.align 7
serror_sp_elx:
b serror_sp_elx
bl dump_state_and_die
check_vector_size serror_sp_elx
/* -----------------------------------------------------
@ -156,20 +140,17 @@ sync_exception_aarch64:
* -----------------------------------------------------
*/
irq_aarch64:
handle_async_exception IRQ_AARCH64
b irq_aarch64
bl dump_intr_state_and_die
check_vector_size irq_aarch64
.align 7
fiq_aarch64:
handle_async_exception FIQ_AARCH64
b fiq_aarch64
bl dump_intr_state_and_die
check_vector_size fiq_aarch64
.align 7
serror_aarch64:
handle_async_exception SERROR_AARCH64
b serror_aarch64
bl dump_state_and_die
check_vector_size serror_aarch64
/* -----------------------------------------------------
@ -196,20 +177,17 @@ sync_exception_aarch32:
* -----------------------------------------------------
*/
irq_aarch32:
handle_async_exception IRQ_AARCH32
b irq_aarch32
bl dump_intr_state_and_die
check_vector_size irq_aarch32
.align 7
fiq_aarch32:
handle_async_exception FIQ_AARCH32
b fiq_aarch32
bl dump_intr_state_and_die
check_vector_size fiq_aarch32
.align 7
serror_aarch32:
handle_async_exception SERROR_AARCH32
b serror_aarch32
bl dump_state_and_die
check_vector_size serror_aarch32
.align 7
@ -367,9 +345,7 @@ el3_exit: ; .type el3_exit, %function
msr elr_el3, x17
/* Restore saved general purpose registers and return */
bl restore_gp_registers
ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
eret
b restore_gp_registers_eret
smc_unknown:
/*
@ -379,7 +355,8 @@ smc_unknown:
* content). Either way, we aren't leaking any secure information
* through them
*/
bl restore_gp_registers_callee
mov w0, #SMC_UNK
b restore_gp_registers_callee_eret
smc_prohibited:
ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
@ -387,7 +364,8 @@ smc_prohibited:
eret
rt_svc_fw_critical_error:
b rt_svc_fw_critical_error
msr spsel, #1 /* Switch to SP_ELx */
bl dump_state_and_die
/* -----------------------------------------------------
* The following functions are used to save and restore
@ -413,52 +391,24 @@ func save_gp_registers
save_x18_to_x29_sp_el0
ret
func restore_gp_registers
func restore_gp_registers_eret
ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
restore_gp_registers_callee:
ldr x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
restore_gp_registers_callee_eret:
ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
msr sp_el0, x17
ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
ret
/* -----------------------------------------------------
* 256 bytes of exception stack for each cpu
* -----------------------------------------------------
*/
#if DEBUG
#define PCPU_EXCEPTION_STACK_SIZE 0x300
#else
#define PCPU_EXCEPTION_STACK_SIZE 0x100
#endif
/* -----------------------------------------------------
* void get_exception_stack (uint64_t mpidr) : This
* function is used to allocate a small stack for
* reporting unhandled exceptions
* -----------------------------------------------------
*/
func get_exception_stack
mov x10, x30 // lr
get_mp_stack pcpu_exception_stack, PCPU_EXCEPTION_STACK_SIZE
ret x10
/* -----------------------------------------------------
* Per-cpu exception stacks in normal memory.
* -----------------------------------------------------
*/
declare_stack pcpu_exception_stack, tzfw_normal_stacks, \
PCPU_EXCEPTION_STACK_SIZE, PLATFORM_CORE_COUNT
ldp x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
msr sp_el0, x17
ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
eret


@ -35,6 +35,7 @@ BL31_SOURCES += bl31/bl31_main.c \
bl31/aarch64/bl31_entrypoint.S \
bl31/aarch64/context.S \
bl31/aarch64/runtime_exceptions.S \
bl31/aarch64/crash_reporting.S \
common/aarch64/early_exceptions.S \
lib/locks/bakery/bakery_lock.c \
lib/locks/exclusive/spinlock.S \


@ -37,7 +37,6 @@
#include <runtime_svc.h>
#include <stdio.h>
/*******************************************************************************
* This function pointer is used to initialise the BL32 image. It's initialized
* by SPD calling bl31_register_bl32_init after setting up all things necessary
@ -99,6 +98,7 @@ void bl31_main(void)
*/
assert(cm_get_context(mpidr, NON_SECURE));
cm_set_next_eret_context(NON_SECURE);
cm_init_pcpu_ptr_cache();
write_vbar_el3((uint64_t) runtime_exceptions);
isb();
next_image_type = NON_SECURE;


@ -31,6 +31,7 @@
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <bl31.h>
#include <context.h>
#include <context_mgmt.h>
#include <platform.h>
@ -47,6 +48,9 @@ typedef struct {
static context_info_t cm_context_info[PLATFORM_CORE_COUNT];
/* The per_cpu_ptr_cache_t space allocation */
static per_cpu_ptr_cache_t per_cpu_ptr_cache_space[PLATFORM_CORE_COUNT];
/*******************************************************************************
* Context management library initialisation routine. This library is used by
* runtime services to share pointers to 'cpu_context' structures for the secure
@ -211,21 +215,31 @@ void cm_set_next_eret_context(uint32_t security_state)
: : "r" (ctx));
}
/*******************************************************************************
* This function is used to program exception stack in the 'cpu_context'
* structure. This is the initial stack used for taking and handling exceptions
* at EL3. This stack is expected to be initialized once by each security state
******************************************************************************/
void cm_init_exception_stack(uint64_t mpidr, uint32_t security_state)
/************************************************************************
* The following function is used to populate the per-cpu pointer cache.
* A pointer to the cache is stored in the tpidr_el3 register.
*************************************************************************/
void cm_init_pcpu_ptr_cache()
{
cpu_context_t *ctx;
el3_state_t *state;
unsigned long mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
per_cpu_ptr_cache_t *pcpu_ptr_cache;
ctx = cm_get_context(mpidr, security_state);
assert(ctx);
pcpu_ptr_cache = &per_cpu_ptr_cache_space[linear_id];
assert(pcpu_ptr_cache);
pcpu_ptr_cache->crash_stack = get_crash_stack(mpidr);
/* Set exception stack in the context */
state = get_el3state_ctx(ctx);
write_ctx_reg(state, CTX_EXCEPTION_SP, get_exception_stack(mpidr));
cm_set_pcpu_ptr_cache(pcpu_ptr_cache);
}
void cm_set_pcpu_ptr_cache(const void *pcpu_ptr)
{
write_tpidr_el3((unsigned long)pcpu_ptr);
}
void *cm_get_pcpu_ptr_cache(void)
{
return (void *)read_tpidr_el3();
}


@ -135,11 +135,3 @@ void runtime_svc_init()
error:
panic();
}
void fault_handler(void *handle)
{
gp_regs_t *gpregs_ctx = get_gpregs_ctx(handle);
ERROR("Unhandled synchronous fault. Register dump @ 0x%x \n",
gpregs_ctx);
panic();
}

common/debug.c Normal file

@ -0,0 +1,95 @@
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <console.h>
#include <debug.h>
#include <stdio.h>
/******************************************************************
* This function is invoked from assembler error handling routines.
* It walks the list of register names passed in 's' (a sequence of
* NUL-terminated strings ending with an empty string) and prints each
* name along with the corresponding 64-bit value from 'mem' in hex.
********************************************************************/
void print_string_value(char *s, unsigned long *mem)
{
unsigned char i, temp;
unsigned long val;
while (*s) {
i = 16;
while (*s)
console_putc(*s++);
s++;
console_putc('\t');
console_putc(':');
console_putc('0');
console_putc('x');
val = *mem++;
while (i--) {
temp = (val >> (i << 2)) & 0xf;
if (temp < 0xa)
console_putc('0' + temp);
else
console_putc('A' + (temp - 0xa));
}
console_putc('\n');
}
}
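
/*
 * Usage sketch (illustrative only, not part of this patch): 's' is a
 * sequence of NUL-terminated names ending with an empty string, and 'mem'
 * holds one 64-bit value per name, which is what the assembler crash path
 * sets up before each 'bl print_string_value'. The names gic_names,
 * gic_vals and report_gic_state below are made up for the example.
 */
static const char gic_names[] = "gic_iar\0gic_ctlr\0"; /* trailing "" ends the list */
static unsigned long gic_vals[2];

static void report_gic_state(unsigned long iar, unsigned long ctlr)
{
        gic_vals[0] = iar;
        gic_vals[1] = ctlr;
        /* Prints one "name<TAB>:0x<16 hex digits>" line per register */
        print_string_value((char *)gic_names, gic_vals);
}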
/***********************************************************
* The common implementation of do_panic for all BL stages
***********************************************************/
#if DEBUG
void __dead2 do_panic(const char *file, int line)
{
printf("PANIC in file: %s line: %d\n", file, line);
while (1)
;
}
#else
void __dead2 do_panic(void)
{
unsigned long pc_reg;
__asm__ volatile("mov %0, x30\n"
: "=r" (pc_reg) : );
/* x30 reports the next eligible instruction whereas we want the
* place where panic() is invoked. Hence decrement by 4.
*/
printf("PANIC in PC location 0x%016X\n", pc_reg - 0x4);
while (1)
;
}
#endif


@ -194,6 +194,18 @@ constants defined. In the ARM FVP port, this file is found in
Defines the base address in non-secure DRAM where BL2 loads the BL3-3 binary
image. Must be aligned on a page-size boundary.
### File : platform_macros.S [mandatory]
Each platform must export a file of this name with the following
macro defined. In the ARM FVP port, this file is found in
[plat/fvp/include/platform_macros.S].
* **Macro : plat_print_gic_regs**
This macro allows the crash reporting routine to print GIC registers
when an unhandled IRQ or FIQ is taken in BL3-1. This aids debugging; the
macro can be defined to be empty if GIC register reporting is not
desired.
### Other mandatory modifications
@ -1147,6 +1159,7 @@ _Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved._
[plat/common/aarch64/platform_mp_stack.S]: ../plat/common/aarch64/platform_mp_stack.S
[plat/common/aarch64/platform_up_stack.S]: ../plat/common/aarch64/platform_up_stack.S
[plat/fvp/platform.h]: ../plat/fvp/platform.h
[plat/fvp/include/platform_macros.S]: ../plat/fvp/include/platform_macros.S
[plat/fvp/aarch64/plat_common.c]: ../plat/fvp/aarch64/plat_common.c
[plat/fvp/plat_pm.c]: ../plat/fvp/plat_pm.c
[include/runtime_svc.h]: ../include/runtime_svc.h


@ -66,15 +66,18 @@ void console_init(unsigned long base_addr)
}
#define WAIT_UNTIL_UART_FREE(base) while ((pl011_read_fr(base)\
& PL011_UARTFR_TXFF) == 1)
int console_putc(int c)
{
assert(uart_base);
if (c == '\n')
console_putc('\r');
if (c == '\n') {
WAIT_UNTIL_UART_FREE(uart_base);
pl011_write_dr(uart_base, '\r');
}
while ((pl011_read_fr(uart_base) & PL011_UARTFR_TXFF) == 1)
;
WAIT_UNTIL_UART_FREE(uart_base);
pl011_write_dr(uart_base, c);
return c;
}


@ -30,12 +30,6 @@
#include <arch.h>
#include <context.h>
.macro switch_to_exception_stack reg1 reg2
mov \reg1 , sp
ldr \reg2, [\reg1, #CTX_EL3STATE_OFFSET + CTX_EXCEPTION_SP]
mov sp, \reg2
.endm
/* -----------------------------------------------------
* Handle SMC exceptions separately from other sync.
* exceptions.
@ -54,45 +48,10 @@
/* -----------------------------------------------------
* The following code handles any synchronous exception
* that is not an SMC. SP_EL3 is pointing to a context
* structure where all the scratch registers are saved.
* An exception stack is also retrieved from the context
* Currently, a register dump is printed since BL31 does
* not expect any such exceptions.
* that is not an SMC.
* -----------------------------------------------------
*/
bl save_gp_registers
switch_to_exception_stack x0 x1
/* Save the core_context pointer for handled faults */
stp x0, xzr, [sp, #-0x10]!
bl fault_handler
ldp x0, xzr, [sp], #0x10
mov sp, x0
bl restore_gp_registers
ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
eret
.endm
/* -----------------------------------------------------
* Use a platform defined mechanism to report an async.
* exception.
* -----------------------------------------------------
*/
.macro handle_async_exception type
str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
bl save_gp_registers
switch_to_exception_stack x0 x1
/* Save the core_context pointer */
stp x0, xzr, [sp, #-0x10]!
mov x0, \type
bl plat_report_exception
ldp x0, xzr, [sp], #0x10
mov sp, x0
bl restore_gp_registers
ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
bl dump_state_and_die
.endm


@ -76,7 +76,7 @@
* 32-bits wide but are stored as 64-bit values for convenience
******************************************************************************/
#define CTX_EL3STATE_OFFSET (CTX_GPREGS_OFFSET + CTX_GPREGS_END)
#define CTX_EXCEPTION_SP 0x0
#define CTX_VBAR_EL3 0x0 /* Currently unused */
#define CTX_RUNTIME_SP 0x8
#define CTX_SPSR_EL3 0x10
#define CTX_ELR_EL3 0x18
@ -89,7 +89,7 @@
#define CTX_TCR_EL3 0x50
#define CTX_TTBR0_EL3 0x58
#define CTX_DAIF_EL3 0x60
#define CTX_VBAR_EL3 0x68 /* Currently unused */
/* Unused space to honour alignment requirements */
#define CTX_EL3STATE_END 0x70
/*******************************************************************************
@ -176,6 +176,11 @@
#define CTX_FP_FPCR 0x208
#define CTX_FPREGS_END 0x210
/******************************************************************************
* Offsets for the per cpu cache implementation
******************************************************************************/
#define PTR_CACHE_CRASH_STACK_OFFSET 0x0
#ifndef __ASSEMBLY__
#include <cassert.h>
@ -316,6 +321,18 @@ void el1_sysregs_context_restore(el1_sys_regs_t *regs);
void fpregs_context_save(fp_regs_t *regs);
void fpregs_context_restore(fp_regs_t *regs);
/* Per-CPU pointer cache of recently used pointers and also the crash stack
* TODO: Add other commonly used variables to this (tf_issues#90)
*/
typedef struct per_cpu_ptr_cache {
uint64_t crash_stack;
} per_cpu_ptr_cache_t;
CASSERT(PTR_CACHE_CRASH_STACK_OFFSET == __builtin_offsetof\
(per_cpu_ptr_cache_t, crash_stack), \
assert_per_cpu_ptr_cache_crash_stack_offset_mismatch);
#undef CTX_SYSREG_ALL
#undef CTX_FP_ALL
#undef CTX_GPREG_ALL


@ -49,6 +49,8 @@ extern void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint
uint32_t spsr, uint32_t scr);
extern void cm_set_el3_elr(uint32_t security_state, uint64_t entrypoint);
extern void cm_set_next_eret_context(uint32_t security_state);
extern void cm_init_exception_stack(uint64_t mpidr, uint32_t security_state);
extern void cm_init_pcpu_ptr_cache();
extern void cm_set_pcpu_ptr_cache(const void *pcpu_ptr);
extern void *cm_get_pcpu_ptr_cache(void);
#endif /* __CM_H__ */


@ -262,8 +262,7 @@ CASSERT(RT_SVC_DESC_HANDLE == __builtin_offsetof(rt_svc_desc_t, handle), \
extern void runtime_svc_init();
extern uint64_t __RT_SVC_DESCS_START__;
extern uint64_t __RT_SVC_DESCS_END__;
extern uint64_t get_exception_stack(uint64_t mpidr);
extern uint64_t get_crash_stack(uint64_t mpidr);
extern void runtime_exceptions(void);
extern void fault_handler(void *handle);
#endif /*__ASSEMBLY__*/
#endif /* __RUNTIME_SVC_H__ */


@ -56,11 +56,16 @@
/* For the moment this Panic function is very basic, Report an error and
* spin. This can be expanded in the future to provide more information.
*/
static inline void __attribute__((noreturn)) panic(void)
{
ERROR("PANIC\n");
while (1)
;
}
#if DEBUG
extern void __dead2 do_panic(const char *file, int line);
#define panic() do_panic(__FILE__, __LINE__)
#else
extern void __dead2 do_panic(void);
#define panic() do_panic()
#endif
extern void print_string_value(char *s, unsigned long *mem);
#endif /* __DEBUG_H__ */


@ -204,6 +204,8 @@ extern unsigned long read_cpuectlr(void);
extern unsigned int read_cntfrq_el0(void);
extern unsigned long read_cnthctl_el2(void);
extern unsigned long read_tpidr_el3(void);
extern void write_scr(unsigned long);
extern void write_hcr(unsigned long);
extern void write_cpacr(unsigned long);
@ -264,10 +266,13 @@ extern void write_cpuectlr(unsigned long);
extern void write_cptr_el2(unsigned long);
extern void write_cptr_el3(unsigned long);
extern void write_tpidr_el3(unsigned long);
#define IS_IN_EL(x) \
(GET_EL(read_current_el()) == MODE_EL##x)
#define IS_IN_EL1() IS_IN_EL(1)
#define IS_IN_EL3() IS_IN_EL(3)
#endif /* __ARCH_HELPERS_H__ */


@ -155,6 +155,9 @@
.globl read_id_pfr1_el1
.globl read_id_aa64pfr0_el1
.globl write_tpidr_el3
.globl read_tpidr_el3
#if SUPPORT_VFP
.globl enable_vfp
#endif
@ -719,6 +722,13 @@ func read_mpidr
mrs x0, mpidr_el1
ret
func write_tpidr_el3
msr tpidr_el3, x0
ret
func read_tpidr_el3
mrs x0, tpidr_el3
ret
#if SUPPORT_VFP
func enable_vfp


@ -0,0 +1,57 @@
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <gic_v2.h>
#include <platform.h>
.section .rodata.gic_reg_name, "aS"
gic_regs: .asciz "gic_iar", "gic_ctlr", ""
/* Currently we have only 2 GIC registers to report */
#define GIC_REG_SIZE (2 * 8)
/* ---------------------------------------------
* The below macro prints out relevant GIC
* registers whenever an unhandled exception is
* taken in BL31.
* ---------------------------------------------
*/
.macro plat_print_gic_regs
mov x0, #CONFIG_GICC_ADDR
bl platform_get_cfgvar
/* gic base address is now in x0 */
ldr w1, [x0, #GICC_IAR]
ldr w2, [x0, #GICD_CTLR]
sub sp, sp, #GIC_REG_SIZE
stp x1, x2, [sp] /* we store the gic registers as 64 bit */
adr x0, gic_regs
mov x1, sp
bl print_string_value
add sp, sp, #GIC_REG_SIZE
.endm


@ -28,10 +28,7 @@
# POSSIBILITY OF SUCH DAMAGE.
#
#
# No additional platform system include directories required
#
# PLAT_INCLUDES :=
PLAT_INCLUDES := -Iplat/fvp/include/
PLAT_BL_COMMON_SOURCES := drivers/arm/pl011/pl011.c \
drivers/arm/pl011/pl011_console.c \


@ -94,8 +94,6 @@ int32_t tspd_init_secure_context(uint64_t entrypoint,
spsr = make_spsr(MODE_EL1, MODE_SP_ELX, rw);
cm_set_el3_eret_context(SECURE, entrypoint, spsr, scr);
cm_init_exception_stack(mpidr, SECURE);
return 0;
}


@ -374,11 +374,12 @@ static unsigned int psci_afflvl0_on_finish(unsigned long mpidr,
/*
* Use the more complex exception vectors to enable SPD
* initialisation. SP_EL3 should point to a 'cpu_context'
* structure which has an exception stack allocated. The
* calling cpu should have set the context already
* structure. The calling cpu should have set the
* context already
*/
assert(cm_get_context(mpidr, NON_SECURE));
cm_set_next_eret_context(NON_SECURE);
cm_init_pcpu_ptr_cache();
write_vbar_el3((uint64_t) runtime_exceptions);
/*


@ -493,12 +493,12 @@ static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr,
/*
* Use the more complex exception vectors to enable SPD
* initialisation. SP_EL3 should point to a 'cpu_context'
* structure which has an exception stack allocated. The
* non-secure context should have been set on this cpu
* prior to suspension.
* structure. The non-secure context should have been
* set on this cpu prior to suspension.
*/
assert(cm_get_context(mpidr, NON_SECURE));
cm_set_next_eret_context(NON_SECURE);
cm_init_pcpu_ptr_cache();
write_vbar_el3((uint64_t) runtime_exceptions);
/*


@ -198,8 +198,6 @@ static void psci_init_aff_map_node(unsigned long mpidr,
(void *) &psci_ns_context[linear_id],
NON_SECURE);
/* Initialize exception stack in the context */
cm_init_exception_stack(mpidr, NON_SECURE);
}
return;