Move context management code to common location

The upcoming Firmware Update (FWU) feature needs to transition across
the Secure and Normal worlds to complete the FWU process, and hence
requires the context management code to perform this task.

Currently the context management code is part of the BL31 stage only.
This patch moves the code from (include)/bl31 to (include)/common.
Some function declarations/definitions and macros have also been moved
to different files to facilitate code sharing.

Change-Id: I3858b08aecdb76d390765ab2b099f457873f7b0c
Yatharth Kochar 2015-10-02 17:56:48 +01:00
parent c76e0d13bf
commit bbf8f6f95b
12 changed files with 462 additions and 367 deletions
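
To illustrate the kind of Secure/Normal world hand-off the relocated code
enables, here is a minimal C sketch using only function names that appear in
the diffs below (cm_get_context, cm_set_context, cm_set_next_context); the
wrapper function and the surrounding FWU flow are assumptions for
illustration, not part of this patch.

#include <assert.h>
#include <stddef.h>

#include <context_mgmt.h>

/* Hypothetical helper, not part of this patch: pick up the Normal world
 * context registered for the calling CPU and program it for the next ERET. */
static void prepare_normal_world_entry(void)
{
	void *ns_ctx = cm_get_context(NON_SECURE);

	/* A context must have been registered with cm_set_context() first. */
	assert(ns_ctx != NULL);

	/* SP_EL3 now points at the Normal world 'cpu_context'; el3_exit
	 * restores its registers and issues the ERET into the Normal world. */
	cm_set_next_context(ns_ctx);
}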


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -31,7 +31,6 @@
#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <runtime_svc.h>
.globl bl1_exceptions


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -36,7 +36,6 @@
#include <runtime_svc.h>
.globl runtime_exceptions
.globl el3_exit
/* -----------------------------------------------------
* Handle SMC exceptions separately from other sync.
@@ -426,38 +425,7 @@ smc_handler64:
#endif
blr x15
/* -----------------------------------------------------
* This routine assumes that the SP_EL3 is pointing to
* a valid context structure from where the gp regs and
* other special registers can be retrieved.
*
* Keep it in the same section as smc_handler as this
* function uses a fall-through to el3_exit
* -----------------------------------------------------
*/
el3_exit: ; .type el3_exit, %function
/* -----------------------------------------------------
* Save the current SP_EL0 i.e. the EL3 runtime stack
* which will be used for handling the next SMC. Then
* switch to SP_EL3
* -----------------------------------------------------
*/
mov x17, sp
msr spsel, #1
str x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
/* -----------------------------------------------------
* Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
* -----------------------------------------------------
*/
ldr x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
ldp x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
msr scr_el3, x18
msr spsr_el3, x16
msr elr_el3, x17
/* Restore saved general purpose registers and return */
b restore_gp_registers_eret
b el3_exit
smc_unknown:
/*
@@ -479,51 +447,3 @@ rt_svc_fw_critical_error:
msr spsel, #1 /* Switch to SP_ELx */
bl report_unhandled_exception
endfunc smc_handler
/* -----------------------------------------------------
* The following functions are used to save and restore
* all the general purpose registers. Ideally we would
* only save and restore the callee saved registers when
* a world switch occurs but that type of implementation
* is more complex. So currently we will always save and
* restore these registers on entry and exit of EL3.
* These are not macros to ensure their invocation fits
* within the 32 instructions per exception vector.
* -----------------------------------------------------
*/
func save_gp_registers
stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
save_x18_to_x29_sp_el0
ret
endfunc save_gp_registers
func restore_gp_registers_eret
ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
restore_gp_registers_callee_eret:
ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
ldp x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
msr sp_el0, x17
ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
eret
endfunc restore_gp_registers_eret


@@ -29,16 +29,17 @@
#
BL31_SOURCES += bl31/bl31_main.c \
bl31/context_mgmt.c \
bl31/cpu_data_array.c \
bl31/runtime_svc.c \
bl31/interrupt_mgmt.c \
bl31/aarch64/bl31_arch_setup.c \
bl31/aarch64/bl31_entrypoint.S \
bl31/aarch64/context.S \
bl31/aarch64/cpu_data.S \
bl31/aarch64/runtime_exceptions.S \
bl31/aarch64/crash_reporting.S \
bl31/bl31_context_mgmt.c \
common/aarch64/context.S \
common/context_mgmt.c \
lib/cpus/aarch64/cpu_helpers.S \
lib/locks/exclusive/spinlock.S \
services/std_svc/std_svc_setup.c \

bl31/bl31_context_mgmt.c (new file, 133 lines)

@@ -0,0 +1,133 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <bl31.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
#include <platform.h>
/*******************************************************************************
* This function returns a pointer to the most recent 'cpu_context' structure
* for the calling CPU that was set as the context for the specified security
* state. NULL is returned if no such structure has been specified.
******************************************************************************/
void *cm_get_context(uint32_t security_state)
{
assert(security_state <= NON_SECURE);
return get_cpu_data(cpu_context[security_state]);
}
/*******************************************************************************
* This function sets the pointer to the current 'cpu_context' structure for the
* specified security state for the calling CPU
******************************************************************************/
void cm_set_context(void *context, uint32_t security_state)
{
assert(security_state <= NON_SECURE);
set_cpu_data(cpu_context[security_state], context);
}
/*******************************************************************************
* This function returns a pointer to the most recent 'cpu_context' structure
* for the CPU identified by `cpu_idx` that was set as the context for the
* specified security state. NULL is returned if no such structure has been
* specified.
******************************************************************************/
void *cm_get_context_by_index(unsigned int cpu_idx,
unsigned int security_state)
{
assert(sec_state_is_valid(security_state));
return get_cpu_data_by_index(cpu_idx, cpu_context[security_state]);
}
/*******************************************************************************
* This function sets the pointer to the current 'cpu_context' structure for the
* specified security state for the CPU identified by CPU index.
******************************************************************************/
void cm_set_context_by_index(unsigned int cpu_idx, void *context,
unsigned int security_state)
{
assert(sec_state_is_valid(security_state));
set_cpu_data_by_index(cpu_idx, cpu_context[security_state], context);
}
#if !ERROR_DEPRECATED
/*
* These context management helpers are deprecated but are maintained for use
* by SPDs which have not migrated to the new API. If ERROR_DEPRECATED
* is enabled, these are excluded from the build so as to force users to
* migrate to the new API.
*/
/*******************************************************************************
* This function returns a pointer to the most recent 'cpu_context' structure
* for the CPU identified by MPIDR that was set as the context for the specified
* security state. NULL is returned if no such structure has been specified.
******************************************************************************/
void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state)
{
assert(sec_state_is_valid(security_state));
return cm_get_context_by_index(platform_get_core_pos(mpidr), security_state);
}
/*******************************************************************************
* This function sets the pointer to the current 'cpu_context' structure for the
* specified security state for the CPU identified by MPIDR
******************************************************************************/
void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_state)
{
assert(sec_state_is_valid(security_state));
cm_set_context_by_index(platform_get_core_pos(mpidr),
context, security_state);
}
/*******************************************************************************
* The following function provides a compatibility function for SPDs using the
* existing cm library routines. This function is expected to be invoked for
* initializing the cpu_context for the CPU specified by MPIDR for first use.
******************************************************************************/
void cm_init_context(unsigned long mpidr, const entry_point_info_t *ep)
{
if ((mpidr & MPIDR_AFFINITY_MASK) ==
(read_mpidr_el1() & MPIDR_AFFINITY_MASK))
cm_init_my_context(ep);
else
cm_init_context_by_index(platform_get_core_pos(mpidr), ep);
}
#endif
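
As a usage note, an SPD that still calls the deprecated MPIDR-based helpers
above can usually move to the index-based API with a change along the
following lines. This is only a sketch: the wrapper name is invented,
platform_get_core_pos() and cm_get_context_by_index() are the functions used
in this file, and SECURE is assumed to be the usual counterpart of the
NON_SECURE state referenced above.

#include <stdint.h>

#include <context_mgmt.h>
#include <platform.h>

/* Illustration only: fetch the Secure world context of another CPU.
 * Deprecated form (available only while ERROR_DEPRECATED == 0):
 *     ctx = cm_get_context_by_mpidr(mpidr, SECURE);
 * Preferred form, resolving the MPIDR to a linear core index first: */
static void *get_secure_context_of(uint64_t mpidr)
{
	unsigned int idx = platform_get_core_pos(mpidr);

	return cm_get_context_by_index(idx, SECURE);
}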


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,17 @@
#include <asm_macros.S>
#include <context.h>
.global el1_sysregs_context_save
.global el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
.global fpregs_context_save
.global fpregs_context_restore
#endif
.global save_gp_registers
.global restore_gp_registers_eret
.global restore_gp_registers_callee_eret
.global el3_exit
/* -----------------------------------------------------
* The following function strictly follows the AArch64
* PCS to use x9-x17 (temporary caller-saved registers)
@@ -40,7 +51,6 @@
* the register context will be saved.
* -----------------------------------------------------
*/
.global el1_sysregs_context_save
func el1_sysregs_context_save
mrs x9, spsr_el1
@@ -127,7 +137,6 @@ endfunc el1_sysregs_context_save
* from where the register context will be restored
* -----------------------------------------------------
*/
.global el1_sysregs_context_restore
func el1_sysregs_context_restore
ldp x9, x10, [x0, #CTX_SPSR_EL1]
@@ -225,7 +234,6 @@ endfunc el1_sysregs_context_restore
* -----------------------------------------------------
*/
#if CTX_INCLUDE_FPREGS
.global fpregs_context_save
func fpregs_context_save
stp q0, q1, [x0, #CTX_FP_Q0]
stp q2, q3, [x0, #CTX_FP_Q2]
@@ -269,7 +277,6 @@ endfunc fpregs_context_save
* TODO: Revisit when VFP is used in secure world
* -----------------------------------------------------
*/
.global fpregs_context_restore
func fpregs_context_restore
ldp q0, q1, [x0, #CTX_FP_Q0]
ldp q2, q3, [x0, #CTX_FP_Q2]
@@ -303,3 +310,92 @@ func fpregs_context_restore
ret
endfunc fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */
/* -----------------------------------------------------
* The following functions are used to save and restore
* all the general purpose registers. Ideally we would
* only save and restore the callee saved registers when
* a world switch occurs but that type of implementation
* is more complex. So currently we will always save and
* restore these registers on entry and exit of EL3.
* These are not macros to ensure their invocation fits
* within the 32 instructions per exception vector.
* clobbers: x18
* -----------------------------------------------------
*/
func save_gp_registers
stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
stp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
mrs x18, sp_el0
str x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
ret
endfunc save_gp_registers
func restore_gp_registers_eret
ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
b restore_gp_registers_callee_eret
endfunc restore_gp_registers_eret
func restore_gp_registers_callee_eret
ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
ldp x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
msr sp_el0, x17
ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
eret
endfunc restore_gp_registers_callee_eret
/* -----------------------------------------------------
* This routine assumes that the SP_EL3 is pointing to
* a valid context structure from where the gp regs and
* other special registers can be retrieved.
* -----------------------------------------------------
*/
func el3_exit
/* -----------------------------------------------------
* Save the current SP_EL0 i.e. the EL3 runtime stack
* which will be used for handling the next SMC. Then
* switch to SP_EL3
* -----------------------------------------------------
*/
mov x17, sp
msr spsel, #1
str x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
/* -----------------------------------------------------
* Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
* -----------------------------------------------------
*/
ldr x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
ldp x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
msr scr_el3, x18
msr spsr_el3, x16
msr elr_el3, x17
/* Restore saved general purpose registers and return */
b restore_gp_registers_eret
endfunc el3_exit


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -29,7 +29,7 @@
*/
#include <asm_macros.S>
#include <runtime_svc.h>
#include <bl_common.h>
.globl early_exceptions


@@ -32,14 +32,12 @@
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <bl31.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <platform_def.h>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <string.h>
@@ -64,105 +62,6 @@ void cm_init(void)
*/
}
/*******************************************************************************
* This function returns a pointer to the most recent 'cpu_context' structure
* for the CPU identified by `cpu_idx` that was set as the context for the
* specified security state. NULL is returned if no such structure has been
* specified.
******************************************************************************/
void *cm_get_context_by_index(unsigned int cpu_idx,
unsigned int security_state)
{
assert(sec_state_is_valid(security_state));
return get_cpu_data_by_index(cpu_idx, cpu_context[security_state]);
}
/*******************************************************************************
* This function sets the pointer to the current 'cpu_context' structure for the
* specified security state for the CPU identified by CPU index.
******************************************************************************/
void cm_set_context_by_index(unsigned int cpu_idx, void *context,
unsigned int security_state)
{
assert(sec_state_is_valid(security_state));
set_cpu_data_by_index(cpu_idx, cpu_context[security_state], context);
}
#if !ERROR_DEPRECATED
/*
* These context management helpers are deprecated but are maintained for use
* by SPDs which have not migrated to the new API. If ERROR_DEPRECATED
* is enabled, these are excluded from the build so as to force users to
* migrate to the new API.
*/
/*******************************************************************************
* This function returns a pointer to the most recent 'cpu_context' structure
* for the CPU identified by MPIDR that was set as the context for the specified
* security state. NULL is returned if no such structure has been specified.
******************************************************************************/
void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state)
{
assert(sec_state_is_valid(security_state));
return cm_get_context_by_index(platform_get_core_pos(mpidr), security_state);
}
/*******************************************************************************
* This function sets the pointer to the current 'cpu_context' structure for the
* specified security state for the CPU identified by MPIDR
******************************************************************************/
void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_state)
{
assert(sec_state_is_valid(security_state));
cm_set_context_by_index(platform_get_core_pos(mpidr),
context, security_state);
}
/*******************************************************************************
* The following function provides a compatibility function for SPDs using the
* existing cm library routines. This function is expected to be invoked for
* initializing the cpu_context for the CPU specified by MPIDR for first use.
******************************************************************************/
void cm_init_context(unsigned long mpidr, const entry_point_info_t *ep)
{
if ((mpidr & MPIDR_AFFINITY_MASK) ==
(read_mpidr_el1() & MPIDR_AFFINITY_MASK))
cm_init_my_context(ep);
else
cm_init_context_by_index(platform_get_core_pos(mpidr), ep);
}
#endif
/*******************************************************************************
* This function is used to program the context that's used for exception
* return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
* the required security state
******************************************************************************/
static inline void cm_set_next_context(void *context)
{
#if DEBUG
uint64_t sp_mode;
/*
* Check that this function is called with SP_EL0 as the stack
* pointer
*/
__asm__ volatile("mrs %0, SPSel\n"
: "=r" (sp_mode));
assert(sp_mode == MODE_SP_EL0);
#endif
__asm__ volatile("msr spsel, #1\n"
"mov sp, %0\n"
"msr spsel, #0\n"
: : "r" (context));
}
/*******************************************************************************
* The following function initializes the cpu_context 'ctx' for
* first use, and sets the initial entrypoint state as specified by the
@@ -212,7 +111,13 @@ static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t
if (EP_GET_ST(ep->h.attr))
scr_el3 |= SCR_ST_BIT;
#if IMAGE_BL31
/*
* IRQ/FIQ bits only need setting if interrupt routing
* model has been set up for BL31.
*/
scr_el3 |= get_scr_el3_from_routing_model(security_state);
#endif
/*
* Set up SCTLR_ELx for the target exception level:


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -31,74 +31,9 @@
#ifndef __RUNTIME_SVC_H__
#define __RUNTIME_SVC_H__
/*******************************************************************************
* Bit definitions inside the function id as per the SMC calling convention
******************************************************************************/
#define FUNCID_TYPE_SHIFT 31
#define FUNCID_CC_SHIFT 30
#define FUNCID_OEN_SHIFT 24
#define FUNCID_NUM_SHIFT 0
#include <bl_common.h> /* to include exception types */
#include <smcc_helpers.h> /* to include SMCC definitions */
#define FUNCID_TYPE_MASK 0x1
#define FUNCID_CC_MASK 0x1
#define FUNCID_OEN_MASK 0x3f
#define FUNCID_NUM_MASK 0xffff
#define FUNCID_TYPE_WIDTH 1
#define FUNCID_CC_WIDTH 1
#define FUNCID_OEN_WIDTH 6
#define FUNCID_NUM_WIDTH 16
#define GET_SMC_CC(id) ((id >> FUNCID_CC_SHIFT) & \
FUNCID_CC_MASK)
#define GET_SMC_TYPE(id) ((id >> FUNCID_TYPE_SHIFT) & \
FUNCID_TYPE_MASK)
#define SMC_64 1
#define SMC_32 0
#define SMC_UNK 0xffffffff
#define SMC_TYPE_FAST 1
#define SMC_TYPE_STD 0
#define SMC_PREEMPTED 0xfffffffe
/*******************************************************************************
* Owning entity number definitions inside the function id as per the SMC
* calling convention
******************************************************************************/
#define OEN_ARM_START 0
#define OEN_ARM_END 0
#define OEN_CPU_START 1
#define OEN_CPU_END 1
#define OEN_SIP_START 2
#define OEN_SIP_END 2
#define OEN_OEM_START 3
#define OEN_OEM_END 3
#define OEN_STD_START 4 /* Standard Calls */
#define OEN_STD_END 4
#define OEN_TAP_START 48 /* Trusted Applications */
#define OEN_TAP_END 49
#define OEN_TOS_START 50 /* Trusted OS */
#define OEN_TOS_END 63
#define OEN_LIMIT 64
/*******************************************************************************
* Constants to indicate type of exception to the common exception handler.
******************************************************************************/
#define SYNC_EXCEPTION_SP_EL0 0x0
#define IRQ_SP_EL0 0x1
#define FIQ_SP_EL0 0x2
#define SERROR_SP_EL0 0x3
#define SYNC_EXCEPTION_SP_ELX 0x4
#define IRQ_SP_ELX 0x5
#define FIQ_SP_ELX 0x6
#define SERROR_SP_ELX 0x7
#define SYNC_EXCEPTION_AARCH64 0x8
#define IRQ_AARCH64 0x9
#define FIQ_AARCH64 0xa
#define SERROR_AARCH64 0xb
#define SYNC_EXCEPTION_AARCH32 0xc
#define IRQ_AARCH32 0xd
#define FIQ_AARCH32 0xe
#define SERROR_AARCH32 0xf
/*******************************************************************************
* Structure definition, typedefs & constants for the runtime service framework
@@ -122,68 +57,9 @@
#ifndef __ASSEMBLY__
#include <cassert.h>
#include <context.h>
#include <stdint.h>
/* Various flags passed to SMC handlers */
#define SMC_FROM_SECURE (0 << 0)
#define SMC_FROM_NON_SECURE (1 << 0)
#define is_caller_non_secure(_f) (!!(_f & SMC_FROM_NON_SECURE))
#define is_caller_secure(_f) (!(is_caller_non_secure(_f)))
/* Prototype for runtime service initializing function */
typedef int32_t (*rt_svc_init_t)(void);
/* Convenience macros to return from SMC handler */
#define SMC_RET0(_h) { \
return (uint64_t) (_h); \
}
#define SMC_RET1(_h, _x0) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0)); \
SMC_RET0(_h); \
}
#define SMC_RET2(_h, _x0, _x1) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1)); \
SMC_RET1(_h, (_x0)); \
}
#define SMC_RET3(_h, _x0, _x1, _x2) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X2, (_x2)); \
SMC_RET2(_h, (_x0), (_x1)); \
}
#define SMC_RET4(_h, _x0, _x1, _x2, _x3) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3)); \
SMC_RET3(_h, (_x0), (_x1), (_x2)); \
}
/*
* Convenience macros to access general purpose registers using handle provided
* to SMC handler. These take the offset values defined in context.h
*/
#define SMC_GET_GP(_h, _g) \
read_ctx_reg(get_gpregs_ctx(_h), (_g));
#define SMC_SET_GP(_h, _g, _v) \
write_ctx_reg(get_gpregs_ctx(_h), (_g), (_v));
/*
* Convenience macros to access EL3 context registers using handle provided to
* SMC handler. These take the offset values defined in context.h
*/
#define SMC_GET_EL3(_h, _e) \
read_ctx_reg(get_el3state_ctx(_h), (_e));
#define SMC_SET_EL3(_h, _e, _v) \
write_ctx_reg(get_el3state_ctx(_h), (_e), (_v));
/* The macro below is used to identify a Standard Service SMC call */
#define is_std_svc_call(_fid) ((((_fid) >> FUNCID_OEN_SHIFT) & \
FUNCID_OEN_MASK) == OEN_STD_START)
/* The macro below is used to identify a valid Fast SMC call */
#define is_valid_fast_smc(_fid) ((!(((_fid) >> 16) & 0xff)) && \
(GET_SMC_TYPE(_fid) == SMC_TYPE_FAST))
/*
* Prototype for runtime service SMC handler function. x0 (SMC Function ID) to
* x4 are as passed by the caller. Rest of the arguments to SMC and the context
@@ -247,28 +123,6 @@ CASSERT(RT_SVC_DESC_HANDLE == __builtin_offsetof(rt_svc_desc_t, handle), \
((call_type & FUNCID_TYPE_MASK) \
<< FUNCID_OEN_WIDTH))
/*
* Macro to define UUID for services. Apart from defining and initializing a
* uuid_t structure, this macro verifies that the first word of the defined UUID
* does not equal SMC_UNK. This is to ensure that the caller won't mistake the
* returned UUID in x0 for an invalid SMC error return
*/
#define DEFINE_SVC_UUID(_name, _tl, _tm, _th, _cl, _ch, \
_n0, _n1, _n2, _n3, _n4, _n5) \
CASSERT(_tl != SMC_UNK, invalid_svc_uuid);\
static const uuid_t _name = { \
_tl, _tm, _th, _cl, _ch, \
{ _n0, _n1, _n2, _n3, _n4, _n5 } \
}
/* Return a UUID in the SMC return registers */
#define SMC_UUID_RET(_h, _uuid) \
SMC_RET4(handle, ((const uint32_t *) &(_uuid))[0], \
((const uint32_t *) &(_uuid))[1], \
((const uint32_t *) &(_uuid))[2], \
((const uint32_t *) &(_uuid))[3])
/*******************************************************************************
* Function & variable prototypes
******************************************************************************/


@@ -89,6 +89,26 @@
(_p)->h.attr = (uint32_t)(_attr) ; \
} while (0)
/*******************************************************************************
* Constants to indicate type of exception to the common exception handler.
******************************************************************************/
#define SYNC_EXCEPTION_SP_EL0 0x0
#define IRQ_SP_EL0 0x1
#define FIQ_SP_EL0 0x2
#define SERROR_SP_EL0 0x3
#define SYNC_EXCEPTION_SP_ELX 0x4
#define IRQ_SP_ELX 0x5
#define FIQ_SP_ELX 0x6
#define SERROR_SP_ELX 0x7
#define SYNC_EXCEPTION_AARCH64 0x8
#define IRQ_AARCH64 0x9
#define FIQ_AARCH64 0xa
#define SERROR_AARCH64 0xb
#define SYNC_EXCEPTION_AARCH32 0xc
#define IRQ_AARCH32 0xd
#define FIQ_AARCH32 0xe
#define SERROR_AARCH32 0xf
#ifndef __ASSEMBLY__
#include <cdefs.h> /* For __dead2 */
#include <cassert.h>


@@ -31,9 +31,9 @@
#ifndef __CM_H__
#define __CM_H__
#include <arch.h>
#include <bl_common.h>
#include <common_def.h>
#include <cpu_data.h>
#include <stdint.h>
/*******************************************************************************
* Forward declarations
@@ -46,7 +46,6 @@ struct entry_point_info;
void cm_init(void);
void *cm_get_context_by_mpidr(uint64_t mpidr,
uint32_t security_state) __warn_deprecated;
static inline void *cm_get_context(uint32_t security_state);
void cm_set_context_by_mpidr(uint64_t mpidr,
void *context,
uint32_t security_state) __warn_deprecated;
@@ -55,7 +54,9 @@ void *cm_get_context_by_index(unsigned int cpu_idx,
void cm_set_context_by_index(unsigned int cpu_idx,
void *context,
unsigned int security_state);
static inline void cm_set_context(void *context, uint32_t security_state);
void *cm_get_context(uint32_t security_state);
void cm_set_context(void *context, uint32_t security_state);
inline void cm_set_next_context(void *context);
void cm_init_context(uint64_t mpidr,
const struct entry_point_info *ep) __warn_deprecated;
void cm_init_my_context(const struct entry_point_info *ep);
@@ -76,27 +77,28 @@ uint32_t cm_get_scr_el3(uint32_t security_state);
/* Inline definitions */
/*******************************************************************************
* This function returns a pointer to the most recent 'cpu_context' structure
* for the calling CPU that was set as the context for the specified security
* state. NULL is returned if no such structure has been specified.
* This function is used to program the context that's used for exception
* return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
* the required security state
******************************************************************************/
void *cm_get_context(uint32_t security_state)
inline void cm_set_next_context(void *context)
{
assert(security_state <= NON_SECURE);
#if DEBUG
uint64_t sp_mode;
return get_cpu_data(cpu_context[security_state]);
/*
* Check that this function is called with SP_EL0 as the stack
* pointer
*/
__asm__ volatile("mrs %0, SPSel\n"
: "=r" (sp_mode));
assert(sp_mode == MODE_SP_EL0);
#endif
__asm__ volatile("msr spsel, #1\n"
"mov sp, %0\n"
"msr spsel, #0\n"
: : "r" (context));
}
/*******************************************************************************
* This function sets the pointer to the current 'cpu_context' structure for the
* specified security state for the calling CPU
******************************************************************************/
void cm_set_context(void *context, uint32_t security_state)
{
assert(security_state <= NON_SECURE);
set_cpu_data(cpu_context[security_state], context);
}
#endif /* __CM_H__ */
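
For completeness, a small hedged example of pairing the setter and getter
declared in this header; the register_ns_context() wrapper and the origin of
the 'ctx' pointer are assumptions for illustration.

#include <assert.h>

#include <context_mgmt.h>

/* Illustration only: record 'ctx' as the calling CPU's Normal world context
 * and confirm that a later lookup returns the same pointer. */
static void register_ns_context(void *ctx)
{
	cm_set_context(ctx, NON_SECURE);
	assert(cm_get_context(NON_SECURE) == ctx);
}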


@@ -0,0 +1,165 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SMCC_HELPERS_H__
#define __SMCC_HELPERS_H__
/*******************************************************************************
* Bit definitions inside the function id as per the SMC calling convention
******************************************************************************/
#define FUNCID_TYPE_SHIFT 31
#define FUNCID_CC_SHIFT 30
#define FUNCID_OEN_SHIFT 24
#define FUNCID_NUM_SHIFT 0
#define FUNCID_TYPE_MASK 0x1
#define FUNCID_CC_MASK 0x1
#define FUNCID_OEN_MASK 0x3f
#define FUNCID_NUM_MASK 0xffff
#define FUNCID_TYPE_WIDTH 1
#define FUNCID_CC_WIDTH 1
#define FUNCID_OEN_WIDTH 6
#define FUNCID_NUM_WIDTH 16
#define GET_SMC_CC(id) ((id >> FUNCID_CC_SHIFT) & \
FUNCID_CC_MASK)
#define GET_SMC_TYPE(id) ((id >> FUNCID_TYPE_SHIFT) & \
FUNCID_TYPE_MASK)
#define SMC_64 1
#define SMC_32 0
#define SMC_UNK 0xffffffff
#define SMC_TYPE_FAST 1
#define SMC_TYPE_STD 0
#define SMC_PREEMPTED 0xfffffffe
/*******************************************************************************
* Owning entity number definitions inside the function id as per the SMC
* calling convention
******************************************************************************/
#define OEN_ARM_START 0
#define OEN_ARM_END 0
#define OEN_CPU_START 1
#define OEN_CPU_END 1
#define OEN_SIP_START 2
#define OEN_SIP_END 2
#define OEN_OEM_START 3
#define OEN_OEM_END 3
#define OEN_STD_START 4 /* Standard Calls */
#define OEN_STD_END 4
#define OEN_TAP_START 48 /* Trusted Applications */
#define OEN_TAP_END 49
#define OEN_TOS_START 50 /* Trusted OS */
#define OEN_TOS_END 63
#define OEN_LIMIT 64
#ifndef __ASSEMBLY__
#include <cassert.h>
#include <context.h>
#include <stdint.h>
/* Various flags passed to SMC handlers */
#define SMC_FROM_SECURE (0 << 0)
#define SMC_FROM_NON_SECURE (1 << 0)
#define is_caller_non_secure(_f) (!!(_f & SMC_FROM_NON_SECURE))
#define is_caller_secure(_f) (!(is_caller_non_secure(_f)))
/* Convenience macros to return from SMC handler */
#define SMC_RET0(_h) { \
return (uint64_t) (_h); \
}
#define SMC_RET1(_h, _x0) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0)); \
SMC_RET0(_h); \
}
#define SMC_RET2(_h, _x0, _x1) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1)); \
SMC_RET1(_h, (_x0)); \
}
#define SMC_RET3(_h, _x0, _x1, _x2) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X2, (_x2)); \
SMC_RET2(_h, (_x0), (_x1)); \
}
#define SMC_RET4(_h, _x0, _x1, _x2, _x3) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3)); \
SMC_RET3(_h, (_x0), (_x1), (_x2)); \
}
/*
* Convenience macros to access general purpose registers using handle provided
* to SMC handler. These take the offset values defined in context.h
*/
#define SMC_GET_GP(_h, _g) \
read_ctx_reg(get_gpregs_ctx(_h), (_g));
#define SMC_SET_GP(_h, _g, _v) \
write_ctx_reg(get_gpregs_ctx(_h), (_g), (_v));
/*
* Convenience macros to access EL3 context registers using handle provided to
* SMC handler. These take the offset values defined in context.h
*/
#define SMC_GET_EL3(_h, _e) \
read_ctx_reg(get_el3state_ctx(_h), (_e));
#define SMC_SET_EL3(_h, _e, _v) \
write_ctx_reg(get_el3state_ctx(_h), (_e), (_v));
/* The macro below is used to identify a Standard Service SMC call */
#define is_std_svc_call(_fid) ((((_fid) >> FUNCID_OEN_SHIFT) & \
FUNCID_OEN_MASK) == OEN_STD_START)
/* The macro below is used to identify a valid Fast SMC call */
#define is_valid_fast_smc(_fid) ((!(((_fid) >> 16) & 0xff)) && \
(GET_SMC_TYPE(_fid) == SMC_TYPE_FAST))
/*
* Macro to define UUID for services. Apart from defining and initializing a
* uuid_t structure, this macro verifies that the first word of the defined UUID
* does not equal SMC_UNK. This is to ensure that the caller won't mistake the
* returned UUID in x0 for an invalid SMC error return
*/
#define DEFINE_SVC_UUID(_name, _tl, _tm, _th, _cl, _ch, \
_n0, _n1, _n2, _n3, _n4, _n5) \
CASSERT(_tl != SMC_UNK, invalid_svc_uuid);\
static const uuid_t _name = { \
_tl, _tm, _th, _cl, _ch, \
{ _n0, _n1, _n2, _n3, _n4, _n5 } \
}
/* Return a UUID in the SMC return registers */
#define SMC_UUID_RET(_h, _uuid) \
SMC_RET4(handle, ((const uint32_t *) &(_uuid))[0], \
((const uint32_t *) &(_uuid))[1], \
((const uint32_t *) &(_uuid))[2], \
((const uint32_t *) &(_uuid))[3])
#endif /*__ASSEMBLY__*/
#endif /* __SMCC_HELPERS_H__ */
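
Finally, a hedged sketch of how a runtime service handler might use the
helpers defined in this header. The service itself (example_smc_handler and
the 0x82000010 function ID) is invented for illustration, and the handler
signature is an assumption based on the runtime service framework; only the
macros used in the body come from this file.

#include <smcc_helpers.h>

#define EXAMPLE_FID_ADD		0x82000010	/* invented SiP-range function ID */

static uint64_t example_smc_handler(uint32_t smc_fid,
				    uint64_t x1, uint64_t x2, uint64_t x3,
				    uint64_t x4, void *cookie, void *handle,
				    uint64_t flags)
{
	/* Only accept fast SMCs originating from the Normal world. */
	if ((GET_SMC_TYPE(smc_fid) != SMC_TYPE_FAST) || is_caller_secure(flags))
		SMC_RET1(handle, SMC_UNK);

	switch (smc_fid) {
	case EXAMPLE_FID_ADD:
		/* Write x1 + x2 into the saved x0 and return to the caller. */
		SMC_RET1(handle, x1 + x2);
	default:
		SMC_RET1(handle, SMC_UNK);
	}
}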