Merge pull request #105 from athoelke:sm/support_normal_irq_in_tsp-v2

Andrew Thoelke 2014-05-23 11:00:04 +01:00
commit 65335d45f5
9 changed files with 251 additions and 67 deletions


@@ -109,26 +109,35 @@ void runtime_svc_init()
goto error;
}
/* Call the initialisation routine for this runtime service */
rc = rt_svc_descs[index].init();
if (rc) {
ERROR("Error initializing runtime service %s\n",
rt_svc_descs[index].name);
} else {
/*
* Fill the indices corresponding to the start and end
* owning entity numbers with the index of the
* descriptor which will handle the SMCs for this owning
* entity range.
*/
start_idx = get_unique_oen(rt_svc_descs[index].start_oen,
rt_svc_descs[index].call_type);
end_idx = get_unique_oen(rt_svc_descs[index].end_oen,
rt_svc_descs[index].call_type);
for (; start_idx <= end_idx; start_idx++)
rt_svc_descs_indices[start_idx] = index;
/*
* The runtime service may have separate rt_svc_desc_t
* for its fast smc and standard smc. Since the service itself
* needs to be initialized only once, only one of them will have
* an initialisation routine defined. Call the initialisation
* routine for this runtime service, if it is defined.
*/
if (rt_svc_descs[index].init) {
rc = rt_svc_descs[index].init();
if (rc) {
ERROR("Error initializing runtime service %s\n",
rt_svc_descs[index].name);
continue;
}
}
/*
* Fill the indices corresponding to the start and end
* owning entity numbers with the index of the
* descriptor which will handle the SMCs for this owning
* entity range.
*/
start_idx = get_unique_oen(rt_svc_descs[index].start_oen,
rt_svc_descs[index].call_type);
end_idx = get_unique_oen(rt_svc_descs[index].end_oen,
rt_svc_descs[index].call_type);
for (; start_idx <= end_idx; start_idx++)
rt_svc_descs_indices[start_idx] = index;
}
return;
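
The effect of this rework: a runtime service may register two rt_svc_desc_t entries, one fast and one standard, and init() is now optional per descriptor. A minimal, self-contained sketch of the loop above, with simplified stand-in types (assumptions for illustration; the real rt_svc_desc_t and get_unique_oen() are declared in runtime_svc.h):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for rt_svc_desc_t (assumption, for illustration) */
typedef struct {
	uint8_t start_oen;
	uint8_t end_oen;
	uint8_t call_type;		/* SMC_TYPE_FAST or SMC_TYPE_STD */
	const char *name;
	int32_t (*init)(void);		/* may be NULL on a second descriptor */
} svc_desc;

static uint8_t rt_svc_descs_indices[128];

/* One index slot per (call type, OEN) pair, mirroring get_unique_oen() */
static uint32_t unique_oen(uint32_t oen, uint32_t call_type)
{
	return ((call_type & 1) << 6) | (oen & 0x3f);
}

static void init_services(const svc_desc *descs, unsigned int count)
{
	for (unsigned int index = 0; index < count; index++) {
		/* init is optional: only one of a service's descriptors has it */
		if (descs[index].init && descs[index].init() != 0) {
			fprintf(stderr, "Error initializing %s\n",
				descs[index].name);
			continue;	/* failed services get no index slots */
		}

		/* Point every OEN in [start_oen, end_oen] at this descriptor */
		uint32_t idx = unique_oen(descs[index].start_oen,
					  descs[index].call_type);
		uint32_t end = unique_oen(descs[index].end_oen,
					  descs[index].call_type);
		for (; idx <= end; idx++)
			rt_svc_descs_indices[idx] = (uint8_t)index;
	}
}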


@@ -39,8 +39,11 @@
.globl tsp_cpu_suspend_entry
.globl tsp_cpu_resume_entry
.globl tsp_fast_smc_entry
.globl tsp_std_smc_entry
.globl tsp_fiq_entry
/* ---------------------------------------------
* Populate the params in x0-x7 from the pointer
* to the smc args structure in x0.
@@ -317,8 +320,22 @@ tsp_cpu_resume_panic:
* ---------------------------------------------
*/
func tsp_fast_smc_entry
bl tsp_fast_smc_handler
bl tsp_smc_handler
restore_args_call_smc
tsp_fast_smc_entry_panic:
b tsp_fast_smc_entry_panic
/* ---------------------------------------------
* This entrypoint is used by the TSPD to ask
* the TSP to service a std smc request.
* We will enable preemption during execution
* of tsp_smc_handler.
* ---------------------------------------------
*/
func tsp_std_smc_entry
msr daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
bl tsp_smc_handler
msr daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
restore_args_call_smc
tsp_std_smc_entry_panic:
b tsp_std_smc_entry_panic
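
The daifclr/daifset pair brackets the preemption window: FIQs and IRQs are unmasked only while tsp_smc_handler runs a standard SMC, then re-masked before returning to the monitor. For reference, a sketch of the DAIF bit encoding these immediates rely on (the standard AArch64 layout; the actual macros are defined elsewhere in the tree):

/* AArch64 DAIF exception-mask bits, as used by msr daifclr/daifset */
#define DAIF_FIQ_BIT	(1 << 0)	/* F: mask FIQ interrupts */
#define DAIF_IRQ_BIT	(1 << 1)	/* I: mask IRQ interrupts */
#define DAIF_ABT_BIT	(1 << 2)	/* A: mask SError/async aborts */
#define DAIF_DBG_BIT	(1 << 3)	/* D: mask debug exceptions */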


@@ -120,7 +120,14 @@ sync_exception_sp_elx:
.align 7
irq_sp_elx:
b irq_sp_elx
save_caller_regs_and_lr
/* We just update some statistics in the handler */
bl tsp_irq_received
/* Hand over control to the normal world to handle the IRQ */
smc #0
/* Execution of a resumed std smc continues from here */
restore_caller_regs_and_lr
eret
check_vector_size irq_sp_elx
.align 7
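
Seen from the normal world, this hand-off means a standard SMC can return early with SMC_PREEMPTED, after which the client re-enters the TSP via TSP_FID_RESUME until the call completes. A hedged sketch of the resulting client loop, assuming a hypothetical smc64() helper that issues the SMC and returns x0 (the IDs are from the headers added later in this patch):

#include <stdint.h>

#define TSP_ADD			0x2000
#define TSP_STD_FID(fid)	((fid) | 0x72000000u)
#define TSP_FID_RESUME		TSP_STD_FID(0x3000)
#define SMC_PREEMPTED		0xfffffffeu

/* Hypothetical helper (assumption): issue an SMC with x0-x2, return x0 */
extern uint64_t smc64(uint64_t fid, uint64_t x1, uint64_t x2);

uint64_t tsp_add_blocking(uint64_t a, uint64_t b)
{
	uint64_t ret = smc64(TSP_STD_FID(TSP_ADD), a, b);

	/* An IRQ may preempt the std smc any number of times */
	while (ret == SMC_PREEMPTED)
		ret = smc64(TSP_FID_RESUME, 0, 0);

	return ret;	/* x0 == 0 on success; results come back in x1/x2 */
}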


@@ -107,3 +107,18 @@ int32_t tsp_fiq_handler()
return 0;
}
int32_t tsp_irq_received()
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
tsp_stats[linear_id].irq_count++;
spin_lock(&console_lock);
printf("TSP: cpu 0x%x received irq\n\r", mpidr);
INFO("cpu 0x%x: %d irq requests \n",
mpidr, tsp_stats[linear_id].irq_count);
spin_unlock(&console_lock);
return TSP_PREEMPTED;
}


@@ -66,6 +66,7 @@ work_statistics_t tsp_stats[PLATFORM_CORE_COUNT];
* to change.
******************************************************************************/
static const entry_info_t tsp_entry_info = {
tsp_std_smc_entry,
tsp_fast_smc_entry,
tsp_cpu_on_entry,
tsp_cpu_off_entry,
@@ -309,9 +310,9 @@ tsp_args_t *tsp_cpu_resume_main(uint64_t suspend_level,
* TSP fast smc handler. The secure monitor jumps to this function by
* doing the ERET after populating X0-X7 registers. The arguments are received
* in the function arguments in order. Once the service is rendered, this
* function returns to Secure Monitor by raising SMC
* function returns to Secure Monitor by raising SMC.
******************************************************************************/
tsp_args_t *tsp_fast_smc_handler(uint64_t func,
tsp_args_t *tsp_smc_handler(uint64_t func,
uint64_t arg1,
uint64_t arg2,
uint64_t arg3,
@@ -324,18 +325,20 @@ tsp_args_t *tsp_fast_smc_handler(uint64_t func,
uint64_t service_args[2];
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
const char *smc_type;
/* Update this cpu's statistics */
tsp_stats[linear_id].smc_count++;
tsp_stats[linear_id].eret_count++;
printf("SP: cpu 0x%x received fast smc 0x%x\n", read_mpidr(), func);
smc_type = ((func >> 31) & 1) == 1 ? "fast" : "standard";
printf("SP: cpu 0x%x received %s smc 0x%x\n", read_mpidr(), smc_type, func);
INFO("cpu 0x%x: %d smcs, %d erets\n", mpidr,
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count);
/* Render secure services and obtain results here */
results[0] = arg1;
results[1] = arg2;
@@ -346,20 +349,20 @@ tsp_args_t *tsp_fast_smc_handler(uint64_t func,
tsp_get_magic(service_args);
/* Determine the function to perform based on the function ID */
switch (func) {
case TSP_FID_ADD:
switch (TSP_BARE_FID(func)) {
case TSP_ADD:
results[0] += service_args[0];
results[1] += service_args[1];
break;
case TSP_FID_SUB:
case TSP_SUB:
results[0] -= service_args[0];
results[1] -= service_args[1];
break;
case TSP_FID_MUL:
case TSP_MUL:
results[0] *= service_args[0];
results[1] *= service_args[1];
break;
case TSP_FID_DIV:
case TSP_DIV:
results[0] /= service_args[0] ? service_args[0] : 1;
results[1] /= service_args[1] ? service_args[1] : 1;
break;
@@ -367,9 +370,9 @@ tsp_args_t *tsp_fast_smc_handler(uint64_t func,
break;
}
return set_smc_args(func,
return set_smc_args(func, 0,
results[0],
results[1],
0, 0, 0, 0, 0);
0, 0, 0, 0);
}
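
The widened set_smc_args() call reflects the new return convention: x0 echoes the function ID, x1 carries a status of 0, and the two results move to x2/x3 (the TSPD then shifts them down one register for the normal world). A sketch of that layout, assuming set_smc_args() simply stores its eight arguments into the x0-x7 slots reloaded by restore_args_call_smc:

#include <stdint.h>

/* Illustrative stand-in for tsp_args_t (assumption) */
typedef struct {
	uint64_t regs[8];	/* x0..x7 */
} tsp_args_sketch;

static void fill_return_args(tsp_args_sketch *a, uint64_t func,
			     uint64_t res0, uint64_t res1)
{
	a->regs[0] = func;	/* x0: echoed SMC function ID */
	a->regs[1] = 0;		/* x1: status, 0 == success */
	a->regs[2] = res0;	/* x2: first result */
	a->regs[3] = res1;	/* x3: second result */
	a->regs[4] = a->regs[5] = a->regs[6] = a->regs[7] = 0;
}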


@@ -51,13 +51,15 @@
#define GET_SMC_CC(id) ((id >> FUNCID_CC_SHIFT) & \
FUNCID_CC_MASK)
#define GET_SMC_TYPE(id) ((id >> FUNCID_TYPE_SHIFT) & \
FUNCID_TYPE_MASK)
#define SMC_64 1
#define SMC_32 0
#define SMC_UNK 0xffffffff
#define SMC_TYPE_FAST 1
#define SMC_TYPE_STD 0
#define SMC_PREEMPTED 0xfffffffe
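
Bit 31 of the function ID distinguishes the two call types. A quick compile-time check of how GET_SMC_TYPE() above resolves, assuming the usual SMCCC field positions (FUNCID_TYPE_SHIFT == 31, FUNCID_TYPE_MASK == 0x1 are assumptions; they are defined elsewhere in this header):

#define FUNCID_TYPE_SHIFT	31	/* assumed SMCCC position */
#define FUNCID_TYPE_MASK	0x1

_Static_assert(((0xf2002000u >> FUNCID_TYPE_SHIFT) & FUNCID_TYPE_MASK)
	       == SMC_TYPE_FAST, "bit 31 set: fast smc");
_Static_assert(((0x72002000u >> FUNCID_TYPE_SHIFT) & FUNCID_TYPE_MASK)
	       == SMC_TYPE_STD, "bit 31 clear: std smc");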
/*******************************************************************************
* Owning entity number definitions inside the function id as per the SMC
* calling convention


@@ -40,7 +40,7 @@
#define TSP_OFF_DONE 0xf2000002
#define TSP_SUSPEND_DONE 0xf2000003
#define TSP_RESUME_DONE 0xf2000004
#define TSP_WORK_DONE 0xf2000005
#define TSP_PREEMPTED 0xf2000005
/*
* Function identifiers to handle FIQs through the synchronous handling model.
@@ -49,16 +49,35 @@
*/
#define TSP_HANDLED_S_EL1_FIQ 0xf2000006
#define TSP_EL3_FIQ 0xf2000007
#define TSP_HANDLE_FIQ_AND_RETURN 0x2004
/* SMC function ID that TSP uses to request service from secure monitor */
#define TSP_GET_ARGS 0xf2001000
/* Function IDs for various TSP services */
#define TSP_FID_ADD 0xf2002000
#define TSP_FID_SUB 0xf2002001
#define TSP_FID_MUL 0xf2002002
#define TSP_FID_DIV 0xf2002003
/*
* Identifiers for various TSP services. Corresponding function IDs (whether
* fast or standard) are generated by macros defined below
*/
#define TSP_ADD 0x2000
#define TSP_SUB 0x2001
#define TSP_MUL 0x2002
#define TSP_DIV 0x2003
#define TSP_HANDLE_FIQ_AND_RETURN 0x2004
/*
* Generate function IDs for TSP services to be used in SMC calls, by
* appropriately setting bit 31 to differentiate standard and fast SMC calls
*/
#define TSP_STD_FID(fid) ((fid) | 0x72000000 | (0 << 31))
#define TSP_FAST_FID(fid) ((fid) | 0x72000000 | (1 << 31))
/* SMC function ID to resume a previously preempted std smc */
#define TSP_FID_RESUME TSP_STD_FID(0x3000)
/*
 * Identify a TSP service by extracting the low 16 bits of its
 * SMC function ID
 */
#define TSP_BARE_FID(fid) ((fid) & 0xffff)
/*
* Total number of function IDs implemented for services offered to NS clients.
@@ -108,6 +127,7 @@ typedef void (*tsp_generic_fptr_t)(uint64_t arg0,
uint64_t arg7);
typedef struct entry_info {
tsp_generic_fptr_t std_smc_entry;
tsp_generic_fptr_t fast_smc_entry;
tsp_generic_fptr_t cpu_on_entry;
tsp_generic_fptr_t cpu_off_entry;
@@ -118,6 +138,7 @@ typedef struct entry_info {
typedef struct work_statistics {
uint32_t fiq_count; /* Number of FIQs on this cpu */
uint32_t irq_count; /* Number of IRQs on this cpu */
uint32_t sync_fiq_count; /* Number of sync. fiqs on this cpu */
uint32_t sync_fiq_ret_count; /* Number of fiq returns on this cpu */
uint32_t smc_count; /* Number of returns on this cpu */
@@ -153,6 +174,14 @@ extern void tsp_fiq_entry(uint64_t arg0,
uint64_t arg5,
uint64_t arg6,
uint64_t arg7);
extern void tsp_std_smc_entry(uint64_t arg0,
uint64_t arg1,
uint64_t arg2,
uint64_t arg3,
uint64_t arg4,
uint64_t arg5,
uint64_t arg6,
uint64_t arg7);
extern void tsp_fast_smc_entry(uint64_t arg0,
uint64_t arg1,
uint64_t arg2,


@@ -257,7 +257,6 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
uint64_t flags)
{
cpu_context_t *ns_cpu_context;
gp_regs_t *ns_gp_regs;
unsigned long mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr), ns;
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
@@ -267,6 +266,31 @@
switch (smc_fid) {
/*
* This function ID is used by TSP to indicate that it was
* preempted by a normal world IRQ.
*/
case TSP_PREEMPTED:
if (ns)
SMC_RET1(handle, SMC_UNK);
assert(handle == cm_get_context(mpidr, SECURE));
cm_el1_sysregs_context_save(SECURE);
/* Get a reference to the non-secure context */
ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
assert(ns_cpu_context);
/*
* Restore non-secure state. There is no need to save the
* secure system register context since the TSP was supposed
* to preserve it during S-EL1 interrupt handling.
*/
cm_el1_sysregs_context_restore(NON_SECURE);
cm_set_next_eret_context(NON_SECURE);
SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
/*
* This function ID is used only by the TSP to indicate that it has
* finished handling a S-EL1 FIQ interrupt. Execution should resume
@@ -357,9 +381,6 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
*/
tspd_synchronous_sp_exit(tsp_ctx, x1);
/* Should never reach here */
assert(0);
/*
* These function IDs are used only by the SP to indicate it has
* finished:
@@ -392,18 +413,20 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
*/
tspd_synchronous_sp_exit(tsp_ctx, x1);
/* Should never reach here */
assert(0);
/*
* Request from non-secure client to perform an
* arithmetic operation or response from secure
* payload to an earlier request.
*/
case TSP_FID_ADD:
case TSP_FID_SUB:
case TSP_FID_MUL:
case TSP_FID_DIV:
case TSP_FAST_FID(TSP_ADD):
case TSP_FAST_FID(TSP_SUB):
case TSP_FAST_FID(TSP_MUL):
case TSP_FAST_FID(TSP_DIV):
case TSP_STD_FID(TSP_ADD):
case TSP_STD_FID(TSP_SUB):
case TSP_STD_FID(TSP_MUL):
case TSP_STD_FID(TSP_DIV):
if (ns) {
/*
* This is a fresh request from the non-secure client.
@@ -412,11 +435,15 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
* state and send the request to the secure payload.
*/
assert(handle == cm_get_context(mpidr, NON_SECURE));
/* Reject a new request if a std smc is already in progress */
if (get_std_smc_active_flag(tsp_ctx->state))
SMC_RET1(handle, SMC_UNK);
cm_el1_sysregs_context_save(NON_SECURE);
/* Save x1 and x2 for use by TSP_GET_ARGS call below */
SMC_SET_GP(handle, CTX_GPREG_X1, x1);
SMC_SET_GP(handle, CTX_GPREG_X2, x2);
store_tsp_args(tsp_ctx, x1, x2);
/*
* We are done stashing the non-secure context. Ask the
@@ -431,17 +458,27 @@
* from this function.
*/
assert(&tsp_ctx->cpu_ctx == cm_get_context(mpidr, SECURE));
set_aapcs_args7(&tsp_ctx->cpu_ctx, smc_fid, x1, x2, 0, 0,
0, 0, 0);
cm_set_elr_el3(SECURE, (uint64_t) tsp_entry_info->fast_smc_entry);
/*
 * Set the appropriate entry point for the SMC. We expect
 * the TSP to manage the PSTATE.I and PSTATE.F flags as
 * appropriate.
 */
if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
cm_set_elr_el3(SECURE, (uint64_t)
tsp_entry_info->fast_smc_entry);
} else {
set_std_smc_active_flag(tsp_ctx->state);
cm_set_elr_el3(SECURE, (uint64_t)
tsp_entry_info->std_smc_entry);
}
cm_el1_sysregs_context_restore(SECURE);
cm_set_next_eret_context(SECURE);
return smc_fid;
SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2);
} else {
/*
* This is the result from the secure client of an
* earlier request. The results are in x1-x2. Copy it
* earlier request. The results are in x1-x3. Copy it
* into the non-secure context, save the secure state
* and return to the non-secure state.
*/
@@ -451,17 +488,52 @@
/* Get a reference to the non-secure context */
ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
assert(ns_cpu_context);
ns_gp_regs = get_gpregs_ctx(ns_cpu_context);
/* Restore non-secure state */
cm_el1_sysregs_context_restore(NON_SECURE);
cm_set_next_eret_context(NON_SECURE);
SMC_RET2(ns_gp_regs, x1, x2);
if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_STD)
clr_std_smc_active_flag(tsp_ctx->state);
SMC_RET3(ns_cpu_context, x1, x2, x3);
}
break;
/*
* Request from the non-secure world to resume a preempted
* standard SMC call.
*/
case TSP_FID_RESUME:
/* RESUME should be invoked only by normal world */
if (!ns) {
assert(0);
break;
}
/*
* This is a resume request from the non-secure client.
* Save the non-secure state and send the request to
* the secure payload.
*/
assert(handle == cm_get_context(mpidr, NON_SECURE));
/* Check that a std smc was actually preempted and can be resumed */
if (!get_std_smc_active_flag(tsp_ctx->state))
SMC_RET1(handle, SMC_UNK);
cm_el1_sysregs_context_save(NON_SECURE);
/*
* We are done stashing the non-secure context. Ask the
* secure payload to do the work now.
*/
/*
 * We just need to return to the preempted point in
 * the TSP; execution will then resume as normal.
 */
cm_el1_sysregs_context_restore(SECURE);
cm_set_next_eret_context(SECURE);
/*
* This is a request from the secure payload for more arguments
* for an ongoing arithmetic operation requested by the
@@ -475,10 +547,9 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
/* Get a reference to the non-secure context */
ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
assert(ns_cpu_context);
ns_gp_regs = get_gpregs_ctx(ns_cpu_context);
SMC_RET2(handle, read_ctx_reg(ns_gp_regs, CTX_GPREG_X1),
read_ctx_reg(ns_gp_regs, CTX_GPREG_X2));
get_tsp_args(tsp_ctx, x1, x2);
SMC_RET2(handle, x1, x2);
case TOS_CALL_COUNT:
/*
@@ -502,9 +573,9 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
SMC_RET1(handle, SMC_UNK);
}
/* Define a SPD runtime service descriptor */
/* Define a SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
spd,
tspd_fast,
OEN_TOS_START,
OEN_TOS_END,
@@ -512,3 +583,14 @@ DECLARE_RT_SVC(
tspd_setup,
tspd_smc_handler
);
/* Define a SPD runtime service descriptor for standard SMC calls */
DECLARE_RT_SVC(
tspd_std,
OEN_TOS_START,
OEN_TOS_END,
SMC_TYPE_STD,
NULL,
tspd_smc_handler
);
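
Registering the same handler under both call types lets runtime_svc_init() route fast and standard TOS calls to tspd_smc_handler while running tspd_setup() only once; the std descriptor's NULL init is exactly the case the reworked init loop now tolerates. A hedged sketch of what DECLARE_RT_SVC plausibly expands to, inferred from the fields runtime_svc_init() reads (the real macro and rt_svc_desc_t live in runtime_svc.h):

#include <stdint.h>

/* Assumed descriptor shape (illustration only) */
typedef struct rt_svc_desc_sketch {
	uint8_t start_oen;
	uint8_t end_oen;
	uint8_t call_type;			/* SMC_TYPE_FAST or SMC_TYPE_STD */
	const char *name;
	int32_t (*init)(void);			/* NULL for the std descriptor */
	uint64_t (*handle)(uint32_t smc_fid);	/* simplified signature */
} rt_svc_desc_sketch_t;

/* Plausible expansion: a descriptor dropped into a dedicated linker
 * section that runtime_svc_init() iterates over (assumption) */
#define DECLARE_RT_SVC_SKETCH(_name, _start, _end, _type, _setup, _smch) \
	static const rt_svc_desc_sketch_t __svc_desc_ ## _name		\
		__attribute__((section("rt_svc_descs"), used)) = {	\
			.start_oen = (_start),				\
			.end_oen = (_end),				\
			.call_type = (_type),				\
			.name = #_name,					\
			.init = (_setup),				\
			.handle = (_smch),				\
		}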


@@ -125,6 +125,12 @@
#include <cassert.h>
#include <stdint.h>
/*
* The number of arguments to save during an SMC call for the TSP.
* Currently only x1 and x2 are used by TSP.
*/
#define TSP_NUM_ARGS 0x2
/* AArch64 callee saved general purpose register context structure. */
DEFINE_REG_STRUCT(c_rt_regs, TSPD_C_RT_CTX_ENTRIES);
@@ -147,6 +153,8 @@ CASSERT(TSPD_C_RT_CTX_SIZE == sizeof(c_rt_regs_t), \
* 'c_rt_ctx' - stack address to restore C runtime context from after
* returning from a synchronous entry into the SP.
* 'cpu_ctx' - space to maintain SP architectural state
* 'saved_tsp_args' - space to store arguments for TSP arithmetic operations
* which will be queried by the TSP using the TSP_GET_ARGS SMC.
******************************************************************************/
typedef struct tsp_context {
uint64_t saved_elr_el3;
@@ -155,8 +163,20 @@ typedef struct tsp_context {
uint64_t mpidr;
uint64_t c_rt_ctx;
cpu_context_t cpu_ctx;
uint64_t saved_tsp_args[TSP_NUM_ARGS];
} tsp_context_t;
/* Helper macros to store and retrieve tsp args from tsp_context */
#define store_tsp_args(tsp_ctx, x1, x2) do {\
tsp_ctx->saved_tsp_args[0] = x1;\
tsp_ctx->saved_tsp_args[1] = x2;\
} while (0)
#define get_tsp_args(tsp_ctx, x1, x2) do {\
x1 = tsp_ctx->saved_tsp_args[0];\
x2 = tsp_ctx->saved_tsp_args[1];\
} while (0)
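
The get/set/clr_std_smc_active_flag() helpers called from tspd_smc_handler() are not shown in this excerpt; they plausibly live alongside these macros and track a single bit of tsp_ctx->state. A sketch of one possible encoding (the names match the call sites; the bit layout is an assumption):

/* One bit of tsp_context state: is a std smc active (possibly preempted)? */
#define STD_SMC_ACTIVE_FLAG_SHIFT	0	/* assumed bit position */
#define STD_SMC_ACTIVE_FLAG_MASK	1

#define get_std_smc_active_flag(state)	(((state) >> STD_SMC_ACTIVE_FLAG_SHIFT) \
					 & STD_SMC_ACTIVE_FLAG_MASK)
#define set_std_smc_active_flag(state)	((state) |= \
					 (1 << STD_SMC_ACTIVE_FLAG_SHIFT))
#define clr_std_smc_active_flag(state)	((state) &= \
					 ~(STD_SMC_ACTIVE_FLAG_MASK << \
					   STD_SMC_ACTIVE_FLAG_SHIFT))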
/* TSPD power management handlers */
extern const spd_pm_ops_t tspd_pm;