From 317ba09021b90ef50c13a1c44b4d2091736b6a86 Mon Sep 17 00:00:00 2001 From: Achin Gupta Date: Fri, 9 May 2014 19:32:25 +0100 Subject: [PATCH 01/14] Fix broken standby state implementation in PSCI This patch fixes the broken support for entry into standby states introduced under commit-id 'd118f9f864' (tf-issues#94). Upon exit from the platform defined standby state instead of returning to the caller of the SMC, execution would get stuck in the wfi instruction meant for entering a power down state. This patch ensures that exit from a standby state and entry into a power down state do not interfere with each other. Fixes ARM-software/tf-issues#154 Change-Id: I56e5df353368e44d6eefc94ffedefe21929f5cfe --- include/bl31/services/psci.h | 1 + services/std_svc/psci/psci_entry.S | 18 ++++++----- services/std_svc/psci/psci_main.c | 48 ++++++++++++++++++++++-------- 3 files changed, 47 insertions(+), 20 deletions(-) diff --git a/include/bl31/services/psci.h b/include/bl31/services/psci.h index 570fe5b8a..b6e272c3a 100644 --- a/include/bl31/services/psci.h +++ b/include/bl31/services/psci.h @@ -190,6 +190,7 @@ extern void psci_system_reset(void); extern int psci_cpu_on(unsigned long, unsigned long, unsigned long); +extern void __dead2 psci_power_down_wfi(void); extern void psci_aff_on_finish_entry(void); extern void psci_aff_suspend_finish_entry(void); extern void psci_register_spd_pm_hook(const spd_pm_ops_t *); diff --git a/services/std_svc/psci/psci_entry.S b/services/std_svc/psci/psci_entry.S index 256c538dd..3d0181a8e 100644 --- a/services/std_svc/psci/psci_entry.S +++ b/services/std_svc/psci/psci_entry.S @@ -37,6 +37,7 @@ .globl psci_aff_suspend_finish_entry .globl __psci_cpu_off .globl __psci_cpu_suspend + .globl psci_power_down_wfi /* ----------------------------------------------------- * This cpu has been physically powered up. Depending @@ -120,9 +121,6 @@ func __psci_cpu_off mrs x0, mpidr_el1 bl platform_set_coherent_stack bl psci_cpu_off - mov x1, #PSCI_E_SUCCESS - cmp x0, x1 - b.eq final_wfi mov sp, x19 ldp x19, x20, [sp,#0] add sp, sp, #0x10 @@ -144,9 +142,6 @@ func __psci_cpu_suspend mov x1, x21 mov x2, x22 bl psci_cpu_suspend - mov x1, #PSCI_E_SUCCESS - cmp x0, x1 - b.eq final_wfi mov sp, x19 ldp x21, x22, [sp,#0x10] ldp x19, x20, [sp,#0] @@ -154,7 +149,16 @@ func __psci_cpu_suspend func_epilogue ret -func final_wfi + /* -------------------------------------------- + * This function is called to indicate to the + * power controller that it is safe to power + * down this cpu. It should not exit the wfi + * and will be released from reset upon power + * up. 'wfi_spill' is used to catch erroneous + * exits from wfi. + * -------------------------------------------- + */ +func psci_power_down_wfi dsb sy // ensure write buffer empty wfi wfi_spill: diff --git a/services/std_svc/psci/psci_main.c b/services/std_svc/psci/psci_main.c index 1bcf21661..c0866fb64 100644 --- a/services/std_svc/psci/psci_main.c +++ b/services/std_svc/psci/psci_main.c @@ -90,23 +90,37 @@ int psci_cpu_suspend(unsigned int power_state, if (target_afflvl > MPIDR_MAX_AFFLVL) return PSCI_E_INVALID_PARAMS; + /* Determine the 'state type' in the 'power_state' parameter */ pstate_type = psci_get_pstate_type(power_state); + + /* + * Ensure that we have a platform specific handler for entering + * a standby state. 
+ */ if (pstate_type == PSTATE_TYPE_STANDBY) { - if (psci_plat_pm_ops->affinst_standby) - rc = psci_plat_pm_ops->affinst_standby(power_state); - else + if (!psci_plat_pm_ops->affinst_standby) return PSCI_E_INVALID_PARAMS; - } else { - mpidr = read_mpidr(); - rc = psci_afflvl_suspend(mpidr, - entrypoint, - context_id, - power_state, - MPIDR_AFFLVL0, - target_afflvl); + + rc = psci_plat_pm_ops->affinst_standby(power_state); + assert(rc == PSCI_E_INVALID_PARAMS || rc == PSCI_E_SUCCESS); + return rc; } - assert(rc == PSCI_E_INVALID_PARAMS || rc == PSCI_E_SUCCESS); + /* + * Do what is needed to enter the power down state. Upon success, + * enter the final wfi which will power down this cpu else return + * an error. + */ + mpidr = read_mpidr(); + rc = psci_afflvl_suspend(mpidr, + entrypoint, + context_id, + power_state, + MPIDR_AFFLVL0, + target_afflvl); + if (rc == PSCI_E_SUCCESS) + psci_power_down_wfi(); + assert(rc == PSCI_E_INVALID_PARAMS); return rc; } @@ -126,11 +140,19 @@ int psci_cpu_off(void) */ rc = psci_afflvl_off(mpidr, MPIDR_AFFLVL0, target_afflvl); + /* + * Check if all actions needed to safely power down this cpu have + * successfully completed. Enter a wfi loop which will allow the + * power controller to physically power down this cpu. + */ + if (rc == PSCI_E_SUCCESS) + psci_power_down_wfi(); + /* * The only error cpu_off can return is E_DENIED. So check if that's * indeed the case. */ - assert (rc == PSCI_E_SUCCESS || rc == PSCI_E_DENIED); + assert (rc == PSCI_E_DENIED); return rc; } From bb5ffdba18198cffa7480c7dd66c3e0d60a7af30 Mon Sep 17 00:00:00 2001 From: Andrew Thoelke Date: Fri, 16 May 2014 15:38:04 +0100 Subject: [PATCH 02/14] Set SCR_EL3.RW correctly before exiting bl31_main SCR_EL3.RW was not updated immediately before exiting bl31_main() and running BL3-3. If a AArch32 Secure-EL1 Payload had just been initialised, then the SCR_EL3.RW bit would be left indicating a 32-bit BL3-3, which may not be correct. This patch explicitly sets SCR_EL3.RW appropriately based on the provided SPSR_EL3 value for the BL3-3 image. Fixes ARM-software/tf-issues#126 Change-Id: Ic7716fe8bc87e577c4bfaeb46702e88deedd9895 --- bl31/bl31_main.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c index 755320d38..823557176 100644 --- a/bl31/bl31_main.c +++ b/bl31/bl31_main.c @@ -169,9 +169,15 @@ void bl31_prepare_next_image_entry() assert(next_image_info); scr = read_scr(); + scr &= ~SCR_NS_BIT; if (image_type == NON_SECURE) scr |= SCR_NS_BIT; + scr &= ~SCR_RW_BIT; + if ((next_image_info->spsr & (1 << MODE_RW_SHIFT)) == + (MODE_RW_64 << MODE_RW_SHIFT)) + scr |= SCR_RW_BIT; + /* * Tell the context mgmt. library to ensure that SP_EL3 points to * the right context to exit from EL3 correctly. From dc24fe4801449eb8b60d06a26966b5ee65ec1111 Mon Sep 17 00:00:00 2001 From: Achin Gupta Date: Sun, 4 May 2014 18:23:26 +0100 Subject: [PATCH 03/14] Rework 'state' field usage in per-cpu TSP context This patch lays the foundation for using the per-cpu 'state' field in the 'tsp_context' structure for other flags apart from the power state of the TSP. It allocates 2 bits for the power state, introduces the necessary macros to manipulate the power state in the 'state' field and accordingly reworks all use of the TSP_STATE_* states. It also allocates a flag bit to determine if the TSP is handling a standard SMC. If this flag is set then the TSP was interrupted due to non-secure or EL3 interupt depending upon the chosen routing model. 
Macros to get, set and clear this flag have been added as well. This flag will be used by subsequent patches. Change-Id: Ic6ee80bd5895812c83b35189cf2c3be70a9024a6 --- services/spd/tspd/tspd_common.c | 9 +++++++- services/spd/tspd/tspd_main.c | 2 +- services/spd/tspd/tspd_pm.c | 16 +++++++------- services/spd/tspd/tspd_private.h | 37 +++++++++++++++++++++++++++++--- 4 files changed, 51 insertions(+), 13 deletions(-) diff --git a/services/spd/tspd/tspd_common.c b/services/spd/tspd/tspd_common.c index a4c393630..9168ffc1b 100644 --- a/services/spd/tspd/tspd_common.c +++ b/services/spd/tspd/tspd_common.c @@ -85,7 +85,14 @@ int32_t tspd_init_secure_context(uint64_t entrypoint, write_ctx_reg(el1_state, CTX_SCTLR_EL1, sctlr); /* Set this context as ready to be initialised i.e OFF */ - tsp_ctx->state = TSP_STATE_OFF; + set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF); + + /* + * This context has not been used yet. It will become valid + * when the TSP is interrupted and wants the TSPD to preserve + * the context. + */ + clr_std_smc_active_flag(tsp_ctx->state); /* Associate this context with the cpu specified */ tsp_ctx->mpidr = mpidr; diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c index 9fda3074b..6200d571d 100644 --- a/services/spd/tspd/tspd_main.c +++ b/services/spd/tspd/tspd_main.c @@ -155,7 +155,7 @@ int32_t tspd_init(meminfo_t *bl32_meminfo) rc = tspd_synchronous_sp_entry(tsp_ctx); assert(rc != 0); if (rc) { - tsp_ctx->state = TSP_STATE_ON; + set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON); /* * TSP has been successfully initialized. Register power diff --git a/services/spd/tspd/tspd_pm.c b/services/spd/tspd/tspd_pm.c index 2447d9e83..75b4b3008 100644 --- a/services/spd/tspd/tspd_pm.c +++ b/services/spd/tspd/tspd_pm.c @@ -56,7 +56,7 @@ static int32_t tspd_cpu_off_handler(uint64_t cookie) tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; assert(tsp_entry_info); - assert(tsp_ctx->state == TSP_STATE_ON); + assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON); /* Program the entry point and enter the TSP */ cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->cpu_off_entry); @@ -73,7 +73,7 @@ static int32_t tspd_cpu_off_handler(uint64_t cookie) * Reset TSP's context for a fresh start when this cpu is turned on * subsequently. 
*/ - tsp_ctx->state = TSP_STATE_OFF; + set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF); return 0; } @@ -90,7 +90,7 @@ static void tspd_cpu_suspend_handler(uint64_t power_state) tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; assert(tsp_entry_info); - assert(tsp_ctx->state == TSP_STATE_ON); + assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON); /* Program the entry point, power_state parameter and enter the TSP */ write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx), @@ -107,7 +107,7 @@ static void tspd_cpu_suspend_handler(uint64_t power_state) panic(); /* Update its context to reflect the state the TSP is in */ - tsp_ctx->state = TSP_STATE_SUSPEND; + set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_SUSPEND); } /******************************************************************************* @@ -124,7 +124,7 @@ static void tspd_cpu_on_finish_handler(uint64_t cookie) tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; assert(tsp_entry_info); - assert(tsp_ctx->state == TSP_STATE_OFF); + assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_OFF); /* Initialise this cpu's secure context */ tspd_init_secure_context((uint64_t) tsp_entry_info->cpu_on_entry, @@ -143,7 +143,7 @@ static void tspd_cpu_on_finish_handler(uint64_t cookie) panic(); /* Update its context to reflect the state the SP is in */ - tsp_ctx->state = TSP_STATE_ON; + set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON); } /******************************************************************************* @@ -159,7 +159,7 @@ static void tspd_cpu_suspend_finish_handler(uint64_t suspend_level) tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; assert(tsp_entry_info); - assert(tsp_ctx->state == TSP_STATE_SUSPEND); + assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_SUSPEND); /* Program the entry point, suspend_level and enter the SP */ write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx), @@ -176,7 +176,7 @@ static void tspd_cpu_suspend_finish_handler(uint64_t suspend_level) panic(); /* Update its context to reflect the state the SP is in */ - tsp_ctx->state = TSP_STATE_ON; + set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON); } /******************************************************************************* diff --git a/services/spd/tspd/tspd_private.h b/services/spd/tspd/tspd_private.h index 81484e1cf..bb0afcd96 100644 --- a/services/spd/tspd/tspd_private.h +++ b/services/spd/tspd/tspd_private.h @@ -38,10 +38,41 @@ /******************************************************************************* * Secure Payload PM state information e.g. SP is suspended, uninitialised etc + * and macros to access the state information in the per-cpu 'state' flags ******************************************************************************/ -#define TSP_STATE_OFF 0 -#define TSP_STATE_ON 1 -#define TSP_STATE_SUSPEND 2 +#define TSP_PSTATE_OFF 0 +#define TSP_PSTATE_ON 1 +#define TSP_PSTATE_SUSPEND 2 +#define TSP_PSTATE_SHIFT 0 +#define TSP_PSTATE_MASK 0x3 +#define get_tsp_pstate(state) ((state >> TSP_PSTATE_SHIFT) & TSP_PSTATE_MASK) +#define clr_tsp_pstate(state) (state &= ~(TSP_PSTATE_MASK \ + << TSP_PSTATE_SHIFT)) +#define set_tsp_pstate(st, pst) do { \ + clr_tsp_pstate(st); \ + st |= (pst & TSP_PSTATE_MASK) << \ + TSP_PSTATE_SHIFT; \ + } while (0); + + +/* + * This flag is used by the TSPD to determine if the TSP is servicing a standard + * SMC request prior to programming the next entry into the TSP e.g. if TSP + * execution is preempted by a non-secure interrupt and handed control to the + * normal world. 
If another request which is distinct from what the TSP was + * previously doing arrives, then this flag will be help the TSPD to either + * reject the new request or service it while ensuring that the previous context + * is not corrupted. + */ +#define STD_SMC_ACTIVE_FLAG_SHIFT 2 +#define STD_SMC_ACTIVE_FLAG_MASK 1 +#define get_std_smc_active_flag(state) ((state >> STD_SMC_ACTIVE_FLAG_SHIFT) \ + & STD_SMC_ACTIVE_FLAG_MASK) +#define set_std_smc_active_flag(state) (state |= \ + 1 << STD_SMC_ACTIVE_FLAG_SHIFT) +#define clr_std_smc_active_flag(state) (state &= \ + ~(STD_SMC_ACTIVE_FLAG_MASK \ + << STD_SMC_ACTIVE_FLAG_SHIFT)) /******************************************************************************* * Secure Payload execution state information i.e. aarch32 or aarch64 From e33981f59ae947afa59d0910aede4d65c686a963 Mon Sep 17 00:00:00 2001 From: Achin Gupta Date: Sun, 4 May 2014 18:38:28 +0100 Subject: [PATCH 04/14] Add context library API to change a bit in SCR_EL3 This patch adds an API to write to any bit in the SCR_EL3 member of the 'cpu_context' structure of the current CPU for a specified security state. This API will be used in subsequent patches which introduce interrupt management in EL3 to specify the interrupt routing model when execution is not in EL3. It also renames the cm_set_el3_elr() function to cm_set_elr_el3() which is more in line with the system register name being targeted by the API. Change-Id: I310fa7d8f827ad3f350325eca2fb28cb350a85ed --- bl31/context_mgmt.c | 62 +++++++++++++++++++++++++++++++---- include/bl31/context_mgmt.h | 7 ++-- include/lib/aarch64/arch.h | 1 + services/spd/tspd/tspd_main.c | 2 +- services/spd/tspd/tspd_pm.c | 6 ++-- 5 files changed, 65 insertions(+), 13 deletions(-) diff --git a/bl31/context_mgmt.c b/bl31/context_mgmt.c index eae608c5f..bdb851385 100644 --- a/bl31/context_mgmt.c +++ b/bl31/context_mgmt.c @@ -28,6 +28,7 @@ * POSSIBILITY OF SUCH DAMAGE. 
*/ +#include #include #include #include @@ -145,10 +146,10 @@ void cm_el1_sysregs_context_restore(uint32_t security_state) } /******************************************************************************* - * This function function populates 'cpu_context' pertaining to the given - * security state with the entrypoint, SPSR and SCR values so that an ERET from - * this securit state correctly restores corresponding values to drop the CPU to - * the next exception level + * This function populates 'cpu_context' pertaining to the given security state + * with the entrypoint, SPSR and SCR values so that an ERET from this security + * state correctly restores corresponding values to drop the CPU to the next + * exception level ******************************************************************************/ void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint, uint32_t spsr, uint32_t scr) @@ -167,10 +168,10 @@ void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint, } /******************************************************************************* - * This function function populates ELR_EL3 member of 'cpu_context' pertaining - * to the given security state with the given entrypoint + * This function populates ELR_EL3 member of 'cpu_context' pertaining to the + * given security state with the given entrypoint ******************************************************************************/ -void cm_set_el3_elr(uint32_t security_state, uint64_t entrypoint) +void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint) { cpu_context_t *ctx; el3_state_t *state; @@ -183,6 +184,53 @@ void cm_set_el3_elr(uint32_t security_state, uint64_t entrypoint) write_ctx_reg(state, CTX_ELR_EL3, entrypoint); } +/******************************************************************************* + * This function updates a single bit in the SCR_EL3 member of the 'cpu_context' + * pertaining to the given security state using the value and bit position + * specified in the parameters. It preserves all other bits. + ******************************************************************************/ +void cm_write_scr_el3_bit(uint32_t security_state, + uint32_t bit_pos, + uint32_t value) +{ + cpu_context_t *ctx; + el3_state_t *state; + uint32_t scr_el3; + + ctx = cm_get_context(read_mpidr(), security_state); + assert(ctx); + + /* Ensure that the bit position is a valid one */ + assert((1 << bit_pos) & SCR_VALID_BIT_MASK); + + /* + * Get the SCR_EL3 value from the cpu context, clear the desired bit + * and set it to its new value. + */ + state = get_el3state_ctx(ctx); + scr_el3 = read_ctx_reg(state, CTX_SCR_EL3); + scr_el3 &= ~(1 << bit_pos); + scr_el3 |= value << bit_pos; + write_ctx_reg(state, CTX_SCR_EL3, scr_el3); +} + +/******************************************************************************* + * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the + * given security state. + ******************************************************************************/ +uint32_t cm_get_scr_el3(uint32_t security_state) +{ + cpu_context_t *ctx; + el3_state_t *state; + + ctx = cm_get_context(read_mpidr(), security_state); + assert(ctx); + + /* Populate EL3 state so that ERET jumps to the correct entry */ + state = get_el3state_ctx(ctx); + return read_ctx_reg(state, CTX_SCR_EL3); +} + /******************************************************************************* * This function is used to program the context that's used for exception * return. 
This initializes the SP_EL3 to a pointer to a 'cpu_context' set for diff --git a/include/bl31/context_mgmt.h b/include/bl31/context_mgmt.h index d2598eef7..ad9d78565 100644 --- a/include/bl31/context_mgmt.h +++ b/include/bl31/context_mgmt.h @@ -47,10 +47,13 @@ extern void cm_el1_sysregs_context_save(uint32_t security_state); extern void cm_el1_sysregs_context_restore(uint32_t security_state); extern void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint, uint32_t spsr, uint32_t scr); -extern void cm_set_el3_elr(uint32_t security_state, uint64_t entrypoint); +extern void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint); +extern void cm_write_scr_el3_bit(uint32_t security_state, + uint32_t bit_pos, + uint32_t value); extern void cm_set_next_eret_context(uint32_t security_state); extern void cm_init_pcpu_ptr_cache(); extern void cm_set_pcpu_ptr_cache(const void *pcpu_ptr); extern void *cm_get_pcpu_ptr_cache(void); - +extern uint32_t cm_get_scr_el3(uint32_t security_state); #endif /* __CM_H__ */ diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h index d7e65b38e..3a00f6efe 100644 --- a/include/lib/aarch64/arch.h +++ b/include/lib/aarch64/arch.h @@ -148,6 +148,7 @@ #define SCR_FIQ_BIT (1 << 2) #define SCR_IRQ_BIT (1 << 1) #define SCR_NS_BIT (1 << 0) +#define SCR_VALID_BIT_MASK 0x2f8f /* HCR definitions */ #define HCR_RW_BIT (1ull << 31) diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c index 6200d571d..06e21c408 100644 --- a/services/spd/tspd/tspd_main.c +++ b/services/spd/tspd/tspd_main.c @@ -295,7 +295,7 @@ uint64_t tspd_smc_handler(uint32_t smc_fid, assert(&tsp_ctx->cpu_ctx == cm_get_context(mpidr, SECURE)); set_aapcs_args7(&tsp_ctx->cpu_ctx, smc_fid, x1, x2, 0, 0, 0, 0, 0); - cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->fast_smc_entry); + cm_set_elr_el3(SECURE, (uint64_t) tsp_entry_info->fast_smc_entry); cm_el1_sysregs_context_restore(SECURE); cm_set_next_eret_context(SECURE); diff --git a/services/spd/tspd/tspd_pm.c b/services/spd/tspd/tspd_pm.c index 75b4b3008..d99aa2224 100644 --- a/services/spd/tspd/tspd_pm.c +++ b/services/spd/tspd/tspd_pm.c @@ -59,7 +59,7 @@ static int32_t tspd_cpu_off_handler(uint64_t cookie) assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON); /* Program the entry point and enter the TSP */ - cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->cpu_off_entry); + cm_set_elr_el3(SECURE, (uint64_t) tsp_entry_info->cpu_off_entry); rc = tspd_synchronous_sp_entry(tsp_ctx); /* @@ -96,7 +96,7 @@ static void tspd_cpu_suspend_handler(uint64_t power_state) write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx), CTX_GPREG_X0, power_state); - cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->cpu_suspend_entry); + cm_set_elr_el3(SECURE, (uint64_t) tsp_entry_info->cpu_suspend_entry); rc = tspd_synchronous_sp_entry(tsp_ctx); /* @@ -165,7 +165,7 @@ static void tspd_cpu_suspend_finish_handler(uint64_t suspend_level) write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx), CTX_GPREG_X0, suspend_level); - cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->cpu_resume_entry); + cm_set_elr_el3(SECURE, (uint64_t) tsp_entry_info->cpu_resume_entry); rc = tspd_synchronous_sp_entry(tsp_ctx); /* From 126747f7f31fc929bbfe750638288710b8e375f8 Mon Sep 17 00:00:00 2001 From: Achin Gupta Date: Fri, 9 May 2014 10:03:15 +0100 Subject: [PATCH 05/14] Introduce interrupt registration framework in BL3-1 This patch introduces a framework for registering interrupts routed to EL3. 
The interrupt routing model is governed by the SCR_EL3.IRQ and FIQ bits and the security state an interrupt is generated in. The framework recognizes three type of interrupts depending upon which exception level and security state they should be handled in i.e. Secure EL1 interrupts, Non-secure interrupts and EL3 interrupts. It provides an API and macros that allow a runtime service to register an handler for a type of interrupt and specify the routing model. The framework validates the routing model and uses the context management framework to ensure that it is applied to the SCR_EL3 prior to entry into the target security state. It saves the handler in internal data structures. An API is provided to retrieve the handler when an interrupt of a particular type is asserted. Registration is expected to be done once by the primary CPU. The same handler and routing model is used for all CPUs. Support for EL3 interrupts will be added to the framework in the future. A makefile flag has been added to allow the FVP port choose between ARM GIC v2 and v3 support in EL3. The latter version is currently unsupported. A framework for handling interrupts in BL3-1 will be introduced in subsequent patches. The default routing model in the absence of any handlers expects no interrupts to be routed to EL3. Change-Id: Idf7c023b34fcd4800a5980f2bef85e4b5c29e649 --- bl31/bl31.mk | 1 + bl31/context_mgmt.c | 6 + bl31/interrupt_mgmt.c | 206 ++++++++++++++++++++++++++++++++++ docs/user-guide.md | 4 + drivers/arm/gic/gic_v2.c | 26 +++++ include/bl31/interrupt_mgmt.h | 128 +++++++++++++++++++++ include/drivers/arm/gic_v2.h | 6 + plat/fvp/plat_gic.c | 38 +++++++ plat/fvp/platform.h | 2 + plat/fvp/platform.mk | 6 + 10 files changed, 423 insertions(+) create mode 100644 bl31/interrupt_mgmt.c create mode 100644 include/bl31/interrupt_mgmt.h diff --git a/bl31/bl31.mk b/bl31/bl31.mk index c0dc2fd15..54e8cc424 100644 --- a/bl31/bl31.mk +++ b/bl31/bl31.mk @@ -31,6 +31,7 @@ BL31_SOURCES += bl31/bl31_main.c \ bl31/context_mgmt.c \ bl31/runtime_svc.c \ + bl31/interrupt_mgmt.c \ bl31/aarch64/bl31_arch_setup.c \ bl31/aarch64/bl31_entrypoint.S \ bl31/aarch64/context.S \ diff --git a/bl31/context_mgmt.c b/bl31/context_mgmt.c index bdb851385..78bfa89d1 100644 --- a/bl31/context_mgmt.c +++ b/bl31/context_mgmt.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -160,6 +161,11 @@ void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint, ctx = cm_get_context(read_mpidr(), security_state); assert(ctx); + /* Program the interrupt routing model for this security state */ + scr &= ~SCR_FIQ_BIT; + scr &= ~SCR_IRQ_BIT; + scr |= get_scr_el3_from_routing_model(security_state); + /* Populate EL3 state so that we've the right context before doing ERET */ state = get_el3state_ctx(ctx); write_ctx_reg(state, CTX_SPSR_EL3, spsr); diff --git a/bl31/interrupt_mgmt.c b/bl31/interrupt_mgmt.c new file mode 100644 index 000000000..2b0c79706 --- /dev/null +++ b/bl31/interrupt_mgmt.c @@ -0,0 +1,206 @@ +/* + * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include + +/******************************************************************************* + * Local structure and corresponding array to keep track of the state of the + * registered interrupt handlers for each interrupt type. + * The field descriptions are: + * + * 'flags' : Bit[0], Routing model for this interrupt type when execution is + * not in EL3 in the secure state. '1' implies that this + * interrupt will be routed to EL3. '0' implies that this + * interrupt will be routed to the current exception level. + * + * Bit[1], Routing model for this interrupt type when execution is + * not in EL3 in the non-secure state. '1' implies that this + * interrupt will be routed to EL3. '0' implies that this + * interrupt will be routed to the current exception level. + * + * All other bits are reserved and SBZ. + * + * 'scr_el3[2]' : Mapping of the routing model in the 'flags' field to the + * value of the SCR_EL3.IRQ or FIQ bit for each security state. + * There are two instances of this field corresponding to the + * two security states. + ******************************************************************************/ +typedef struct intr_type_desc { + interrupt_type_handler_t handler; + uint32_t flags; + uint32_t scr_el3[2]; +} intr_type_desc_t; + +static intr_type_desc_t intr_type_descs[MAX_INTR_TYPES]; + +/******************************************************************************* + * This function validates the interrupt type. EL3 interrupts are currently not + * supported. 
+ ******************************************************************************/ +static int32_t validate_interrupt_type(uint32_t type) +{ + if (type == INTR_TYPE_EL3) + return -ENOTSUP; + + if (type != INTR_TYPE_S_EL1 && type != INTR_TYPE_NS) + return -EINVAL; + + return 0; +} + +/******************************************************************************* +* This function validates the routing model for this type of interrupt + ******************************************************************************/ +static int32_t validate_routing_model(uint32_t type, uint32_t flags) +{ + flags >>= INTR_RM_FLAGS_SHIFT; + flags &= INTR_RM_FLAGS_MASK; + + if (type == INTR_TYPE_S_EL1) + return validate_sel1_interrupt_rm(flags); + + if (type == INTR_TYPE_NS) + return validate_ns_interrupt_rm(flags); + + return -EINVAL; +} + +/******************************************************************************* + * This function returns the cached copy of the SCR_EL3 which contains the + * routing model (expressed through the IRQ and FIQ bits) for a security state + * which was stored through a call to 'set_routing_model()' earlier. + ******************************************************************************/ +uint32_t get_scr_el3_from_routing_model(uint32_t security_state) +{ + uint32_t scr_el3; + + assert(security_state <= NON_SECURE); + scr_el3 = intr_type_descs[INTR_TYPE_NS].scr_el3[security_state]; + scr_el3 |= intr_type_descs[INTR_TYPE_S_EL1].scr_el3[security_state]; + scr_el3 |= intr_type_descs[INTR_TYPE_EL3].scr_el3[security_state]; + return scr_el3; +} + +/******************************************************************************* + * This function uses the 'interrupt_type_flags' parameter to obtain the value + * of the trap bit (IRQ/FIQ) in the SCR_EL3 for a security state for this + * interrupt type. It uses it to update the SCR_EL3 in the cpu context and the + * 'intr_type_desc' for that security state. + ******************************************************************************/ +static void set_scr_el3_from_rm(uint32_t type, + uint32_t interrupt_type_flags, + uint32_t security_state) +{ + uint32_t flag, bit_pos; + + flag = get_interrupt_rm_flag(interrupt_type_flags, security_state); + bit_pos = plat_interrupt_type_to_line(type, security_state); + intr_type_descs[type].scr_el3[security_state] = flag << bit_pos; + cm_write_scr_el3_bit(security_state, bit_pos, flag); +} + +/******************************************************************************* + * This function validates the routing model specified in the 'flags' and + * updates internal data structures to reflect the new routing model. It also + * updates the copy of SCR_EL3 for each security state with the new routing + * model in the 'cpu_context' structure for this cpu. + ******************************************************************************/ +int32_t set_routing_model(uint32_t type, uint32_t flags) +{ + int32_t rc; + + rc = validate_interrupt_type(type); + if (rc) + return rc; + + rc = validate_routing_model(type, flags); + if (rc) + return rc; + + /* Update the routing model in internal data structures */ + intr_type_descs[type].flags = flags; + set_scr_el3_from_rm(type, flags, SECURE); + set_scr_el3_from_rm(type, flags, NON_SECURE); + + return 0; +} + +/******************************************************************************* + * This function registers a handler for the 'type' of interrupt specified. It + * also validates the routing model specified in the 'flags' for this type of + * interrupt. 
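+ *
+ * A minimal registration sketch (the handler symbol below is hypothetical;
+ * everything else is defined in interrupt_mgmt.h). It asks for S-EL1
+ * interrupts that preempt the non-secure state to be routed to EL3:
+ *
+ *   uint32_t flags = 0;
+ *   set_interrupt_rm_flag(flags, NON_SECURE);
+ *   rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+ *                                        my_sel1_interrupt_handler,
+ *                                        flags);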
+ ******************************************************************************/ +int32_t register_interrupt_type_handler(uint32_t type, + interrupt_type_handler_t handler, + uint32_t flags) +{ + int32_t rc; + + /* Validate the 'handler' parameter */ + if (!handler) + return -EINVAL; + + /* Validate the 'flags' parameter */ + if (flags & INTR_TYPE_FLAGS_MASK) + return -EINVAL; + + /* Check if a handler has already been registered */ + if (intr_type_descs[type].handler) + return -EALREADY; + + rc = set_routing_model(type, flags); + if (rc) + return rc; + + /* Save the handler */ + intr_type_descs[type].handler = handler; + + return 0; +} + +/******************************************************************************* + * This function is called when an interrupt is generated and returns the + * handler for the interrupt type (if registered). It returns NULL if the + * interrupt type is not supported or its handler has not been registered. + ******************************************************************************/ +interrupt_type_handler_t get_interrupt_type_handler(uint32_t type) +{ + if (validate_interrupt_type(type)) + return NULL; + + return intr_type_descs[type].handler; +} + diff --git a/docs/user-guide.md b/docs/user-guide.md index e7f0df54c..201db384b 100644 --- a/docs/user-guide.md +++ b/docs/user-guide.md @@ -157,6 +157,10 @@ performed. * `V`: Verbose build. If assigned anything other than 0, the build commands are printed. Default is 0 +* `FVP_GIC_ARCH`: Choice of ARM GIC architecture version used by the FVP port + for implementing the platform GIC API. This API is used by the interrupt + management framework. Default is 2 i.e. version 2.0 + ### Creating a Firmware Image Package FIPs are automatically created as part of the build instructions described in diff --git a/drivers/arm/gic/gic_v2.c b/drivers/arm/gic/gic_v2.c index 00464cbd2..27a39b9c7 100644 --- a/drivers/arm/gic/gic_v2.c +++ b/drivers/arm/gic/gic_v2.c @@ -28,8 +28,10 @@ * POSSIBILITY OF SUCH DAMAGE. */ +#include #include #include +#include #include /******************************************************************************* @@ -290,3 +292,27 @@ void gicd_set_itargetsr(unsigned int base, unsigned int id, unsigned int iface) (1 << iface) << (byte_off << 3)); } +/******************************************************************************* + * This function allows the interrupt management framework to determine (through + * the platform) which interrupt line (IRQ/FIQ) to use for an interrupt type to + * route it to EL3. The interrupt line is represented as the bit position of the + * IRQ or FIQ bit in the SCR_EL3. + ******************************************************************************/ +uint32_t gicv2_interrupt_type_to_line(uint32_t cpuif_base, uint32_t type) +{ + uint32_t gicc_ctlr; + + /* Non-secure interrupts are signalled on the IRQ line always */ + if (type == INTR_TYPE_NS) + return __builtin_ctz(SCR_IRQ_BIT); + + /* + * Secure interrupts are signalled using the IRQ line if the FIQ_EN + * bit is not set else they are signalled using the FIQ line. + */ + gicc_ctlr = gicc_read_ctlr(cpuif_base); + if (gicc_ctlr & FIQ_EN) + return __builtin_ctz(SCR_FIQ_BIT); + else + return __builtin_ctz(SCR_IRQ_BIT); +} diff --git a/include/bl31/interrupt_mgmt.h b/include/bl31/interrupt_mgmt.h new file mode 100644 index 000000000..0b24f39a5 --- /dev/null +++ b/include/bl31/interrupt_mgmt.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __INTERRUPT_MGMT_H__ +#define __INTERRUPT_MGMT_H__ + +#include + +/******************************************************************************* + * Constants for the types of interrupts recognised by the IM framework + ******************************************************************************/ +#define INTR_TYPE_S_EL1 0 +#define INTR_TYPE_EL3 1 +#define INTR_TYPE_NS 2 +#define MAX_INTR_TYPES 3 +#define INTR_TYPE_INVAL MAX_INTR_TYPES +/* + * Constant passed to the interrupt handler in the 'id' field when the + * framework does not read the gic registers to determine the interrupt id. + */ +#define INTR_ID_UNAVAILABLE 0xFFFFFFFF + + +/******************************************************************************* + * Mask for _both_ the routing model bits in the 'flags' parameter and + * constants to define the valid routing models for each supported interrupt + * type + ******************************************************************************/ +#define INTR_RM_FLAGS_SHIFT 0x0 +#define INTR_RM_FLAGS_MASK 0x3 +/* Routed to EL3 from NS. 
Taken to S-EL1 from Secure */ +#define INTR_SEL1_VALID_RM0 0x2 +/* Routed to EL3 from NS and Secure */ +#define INTR_SEL1_VALID_RM1 0x3 +/* Routed to EL1/EL2 from NS and to S-EL1 from Secure */ +#define INTR_NS_VALID_RM0 0x0 +/* Routed to EL1/EL2 from NS and to EL3 from Secure */ +#define INTR_NS_VALID_RM1 0x1 + + +/******************************************************************************* + * Constants for the _individual_ routing model bits in the 'flags' field for + * each interrupt type and mask to validate the 'flags' parameter while + * registering an interrupt handler + ******************************************************************************/ +#define INTR_TYPE_FLAGS_MASK 0xFFFFFFFC + +#define INTR_RM_FROM_SEC_SHIFT SECURE /* BIT[0] */ +#define INTR_RM_FROM_NS_SHIFT NON_SECURE /* BIT[1] */ +#define INTR_RM_FROM_FLAG_MASK 1 +#define get_interrupt_rm_flag(flag, ss) (((flag >> INTR_RM_FLAGS_SHIFT) >> ss) \ + & INTR_RM_FROM_FLAG_MASK) +#define set_interrupt_rm_flag(flag, ss) (flag |= 1 << ss) +#define clr_interrupt_rm_flag(flag, ss) (flag &= ~(1 << ss)) + + +/******************************************************************************* + * Macros to validate the routing model bits in the 'flags' for a type + * of interrupt. If the model does not match one of the valid masks + * -EINVAL is returned. + ******************************************************************************/ +#define validate_sel1_interrupt_rm(x) (x == INTR_SEL1_VALID_RM0 ? 0 : \ + (x == INTR_SEL1_VALID_RM1 ? 0 :\ + -EINVAL)) + +#define validate_ns_interrupt_rm(x) (x == INTR_NS_VALID_RM0 ? 0 : \ + (x == INTR_NS_VALID_RM1 ? 0 :\ + -EINVAL)) + +/******************************************************************************* + * Macros to set the 'flags' parameter passed to an interrupt type handler. Only + * the flag to indicate the security state when the exception was generated is + * supported. 
+ ******************************************************************************/ +#define INTR_SRC_SS_FLAG_SHIFT 0 /* BIT[0] */ +#define INTR_SRC_SS_FLAG_MASK 1 +#define set_interrupt_src_ss(flag, val) (flag |= val << INTR_SRC_SS_FLAG_SHIFT) +#define clr_interrupt_src_ss(flag) (flag &= ~(1 << INTR_SRC_SS_FLAG_SHIFT)) +#define get_interrupt_src_ss(flag) ((flag >> INTR_SRC_SS_FLAG_SHIFT) & \ + INTR_SRC_SS_FLAG_MASK) + +#ifndef __ASSEMBLY__ + +/* Prototype for defining a handler for an interrupt type */ +typedef uint64_t (*interrupt_type_handler_t)(uint32_t id, + uint32_t flags, + void *handle, + void *cookie); + +/******************************************************************************* + * Function & variable prototypes + ******************************************************************************/ +extern uint32_t get_scr_el3_from_routing_model(uint32_t security_state); +extern int32_t set_routing_model(uint32_t type, uint32_t flags); +extern int32_t register_interrupt_type_handler(uint32_t type, + interrupt_type_handler_t handler, + uint32_t flags); +extern interrupt_type_handler_t get_interrupt_type_handler(uint32_t interrupt_type); + +#endif /*__ASSEMBLY__*/ +#endif /* __INTERRUPT_MGMT_H__ */ diff --git a/include/drivers/arm/gic_v2.h b/include/drivers/arm/gic_v2.h index ccf3d32c4..91c3f11e2 100644 --- a/include/drivers/arm/gic_v2.h +++ b/include/drivers/arm/gic_v2.h @@ -298,6 +298,12 @@ static inline void gicc_write_dir(unsigned int base, unsigned int val) mmio_write_32(base + GICC_DIR, val); } +/******************************************************************************* + * Prototype of function to map an interrupt type to the interrupt line used to + * signal it. + ******************************************************************************/ +uint32_t gicv2_interrupt_type_to_line(uint32_t cpuif_base, uint32_t type); + #endif /*__ASSEMBLY__*/ #endif /* __GIC_V2_H__ */ diff --git a/plat/fvp/plat_gic.c b/plat/fvp/plat_gic.c index db3c9cf6f..dd409f56a 100644 --- a/plat/fvp/plat_gic.c +++ b/plat/fvp/plat_gic.c @@ -29,9 +29,12 @@ */ #include +#include +#include #include #include #include +#include #include #include @@ -284,3 +287,38 @@ void gic_setup(void) gic_cpuif_setup(gicc_base); gic_distif_setup(gicd_base); } + +/******************************************************************************* + * An ARM processor signals interrupt exceptions through the IRQ and FIQ pins. + * The interrupt controller knows which pin/line it uses to signal a type of + * interrupt. The platform knows which interrupt controller type is being used + * in a particular security state e.g. with an ARM GIC, normal world could use + * the GICv2 features while the secure world could use GICv3 features and vice + * versa. + * This function is exported by the platform to let the interrupt management + * framework determine for a type of interrupt and security state, which line + * should be used in the SCR_EL3 to control its routing to EL3. The interrupt + * line is represented as the bit position of the IRQ or FIQ bit in the SCR_EL3. 
+ ******************************************************************************/ +uint32_t plat_interrupt_type_to_line(uint32_t type, uint32_t security_state) +{ + uint32_t gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR); + + assert(type == INTR_TYPE_S_EL1 || + type == INTR_TYPE_EL3 || + type == INTR_TYPE_NS); + + assert(security_state == NON_SECURE || security_state == SECURE); + + /* + * We ignore the security state parameter under the assumption that + * both normal and secure worlds are using ARM GICv2. This parameter + * will be used when the secure world starts using GICv3. + */ +#if FVP_GIC_ARCH == 2 + return gicv2_interrupt_type_to_line(gicc_base, type); +#else +#error "Invalid GIC architecture version specified for FVP port" +#endif +} + diff --git a/plat/fvp/platform.h b/plat/fvp/platform.h index 40f780eff..e1580debd 100644 --- a/plat/fvp/platform.h +++ b/plat/fvp/platform.h @@ -397,6 +397,8 @@ extern void gic_cpuif_deactivate(unsigned int); extern void gic_cpuif_setup(unsigned int); extern void gic_pcpu_distif_setup(unsigned int); extern void gic_setup(void); +extern uint32_t plat_interrupt_type_to_line(uint32_t type, + uint32_t security_state); /* Declarations for fvp_topology.c */ extern int plat_setup_topology(void); diff --git a/plat/fvp/platform.mk b/plat/fvp/platform.mk index 511a25c2a..a8054bf3b 100644 --- a/plat/fvp/platform.mk +++ b/plat/fvp/platform.mk @@ -67,3 +67,9 @@ BL31_SOURCES += drivers/arm/gic/gic_v2.c \ plat/fvp/aarch64/plat_helpers.S \ plat/fvp/aarch64/plat_common.c \ plat/fvp/drivers/pwrc/fvp_pwrc.c + +# Flag used by the FVP port to determine the version of ARM GIC architecture +# to use for interrupt management in EL3. +FVP_GIC_ARCH := 2 +$(eval $(call add_define,FVP_GIC_ARCH)) + From 618bc607e9e47342a4ae7ff4c5dcc7a2d82784aa Mon Sep 17 00:00:00 2001 From: Achin Gupta Date: Sun, 4 May 2014 19:02:52 +0100 Subject: [PATCH 06/14] Introduce platform api to access an ARM GIC This patch introduces a set of functions which allow generic firmware code e.g. the interrupt management framework to access the platform interrupt controller. APIs for finding the type and id of the highest pending interrupt, acknowledging and EOIing an interrupt and finding the security state of an interrupt have been added. It is assumed that the platform interrupt controller implements the v2.0 of the ARM GIC architecture specification. Support for v3.0 of the specification for managing interrupts in EL3 and the platform port will be added in the future. 
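As a rough usage sketch (not part of this patch and only illustrative of the
intended call sequence), EL3 code could drive these APIs as follows once the
GIC has been initialised through gic_setup():

    uint32_t type = ic_get_pending_interrupt_type();
    if (type != INTR_TYPE_INVAL) {
            uint32_t raw = ic_acknowledge_interrupt();
            /* ... dispatch based on 'type' and the id held in 'raw' ... */
            ic_end_of_interrupt(raw);
    }
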
Change-Id: Ib3a01c2cf3e3ab27806930f1be79db2b29f91bcf --- include/drivers/arm/gic_v2.h | 7 +++ plat/fvp/plat_gic.c | 94 +++++++++++++++++++++++++++++++++--- plat/fvp/platform.h | 9 +++- 3 files changed, 102 insertions(+), 8 deletions(-) diff --git a/include/drivers/arm/gic_v2.h b/include/drivers/arm/gic_v2.h index 91c3f11e2..e81967638 100644 --- a/include/drivers/arm/gic_v2.h +++ b/include/drivers/arm/gic_v2.h @@ -43,6 +43,7 @@ #define GIC_LOWEST_SEC_PRIORITY 127 #define GIC_HIGHEST_NS_PRIORITY 128 #define GIC_LOWEST_NS_PRIORITY 254 /* 255 would disable an interrupt */ +#define GIC_SPURIOUS_INTERRUPT 1023 #define ENABLE_GRP0 (1 << 0) #define ENABLE_GRP1 (1 << 1) @@ -88,6 +89,7 @@ #define GICC_EOIR 0x10 #define GICC_RPR 0x14 #define GICC_HPPIR 0x18 +#define GICC_AHPPIR 0x28 #define GICC_IIDR 0xFC #define GICC_DIR 0x1000 #define GICC_PRIODROP GICC_EOIR @@ -247,6 +249,11 @@ static inline unsigned int gicc_read_hppir(unsigned int base) return mmio_read_32(base + GICC_HPPIR); } +static inline unsigned int gicc_read_ahppir(unsigned int base) +{ + return mmio_read_32(base + GICC_AHPPIR); +} + static inline unsigned int gicc_read_dir(unsigned int base) { return mmio_read_32(base + GICC_DIR); diff --git a/plat/fvp/plat_gic.c b/plat/fvp/plat_gic.c index dd409f56a..7dec404f8 100644 --- a/plat/fvp/plat_gic.c +++ b/plat/fvp/plat_gic.c @@ -38,12 +38,6 @@ #include #include - -/******************************************************************************* - * TODO: Revisit if priorities are being set such that no non-secure interrupt - * can have a higher priority than a secure one as recommended in the GICv2 spec - ******************************************************************************/ - /******************************************************************************* * This function does some minimal GICv3 configuration. The Firmware itself does * not fully support GICv3 at this time and relies on GICv2 emulation as @@ -322,3 +316,91 @@ uint32_t plat_interrupt_type_to_line(uint32_t type, uint32_t security_state) #endif } +#if FVP_GIC_ARCH == 2 +/******************************************************************************* + * This function returns the type of the highest priority pending interrupt at + * the GIC cpu interface. INTR_TYPE_INVAL is returned when there is no + * interrupt pending. + ******************************************************************************/ +uint32_t ic_get_pending_interrupt_type() +{ + uint32_t id, gicc_base; + + gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR); + id = gicc_read_hppir(gicc_base); + + /* Assume that all secure interrupts are S-EL1 interrupts */ + if (id < 1022) + return INTR_TYPE_S_EL1; + + if (id == GIC_SPURIOUS_INTERRUPT) + return INTR_TYPE_INVAL; + + return INTR_TYPE_NS; +} + +/******************************************************************************* + * This function returns the id of the highest priority pending interrupt at + * the GIC cpu interface. INTR_ID_UNAVAILABLE is returned when there is no + * interrupt pending. + ******************************************************************************/ +uint32_t ic_get_pending_interrupt_id() +{ + uint32_t id, gicc_base; + + gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR); + id = gicc_read_hppir(gicc_base); + + if (id < 1022) + return id; + + if (id == 1023) + return INTR_ID_UNAVAILABLE; + + /* + * Find out which non-secure interrupt it is under the assumption that + * the GICC_CTLR.AckCtl bit is 0. 
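+ * With AckCtl set to 0, a secure read of GICC_HPPIR returns the special
+ * value 1022 for a pending Group 1 (non-secure) interrupt, so the real
+ * interrupt id has to be read from the aliased GICC_AHPPIR register.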
+ */ + return gicc_read_ahppir(gicc_base); +} + +/******************************************************************************* + * This functions reads the GIC cpu interface Interrupt Acknowledge register + * to start handling the pending interrupt. It returns the contents of the IAR. + ******************************************************************************/ +uint32_t ic_acknowledge_interrupt() +{ + return gicc_read_IAR(platform_get_cfgvar(CONFIG_GICC_ADDR)); +} + +/******************************************************************************* + * This functions writes the GIC cpu interface End Of Interrupt register with + * the passed value to finish handling the active interrupt + ******************************************************************************/ +void ic_end_of_interrupt(uint32_t id) +{ + gicc_write_EOIR(platform_get_cfgvar(CONFIG_GICC_ADDR), id); + return; +} + +/******************************************************************************* + * This function returns the type of the interrupt id depending upon the group + * this interrupt has been configured under by the interrupt controller i.e. + * group0 or group1. + ******************************************************************************/ +uint32_t ic_get_interrupt_type(uint32_t id) +{ + uint32_t group; + + group = gicd_get_igroupr(platform_get_cfgvar(CONFIG_GICD_ADDR), id); + + /* Assume that all secure interrupts are S-EL1 interrupts */ + if (group == GRP0) + return INTR_TYPE_S_EL1; + else + return INTR_TYPE_NS; +} + +#else +#error "Invalid GIC architecture version specified for FVP port" +#endif diff --git a/plat/fvp/platform.h b/plat/fvp/platform.h index e1580debd..328190584 100644 --- a/plat/fvp/platform.h +++ b/plat/fvp/platform.h @@ -392,7 +392,12 @@ extern unsigned long plat_get_ns_image_entrypoint(void); extern unsigned long platform_get_stack(unsigned long mpidr); extern uint64_t plat_get_syscnt_freq(void); -/* Declarations for fvp_gic.c */ +/* Declarations for plat_gic.c */ +extern uint32_t ic_get_pending_interrupt_id(void); +extern uint32_t ic_get_pending_interrupt_type(void); +extern uint32_t ic_acknowledge_interrupt(void); +extern uint32_t ic_get_interrupt_type(uint32_t id); +extern void ic_end_of_interrupt(uint32_t id); extern void gic_cpuif_deactivate(unsigned int); extern void gic_cpuif_setup(unsigned int); extern void gic_pcpu_distif_setup(unsigned int); @@ -400,7 +405,7 @@ extern void gic_setup(void); extern uint32_t plat_interrupt_type_to_line(uint32_t type, uint32_t security_state); -/* Declarations for fvp_topology.c */ +/* Declarations for plat_topology.c */ extern int plat_setup_topology(void); extern int plat_get_max_afflvl(void); extern unsigned int plat_get_aff_count(unsigned int, unsigned long); From 383d4ac754c4c97c3e52642a69d97ebd31e215aa Mon Sep 17 00:00:00 2001 From: Achin Gupta Date: Fri, 9 May 2014 11:07:09 +0100 Subject: [PATCH 07/14] Introduce interrupt handling framework in BL3-1 This patch adds a common handler for FIQ and IRQ exceptions in the BL3-1 runtime exception vector table. This function determines the interrupt type and calls its handler. A crash is reported if an inconsistency in the interrupt management framework is detected. In the event of a spurious interrupt, execution resumes from the instruction where the interrupt was generated. 
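For reference, a handler registered with the interrupt management framework
introduced in the previous patch has the shape shown below; the handler name,
body and return value are illustrative only:

    static uint64_t my_sel1_interrupt_handler(uint32_t id,
                                              uint32_t flags,
                                              void *handle,
                                              void *cookie)
    {
            /* get_interrupt_src_ss(flags) yields the security state that
             * was preempted and 'handle' is the SP_EL3 value i.e. a
             * pointer to the preempted 'cpu_context'. Service the
             * interrupt here before the common handler runs el3_exit. */
            return 0;
    }
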
This patch also removes 'cm_macros.S' as its contents have been moved to 'runtime_exceptions.S' Change-Id: I3c85ecf8eaf43a3fac429b119ed0bd706d2e2093 --- bl31/aarch64/bl31_entrypoint.S | 2 - bl31/aarch64/runtime_exceptions.S | 114 +++++++++++++++++++++++++++-- bl31/bl31.mk | 8 ++ docs/user-guide.md | 5 ++ include/bl31/cm_macros.S | 57 --------------- services/std_svc/psci/psci_entry.S | 1 - 6 files changed, 122 insertions(+), 65 deletions(-) delete mode 100644 include/bl31/cm_macros.S diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S index 763303b38..273ca084e 100644 --- a/bl31/aarch64/bl31_entrypoint.S +++ b/bl31/aarch64/bl31_entrypoint.S @@ -31,8 +31,6 @@ #include #include #include -#include - .globl bl31_entrypoint diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S index b6dcccb75..4789b33ba 100644 --- a/bl31/aarch64/runtime_exceptions.S +++ b/bl31/aarch64/runtime_exceptions.S @@ -30,14 +30,118 @@ #include #include -#include #include +#include #include #include .globl runtime_exceptions .globl el3_exit + /* ----------------------------------------------------- + * Handle SMC exceptions seperately from other sync. + * exceptions. + * ----------------------------------------------------- + */ + .macro handle_sync_exception + str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] + mrs x30, esr_el3 + ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH + + cmp x30, #EC_AARCH32_SMC + b.eq smc_handler32 + + cmp x30, #EC_AARCH64_SMC + b.eq smc_handler64 + + /* ----------------------------------------------------- + * The following code handles any synchronous exception + * that is not an SMC. + * ----------------------------------------------------- + */ + + bl dump_state_and_die + .endm + + + /* ----------------------------------------------------- + * This macro handles FIQ or IRQ interrupts i.e. EL3, + * S-EL1 and NS interrupts. + * ----------------------------------------------------- + */ + .macro handle_interrupt_exception label + str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] + bl save_gp_registers + + /* Switch to the runtime stack i.e. SP_EL0 */ + ldr x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP] + mov x20, sp + msr spsel, #0 + mov sp, x2 + + /* + * Find out whether this is a valid interrupt type. If the + * interrupt controller reports a spurious interrupt then + * return to where we came from. + */ + bl ic_get_pending_interrupt_type + cmp x0, #INTR_TYPE_INVAL + b.eq interrupt_exit_\label + + /* + * Get the registered handler for this interrupt type. A + * NULL return value implies that an interrupt was generated + * for which there is no handler registered or the interrupt + * was routed incorrectly. This is a problem of the framework + * so report it as an error. + */ + bl get_interrupt_type_handler + cbz x0, interrupt_error_\label + mov x21, x0 + + mov x0, #INTR_ID_UNAVAILABLE +#if IMF_READ_INTERRUPT_ID + /* + * Read the id of the highest priority pending interrupt. If + * no interrupt is asserted then return to where we came from. + */ + bl ic_get_pending_interrupt_id + cmp x0, #INTR_ID_UNAVAILABLE + b.eq interrupt_exit_\label +#endif + + /* + * Save the EL3 system registers needed to return from + * this exception. + */ + mrs x3, spsr_el3 + mrs x4, elr_el3 + stp x3, x4, [x20, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3] + + /* Set the current security state in the 'flags' parameter */ + mrs x2, scr_el3 + ubfx x1, x2, #0, #1 + + /* Restore the reference to the 'handle' i.e. 
SP_EL3 */ + mov x2, x20 + + /* Call the interrupt type handler */ + blr x21 + +interrupt_exit_\label: + /* Return from exception, possibly in a different security state */ + b el3_exit + + /* + * This label signifies a problem with the interrupt management + * framework where it is not safe to go back to the instruction + * where the interrupt was generated. + */ +interrupt_error_\label: + bl dump_intr_state_and_die + .endm + + .macro save_x18_to_x29_sp_el0 stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18] stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20] @@ -140,12 +244,12 @@ sync_exception_aarch64: * ----------------------------------------------------- */ irq_aarch64: - bl dump_intr_state_and_die + handle_interrupt_exception irq_aarch64 check_vector_size irq_aarch64 .align 7 fiq_aarch64: - bl dump_intr_state_and_die + handle_interrupt_exception fiq_aarch64 check_vector_size fiq_aarch64 .align 7 @@ -177,12 +281,12 @@ sync_exception_aarch32: * ----------------------------------------------------- */ irq_aarch32: - bl dump_intr_state_and_die + handle_interrupt_exception irq_aarch32 check_vector_size irq_aarch32 .align 7 fiq_aarch32: - bl dump_intr_state_and_die + handle_interrupt_exception fiq_aarch32 check_vector_size fiq_aarch32 .align 7 diff --git a/bl31/bl31.mk b/bl31/bl31.mk index 54e8cc424..9ce4f9f0e 100644 --- a/bl31/bl31.mk +++ b/bl31/bl31.mk @@ -50,3 +50,11 @@ BL31_SOURCES += bl31/bl31_main.c \ services/std_svc/psci/psci_setup.c BL31_LINKERFILE := bl31/bl31.ld.S + +# Flag used by the generic interrupt management framework to determine if +# upon the assertion of an interrupt, it should pass the interrupt id or not +IMF_READ_INTERRUPT_ID := 0 + +$(eval $(call assert_boolean,IMF_READ_INTERRUPT_ID)) +$(eval $(call add_define,IMF_READ_INTERRUPT_ID)) + diff --git a/docs/user-guide.md b/docs/user-guide.md index 201db384b..a13d29b07 100644 --- a/docs/user-guide.md +++ b/docs/user-guide.md @@ -161,6 +161,11 @@ performed. for implementing the platform GIC API. This API is used by the interrupt management framework. Default is 2 i.e. version 2.0 +* `IMF_READ_INTERRUPT_ID`: Boolean flag used by the interrupt management + framework to enable passing of the interrupt id to its handler. The id is + read using a platform GIC API. `INTR_ID_UNAVAILABLE` is passed instead if + this option set to 0. Default is 0. + ### Creating a Firmware Image Package FIPs are automatically created as part of the build instructions described in diff --git a/include/bl31/cm_macros.S b/include/bl31/cm_macros.S deleted file mode 100644 index f12f8c306..000000000 --- a/include/bl31/cm_macros.S +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ -#include -#include - - /* ----------------------------------------------------- - * Handle SMC exceptions seperately from other sync. - * exceptions. - * ----------------------------------------------------- - */ - .macro handle_sync_exception - str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] - mrs x30, esr_el3 - ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH - - cmp x30, #EC_AARCH32_SMC - b.eq smc_handler32 - - cmp x30, #EC_AARCH64_SMC - b.eq smc_handler64 - - /* ----------------------------------------------------- - * The following code handles any synchronous exception - * that is not an SMC. - * ----------------------------------------------------- - */ - - bl dump_state_and_die - .endm - diff --git a/services/std_svc/psci/psci_entry.S b/services/std_svc/psci/psci_entry.S index 256c538dd..c243d2766 100644 --- a/services/std_svc/psci/psci_entry.S +++ b/services/std_svc/psci/psci_entry.S @@ -30,7 +30,6 @@ #include #include -#include #include .globl psci_aff_on_finish_entry From 31b57b7e9a731cb6f6975adab03038d4662df7e5 Mon Sep 17 00:00:00 2001 From: Achin Gupta Date: Fri, 9 May 2014 12:00:17 +0100 Subject: [PATCH 08/14] Use secure timer to generate S-EL1 interrupts This patch adds support in the TSP to program the secure physical generic timer to generate a EL-1 interrupt every half second. It also adds support for maintaining the timer state across power management operations. The TSPD ensures that S-EL1 can access the timer by programming the SCR_EL3.ST bit. This patch does not actually enable the timer. This will be done in a subsequent patch once the complete framework for handling S-EL1 interrupts is in place. Change-Id: I1b3985cfb50262f60824be3a51c6314ce90571bc --- bl32/tsp/tsp.mk | 3 +- bl32/tsp/tsp_timer.c | 106 +++++++++++++++++++++++++++++ include/bl32/payloads/tsp.h | 7 ++ include/lib/aarch64/arch.h | 22 ++++++ include/lib/aarch64/arch_helpers.h | 7 ++ lib/aarch64/sysreg_helpers.S | 37 ++++++++++ services/spd/tspd/tspd_common.c | 6 +- 7 files changed, 186 insertions(+), 2 deletions(-) create mode 100644 bl32/tsp/tsp_timer.c diff --git a/bl32/tsp/tsp.mk b/bl32/tsp/tsp.mk index c478b435f..07bd9c649 100644 --- a/bl32/tsp/tsp.mk +++ b/bl32/tsp/tsp.mk @@ -32,7 +32,8 @@ BL32_SOURCES += bl32/tsp/tsp_main.c \ bl32/tsp/aarch64/tsp_entrypoint.S \ bl32/tsp/aarch64/tsp_request.S \ common/aarch64/early_exceptions.S \ - lib/locks/exclusive/spinlock.S + lib/locks/exclusive/spinlock.S \ + bl32/tsp/tsp_timer.c BL32_LINKERFILE := bl32/tsp/tsp.ld.S diff --git a/bl32/tsp/tsp_timer.c b/bl32/tsp/tsp_timer.c new file mode 100644 index 000000000..f66ff9fa5 --- /dev/null +++ b/bl32/tsp/tsp_timer.c @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include + +/******************************************************************************* + * Data structure to keep track of per-cpu secure generic timer context across + * power management operations. + ******************************************************************************/ +typedef struct timer_context { + uint64_t cval; + uint32_t ctl; +} timer_context_t; + +static timer_context_t pcpu_timer_context[PLATFORM_CORE_COUNT]; + +/******************************************************************************* + * This function initializes the generic timer to fire every 0.5 second + ******************************************************************************/ +void tsp_generic_timer_start() +{ + uint64_t cval; + uint32_t ctl = 0; + + /* The timer will fire every 0.5 second */ + cval = read_cntpct_el0() + (read_cntfrq_el0() >> 1); + write_cntps_cval_el1(cval); + + /* Enable the secure physical timer */ + set_cntp_ctl_enable(ctl); + write_cntps_ctl_el1(ctl); +} + +/******************************************************************************* + * This function deasserts the timer interrupt and sets it up again + ******************************************************************************/ +void tsp_generic_timer_handler() +{ + /* Ensure that the timer did assert the interrupt */ + assert(get_cntp_ctl_istatus(read_cntps_ctl_el1())); + + /* Disable the timer and reprogram it */ + write_cntps_ctl_el1(0); + tsp_generic_timer_start(); +} + +/******************************************************************************* + * This function deasserts the timer interrupt prior to cpu power down + ******************************************************************************/ +void tsp_generic_timer_stop() +{ + /* Disable the timer */ + write_cntps_ctl_el1(0); +} + +/******************************************************************************* + * This function saves the timer context prior to cpu suspension + 
******************************************************************************/ +void tsp_generic_timer_save() +{ + uint32_t linear_id = platform_get_core_pos(read_mpidr()); + + pcpu_timer_context[linear_id].cval = read_cntps_cval_el1(); + pcpu_timer_context[linear_id].ctl = read_cntps_ctl_el1(); + flush_dcache_range((uint64_t) &pcpu_timer_context[linear_id], + sizeof(pcpu_timer_context[linear_id])); +} + +/******************************************************************************* + * This function restores the timer context post cpu resummption + ******************************************************************************/ +void tsp_generic_timer_restore() +{ + uint32_t linear_id = platform_get_core_pos(read_mpidr()); + + write_cntps_cval_el1(pcpu_timer_context[linear_id].cval); + write_cntps_ctl_el1(pcpu_timer_context[linear_id].ctl); +} diff --git a/include/bl32/payloads/tsp.h b/include/bl32/payloads/tsp.h index 1f542d536..385d09c21 100644 --- a/include/bl32/payloads/tsp.h +++ b/include/bl32/payloads/tsp.h @@ -196,6 +196,13 @@ extern tsp_args_t *tsp_cpu_off_main(uint64_t arg0, uint64_t arg5, uint64_t arg6, uint64_t arg7); + +/* Generic Timer functions */ +extern void tsp_generic_timer_start(void); +extern void tsp_generic_timer_handler(void); +extern void tsp_generic_timer_stop(void); +extern void tsp_generic_timer_save(void); +extern void tsp_generic_timer_restore(void); #endif /* __ASSEMBLY__ */ #endif /* __BL2_H__ */ diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h index 3a00f6efe..ab3f38a85 100644 --- a/include/lib/aarch64/arch.h +++ b/include/lib/aarch64/arch.h @@ -223,6 +223,28 @@ #define AARCH32_MODE_SVC 0x13 #define AARCH32_MODE_HYP 0x1a +/* Physical timer control register bit fields shifts and masks */ +#define CNTP_CTL_ENABLE_SHIFT 0 +#define CNTP_CTL_IMASK_SHIFT 1 +#define CNTP_CTL_ISTATUS_SHIFT 2 + +#define CNTP_CTL_ENABLE_MASK 1 +#define CNTP_CTL_IMASK_MASK 1 +#define CNTP_CTL_ISTATUS_MASK 1 + +#define get_cntp_ctl_enable(x) ((x >> CNTP_CTL_ENABLE_SHIFT) & \ + CNTP_CTL_ENABLE_MASK) +#define get_cntp_ctl_imask(x) ((x >> CNTP_CTL_IMASK_SHIFT) & \ + CNTP_CTL_IMASK_MASK) +#define get_cntp_ctl_istatus(x) ((x >> CNTP_CTL_ISTATUS_SHIFT) & \ + CNTP_CTL_ISTATUS_MASK) + +#define set_cntp_ctl_enable(x) (x |= 1 << CNTP_CTL_ENABLE_SHIFT) +#define set_cntp_ctl_imask(x) (x |= 1 << CNTP_CTL_IMASK_SHIFT) + +#define clr_cntp_ctl_enable(x) (x &= ~(1 << CNTP_CTL_ENABLE_SHIFT)) +#define clr_cntp_ctl_imask(x) (x &= ~(1 << CNTP_CTL_IMASK_SHIFT)) + /* Miscellaneous MMU related constants */ #define NUM_2MB_IN_GB (1 << 9) #define NUM_4K_IN_2MB (1 << 9) diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h index 0a398d081..f30301d17 100644 --- a/include/lib/aarch64/arch_helpers.h +++ b/include/lib/aarch64/arch_helpers.h @@ -202,6 +202,10 @@ extern unsigned long read_cptr_el3(void); extern unsigned long read_cpacr(void); extern unsigned long read_cpuectlr(void); extern unsigned int read_cntfrq_el0(void); +extern unsigned int read_cntps_ctl_el1(void); +extern unsigned int read_cntps_tval_el1(void); +extern unsigned long read_cntps_cval_el1(void); +extern unsigned long read_cntpct_el0(void); extern unsigned long read_cnthctl_el2(void); extern unsigned long read_tpidr_el3(void); @@ -210,6 +214,9 @@ extern void write_scr(unsigned long); extern void write_hcr(unsigned long); extern void write_cpacr(unsigned long); extern void write_cntfrq_el0(unsigned int); +extern void write_cntps_ctl_el1(unsigned int); +extern void write_cntps_tval_el1(unsigned 
int); +extern void write_cntps_cval_el1(unsigned long); extern void write_cnthctl_el2(unsigned long); extern void write_vbar_el1(unsigned long); diff --git a/lib/aarch64/sysreg_helpers.S b/lib/aarch64/sysreg_helpers.S index c86fdba05..925e93e37 100644 --- a/lib/aarch64/sysreg_helpers.S +++ b/lib/aarch64/sysreg_helpers.S @@ -142,6 +142,15 @@ .globl read_cntfrq_el0 .globl write_cntfrq_el0 + .globl read_cntps_ctl_el1 + .globl write_cntps_ctl_el1 + + .globl read_cntps_cval_el1 + .globl write_cntps_cval_el1 + + .globl read_cntps_tval_el1 + .globl write_cntps_tval_el1 + .globl read_scr .globl write_scr @@ -151,6 +160,7 @@ .globl read_midr .globl read_mpidr + .globl read_cntpct_el0 .globl read_current_el .globl read_id_pfr1_el1 .globl read_id_aa64pfr0_el1 @@ -672,6 +682,33 @@ func write_cntfrq_el0 msr cntfrq_el0, x0 ret +func read_cntps_ctl_el1 + mrs x0, cntps_ctl_el1 + ret + +func write_cntps_ctl_el1 + msr cntps_ctl_el1, x0 + ret + +func read_cntps_cval_el1 + mrs x0, cntps_cval_el1 + ret + +func write_cntps_cval_el1 + msr cntps_cval_el1, x0 + ret + +func read_cntps_tval_el1 + mrs x0, cntps_tval_el1 + ret + +func write_cntps_tval_el1 + msr cntps_tval_el1, x0 + ret + +func read_cntpct_el0 + mrs x0, cntpct_el0 + ret func read_cpuectlr mrs x0, CPUECTLR_EL1 diff --git a/services/spd/tspd/tspd_common.c b/services/spd/tspd/tspd_common.c index 9168ffc1b..0baa7dd89 100644 --- a/services/spd/tspd/tspd_common.c +++ b/services/spd/tspd/tspd_common.c @@ -65,10 +65,14 @@ int32_t tspd_init_secure_context(uint64_t entrypoint, */ memset(tsp_ctx, 0, sizeof(*tsp_ctx)); - /* Set the right security state and register width for the SP */ + /* + * Set the right security state, register width and enable access to + * the secure physical timer for the SP. + */ scr = read_scr(); scr &= ~SCR_NS_BIT; scr &= ~SCR_RW_BIT; + scr |= SCR_ST_BIT; if (rw == TSP_AARCH64) scr |= SCR_RW_BIT; From 1ad9e8fbc044f95fdf2c44be4ebda7f206d992e2 Mon Sep 17 00:00:00 2001 From: Achin Gupta Date: Fri, 9 May 2014 11:42:56 +0100 Subject: [PATCH 09/14] Add support for synchronous FIQ handling in TSP This patch adds support in the TSP for handling S-EL1 interrupts handed over by the TSPD. It includes GIC support in its platform port, updates various statistics related to FIQ handling, exports an entry point that the TSPD can use to hand over interrupts and defines the handover protocol w.r.t what context is the TSP expected to preserve and the state in which the entry point is invoked by the TSPD. Change-Id: I93b22e5a8133400e4da366f5fc862f871038df39 --- bl32/tsp/aarch64/tsp_entrypoint.S | 69 +++++++++++++++++++ bl32/tsp/tsp-fvp.mk | 8 ++- bl32/tsp/tsp.mk | 5 +- bl32/tsp/tsp_interrupt.c | 109 ++++++++++++++++++++++++++++++ bl32/tsp/tsp_main.c | 3 +- include/bl32/payloads/tsp.h | 47 ++++++++++--- plat/fvp/bl32_plat_setup.c | 2 + 7 files changed, 228 insertions(+), 15 deletions(-) create mode 100644 bl32/tsp/tsp_interrupt.c diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S index aeb54bc41..641f37fb3 100644 --- a/bl32/tsp/aarch64/tsp_entrypoint.S +++ b/bl32/tsp/aarch64/tsp_entrypoint.S @@ -39,6 +39,7 @@ .globl tsp_cpu_suspend_entry .globl tsp_cpu_resume_entry .globl tsp_fast_smc_entry + .globl tsp_fiq_entry /* --------------------------------------------- * Populate the params in x0-x7 from the pointer @@ -53,6 +54,22 @@ smc #0 .endm + .macro save_eret_context reg1 reg2 + mrs \reg1, elr_el1 + mrs \reg2, spsr_el1 + stp \reg1, \reg2, [sp, #-0x10]! + stp x30, x18, [sp, #-0x10]! 
+ .endm + + .macro restore_eret_context reg1 reg2 + ldp x30, x18, [sp], #0x10 + ldp \reg1, \reg2, [sp], #0x10 + msr elr_el1, \reg1 + msr spsr_el1, \reg2 + .endm + + .section .text, "ax" + .align 3 func tsp_entrypoint /*--------------------------------------------- @@ -238,6 +255,58 @@ func tsp_cpu_suspend_entry bl tsp_cpu_suspend_main restore_args_call_smc + /*--------------------------------------------- + * This entrypoint is used by the TSPD to pass + * control for handling a pending S-EL1 FIQ. + * 'x0' contains a magic number which indicates + * this. TSPD expects control to be handed back + * at the end of FIQ processing. This is done + * through an SMC. The handover agreement is: + * + * 1. PSTATE.DAIF are set upon entry. 'x1' has + * the ELR_EL3 from the non-secure state. + * 2. TSP has to preserve the callee saved + * general purpose registers, SP_EL1/EL0 and + * LR. + * 3. TSP has to preserve the system and vfp + * registers (if applicable). + * 4. TSP can use 'x0-x18' to enable its C + * runtime. + * 5. TSP returns to TSPD using an SMC with + * 'x0' = TSP_HANDLED_S_EL1_FIQ + * --------------------------------------------- + */ +func tsp_fiq_entry +#if DEBUG + mov x2, #(TSP_HANDLE_FIQ_AND_RETURN & ~0xffff) + movk x2, #(TSP_HANDLE_FIQ_AND_RETURN & 0xffff) + cmp x0, x2 + b.ne tsp_fiq_entry_panic +#endif + /*--------------------------------------------- + * Save any previous context needed to perform + * an exception return from S-EL1 e.g. context + * from a previous IRQ. Update statistics and + * handle the FIQ before returning to the TSPD. + * IRQ/FIQs are not enabled since that will + * complicate the implementation. Execution + * will be transferred back to the normal world + * in any case. A non-zero return value from the + * fiq handler is an error. + * --------------------------------------------- + */ + save_eret_context x2 x3 + bl tsp_update_sync_fiq_stats + bl tsp_fiq_handler + cbnz x0, tsp_fiq_entry_panic + restore_eret_context x2 x3 + mov x0, #(TSP_HANDLED_S_EL1_FIQ & ~0xffff) + movk x0, #(TSP_HANDLED_S_EL1_FIQ & 0xffff) + smc #0 + +tsp_fiq_entry_panic: + b tsp_fiq_entry_panic + /*--------------------------------------------- * This entrypoint is used by the TSPD when this * cpu resumes execution after an earlier diff --git a/bl32/tsp/tsp-fvp.mk b/bl32/tsp/tsp-fvp.mk index 5d8a0e344..b1d0afef1 100644 --- a/bl32/tsp/tsp-fvp.mk +++ b/bl32/tsp/tsp-fvp.mk @@ -29,7 +29,9 @@ # # TSP source files specific to FVP platform -BL32_SOURCES += plat/common/aarch64/platform_mp_stack.S \ - plat/fvp/bl32_plat_setup.c \ +BL32_SOURCES += drivers/arm/gic/gic_v2.c \ + plat/common/aarch64/platform_mp_stack.S \ plat/fvp/aarch64/plat_common.c \ - plat/fvp/aarch64/plat_helpers.S + plat/fvp/aarch64/plat_helpers.S \ + plat/fvp/bl32_plat_setup.c \ + plat/fvp/plat_gic.c diff --git a/bl32/tsp/tsp.mk b/bl32/tsp/tsp.mk index 07bd9c649..297556bb8 100644 --- a/bl32/tsp/tsp.mk +++ b/bl32/tsp/tsp.mk @@ -31,9 +31,10 @@ BL32_SOURCES += bl32/tsp/tsp_main.c \ bl32/tsp/aarch64/tsp_entrypoint.S \ bl32/tsp/aarch64/tsp_request.S \ + bl32/tsp/tsp_interrupt.c \ + bl32/tsp/tsp_timer.c \ common/aarch64/early_exceptions.S \ - lib/locks/exclusive/spinlock.S \ - bl32/tsp/tsp_timer.c + lib/locks/exclusive/spinlock.S BL32_LINKERFILE := bl32/tsp/tsp.ld.S diff --git a/bl32/tsp/tsp_interrupt.c b/bl32/tsp/tsp_interrupt.c new file mode 100644 index 000000000..d5d02c304 --- /dev/null +++ b/bl32/tsp/tsp_interrupt.c @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include + +/******************************************************************************* + * This function updates the TSP statistics for FIQs handled synchronously i.e + * the ones that have been handed over by the TSPD. It also keeps count of the + * number of times control was passed back to the TSPD after handling an FIQ. + * In the future it will be possible that the TSPD hands over an FIQ to the TSP + * but does not expect it to return execution. This statistic will be useful to + * distinguish between these two models of synchronous FIQ handling. + * The 'elr_el3' parameter contains the address of the instruction in normal + * world where this FIQ was generated. + ******************************************************************************/ +void tsp_update_sync_fiq_stats(uint32_t type, uint64_t elr_el3) +{ + uint64_t mpidr = read_mpidr(); + uint32_t linear_id = platform_get_core_pos(mpidr); + + tsp_stats[linear_id].sync_fiq_count++; + if (type == TSP_HANDLE_FIQ_AND_RETURN) + tsp_stats[linear_id].sync_fiq_ret_count++; + + spin_lock(&console_lock); + printf("TSP: cpu 0x%x sync fiq request from 0x%llx \n\r", + mpidr, elr_el3); + INFO("cpu 0x%x: %d sync fiq requests, %d sync fiq returns\n", + mpidr, + tsp_stats[linear_id].sync_fiq_count, + tsp_stats[linear_id].sync_fiq_ret_count); + spin_unlock(&console_lock); +} + +/******************************************************************************* + * TSP FIQ handler called as a part of both synchronous and asynchronous + * handling of FIQ interrupts. It returns 0 upon successfully handling a S-EL1 + * FIQ and treats all other FIQs as EL3 interrupts. It assumes that the GIC + * architecture version in v2.0 and the secure physical timer interrupt is the + * only S-EL1 interrupt that it needs to handle. 
+ ******************************************************************************/ +int32_t tsp_fiq_handler() +{ + uint64_t mpidr = read_mpidr(); + uint32_t linear_id = platform_get_core_pos(mpidr), id; + + /* + * Get the highest priority pending interrupt id and see if it is the + * secure physical generic timer interrupt in which case, handle it. + * Otherwise throw this interrupt at the EL3 firmware. + */ + id = ic_get_pending_interrupt_id(); + + /* TSP can only handle the secure physical timer interrupt */ + if (id != IRQ_SEC_PHY_TIMER) + return TSP_EL3_FIQ; + + /* + * Handle the interrupt. Also sanity check if it has been preempted by + * another secure interrupt through an assertion. + */ + id = ic_acknowledge_interrupt(); + assert(id == IRQ_SEC_PHY_TIMER); + tsp_generic_timer_handler(); + ic_end_of_interrupt(id); + + /* Update the statistics and print some messages */ + tsp_stats[linear_id].fiq_count++; + spin_lock(&console_lock); + printf("TSP: cpu 0x%x handled fiq %d \n\r", + mpidr, id); + INFO("cpu 0x%x: %d fiq requests \n", + mpidr, tsp_stats[linear_id].fiq_count); + spin_unlock(&console_lock); + + return 0; +} diff --git a/bl32/tsp/tsp_main.c b/bl32/tsp/tsp_main.c index a667ffc23..4ffc5216f 100644 --- a/bl32/tsp/tsp_main.c +++ b/bl32/tsp/tsp_main.c @@ -51,7 +51,7 @@ static tsp_args_t tsp_smc_args[PLATFORM_CORE_COUNT]; /******************************************************************************* * Per cpu data structure to keep track of TSP activity ******************************************************************************/ -static work_statistics_t tsp_stats[PLATFORM_CORE_COUNT]; +work_statistics_t tsp_stats[PLATFORM_CORE_COUNT]; /******************************************************************************* * Single reference to the various entry points exported by the test secure @@ -64,6 +64,7 @@ static const entry_info_t tsp_entry_info = { tsp_cpu_off_entry, tsp_cpu_resume_entry, tsp_cpu_suspend_entry, + tsp_fiq_entry, }; static tsp_args_t *set_smc_args(uint64_t arg0, diff --git a/include/bl32/payloads/tsp.h b/include/bl32/payloads/tsp.h index 385d09c21..3aa3e8c19 100644 --- a/include/bl32/payloads/tsp.h +++ b/include/bl32/payloads/tsp.h @@ -42,7 +42,16 @@ #define TSP_RESUME_DONE 0xf2000004 #define TSP_WORK_DONE 0xf2000005 -/* SMC function ID that TSP uses to request service from secure montior */ +/* + * Function identifiers to handle FIQs through the synchronous handling model. + * If the TSP was previously interrupted then control has to be returned to + * the TSPD after handling the interrupt else execution can remain in the TSP. 
+ */ +#define TSP_HANDLED_S_EL1_FIQ 0xf2000006 +#define TSP_EL3_FIQ 0xf2000007 +#define TSP_HANDLE_FIQ_AND_RETURN 0x2004 + +/* SMC function ID that TSP uses to request service from secure monitor */ #define TSP_GET_ARGS 0xf2001000 /* Function IDs for various TSP services */ @@ -86,16 +95,17 @@ #include #include /* For CACHE_WRITEBACK_GRANULE */ +#include #include typedef void (*tsp_generic_fptr_t)(uint64_t arg0, - uint64_t arg1, - uint64_t arg2, - uint64_t arg3, - uint64_t arg4, - uint64_t arg5, - uint64_t arg6, - uint64_t arg7); + uint64_t arg1, + uint64_t arg2, + uint64_t arg3, + uint64_t arg4, + uint64_t arg5, + uint64_t arg6, + uint64_t arg7); typedef struct entry_info { tsp_generic_fptr_t fast_smc_entry; @@ -103,9 +113,13 @@ typedef struct entry_info { tsp_generic_fptr_t cpu_off_entry; tsp_generic_fptr_t cpu_resume_entry; tsp_generic_fptr_t cpu_suspend_entry; + tsp_generic_fptr_t fiq_entry; } entry_info_t; typedef struct work_statistics { + uint32_t fiq_count; /* Number of FIQs on this cpu */ + uint32_t sync_fiq_count; /* Number of sync. fiqs on this cpu */ + uint32_t sync_fiq_ret_count; /* Number of fiq returns on this cpu */ uint32_t smc_count; /* Number of returns on this cpu */ uint32_t eret_count; /* Number of entries on this cpu */ uint32_t cpu_on_count; /* Number of cpu on requests */ @@ -120,7 +134,7 @@ typedef struct tsp_args { /* Macros to access members of the above structure using their offsets */ #define read_sp_arg(args, offset) ((args)->_regs[offset >> 3]) -#define write_sp_arg(args, offset, val)(((args)->_regs[offset >> 3]) \ +#define write_sp_arg(args, offset, val) (((args)->_regs[offset >> 3]) \ = val) /* @@ -131,6 +145,14 @@ CASSERT(TSP_ARGS_SIZE == sizeof(tsp_args_t), assert_sp_args_size_mismatch); extern void tsp_get_magic(uint64_t args[4]); +extern void tsp_fiq_entry(uint64_t arg0, + uint64_t arg1, + uint64_t arg2, + uint64_t arg3, + uint64_t arg4, + uint64_t arg5, + uint64_t arg6, + uint64_t arg7); extern void tsp_fast_smc_entry(uint64_t arg0, uint64_t arg1, uint64_t arg2, @@ -203,6 +225,13 @@ extern void tsp_generic_timer_handler(void); extern void tsp_generic_timer_stop(void); extern void tsp_generic_timer_save(void); extern void tsp_generic_timer_restore(void); + +/* FIQ management functions */ +extern void tsp_update_sync_fiq_stats(uint32_t type, uint64_t elr_el3); + +/* Data structure to keep track of TSP statistics */ +extern spinlock_t console_lock; +extern work_statistics_t tsp_stats[PLATFORM_CORE_COUNT]; #endif /* __ASSEMBLY__ */ #endif /* __BL2_H__ */ diff --git a/plat/fvp/bl32_plat_setup.c b/plat/fvp/bl32_plat_setup.c index bb2b602f1..dd534a91a 100644 --- a/plat/fvp/bl32_plat_setup.c +++ b/plat/fvp/bl32_plat_setup.c @@ -95,6 +95,8 @@ void bl32_early_platform_setup(meminfo_t *mem_layout, bl32_tzdram_layout.attr = mem_layout->attr; bl32_tzdram_layout.next = 0; + /* Initialize the platform config for future decision making */ + platform_config_setup(); } /******************************************************************************* From 757d591168a4de8f6507b79e62e9129849243e8e Mon Sep 17 00:00:00 2001 From: Achin Gupta Date: Fri, 9 May 2014 12:17:56 +0100 Subject: [PATCH 10/14] Add support for asynchronous FIQ handling in TSP This patch adds support in the TSP to handle FIQ interrupts that are generated when execution is in the TSP. S-EL1 interrupt are handled normally and execution resumes at the instruction where the exception was originally taken. S-EL3 interrupts i.e. any interrupt not recognized by the TSP are handed to the TSPD. 
Execution resumes normally once such an interrupt has been handled at EL3. Change-Id: Ia3ada9a4fb15670afcc12538a6456f21efe58a8f --- bl32/tsp/aarch64/tsp_entrypoint.S | 4 +- bl32/tsp/aarch64/tsp_exceptions.S | 200 ++++++++++++++++++++++++++++++ bl32/tsp/tsp.mk | 1 + 3 files changed, 203 insertions(+), 2 deletions(-) create mode 100644 bl32/tsp/aarch64/tsp_exceptions.S diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S index 641f37fb3..260dcc6e3 100644 --- a/bl32/tsp/aarch64/tsp_entrypoint.S +++ b/bl32/tsp/aarch64/tsp_entrypoint.S @@ -97,7 +97,7 @@ func tsp_entrypoint * Set the exception vector to something sane. * --------------------------------------------- */ - adr x0, early_exceptions + adr x0, tsp_exceptions msr vbar_el1, x0 /* --------------------------------------------- @@ -196,7 +196,7 @@ func tsp_cpu_on_entry * Set the exception vector to something sane. * --------------------------------------------- */ - adr x0, early_exceptions + adr x0, tsp_exceptions msr vbar_el1, x0 /* --------------------------------------------- diff --git a/bl32/tsp/aarch64/tsp_exceptions.S b/bl32/tsp/aarch64/tsp_exceptions.S new file mode 100644 index 000000000..ccb4cdddd --- /dev/null +++ b/bl32/tsp/aarch64/tsp_exceptions.S @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include + + + /* ---------------------------------------------------- + * The caller-saved registers x0-x18 and LR are saved + * here. 
+ * ---------------------------------------------------- + */ + +#define SCRATCH_REG_SIZE #(20 * 8) + + .macro save_caller_regs_and_lr + sub sp, sp, SCRATCH_REG_SIZE + stp x0, x1, [sp] + stp x2, x3, [sp, #0x10] + stp x4, x5, [sp, #0x20] + stp x6, x7, [sp, #0x30] + stp x8, x9, [sp, #0x40] + stp x10, x11, [sp, #0x50] + stp x12, x13, [sp, #0x60] + stp x14, x15, [sp, #0x70] + stp x16, x17, [sp, #0x80] + stp x18, x30, [sp, #0x90] + .endm + + .macro restore_caller_regs_and_lr + ldp x0, x1, [sp] + ldp x2, x3, [sp, #0x10] + ldp x4, x5, [sp, #0x20] + ldp x6, x7, [sp, #0x30] + ldp x8, x9, [sp, #0x40] + ldp x10, x11, [sp, #0x50] + ldp x12, x13, [sp, #0x60] + ldp x14, x15, [sp, #0x70] + ldp x16, x17, [sp, #0x80] + ldp x18, x30, [sp, #0x90] + add sp, sp, SCRATCH_REG_SIZE + .endm + + .globl tsp_exceptions + + /* ----------------------------------------------------- + * TSP exception handlers. + * ----------------------------------------------------- + */ + .section .vectors, "ax"; .align 11 + + .align 7 +tsp_exceptions: + /* ----------------------------------------------------- + * Current EL with _sp_el0 : 0x0 - 0x180. No exceptions + * are expected and treated as irrecoverable errors. + * ----------------------------------------------------- + */ +sync_exception_sp_el0: + wfi + b sync_exception_sp_el0 + check_vector_size sync_exception_sp_el0 + + .align 7 + +irq_sp_el0: + b irq_sp_el0 + check_vector_size irq_sp_el0 + + .align 7 +fiq_sp_el0: + b fiq_sp_el0 + check_vector_size fiq_sp_el0 + + .align 7 +serror_sp_el0: + b serror_sp_el0 + check_vector_size serror_sp_el0 + + + /* ----------------------------------------------------- + * Current EL with SPx: 0x200 - 0x380. Only IRQs/FIQs + * are expected and handled + * ----------------------------------------------------- + */ + .align 7 +sync_exception_sp_elx: + wfi + b sync_exception_sp_elx + check_vector_size sync_exception_sp_elx + + .align 7 +irq_sp_elx: + b irq_sp_elx + check_vector_size irq_sp_elx + + .align 7 +fiq_sp_elx: + save_caller_regs_and_lr + bl tsp_fiq_handler + cbz x0, fiq_sp_elx_done + + /* + * This FIQ was not targetted to S-EL1 so send it to + * the monitor and wait for execution to resume. + */ + smc #0 +fiq_sp_elx_done: + restore_caller_regs_and_lr + eret + check_vector_size fiq_sp_elx + + .align 7 +serror_sp_elx: + b serror_sp_elx + check_vector_size serror_sp_elx + + + /* ----------------------------------------------------- + * Lower EL using AArch64 : 0x400 - 0x580. No exceptions + * are handled since TSP does not implement a lower EL + * ----------------------------------------------------- + */ + .align 7 +sync_exception_aarch64: + wfi + b sync_exception_aarch64 + check_vector_size sync_exception_aarch64 + + .align 7 +irq_aarch64: + b irq_aarch64 + check_vector_size irq_aarch64 + + .align 7 +fiq_aarch64: + b fiq_aarch64 + check_vector_size fiq_aarch64 + + .align 7 +serror_aarch64: + b serror_aarch64 + check_vector_size serror_aarch64 + + + /* ----------------------------------------------------- + * Lower EL using AArch32 : 0x600 - 0x780. No exceptions + * handled since the TSP does not implement a lower EL. 
+ * ----------------------------------------------------- + */ + .align 7 +sync_exception_aarch32: + wfi + b sync_exception_aarch32 + check_vector_size sync_exception_aarch32 + + .align 7 +irq_aarch32: + b irq_aarch32 + check_vector_size irq_aarch32 + + .align 7 +fiq_aarch32: + b fiq_aarch32 + check_vector_size fiq_aarch32 + + .align 7 +serror_aarch32: + b serror_aarch32 + check_vector_size serror_aarch32 + .align 7 diff --git a/bl32/tsp/tsp.mk b/bl32/tsp/tsp.mk index 297556bb8..b9084d549 100644 --- a/bl32/tsp/tsp.mk +++ b/bl32/tsp/tsp.mk @@ -30,6 +30,7 @@ BL32_SOURCES += bl32/tsp/tsp_main.c \ bl32/tsp/aarch64/tsp_entrypoint.S \ + bl32/tsp/aarch64/tsp_exceptions.S \ bl32/tsp/aarch64/tsp_request.S \ bl32/tsp/tsp_interrupt.c \ bl32/tsp/tsp_timer.c \ From 843ff7336932696aba9379ee801bd5b774ce1373 Mon Sep 17 00:00:00 2001 From: Achin Gupta Date: Fri, 9 May 2014 13:21:31 +0100 Subject: [PATCH 11/14] Add S-EL1 interrupt handling support in the TSPD This patch adds support in the TSPD for registering a handler for S-EL1 interrupts. This handler ferries the interrupts generated in the non-secure state to the TSP at 'tsp_fiq_entry'. Support has been added to the smc handler to resume execution in the non-secure state once interrupt handling has been completed by the TSP. There is also support for resuming execution in the normal world if the TSP receives a EL3 interrupt. This code is currently unused. Change-Id: I816732595a2635e299572965179f11aa0bf93b69 --- include/bl31/runtime_svc.h | 5 +- services/spd/tspd/tspd_common.c | 6 +- services/spd/tspd/tspd_main.c | 159 ++++++++++++++++++++++++++++++- services/spd/tspd/tspd_private.h | 17 +++- 4 files changed, 174 insertions(+), 13 deletions(-) diff --git a/include/bl31/runtime_svc.h b/include/bl31/runtime_svc.h index 6d7089678..0f510f760 100644 --- a/include/bl31/runtime_svc.h +++ b/include/bl31/runtime_svc.h @@ -135,9 +135,12 @@ typedef int32_t (*rt_svc_init_t)(void); /* Convenience macros to return from SMC handler */ +#define SMC_RET0(_h) { \ + return (uint64_t) (_h); \ +} #define SMC_RET1(_h, _x0) { \ write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0)); \ - return _x0; \ + SMC_RET0(_h); \ } #define SMC_RET2(_h, _x0, _x1) { \ write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1)); \ diff --git a/services/spd/tspd/tspd_common.c b/services/spd/tspd/tspd_common.c index 0baa7dd89..80c140503 100644 --- a/services/spd/tspd/tspd_common.c +++ b/services/spd/tspd/tspd_common.c @@ -42,9 +42,9 @@ * programming an entry into the secure payload. ******************************************************************************/ int32_t tspd_init_secure_context(uint64_t entrypoint, - uint32_t rw, - uint64_t mpidr, - tsp_context_t *tsp_ctx) + uint32_t rw, + uint64_t mpidr, + tsp_context_t *tsp_ctx) { uint32_t scr, sctlr; el1_sys_regs_t *el1_state; diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c index 06e21c408..e6b04920b 100644 --- a/services/spd/tspd/tspd_main.c +++ b/services/spd/tspd/tspd_main.c @@ -43,6 +43,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -68,6 +71,75 @@ DEFINE_SVC_UUID(tsp_uuid, int32_t tspd_init(meminfo_t *bl32_meminfo); +/******************************************************************************* + * This function is the handler registered for S-EL1 interrupts by the TSPD. It + * validates the interrupt and upon success arranges entry into the TSP at + * 'tsp_fiq_entry()' for handling the interrupt. 
+ ******************************************************************************/ +static uint64_t tspd_sel1_interrupt_handler(uint32_t id, + uint32_t flags, + void *handle, + void *cookie) +{ + uint32_t linear_id; + uint64_t mpidr; + tsp_context_t *tsp_ctx; + + /* Check the security state when the exception was generated */ + assert(get_interrupt_src_ss(flags) == NON_SECURE); + +#if IMF_READ_INTERRUPT_ID + /* Check the security status of the interrupt */ + assert(ic_get_interrupt_group(id) == SECURE); +#endif + + /* Sanity check the pointer to this cpu's context */ + mpidr = read_mpidr(); + assert(handle == cm_get_context(mpidr, NON_SECURE)); + + /* Save the non-secure context before entering the TSP */ + cm_el1_sysregs_context_save(NON_SECURE); + + /* Get a reference to this cpu's TSP context */ + linear_id = platform_get_core_pos(mpidr); + tsp_ctx = &tspd_sp_context[linear_id]; + assert(&tsp_ctx->cpu_ctx == cm_get_context(mpidr, SECURE)); + + /* + * Determine if the TSP was previously preempted. Its last known + * context has to be preserved in this case. + * The TSP should return control to the TSPD after handling this + * FIQ. Preserve essential EL3 context to allow entry into the + * TSP at the FIQ entry point using the 'cpu_context' structure. + * There is no need to save the secure system register context + * since the TSP is supposed to preserve it during S-EL1 interrupt + * handling. + */ + if (get_std_smc_active_flag(tsp_ctx->state)) { + tsp_ctx->saved_spsr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx, + CTX_SPSR_EL3); + tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx, + CTX_ELR_EL3); + } + + SMC_SET_EL3(&tsp_ctx->cpu_ctx, + CTX_SPSR_EL3, + make_spsr(MODE_EL1, MODE_SP_ELX, TSP_AARCH64)); + SMC_SET_EL3(&tsp_ctx->cpu_ctx, + CTX_ELR_EL3, + (uint64_t) tsp_entry_info->fiq_entry); + cm_el1_sysregs_context_restore(SECURE); + cm_set_next_eret_context(SECURE); + + /* + * Tell the TSP that it has to handle an FIQ synchronously. Also the + * instruction in normal world where the interrupt was generated is + * passed for debugging purposes. It is safe to retrieve this address + * from ELR_EL3 as the secure context will not take effect until + * el3_exit(). + */ + SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_FIQ_AND_RETURN, read_elr_el3()); +} /******************************************************************************* * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type @@ -105,9 +177,9 @@ int32_t tspd_setup(void) * for the time being. */ rc = tspd_init_secure_context(image_info->entrypoint, - TSP_AARCH64, - mpidr, - &tspd_sp_context[linear_id]); + TSP_AARCH64, + mpidr, + &tspd_sp_context[linear_id]); assert(rc == 0); /* @@ -132,7 +204,7 @@ int32_t tspd_setup(void) int32_t tspd_init(meminfo_t *bl32_meminfo) { uint64_t mpidr = read_mpidr(); - uint32_t linear_id = platform_get_core_pos(mpidr); + uint32_t linear_id = platform_get_core_pos(mpidr), flags; uint64_t rc; tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; @@ -164,6 +236,18 @@ int32_t tspd_init(meminfo_t *bl32_meminfo) psci_register_spd_pm_hook(&tspd_pm); } + /* + * Register an interrupt handler for S-EL1 interrupts when generated + * during code executing in the non-secure state. 
+ */ + flags = 0; + set_interrupt_rm_flag(flags, NON_SECURE); + rc = register_interrupt_type_handler(INTR_TYPE_S_EL1, + tspd_sel1_interrupt_handler, + flags); + if (rc) + panic(); + return rc; } @@ -196,6 +280,73 @@ uint64_t tspd_smc_handler(uint32_t smc_fid, switch (smc_fid) { + /* + * This function ID is used only by the TSP to indicate that it has + * finished handling a S-EL1 FIQ interrupt. Execution should resume + * in the normal world. + */ + case TSP_HANDLED_S_EL1_FIQ: + if (ns) + SMC_RET1(handle, SMC_UNK); + + assert(handle == cm_get_context(mpidr, SECURE)); + + /* + * Restore the relevant EL3 state which saved to service + * this SMC. + */ + if (get_std_smc_active_flag(tsp_ctx->state)) { + SMC_SET_EL3(&tsp_ctx->cpu_ctx, + CTX_SPSR_EL3, + tsp_ctx->saved_spsr_el3); + SMC_SET_EL3(&tsp_ctx->cpu_ctx, + CTX_ELR_EL3, + tsp_ctx->saved_elr_el3); + } + + /* Get a reference to the non-secure context */ + ns_cpu_context = cm_get_context(mpidr, NON_SECURE); + assert(ns_cpu_context); + + /* + * Restore non-secure state. There is no need to save the + * secure system register context since the TSP was supposed + * to preserve it during S-EL1 interrupt handling. + */ + cm_el1_sysregs_context_restore(NON_SECURE); + cm_set_next_eret_context(NON_SECURE); + + SMC_RET0((uint64_t) ns_cpu_context); + + + /* + * This function ID is used only by the TSP to indicate that it was + * interrupted due to a EL3 FIQ interrupt. Execution should resume + * in the normal world. + */ + case TSP_EL3_FIQ: + if (ns) + SMC_RET1(handle, SMC_UNK); + + assert(handle == cm_get_context(mpidr, SECURE)); + + /* Assert that standard SMC execution has been preempted */ + assert(get_std_smc_active_flag(tsp_ctx->state)); + + /* Save the secure system register state */ + cm_el1_sysregs_context_save(SECURE); + + /* Get a reference to the non-secure context */ + ns_cpu_context = cm_get_context(mpidr, NON_SECURE); + assert(ns_cpu_context); + + /* Restore non-secure state */ + cm_el1_sysregs_context_restore(NON_SECURE); + cm_set_next_eret_context(NON_SECURE); + + SMC_RET1(ns_cpu_context, TSP_EL3_FIQ); + + /* * This function ID is used only by the SP to indicate it has * finished initialising itself after a cold boot diff --git a/services/spd/tspd/tspd_private.h b/services/spd/tspd/tspd_private.h index bb0afcd96..b9cf496d4 100644 --- a/services/spd/tspd/tspd_private.h +++ b/services/spd/tspd/tspd_private.h @@ -33,6 +33,7 @@ #include #include +#include #include #include @@ -137,13 +138,19 @@ CASSERT(TSPD_C_RT_CTX_SIZE == sizeof(c_rt_regs_t), \ /******************************************************************************* * Structure which helps the SPD to maintain the per-cpu state of the SP. - * 'state' - collection of flags to track SP state e.g. on/off - * 'mpidr' - mpidr to associate a context with a cpu - * 'c_rt_ctx' - stack address to restore C runtime context from after returning - * from a synchronous entry into the SP. - * 'cpu_ctx' - space to maintain SP architectural state + * 'saved_spsr_el3' - temporary copy to allow FIQ handling when the TSP has been + * preempted. + * 'saved_elr_el3' - temporary copy to allow FIQ handling when the TSP has been + * preempted. + * 'state' - collection of flags to track SP state e.g. on/off + * 'mpidr' - mpidr to associate a context with a cpu + * 'c_rt_ctx' - stack address to restore C runtime context from after + * returning from a synchronous entry into the SP. 
+ * 'cpu_ctx' - space to maintain SP architectural state ******************************************************************************/ typedef struct tsp_context { + uint64_t saved_elr_el3; + uint32_t saved_spsr_el3; uint32_t state; uint64_t mpidr; uint64_t c_rt_ctx; From 92e6e4df5bf40bb420c54ea96dbe2fcc2651a860 Mon Sep 17 00:00:00 2001 From: Achin Gupta Date: Fri, 9 May 2014 13:33:42 +0100 Subject: [PATCH 12/14] Enable secure timer to generate S-EL1 interrupts This patch enables secure physical timer during TSP initialisation and maintains it across power management operations so that a timer interrupt is generated every half second. Fixes ARM-software/tf-issues#104 Fixes ARM-software/tf-issues#134 Change-Id: I66c6cfd24bd5e6035ba75ebf0f047e568770a369 --- bl32/tsp/tsp_main.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/bl32/tsp/tsp_main.c b/bl32/tsp/tsp_main.c index 4ffc5216f..3c250f81b 100644 --- a/bl32/tsp/tsp_main.c +++ b/bl32/tsp/tsp_main.c @@ -116,6 +116,7 @@ uint64_t tsp_main(void) bl32_platform_setup(); /* Initialize secure/applications state here */ + tsp_generic_timer_start(); /* Update this cpu's statistics */ tsp_stats[linear_id].smc_count++; @@ -152,6 +153,9 @@ tsp_args_t *tsp_cpu_on_main(void) uint64_t mpidr = read_mpidr(); uint32_t linear_id = platform_get_core_pos(mpidr); + /* Initialize secure/applications state here */ + tsp_generic_timer_start(); + /* Update this cpu's statistics */ tsp_stats[linear_id].smc_count++; tsp_stats[linear_id].eret_count++; @@ -185,6 +189,13 @@ tsp_args_t *tsp_cpu_off_main(uint64_t arg0, uint64_t mpidr = read_mpidr(); uint32_t linear_id = platform_get_core_pos(mpidr); + /* + * This cpu is being turned off, so disable the timer to prevent the + * secure timer interrupt from interfering with power down. A pending + * interrupt will be lost but we do not care as we are turning off. + */ + tsp_generic_timer_stop(); + /* Update this cpu's statistics */ tsp_stats[linear_id].smc_count++; tsp_stats[linear_id].eret_count++; @@ -220,6 +231,13 @@ tsp_args_t *tsp_cpu_suspend_main(uint64_t power_state, uint64_t mpidr = read_mpidr(); uint32_t linear_id = platform_get_core_pos(mpidr); + /* + * Save the time context and disable it to prevent the secure timer + * interrupt from interfering with wakeup from the suspend state. + */ + tsp_generic_timer_save(); + tsp_generic_timer_stop(); + /* Update this cpu's statistics */ tsp_stats[linear_id].smc_count++; tsp_stats[linear_id].eret_count++; @@ -255,6 +273,9 @@ tsp_args_t *tsp_cpu_resume_main(uint64_t suspend_level, uint64_t mpidr = read_mpidr(); uint32_t linear_id = platform_get_core_pos(mpidr); + /* Restore the generic timer context */ + tsp_generic_timer_restore(); + /* Update this cpu's statistics */ tsp_stats[linear_id].smc_count++; tsp_stats[linear_id].eret_count++; From f4d58669d0f1747a9ac9740808506f9abcb1b8b3 Mon Sep 17 00:00:00 2001 From: Soby Mathew Date: Fri, 9 May 2014 20:49:17 +0100 Subject: [PATCH 13/14] Non-Secure Interrupt support during Standard SMC processing in TSP Implements support for Non Secure Interrupts preempting the Standard SMC call in EL1. Whenever an IRQ is trapped in the Secure world we securely handover to the Normal world to process the interrupt. The normal world then issues "resume" smc call to resume the previous interrupted SMC call. 
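
To illustrate the handover protocol from the normal world side, a purely
illustrative client sketch follows (it is not part of this patch). The
'smc_call' helper is an assumed wrapper that issues an SMC with the given
function ID and arguments and returns the value the TSPD places in x0;
TSP_STD_FID, TSP_ADD, TSP_FID_RESUME and SMC_PREEMPTED are the identifiers
introduced by this patch.

    /*
     * Illustrative sketch of a normal world client driving a standard
     * (preemptible) TSP request. 'smc_call' is an assumed helper, not
     * part of this patch.
     */
    uint64_t tsp_add_std(uint64_t x1, uint64_t x2)
    {
        uint64_t ret;

        /* Issue the standard SMC; the TSP may be preempted by an IRQ */
        ret = smc_call(TSP_STD_FID(TSP_ADD), x1, x2);

        /*
         * SMC_PREEMPTED means the request was interrupted by a
         * non-secure interrupt. Once the OS has serviced it, resume
         * the preempted request until it runs to completion.
         */
        while (ret == SMC_PREEMPTED)
            ret = smc_call(TSP_FID_RESUME, 0, 0);

        return ret;
    }
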
Fixes ARM-software/tf-issues#105 Change-Id: I72b760617dee27438754cdfc9fe9bcf4cc024858 --- bl31/runtime_svc.c | 47 ++++++----- bl32/tsp/aarch64/tsp_entrypoint.S | 19 ++++- bl32/tsp/aarch64/tsp_exceptions.S | 9 +- bl32/tsp/tsp_interrupt.c | 15 ++++ bl32/tsp/tsp_main.c | 25 +++--- include/bl31/runtime_svc.h | 4 +- include/bl32/payloads/tsp.h | 43 ++++++++-- services/spd/tspd/tspd_main.c | 136 ++++++++++++++++++++++++------ services/spd/tspd/tspd_private.h | 20 +++++ 9 files changed, 251 insertions(+), 67 deletions(-) diff --git a/bl31/runtime_svc.c b/bl31/runtime_svc.c index b2ba6858f..08cd2d859 100644 --- a/bl31/runtime_svc.c +++ b/bl31/runtime_svc.c @@ -109,26 +109,35 @@ void runtime_svc_init() goto error; } - /* Call the initialisation routine for this runtime service */ - rc = rt_svc_descs[index].init(); - if (rc) { - ERROR("Error initializing runtime service %s\n", - rt_svc_descs[index].name); - } else { - /* - * Fill the indices corresponding to the start and end - * owning entity numbers with the index of the - * descriptor which will handle the SMCs for this owning - * entity range. - */ - start_idx = get_unique_oen(rt_svc_descs[index].start_oen, - rt_svc_descs[index].call_type); - end_idx = get_unique_oen(rt_svc_descs[index].end_oen, - rt_svc_descs[index].call_type); - - for (; start_idx <= end_idx; start_idx++) - rt_svc_descs_indices[start_idx] = index; + /* + * The runtime service may have seperate rt_svc_desc_t + * for its fast smc and standard smc. Since the service itself + * need to be initialized only once, only one of them will have + * an initialisation routine defined. Call the initialisation + * routine for this runtime service, if it is defined. + */ + if (rt_svc_descs[index].init) { + rc = rt_svc_descs[index].init(); + if (rc) { + ERROR("Error initializing runtime service %s\n", + rt_svc_descs[index].name); + continue; + } } + + /* + * Fill the indices corresponding to the start and end + * owning entity numbers with the index of the + * descriptor which will handle the SMCs for this owning + * entity range. + */ + start_idx = get_unique_oen(rt_svc_descs[index].start_oen, + rt_svc_descs[index].call_type); + end_idx = get_unique_oen(rt_svc_descs[index].end_oen, + rt_svc_descs[index].call_type); + + for (; start_idx <= end_idx; start_idx++) + rt_svc_descs_indices[start_idx] = index; } return; diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S index 260dcc6e3..6fc244efc 100644 --- a/bl32/tsp/aarch64/tsp_entrypoint.S +++ b/bl32/tsp/aarch64/tsp_entrypoint.S @@ -39,8 +39,11 @@ .globl tsp_cpu_suspend_entry .globl tsp_cpu_resume_entry .globl tsp_fast_smc_entry + .globl tsp_std_smc_entry .globl tsp_fiq_entry + + /* --------------------------------------------- * Populate the params in x0-x7 from the pointer * to the smc args structure in x0. @@ -329,8 +332,22 @@ tsp_cpu_resume_panic: * --------------------------------------------- */ func tsp_fast_smc_entry - bl tsp_fast_smc_handler + bl tsp_smc_handler restore_args_call_smc tsp_fast_smc_entry_panic: b tsp_fast_smc_entry_panic + /*--------------------------------------------- + * This entrypoint is used by the TSPD to ask + * the TSP to service a std smc request. + * We will enable preemption during execution + * of tsp_smc_handler. 
+ * --------------------------------------------- + */ +func tsp_std_smc_entry + msr daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT + bl tsp_smc_handler + msr daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT + restore_args_call_smc +tsp_std_smc_entry_panic: + b tsp_std_smc_entry_panic diff --git a/bl32/tsp/aarch64/tsp_exceptions.S b/bl32/tsp/aarch64/tsp_exceptions.S index ccb4cdddd..f84b5e099 100644 --- a/bl32/tsp/aarch64/tsp_exceptions.S +++ b/bl32/tsp/aarch64/tsp_exceptions.S @@ -120,7 +120,14 @@ sync_exception_sp_elx: .align 7 irq_sp_elx: - b irq_sp_elx + save_caller_regs_and_lr + /* We just update some statistics in the handler */ + bl tsp_irq_received + /* Hand over control to the normal world to handle the IRQ */ + smc #0 + /* The resume std smc starts from here */ + restore_caller_regs_and_lr + eret check_vector_size irq_sp_elx .align 7 diff --git a/bl32/tsp/tsp_interrupt.c b/bl32/tsp/tsp_interrupt.c index d5d02c304..5719c063e 100644 --- a/bl32/tsp/tsp_interrupt.c +++ b/bl32/tsp/tsp_interrupt.c @@ -107,3 +107,18 @@ int32_t tsp_fiq_handler() return 0; } + +int32_t tsp_irq_received() +{ + uint64_t mpidr = read_mpidr(); + uint32_t linear_id = platform_get_core_pos(mpidr); + + tsp_stats[linear_id].irq_count++; + spin_lock(&console_lock); + printf("TSP: cpu 0x%x received irq\n\r", mpidr); + INFO("cpu 0x%x: %d irq requests \n", + mpidr, tsp_stats[linear_id].irq_count); + spin_unlock(&console_lock); + + return TSP_PREEMPTED; +} diff --git a/bl32/tsp/tsp_main.c b/bl32/tsp/tsp_main.c index 3c250f81b..a941cb205 100644 --- a/bl32/tsp/tsp_main.c +++ b/bl32/tsp/tsp_main.c @@ -59,6 +59,7 @@ work_statistics_t tsp_stats[PLATFORM_CORE_COUNT]; * to change. ******************************************************************************/ static const entry_info_t tsp_entry_info = { + tsp_std_smc_entry, tsp_fast_smc_entry, tsp_cpu_on_entry, tsp_cpu_off_entry, @@ -298,9 +299,9 @@ tsp_args_t *tsp_cpu_resume_main(uint64_t suspend_level, * TSP fast smc handler. The secure monitor jumps to this function by * doing the ERET after populating X0-X7 registers. The arguments are received * in the function arguments in order. Once the service is rendered, this - * function returns to Secure Monitor by raising SMC + * function returns to Secure Monitor by raising SMC. ******************************************************************************/ -tsp_args_t *tsp_fast_smc_handler(uint64_t func, +tsp_args_t *tsp_smc_handler(uint64_t func, uint64_t arg1, uint64_t arg2, uint64_t arg3, @@ -313,18 +314,20 @@ tsp_args_t *tsp_fast_smc_handler(uint64_t func, uint64_t service_args[2]; uint64_t mpidr = read_mpidr(); uint32_t linear_id = platform_get_core_pos(mpidr); + const char *smc_type; /* Update this cpu's statistics */ tsp_stats[linear_id].smc_count++; tsp_stats[linear_id].eret_count++; - printf("SP: cpu 0x%x received fast smc 0x%x\n", read_mpidr(), func); + smc_type = ((func >> 31) & 1) == 1 ? 
"fast" : "standard"; + + printf("SP: cpu 0x%x received %s smc 0x%x\n", read_mpidr(), smc_type, func); INFO("cpu 0x%x: %d smcs, %d erets\n", mpidr, tsp_stats[linear_id].smc_count, tsp_stats[linear_id].eret_count); /* Render secure services and obtain results here */ - results[0] = arg1; results[1] = arg2; @@ -335,20 +338,20 @@ tsp_args_t *tsp_fast_smc_handler(uint64_t func, tsp_get_magic(service_args); /* Determine the function to perform based on the function ID */ - switch (func) { - case TSP_FID_ADD: + switch (TSP_BARE_FID(func)) { + case TSP_ADD: results[0] += service_args[0]; results[1] += service_args[1]; break; - case TSP_FID_SUB: + case TSP_SUB: results[0] -= service_args[0]; results[1] -= service_args[1]; break; - case TSP_FID_MUL: + case TSP_MUL: results[0] *= service_args[0]; results[1] *= service_args[1]; break; - case TSP_FID_DIV: + case TSP_DIV: results[0] /= service_args[0] ? service_args[0] : 1; results[1] /= service_args[1] ? service_args[1] : 1; break; @@ -356,9 +359,9 @@ tsp_args_t *tsp_fast_smc_handler(uint64_t func, break; } - return set_smc_args(func, + return set_smc_args(func, 0, results[0], results[1], - 0, 0, 0, 0, 0); + 0, 0, 0, 0); } diff --git a/include/bl31/runtime_svc.h b/include/bl31/runtime_svc.h index 0f510f760..66562e153 100644 --- a/include/bl31/runtime_svc.h +++ b/include/bl31/runtime_svc.h @@ -51,13 +51,15 @@ #define GET_SMC_CC(id) ((id >> FUNCID_CC_SHIFT) & \ FUNCID_CC_MASK) +#define GET_SMC_TYPE(id) ((id >> FUNCID_TYPE_SHIFT) & \ + FUNCID_TYPE_MASK) #define SMC_64 1 #define SMC_32 0 #define SMC_UNK 0xffffffff #define SMC_TYPE_FAST 1 #define SMC_TYPE_STD 0 - +#define SMC_PREEMPTED 0xfffffffe /******************************************************************************* * Owning entity number definitions inside the function id as per the SMC * calling convention diff --git a/include/bl32/payloads/tsp.h b/include/bl32/payloads/tsp.h index 3aa3e8c19..2e32c77e5 100644 --- a/include/bl32/payloads/tsp.h +++ b/include/bl32/payloads/tsp.h @@ -40,7 +40,7 @@ #define TSP_OFF_DONE 0xf2000002 #define TSP_SUSPEND_DONE 0xf2000003 #define TSP_RESUME_DONE 0xf2000004 -#define TSP_WORK_DONE 0xf2000005 +#define TSP_PREEMPTED 0xf2000005 /* * Function identifiers to handle FIQs through the synchronous handling model. @@ -49,16 +49,35 @@ */ #define TSP_HANDLED_S_EL1_FIQ 0xf2000006 #define TSP_EL3_FIQ 0xf2000007 -#define TSP_HANDLE_FIQ_AND_RETURN 0x2004 /* SMC function ID that TSP uses to request service from secure monitor */ #define TSP_GET_ARGS 0xf2001000 -/* Function IDs for various TSP services */ -#define TSP_FID_ADD 0xf2002000 -#define TSP_FID_SUB 0xf2002001 -#define TSP_FID_MUL 0xf2002002 -#define TSP_FID_DIV 0xf2002003 +/* + * Identifiers for various TSP services. 
Corresponding function IDs (whether + * fast or standard) are generated by macros defined below + */ +#define TSP_ADD 0x2000 +#define TSP_SUB 0x2001 +#define TSP_MUL 0x2002 +#define TSP_DIV 0x2003 +#define TSP_HANDLE_FIQ_AND_RETURN 0x2004 + +/* + * Generate function IDs for TSP services to be used in SMC calls, by + * appropriately setting bit 31 to differentiate standard and fast SMC calls + */ +#define TSP_STD_FID(fid) ((fid) | 0x72000000 | (0 << 31)) +#define TSP_FAST_FID(fid) ((fid) | 0x72000000 | (1 << 31)) + +/* SMC function ID to request a previously preempted std smc */ +#define TSP_FID_RESUME TSP_STD_FID(0x3000) + +/* + * Identify a TSP service from function ID filtering the last 16 bits from the + * SMC function ID + */ +#define TSP_BARE_FID(fid) ((fid) & 0xffff) /* * Total number of function IDs implemented for services offered to NS clients. @@ -108,6 +127,7 @@ typedef void (*tsp_generic_fptr_t)(uint64_t arg0, uint64_t arg7); typedef struct entry_info { + tsp_generic_fptr_t std_smc_entry; tsp_generic_fptr_t fast_smc_entry; tsp_generic_fptr_t cpu_on_entry; tsp_generic_fptr_t cpu_off_entry; @@ -118,6 +138,7 @@ typedef struct entry_info { typedef struct work_statistics { uint32_t fiq_count; /* Number of FIQs on this cpu */ + uint32_t irq_count; /* Number of IRQs on this cpu */ uint32_t sync_fiq_count; /* Number of sync. fiqs on this cpu */ uint32_t sync_fiq_ret_count; /* Number of fiq returns on this cpu */ uint32_t smc_count; /* Number of returns on this cpu */ @@ -153,6 +174,14 @@ extern void tsp_fiq_entry(uint64_t arg0, uint64_t arg5, uint64_t arg6, uint64_t arg7); +extern void tsp_std_smc_entry(uint64_t arg0, + uint64_t arg1, + uint64_t arg2, + uint64_t arg3, + uint64_t arg4, + uint64_t arg5, + uint64_t arg6, + uint64_t arg7); extern void tsp_fast_smc_entry(uint64_t arg0, uint64_t arg1, uint64_t arg2, diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c index e6b04920b..1aa3d0e83 100644 --- a/services/spd/tspd/tspd_main.c +++ b/services/spd/tspd/tspd_main.c @@ -270,7 +270,6 @@ uint64_t tspd_smc_handler(uint32_t smc_fid, uint64_t flags) { cpu_context_t *ns_cpu_context; - gp_regs_t *ns_gp_regs; unsigned long mpidr = read_mpidr(); uint32_t linear_id = platform_get_core_pos(mpidr), ns; tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; @@ -280,6 +279,31 @@ uint64_t tspd_smc_handler(uint32_t smc_fid, switch (smc_fid) { + /* + * This function ID is used by TSP to indicate that it was + * preempted by a normal world IRQ. + * + */ + case TSP_PREEMPTED: + if (ns) + SMC_RET1(handle, SMC_UNK); + + assert(handle == cm_get_context(mpidr, SECURE)); + cm_el1_sysregs_context_save(SECURE); + /* Get a reference to the non-secure context */ + ns_cpu_context = cm_get_context(mpidr, NON_SECURE); + assert(ns_cpu_context); + + /* + * Restore non-secure state. There is no need to save the + * secure system register context since the TSP was supposed + * to preserve it during S-EL1 interrupt handling. + */ + cm_el1_sysregs_context_restore(NON_SECURE); + cm_set_next_eret_context(NON_SECURE); + + SMC_RET1(ns_cpu_context, SMC_PREEMPTED); + /* * This function ID is used only by the TSP to indicate that it has * finished handling a S-EL1 FIQ interrupt. 
Execution should resume @@ -370,9 +394,6 @@ uint64_t tspd_smc_handler(uint32_t smc_fid, */ tspd_synchronous_sp_exit(tsp_ctx, x1); - /* Should never reach here */ - assert(0); - /* * These function IDs is used only by the SP to indicate it has * finished: @@ -405,18 +426,20 @@ uint64_t tspd_smc_handler(uint32_t smc_fid, */ tspd_synchronous_sp_exit(tsp_ctx, x1); - /* Should never reach here */ - assert(0); - /* * Request from non-secure client to perform an * arithmetic operation or response from secure * payload to an earlier request. */ - case TSP_FID_ADD: - case TSP_FID_SUB: - case TSP_FID_MUL: - case TSP_FID_DIV: + case TSP_FAST_FID(TSP_ADD): + case TSP_FAST_FID(TSP_SUB): + case TSP_FAST_FID(TSP_MUL): + case TSP_FAST_FID(TSP_DIV): + + case TSP_STD_FID(TSP_ADD): + case TSP_STD_FID(TSP_SUB): + case TSP_STD_FID(TSP_MUL): + case TSP_STD_FID(TSP_DIV): if (ns) { /* * This is a fresh request from the non-secure client. @@ -425,11 +448,15 @@ uint64_t tspd_smc_handler(uint32_t smc_fid, * state and send the request to the secure payload. */ assert(handle == cm_get_context(mpidr, NON_SECURE)); + + /* Check if we are already preempted */ + if (get_std_smc_active_flag(tsp_ctx->state)) + SMC_RET1(handle, SMC_UNK); + cm_el1_sysregs_context_save(NON_SECURE); /* Save x1 and x2 for use by TSP_GET_ARGS call below */ - SMC_SET_GP(handle, CTX_GPREG_X1, x1); - SMC_SET_GP(handle, CTX_GPREG_X2, x2); + store_tsp_args(tsp_ctx, x1, x2); /* * We are done stashing the non-secure context. Ask the @@ -444,17 +471,27 @@ uint64_t tspd_smc_handler(uint32_t smc_fid, * from this function. */ assert(&tsp_ctx->cpu_ctx == cm_get_context(mpidr, SECURE)); - set_aapcs_args7(&tsp_ctx->cpu_ctx, smc_fid, x1, x2, 0, 0, - 0, 0, 0); - cm_set_elr_el3(SECURE, (uint64_t) tsp_entry_info->fast_smc_entry); + + /* Set appropriate entry for SMC. + * We expect the TSP to manage the PSTATE.I and PSTATE.F + * flags as appropriate. + */ + if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) { + cm_set_elr_el3(SECURE, (uint64_t) + tsp_entry_info->fast_smc_entry); + } else { + set_std_smc_active_flag(tsp_ctx->state); + cm_set_elr_el3(SECURE, (uint64_t) + tsp_entry_info->std_smc_entry); + } + cm_el1_sysregs_context_restore(SECURE); cm_set_next_eret_context(SECURE); - - return smc_fid; + SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2); } else { /* * This is the result from the secure client of an - * earlier request. The results are in x1-x2. Copy it + * earlier request. The results are in x1-x3. Copy it * into the non-secure context, save the secure state * and return to the non-secure state. */ @@ -464,17 +501,52 @@ uint64_t tspd_smc_handler(uint32_t smc_fid, /* Get a reference to the non-secure context */ ns_cpu_context = cm_get_context(mpidr, NON_SECURE); assert(ns_cpu_context); - ns_gp_regs = get_gpregs_ctx(ns_cpu_context); /* Restore non-secure state */ cm_el1_sysregs_context_restore(NON_SECURE); cm_set_next_eret_context(NON_SECURE); - - SMC_RET2(ns_gp_regs, x1, x2); + if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_STD) + clr_std_smc_active_flag(tsp_ctx->state); + SMC_RET3(ns_cpu_context, x1, x2, x3); } break; + /* + * Request from non secure world to resume the preempted + * Standard SMC call. + */ + case TSP_FID_RESUME: + /* RESUME should be invoked only by normal world */ + if (!ns) { + assert(0); + break; + } + + /* + * This is a resume request from the non-secure client. + * save the non-secure state and send the request to + * the secure payload. 
+ */ + assert(handle == cm_get_context(mpidr, NON_SECURE)); + + /* Check if we are already preempted before resume */ + if (!get_std_smc_active_flag(tsp_ctx->state)) + SMC_RET1(handle, SMC_UNK); + + cm_el1_sysregs_context_save(NON_SECURE); + + /* + * We are done stashing the non-secure context. Ask the + * secure payload to do the work now. + */ + + /* We just need to return to the preempted point in + * TSP and the execution will resume as normal. + */ + cm_el1_sysregs_context_restore(SECURE); + cm_set_next_eret_context(SECURE); + /* * This is a request from the secure payload for more arguments * for an ongoing arithmetic operation requested by the @@ -488,10 +560,9 @@ uint64_t tspd_smc_handler(uint32_t smc_fid, /* Get a reference to the non-secure context */ ns_cpu_context = cm_get_context(mpidr, NON_SECURE); assert(ns_cpu_context); - ns_gp_regs = get_gpregs_ctx(ns_cpu_context); - SMC_RET2(handle, read_ctx_reg(ns_gp_regs, CTX_GPREG_X1), - read_ctx_reg(ns_gp_regs, CTX_GPREG_X2)); + get_tsp_args(tsp_ctx, x1, x2); + SMC_RET2(handle, x1, x2); case TOS_CALL_COUNT: /* @@ -515,9 +586,9 @@ uint64_t tspd_smc_handler(uint32_t smc_fid, SMC_RET1(handle, SMC_UNK); } -/* Define a SPD runtime service descriptor */ +/* Define a SPD runtime service descriptor for fast SMC calls */ DECLARE_RT_SVC( - spd, + tspd_fast, OEN_TOS_START, OEN_TOS_END, @@ -525,3 +596,14 @@ DECLARE_RT_SVC( tspd_setup, tspd_smc_handler ); + +/* Define a SPD runtime service descriptor for standard SMC calls */ +DECLARE_RT_SVC( + tspd_std, + + OEN_TOS_START, + OEN_TOS_END, + SMC_TYPE_STD, + NULL, + tspd_smc_handler +); diff --git a/services/spd/tspd/tspd_private.h b/services/spd/tspd/tspd_private.h index b9cf496d4..7395bb9ee 100644 --- a/services/spd/tspd/tspd_private.h +++ b/services/spd/tspd/tspd_private.h @@ -125,6 +125,12 @@ #include #include +/* + * The number of arguments to save during a SMC call for TSP. + * Currently only x1 and x2 are used by TSP. + */ +#define TSP_NUM_ARGS 0x2 + /* AArch64 callee saved general purpose register context structure. */ DEFINE_REG_STRUCT(c_rt_regs, TSPD_C_RT_CTX_ENTRIES); @@ -147,6 +153,8 @@ CASSERT(TSPD_C_RT_CTX_SIZE == sizeof(c_rt_regs_t), \ * 'c_rt_ctx' - stack address to restore C runtime context from after * returning from a synchronous entry into the SP. * 'cpu_ctx' - space to maintain SP architectural state + * 'saved_tsp_args' - space to store arguments for TSP arithmetic operations + * which will queried using the TSP_GET_ARGS SMC by TSP. ******************************************************************************/ typedef struct tsp_context { uint64_t saved_elr_el3; @@ -155,8 +163,20 @@ typedef struct tsp_context { uint64_t mpidr; uint64_t c_rt_ctx; cpu_context_t cpu_ctx; + uint64_t saved_tsp_args[TSP_NUM_ARGS]; } tsp_context_t; +/* Helper macros to store and retrieve tsp args from tsp_context */ +#define store_tsp_args(tsp_ctx, x1, x2) do {\ + tsp_ctx->saved_tsp_args[0] = x1;\ + tsp_ctx->saved_tsp_args[1] = x2;\ + } while (0) + +#define get_tsp_args(tsp_ctx, x1, x2) do {\ + x1 = tsp_ctx->saved_tsp_args[0];\ + x2 = tsp_ctx->saved_tsp_args[1];\ + } while (0) + /* TSPD power management handlers */ extern const spd_pm_ops_t tspd_pm; From 36eb6a755245e4eb0a40567783db6234f9f20d3f Mon Sep 17 00:00:00 2001 From: Harry Liebel Date: Thu, 1 May 2014 14:09:16 +0100 Subject: [PATCH 14/14] Improve BL3-0 documentation Provide some information about the expected use of BL3-0. 
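
As an illustration of the expected hand-off (not part of this patch; all
names below are hypothetical and do not refer to existing generic or
platform code), a platform port could drive the SCP along these lines:

    /*
     * Illustrative sketch only. plat_scp_copy_image(),
     * plat_scp_release_from_reset(), plat_scp_wait_ready() and
     * bl30_image_info_t are hypothetical platform-specific names.
     */
    #include <stddef.h>
    #include <stdint.h>

    typedef struct bl30_image_info {
    	uintptr_t image_base;	/* Secure memory address BL2 loaded BL3-0 to */
    	size_t image_size;	/* Size of the BL3-0 image in bytes */
    } bl30_image_info_t;

    /* Platform-specific transport into SCP memory, e.g. a mailbox or DMA. */
    int plat_scp_copy_image(uintptr_t base, size_t size);
    int plat_scp_release_from_reset(void);
    int plat_scp_wait_ready(void);

    /* Called from the platform's BL2 code once BL3-0 has been loaded. */
    int plat_handle_bl30(const bl30_image_info_t *bl30)
    {
    	int rc;

    	/* Transfer the image into SCP memory using the platform protocol. */
    	rc = plat_scp_copy_image(bl30->image_base, bl30->image_size);
    	if (rc != 0)
    		return rc;

    	/* Let the SCP start executing BL3-0 ... */
    	rc = plat_scp_release_from_reset();
    	if (rc != 0)
    		return rc;

    	/* ... and wait for its signal that BL2 execution may continue. */
    	return plat_scp_wait_ready();
    }
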
Fixes ARM-software/tf-issues#144

Change-Id: I5c8d59a675578394be89481ae4ec39ca37522750
---
 docs/firmware-design.md       | 9 +++++++++
 tools/fip_create/fip_create.c | 2 +-
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/docs/firmware-design.md b/docs/firmware-design.md
index a40ddac5f..76c27f724 100644
--- a/docs/firmware-design.md
+++ b/docs/firmware-design.md
@@ -219,6 +219,15 @@ access to access controlled components. On the Base FVP a TrustZone controller
 abstraction layer is initialized which is used to load further bootloader
 images.
 
+#### BL3-0 (System Control Processor Firmware) image load
+
+Some systems have a separate System Control Processor (SCP) for power, clock,
+reset and system control. BL2 loads the optional BL3-0 image from platform
+storage into a platform-specific region of secure memory. The subsequent
+handling of BL3-0 is platform specific. Typically the image is transferred into
+SCP memory using a platform-specific protocol. The SCP executes BL3-0 and
+signals to the Application Processor (AP) for BL2 execution to continue.
+
 #### BL3-1 (EL3 Runtime Firmware) image load
 
 BL2 loads the BL3-1 image from platform storage into a platform-specific address
diff --git a/tools/fip_create/fip_create.c b/tools/fip_create/fip_create.c
index c97204ab0..d1802b7fd 100644
--- a/tools/fip_create/fip_create.c
+++ b/tools/fip_create/fip_create.c
@@ -53,7 +53,7 @@ uuid_t uuid_null = {0};
  * const char* format_type_str[] = { "RAW", "ELF", "PIC" };
  */
 
-/* Currently only BL2 and BL31 images are supported. */
+/* The images used depend on the platform. */
 static entry_lookup_list_t toc_entry_lookup_list[] = {
	{ "Trusted Boot Firmware BL2", UUID_TRUSTED_BOOT_FIRMWARE_BL2,
		"bl2", NULL, FLAG_FILENAME },