Merge changes from topic "bs/pmf32" into integration

* changes:
  pmf: Make the runtime instrumentation work on AArch32
  SiP: Don't validate entrypoint if state switch is impossible
György Szing authored 2019-12-20 10:33:43 +00:00, committed by TrustedFirmware Code Review
commit b8e17967bb
9 changed files with 131 additions and 14 deletions

View File

@@ -9,7 +9,7 @@
#include <arch.h>
#include <common/bl_common.h>
#include <el3_common_macros.S>
-#include <lib/pmf/pmf_asm_macros.S>
+#include <lib/pmf/aarch64/pmf_asm_macros.S>
#include <lib/runtime_instr.h>
#include <lib/xlat_tables/xlat_mmu_helpers.h>

View File

@@ -10,6 +10,9 @@
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
+#include <lib/el3_runtime/cpu_data.h>
+#include <lib/pmf/aarch32/pmf_asm_macros.S>
+#include <lib/runtime_instr.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <smccc_helpers.h>
#include <smccc_macros.S>
@@ -164,6 +167,20 @@ func sp_min_handle_smc
/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
str lr, [sp, #SMC_CTX_LR_MON]
+#if ENABLE_RUNTIME_INSTRUMENTATION
+/*
+ * Read the timestamp value and store it on top of the C runtime stack.
+ * The value will be saved to the per-cpu data once the C stack is
+ * available, as a valid stack is needed to call _cpu_data()
+ */
+strd r0, r1, [sp, #SMC_CTX_GPREG_R0]
+ldcopr16 r0, r1, CNTPCT_64
+ldr lr, [sp, #SMC_CTX_SP_MON]
+strd r0, r1, [lr, #-8]!
+str lr, [sp, #SMC_CTX_SP_MON]
+ldrd r0, r1, [sp, #SMC_CTX_GPREG_R0]
+#endif
smccc_save_gp_mode_regs
clrex_on_monitor_entry
@@ -175,6 +192,23 @@ func sp_min_handle_smc
mov r2, sp /* handle */
ldr sp, [r2, #SMC_CTX_SP_MON]
+#if ENABLE_RUNTIME_INSTRUMENTATION
+/* Save the handle in a callee-saved register */
+mov r6, r2
+/*
+ * Restore the timestamp value and store it in per-cpu data. The value
+ * will be extracted from per-cpu data by the C-level SMC handler and
+ * saved to the PMF timestamp region.
+ */
+ldrd r4, r5, [sp], #8
+bl _cpu_data
+strd r4, r5, [r0, #CPU_DATA_PMF_TS0_OFFSET]
+/* Restore the handle */
+mov r2, r6
+#endif
ldr r0, [r2, #SMC_CTX_SCR]
and r3, r0, #SCR_NS_BIT /* flags */
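
Note: the two ENABLE_RUNTIME_INSTRUMENTATION fragments above mirror the existing AArch64 scheme. The CNTPCT value captured on SMC entry cannot be written to per-cpu data right away because _cpu_data() needs a valid C stack, so the stamp is parked on the monitor stack and only moved into per-cpu data on the return path. A C-level consumer can then copy it from per-cpu data into the PMF region; a minimal sketch, assuming the PMF_WRITE_TIMESTAMP/get_cpu_data API used by the AArch64 runtime-instrumentation code:

#include <lib/el3_runtime/cpu_data.h>
#include <lib/pmf/pmf.h>
#include <lib/runtime_instr.h>

/*
 * Sketch: copy the SMC-entry timestamp that the assembly stashed in
 * per-cpu data into the PMF timestamp region (macro and field names
 * follow the AArch64 code path; treat them as assumptions here).
 */
static void rt_instr_store_entry_ts(void)
{
	PMF_WRITE_TIMESTAMP(rt_instr_svc, RT_INSTR_ENTER_PSCI,
	    PMF_NO_CACHE_MAINT,
	    get_cpu_data(cpu_data_pmf_ts[CPU_DATA_PMF_TS0_IDX]));
}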
@@ -239,6 +273,16 @@ endfunc sp_min_handle_fiq
* The Warm boot entrypoint for SP_MIN.
*/
func sp_min_warm_entrypoint
+#if ENABLE_RUNTIME_INSTRUMENTATION
+/*
+ * This timestamp update happens with cache off. The next
+ * timestamp collection will need to do cache maintenance prior
+ * to timestamp update.
+ */
+pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
+ldcopr16 r2, r3, CNTPCT_64
+strd r2, r3, [r0]
+#endif
/*
* On the warm boot path, most of the EL3 initialisations performed by
* 'el3_entrypoint_common' must be skipped:
@@ -295,6 +339,30 @@ func sp_min_warm_entrypoint
bl smc_get_next_ctx
/* r0 points to `smc_ctx_t` */
/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
+#if ENABLE_RUNTIME_INSTRUMENTATION
+/* Save smc_ctx_t */
+mov r5, r0
+pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
+mov r4, r0
+/*
+ * Invalidate the timestamp's cache line before updating it, so that
+ * earlier updates to the same line made with the caches disabled are
+ * properly seen by this core. Without the invalidate, the core might
+ * write into a stale cache line.
+ */
+mov r1, #PMF_TS_SIZE
+bl inv_dcache_range
+ldcopr16 r0, r1, CNTPCT_64
+strd r0, r1, [r4]
+/* Restore smc_ctx_t */
+mov r0, r5
+#endif
b sp_min_exit
endfunc sp_min_warm_entrypoint
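
Note: the warm-boot stamp (RT_INSTR_EXIT_HW_LOW_PWR) is written while the MMU and data cache are still off, so it goes straight to memory. The later RT_INSTR_EXIT_PSCI stamp is written with caches on, which is why the line is invalidated first: a stale clean line left over from the cached world could otherwise mask the uncached write. The sequence corresponds roughly to the following C, a sketch assuming the AArch32 helper names read64_cntpct() and inv_dcache_range():

#include <stddef.h>
#include <stdint.h>

/* Assumed TF-A helpers; verify names against arch_helpers.h */
extern void inv_dcache_range(uintptr_t addr, size_t size);
extern uint64_t read64_cntpct(void);

/* ts points at this core's RT_INSTR_EXIT_PSCI slot, as computed by
 * pmf_calc_timestamp_addr above. */
static void rt_instr_mark_exit_psci(uint64_t *ts)
{
	/* Invalidate so the earlier cache-off write is not shadowed. */
	inv_dcache_range((uintptr_t)ts, sizeof(*ts));
	*ts = read64_cntpct();
}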

View File

@@ -55,6 +55,14 @@ SECTIONS
KEEP(*(rt_svc_descs))
__RT_SVC_DESCS_END__ = .;
+#if ENABLE_PMF
+/* Ensure 4-byte alignment for descriptors and ensure inclusion */
+. = ALIGN(4);
+__PMF_SVC_DESCS_START__ = .;
+KEEP(*(pmf_svc_descs))
+__PMF_SVC_DESCS_END__ = .;
+#endif /* ENABLE_PMF */
/*
* Ensure 4-byte alignment for cpu_ops so that its fields are also
* aligned. Also ensure cpu_ops inclusion.
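
Note: nothing references the PMF service descriptors directly, so without KEEP() a garbage-collecting link could discard them; the START/END symbols bound the array the PMF core walks at init. For reference, a descriptor registered with PMF_REGISTER_SERVICE_SMC() lands in this input section roughly as in the sketch below (simplified layout; the real pmf_svc_desc_t lives in the PMF headers):

/* Simplified stand-in for the real pmf_svc_desc_t */
typedef struct pmf_svc_desc {
	const char *name;
	unsigned int svc_config;	/* packed impl/svc/tid fields */
} pmf_svc_desc_t;

/*
 * Assumed expansion: the registration macro emits a descriptor into
 * the 'pmf_svc_descs' input section, which the KEEP() above gathers
 * between __PMF_SVC_DESCS_START__ and __PMF_SVC_DESCS_END__.
 */
static const pmf_svc_desc_t rt_instr_svc_desc
	__attribute__((section("pmf_svc_descs"), used)) = {
	.name = "rt_instr_svc",
	.svc_config = 0,
};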

View File

@@ -19,7 +19,9 @@
#include <context.h>
#include <drivers/console.h>
#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/pmf/pmf.h>
#include <lib/psci/psci.h>
+#include <lib/runtime_instr.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#include <platform_sp_min.h>
@@ -28,6 +30,11 @@
#include "sp_min_private.h"
+#if ENABLE_RUNTIME_INSTRUMENTATION
+PMF_REGISTER_SERVICE_SMC(rt_instr_svc, PMF_RT_INSTR_SVC_ID,
+RT_INSTR_TOTAL_IDS, PMF_STORE_ENABLE)
+#endif
/* Pointers to per-core cpu contexts */
static void *sp_min_cpu_ctx_ptr[PLATFORM_CORE_COUNT];
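
Note: PMF_REGISTER_SERVICE_SMC registers the service with PMF_STORE_ENABLE, so RT_INSTR_TOTAL_IDS per-cpu timestamp slots are allocated and the values become reachable through the PMF SMC interface (lib/pmf/pmf_smc.c, pulled into BL32 by the makefile change further down). C code in SP_MIN can then stamp any of the service's local IDs; a sketch using the existing PMF capture macro (flag name assumed from the PMF API):

#include <lib/pmf/pmf.h>
#include <lib/runtime_instr.h>

static void rt_instr_example(void)
{
	/*
	 * Capture CNTPCT into this core's RT_INSTR_ENTER_CFLUSH slot,
	 * performing cache maintenance on the slot's line.
	 */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc, RT_INSTR_ENTER_CFLUSH,
		PMF_CACHE_MAINT);
}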

View File

@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PMF_ASM_MACROS_S
+#define PMF_ASM_MACROS_S
+
+#define PMF_TS_SIZE 8
+
+/*
+ * This macro calculates the address of the per-cpu timestamp
+ * for the given service name and local timestamp id.
+ * Clobbers: r0 - r4
+ */
+.macro pmf_calc_timestamp_addr _name, _tid
+mov r4, lr
+bl plat_my_core_pos
+mov lr, r4
+ldr r1, =__PERCPU_TIMESTAMP_SIZE__
+mov r2, #(\_tid * PMF_TS_SIZE)
+mla r0, r0, r1, r2
+ldr r1, =pmf_ts_mem_\_name
+add r0, r0, r1
+.endm
+
+#endif /* PMF_ASM_MACROS_S */
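
Note: this is the AArch32 counterpart of the existing AArch64 macro; lr must be saved around the BL to plat_my_core_pos by hand because BL clobbers it. The address arithmetic (mla plus add) is equivalent to the following C, where the stride __PERCPU_TIMESTAMP_SIZE__ and the pmf_ts_mem_<name> base come from the PMF linker fragment:

#include <stdint.h>

#define PMF_TS_SIZE 8U	/* one 64-bit CNTPCT value per slot */

/*
 * C model of pmf_calc_timestamp_addr: timestamp address =
 * base + core_pos * per-cpu stride + tid * PMF_TS_SIZE.
 */
static inline uintptr_t pmf_ts_addr(uintptr_t base, uint32_t core_pos,
				    uint32_t stride, uint32_t tid)
{
	/* mla r0, r0, r1, r2 ; add r0, r0, r1 */
	return base + (uintptr_t)core_pos * stride + tid * PMF_TS_SIZE;
}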

View File

@@ -39,8 +39,6 @@ int arm_execution_state_switch(unsigned int smc_fid,
uint32_t cookie_lo,
void *handle)
{
-/* Execution state can be switched only if EL3 is AArch64 */
-#ifdef __aarch64__
bool caller_64, thumb = false, from_el2;
unsigned int el, endianness;
u_register_t spsr, pc, scr, sctlr;
@@ -48,6 +46,11 @@
cpu_context_t *ctx = (cpu_context_t *) handle;
el3_state_t *el3_ctx = get_el3state_ctx(ctx);
+/* Validate supplied entry point */
+pc = (u_register_t) (((uint64_t) pc_hi << 32) | pc_lo);
+if (arm_validate_ns_entrypoint(pc) != 0)
+goto invalid_param;
/* That the SMC originated from NS is already validated by the caller */
/*
@@ -173,7 +176,6 @@ invalid_param:
SMC_RET1(handle, STATE_SW_E_PARAM);
exec_denied:
-#endif /* __aarch64__ */
/* State switch denied */
SMC_RET1(handle, STATE_SW_E_DENIED);
}
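
Note: with the validation moved here, and this file now built only for AArch64, an AArch32 EL3 never reaches arm_validate_ns_entrypoint(): the SIP handler below denies the call up front, returning STATE_SW_E_DENIED rather than a misleading STATE_SW_E_PARAM for a switch that is impossible anyway. The entry point is still assembled from the two 32-bit SMC arguments; a small self-checking illustration of that packing:

#include <assert.h>
#include <stdint.h>

/* Worked check of the pc_hi/pc_lo packing used above. */
int main(void)
{
	uint32_t pc_hi = 0x1U, pc_lo = 0x80001000U;
	uint64_t pc = ((uint64_t)pc_hi << 32) | pc_lo;

	assert(pc == 0x180001000ULL);
	return 0;
}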

View File

@@ -215,12 +215,17 @@ BL2U_SOURCES += drivers/delay_timer/delay_timer.c \
BL31_SOURCES += plat/arm/common/arm_bl31_setup.c \
plat/arm/common/arm_pm.c \
plat/arm/common/arm_topology.c \
-plat/arm/common/execution_state_switch.c \
plat/common/plat_psci_common.c
ifeq (${ENABLE_PMF}, 1)
-BL31_SOURCES += plat/arm/common/arm_sip_svc.c \
+ifeq (${ARCH}, aarch64)
+BL31_SOURCES += plat/arm/common/aarch64/execution_state_switch.c \
+plat/arm/common/arm_sip_svc.c \
lib/pmf/pmf_smc.c
+else
+BL32_SOURCES += plat/arm/common/arm_sip_svc.c \
+lib/pmf/pmf_smc.c
+endif
endif
ifeq (${EL3_EXCEPTION_HANDLING},1)
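
Note: with this block, an AArch32 PMF build links the SIP service and the PMF SMC handler into BL32 (SP_MIN), since there is no BL31 on that architecture, while AArch64 keeps them in BL31 alongside the now aarch64-only state-switch implementation. A build invocation exercising the new path might look like the following (platform and toolchain are assumptions):

make CROSS_COMPILE=arm-none-eabi- PLAT=fvp ARCH=aarch32 \
	AARCH32_SP=sp_min ENABLE_PMF=1 ENABLE_RUNTIME_INSTRUMENTATION=1 all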

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+* Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -50,23 +50,22 @@ static uintptr_t arm_sip_handler(unsigned int smc_fid,
switch (smc_fid) {
case ARM_SIP_SVC_EXE_STATE_SWITCH: {
-u_register_t pc;
+/* Execution state can be switched only if EL3 is AArch64 */
+#ifdef __aarch64__
/* Allow calls from non-secure only */
if (!is_caller_non_secure(flags))
SMC_RET1(handle, STATE_SW_E_DENIED);
-/* Validate supplied entry point */
-pc = (u_register_t) ((x1 << 32) | (uint32_t) x2);
-if (arm_validate_ns_entrypoint(pc) != 0)
-SMC_RET1(handle, STATE_SW_E_PARAM);
/*
* Pointers used in execution state switch are all 32 bits wide
*/
return (uintptr_t) arm_execution_state_switch(smc_fid,
(uint32_t) x1, (uint32_t) x2, (uint32_t) x3,
(uint32_t) x4, handle);
+#else
+/* State switch denied */
+SMC_RET1(handle, STATE_SW_E_DENIED);
+#endif /* __aarch64__ */
}
case ARM_SIP_SVC_CALL_COUNT: