SiP: MISRA fixes for execution state switch
These changes address most of the required MISRA rules. In the process,
some issues in generic code are also fixed. No functional changes.

Change-Id: I707dbec9b34b802397e99da2f5ae738165d6feba
Signed-off-by: Jeenu Viswambharan <jeenu.viswambharan@arm.com>
commit b634fa910e
parent 30a8d96e46
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -23,7 +23,7 @@
 #define SET_PARAM_HEAD(_p, _type, _ver, _attr) do { \
 	(_p)->h.type = (uint8_t)(_type); \
 	(_p)->h.version = (uint8_t)(_ver); \
-	(_p)->h.size = (uint16_t)sizeof(*_p); \
+	(_p)->h.size = (uint16_t)sizeof(*(_p)); \
 	(_p)->h.attr = (uint32_t)(_attr) ; \
 } while (0)
 
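The macro change above, sizeof(*_p) to sizeof(*(_p)), is the classic macro-hygiene fix: every use of a macro parameter must be parenthesized so the expansion cannot re-bind to a neighbouring operator. A minimal sketch of the failure mode this prevents, using hypothetical macros rather than SET_PARAM_HEAD itself:

#include <stdint.h>
#include <stdio.h>

#define DEREF_SIZE_BAD(_p)	sizeof(*_p)	/* parameter used bare */
#define DEREF_SIZE_GOOD(_p)	sizeof(*(_p))	/* parenthesized, as in the patch */

int main(void)
{
	uint16_t buf[2];
	uint16_t *base = buf;

	/* Expands to sizeof(*base + 1): '+' promotes the operand to int,
	 * so this measures sizeof(int), not sizeof(uint16_t). */
	printf("%zu\n", DEREF_SIZE_BAD(base + 1));

	/* Expands to sizeof(*(base + 1)): the pointed-to uint16_t, i.e. 2. */
	printf("%zu\n", DEREF_SIZE_GOOD(base + 1));
	return 0;
}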
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -64,9 +64,9 @@ static uintptr_t arm_sip_handler(unsigned int smc_fid,
 		/*
 		 * Pointers used in execution state switch are all 32 bits wide
 		 */
-		return arm_execution_state_switch(smc_fid, (uint32_t) x1,
-				(uint32_t) x2, (uint32_t) x3, (uint32_t) x4,
-				handle);
+		return (uintptr_t) arm_execution_state_switch(smc_fid,
+				(uint32_t) x1, (uint32_t) x2, (uint32_t) x3,
+				(uint32_t) x4, handle);
 	}

 	case ARM_SIP_SVC_CALL_COUNT:
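The hunk above adds an explicit (uintptr_t) cast because arm_execution_state_switch() returns a plain int while the SMC handler must return uintptr_t; MISRA wants conversions between different essential types spelled out rather than left implicit. The shape of the fix, with stand-in names:

#include <stdint.h>

/* Stand-in for arm_execution_state_switch(), which returns int. */
static int state_switch_result(void)
{
	return 0;
}

uintptr_t smc_handler_sketch(void)
{
	/* Explicit conversion: the int result is widened deliberately,
	 * not by silent implicit conversion. */
	return (uintptr_t) state_switch_result();
}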
@@ -11,6 +11,7 @@
 #include <plat_arm.h>
 #include <psci.h>
 #include <smccc_helpers.h>
+#include <stdbool.h>
 #include <string.h>
 #include <utils.h>
 
@@ -39,7 +40,8 @@ int arm_execution_state_switch(unsigned int smc_fid,
 {
 	/* Execution state can be switched only if EL3 is AArch64 */
 #ifdef AARCH64
-	int caller_64, from_el2, el, endianness, thumb = 0;
+	bool caller_64, thumb = false, from_el2;
+	unsigned int el, endianness;
 	u_register_t spsr, pc, scr, sctlr;
 	entry_point_info_t ep;
 	cpu_context_t *ctx = (cpu_context_t *) handle;
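The rewritten declarations carry most of the essential-type cleanup: flag-like variables (caller_64, thumb, from_el2) become bool, which is why <stdbool.h> is added above, while el and endianness stay numeric as unsigned int because they feed macro arguments. A reduced sketch of the pattern, with assumed values:

#include <stdbool.h>

void decl_sketch(unsigned int spsr_mode, unsigned int mode_rw_64)
{
	/* Old style conflated flags and numbers: int caller_64, thumb = 0; */
	bool caller_64 = (spsr_mode == mode_rw_64);
	bool thumb = false;
	unsigned int el = 1U;	/* genuinely numeric, stays unsigned */

	if (caller_64 && !thumb) {
		el = 2U;
	}
	(void) el;
}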
@@ -50,7 +52,7 @@ int arm_execution_state_switch(unsigned int smc_fid,
 	/*
 	 * Disallow state switch if any of the secondaries have been brought up.
 	 */
-	if (psci_secondaries_brought_up())
+	if (psci_secondaries_brought_up() != 0)
 		goto exec_denied;

 	spsr = read_ctx_reg(el3_ctx, CTX_SPSR_EL3);
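The added != 0 follows MISRA Rule 14.4: the controlling expression of an if statement must be essentially Boolean, so an int return value is compared against zero instead of being tested directly. The same pattern with an assumed helper:

/* Assumed stand-in for psci_secondaries_brought_up(), returning int. */
extern int secondaries_up(void);

void deny_sketch(void)
{
	/* Non-compliant: if (secondaries_up()) ... */
	if (secondaries_up() != 0) {
		/* deny the execution state switch */
	}
}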
@@ -61,20 +63,20 @@ int arm_execution_state_switch(unsigned int smc_fid,
 		 * If the call originated from AArch64, expect 32-bit pointers when
 		 * switching to AArch32.
 		 */
-		if ((pc_hi != 0) || (cookie_hi != 0))
+		if ((pc_hi != 0U) || (cookie_hi != 0U))
 			goto invalid_param;

 		pc = pc_lo;

 		/* Instruction state when entering AArch32 */
-		thumb = pc & 1;
+		thumb = (pc & 1U) != 0U;
 	} else {
 		/* Construct AArch64 PC */
 		pc = (((u_register_t) pc_hi) << 32) | pc_lo;
 	}

 	/* Make sure PC is 4-byte aligned, except for Thumb */
-	if ((pc & 0x3) && !thumb)
+	if (((pc & 0x3U) != 0U) && !thumb)
 		goto invalid_param;

 	/*
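The U suffixes in this hunk keep both operands of !=, & and the alignment mask in the same essential type category as the unsigned register values, and (pc & 1U) != 0U converts a masked bit into an essentially Boolean value before it is assigned to the now-bool thumb. Condensed illustration, function names assumed:

#include <stdbool.h>
#include <stdint.h>

bool is_thumb_entry(uint64_t pc)
{
	/* Mask with an unsigned literal, compare with 0U: the result is
	 * essentially Boolean, as the patch requires for 'thumb'. */
	return (pc & 1U) != 0U;
}

bool is_misaligned_arm(uint64_t pc, bool thumb)
{
	/* Mirrors the patched 4-byte alignment check. */
	return ((pc & 0x3U) != 0U) && !thumb;
}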
@@ -95,7 +97,7 @@ int arm_execution_state_switch(unsigned int smc_fid,
 		 * Disallow switching state if there's a Hypervisor in place;
 		 * this request must be taken up with the Hypervisor instead.
 		 */
-		if (scr & SCR_HCE_BIT)
+		if ((scr & SCR_HCE_BIT) != 0U)
 			goto exec_denied;
 	}
 
@@ -105,11 +107,11 @@ int arm_execution_state_switch(unsigned int smc_fid,
 	 * directly.
 	 */
 	sctlr = from_el2 ? read_sctlr_el2() : read_sctlr_el1();
-	endianness = !!(sctlr & SCTLR_EE_BIT);
+	endianness = ((sctlr & SCTLR_EE_BIT) != 0U) ? 1U : 0U;

 	/* Construct SPSR for the exception state we're about to switch to */
 	if (caller_64) {
-		int impl;
+		unsigned long long impl;

 		/*
 		 * Switching from AArch64 to AArch32. Ensure this CPU implements
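Two fixes here: !! applied to a non-Boolean operand falls foul of the essential type rules, so the double negation becomes an explicit test-and-select, and impl is retyped to an unsigned type matching the ID-register value it stores. An equivalent sketch of the former, with the bit position assumed for illustration only:

#include <stdint.h>

#define EE_BIT	(1ULL << 25)	/* assumed SCTLR endianness bit */

unsigned int endianness_of(uint64_t sctlr)
{
	/* Old idiom: return !!(sctlr & EE_BIT); */
	return ((sctlr & EE_BIT) != 0U) ? 1U : 0U;
}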
@@ -121,7 +123,8 @@ int arm_execution_state_switch(unsigned int smc_fid,

 		/* Return to the equivalent AArch32 privilege level */
 		el = from_el2 ? MODE32_hyp : MODE32_svc;
-		spsr = SPSR_MODE32(el, thumb ? SPSR_T_THUMB : SPSR_T_ARM,
+		spsr = SPSR_MODE32((u_register_t) el,
+				thumb ? SPSR_T_THUMB : SPSR_T_ARM,
 				endianness, DISABLE_ALL_EXCEPTIONS);
 	} else {
 		/*
@@ -130,7 +133,8 @@ int arm_execution_state_switch(unsigned int smc_fid,
 		 * raised), it's safe to assume AArch64 is also implemented.
 		 */
 		el = from_el2 ? MODE_EL2 : MODE_EL1;
-		spsr = SPSR_64(el, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+		spsr = SPSR_64((u_register_t) el, MODE_SP_ELX,
+				DISABLE_ALL_EXCEPTIONS);
 	}

 	/*
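Both SPSR_MODE32 and SPSR_64 shift their mode argument into position, so el is cast to u_register_t at the call site: the shift then happens on a full-width unsigned operand rather than on an int promoted from unsigned int. A reduced sketch with a made-up macro:

#include <stdint.h>

typedef uint64_t u_register_t;	/* assumed, as on AArch64 */

#define MODE_SHIFT		2U
#define MAKE_SPSR(_el, _f)	(((_el) << MODE_SHIFT) | (_f))	/* stand-in */

u_register_t build_spsr(unsigned int el)
{
	/* Cast before the macro so the shifted operand is u_register_t,
	 * mirroring SPSR_64((u_register_t) el, ...). */
	return MAKE_SPSR((u_register_t) el, 0U);
}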
@@ -143,10 +147,11 @@ int arm_execution_state_switch(unsigned int smc_fid,
 	 */
 	zeromem(&ep, sizeof(ep));
 	ep.pc = pc;
-	ep.spsr = spsr;
+	ep.spsr = (uint32_t) spsr;
 	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1,
-			((endianness ? EP_EE_BIG : EP_EE_LITTLE) | NON_SECURE |
-				EP_ST_DISABLE));
+			((unsigned int) ((endianness != 0U) ? EP_EE_BIG :
+				EP_EE_LITTLE)
+			 | NON_SECURE | EP_ST_DISABLE));

 	/*
 	 * Re-initialize the system register context, and exit EL3 as if for the
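Finally, spsr is held in a full-width u_register_t while the entry point structure stores a 32-bit SPSR, so the narrowing is made explicit with (uint32_t); likewise the SET_PARAM_HEAD attribute expression is cast to unsigned int before being OR-ed with the remaining flags. The narrowing half of that, with assumed types:

#include <stdint.h>

typedef uint64_t u_register_t;	/* assumed register width */

struct ep_sketch {
	uint32_t spsr;	/* 32-bit field, as in entry_point_info_t */
};

void fill_ep(struct ep_sketch *ep, u_register_t spsr)
{
	/* MISRA requires narrowing conversions to be explicit. */
	ep->spsr = (uint32_t) spsr;
}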