PSCI: Fix MISRA defects in common and setup code

Fix violations of MISRA C-2012 Rules 10.1, 10.3, 17.8 and 20.7.

Change-Id: I3980bd2a1d845559af4bbe2887a0250d0506a064
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
Author: Antonio Nino Diaz
Date:   2018-07-17 15:10:08 +01:00
Parent: abce1dce8b
Commit: 6b7b0f3686
6 changed files with 214 additions and 164 deletions
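
The changes below are mechanical and repeat the same few patterns across all six files. As a rough illustrative sketch (hypothetical code, not taken from this patch), the categories of fix look like this:

#include <stddef.h>

/* Rule 20.7 (illustration): macro parameters parenthesised when expanded. */
#define MY_FLAG(n)	(1U << (n))

static unsigned int count_flagged(const unsigned int *flags, unsigned int n)
{
	const unsigned int *p = flags;	/* Rule 17.8: do not modify a parameter */
	unsigned int count = 0U;	/* Rules 10.1/10.3: keep essential types */
	unsigned int i;			/* consistent, hence the 'U' suffixes */

	if (p == NULL)			/* compare pointers against NULL explicitly */
		return 0U;

	for (i = 0U; i < n; i++) {
		if ((p[i] & MY_FLAG(0U)) != 0U)	/* no implicit boolean from '&' */
			count++;
	}

	return count;
}

In the diff itself the same transformations appear as 'U' suffixes on unsigned literals, explicit '!= NULL' and '!= 0U' comparisons instead of implicit boolean tests, local copies of parameters that used to be written directly (for example the node_index walk in psci_get_parent_pwr_domain_nodes()), and extra parentheses around sub-expressions in conditions and asserts.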

@@ -344,7 +344,7 @@ int psci_affinity_info(u_register_t target_affinity,
 			unsigned int lowest_affinity_level);
 int psci_migrate(u_register_t target_cpu);
 int psci_migrate_info_type(void);
-long psci_migrate_info_up_cpu(void);
+u_register_t psci_migrate_info_up_cpu(void);
 int psci_node_hw_state(u_register_t target_cpu,
 			unsigned int power_level);
 int psci_features(unsigned int psci_fid);

@@ -58,17 +58,17 @@ typedef struct psci_lib_args {
 		.h.type = (uint8_t)PARAM_PSCI_LIB_ARGS,	\
 		.h.version = (uint8_t)VERSION_1,	\
 		.h.size = (uint16_t)sizeof(_name),	\
-		.h.attr = 0,	\
+		.h.attr = 0U,	\
 		.mailbox_ep = (_entry)	\
 	}
 /* Helper macro to verify the pointer to psci_lib_args_t structure */
-#define VERIFY_PSCI_LIB_ARGS_V1(_p)	((_p)	\
+#define VERIFY_PSCI_LIB_ARGS_V1(_p)	(((_p) != NULL)	\
 		&& ((_p)->h.type == PARAM_PSCI_LIB_ARGS)	\
 		&& ((_p)->h.version == VERSION_1)	\
 		&& ((_p)->h.size == sizeof(*(_p)))	\
 		&& ((_p)->h.attr == 0)	\
-		&& ((_p)->mailbox_ep))
+		&& ((_p)->mailbox_ep != NULL))
 /******************************************************************************
  * PSCI Library Interfaces

@@ -68,9 +68,9 @@ const plat_psci_ops_t *psci_plat_pm_ops;
 /******************************************************************************
  * Check that the maximum power level supported by the platform makes sense
  *****************************************************************************/
-CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \
-	PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \
+CASSERT((PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL) &&
+	(PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL),
 	assert_platform_max_pwrlvl_check);
 /*
  * The plat_local_state used by the platform is one of these types: RUN,
@@ -111,7 +111,7 @@ static plat_local_state_type_t find_local_state_type(plat_local_state_t state)
  * Check that the maximum retention level supported by the platform is less
  * than the maximum off level.
  *****************************************************************************/
-CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, \
+CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE,
 	assert_platform_max_off_and_retn_state_check);
 /******************************************************************************
@@ -122,10 +122,10 @@ int psci_validate_power_state(unsigned int power_state,
 			      psci_power_state_t *state_info)
 {
 	/* Check SBZ bits in power state are zero */
-	if (psci_check_power_state(power_state))
+	if (psci_check_power_state(power_state) != 0U)
 		return PSCI_E_INVALID_PARAMS;
-	assert(psci_plat_pm_ops->validate_power_state);
+	assert(psci_plat_pm_ops->validate_power_state != NULL);
 	/* Validate the power_state using platform pm_ops */
 	return psci_plat_pm_ops->validate_power_state(power_state, state_info);
@@ -141,7 +141,7 @@ void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
 	 * Assert that the required pm_ops hook is implemented to ensure that
 	 * the capability detected during psci_setup() is valid.
 	 */
-	assert(psci_plat_pm_ops->get_sys_suspend_power_state);
+	assert(psci_plat_pm_ops->get_sys_suspend_power_state != NULL);
 	/*
 	 * Query the platform for the power_state required for system suspend
@@ -157,7 +157,7 @@ void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
 ******************************************************************************/
 unsigned int psci_is_last_on_cpu(void)
 {
-	unsigned int cpu_idx, my_idx = plat_my_core_pos();
+	int cpu_idx, my_idx = (int) plat_my_core_pos();
 	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
 		if (cpu_idx == my_idx) {
@@ -209,7 +209,7 @@ static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
 	assert(pwrlvl > PSCI_CPU_PWR_LVL);
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Warray-bounds"
-	psci_req_local_pwr_states[pwrlvl - 1][cpu_idx] = req_pwr_state;
+	psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx] = req_pwr_state;
 #pragma GCC diagnostic pop
 }
@@ -219,8 +219,15 @@ static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
 void psci_init_req_local_pwr_states(void)
 {
 	/* Initialize the requested state of all non CPU power domains as OFF */
-	memset(&psci_req_local_pwr_states, PLAT_MAX_OFF_STATE,
-			sizeof(psci_req_local_pwr_states));
+	unsigned int pwrlvl;
+	int core;
+	for (pwrlvl = 0U; pwrlvl < PLAT_MAX_PWR_LVL; pwrlvl++) {
+		for (core = 0; core < PLATFORM_CORE_COUNT; core++) {
+			psci_req_local_pwr_states[pwrlvl][core] =
+				PLAT_MAX_OFF_STATE;
+		}
+	}
 }
 /******************************************************************************
@@ -232,11 +239,11 @@ void psci_init_req_local_pwr_states(void)
  * assertion is added to prevent us from accessing the CPU power level.
  *****************************************************************************/
 static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
-							  unsigned int cpu_idx)
+							  int cpu_idx)
 {
 	assert(pwrlvl > PSCI_CPU_PWR_LVL);
-	return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx];
+	return &psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx];
 }
 /*
@@ -299,7 +306,7 @@ void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
 	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
 	/* Copy the local power state from node to state_info */
-	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
 		pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
 	}
@@ -332,7 +339,7 @@ static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
 	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
 	/* Copy the local_state from state_info */
-	for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
+	for (lvl = 1U; lvl <= end_pwrlvl; lvl++) {
 		set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
 	}
@@ -342,15 +349,17 @@ static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
 /*******************************************************************************
  * PSCI helper function to get the parent nodes corresponding to a cpu_index.
  ******************************************************************************/
-void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
+void psci_get_parent_pwr_domain_nodes(int cpu_idx,
 				      unsigned int end_lvl,
-				      unsigned int node_index[])
+				      unsigned int *node_index)
 {
 	unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
 	unsigned int i;
+	unsigned int *node = node_index;
-	for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) {
-		*node_index++ = parent_node;
+	for (i = PSCI_CPU_PWR_LVL + 1U; i <= end_lvl; i++) {
+		*node = parent_node;
+		node++;
 		parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
 	}
 }
@@ -366,7 +375,7 @@ void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
 	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
 	/* Reset the local_state to RUN for the non cpu power domains. */
-	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
 		set_non_cpu_pd_node_local_state(parent_idx,
 				PSCI_LOCAL_STATE_RUN);
 		psci_set_req_local_pwr_state(lvl,
@@ -406,7 +415,8 @@ void psci_do_state_coordination(unsigned int end_pwrlvl,
 				psci_power_state_t *state_info)
 {
 	unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
-	unsigned int start_idx, ncpus;
+	int start_idx;
+	unsigned int ncpus;
 	plat_local_state_t target_state, *req_states;
 	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
@@ -414,7 +424,7 @@ void psci_do_state_coordination(unsigned int end_pwrlvl,
 	/* For level 0, the requested state will be equivalent
 	   to target state */
-	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
 		/* First update the requested power state */
 		psci_set_req_local_pwr_state(lvl, cpu_idx,
@@ -436,7 +446,7 @@ void psci_do_state_coordination(unsigned int end_pwrlvl,
 		state_info->pwr_domain_state[lvl] = target_state;
 		/* Break early if the negotiated target power state is RUN */
-		if (is_local_state_run(state_info->pwr_domain_state[lvl]))
+		if (is_local_state_run(state_info->pwr_domain_state[lvl]) != 0)
 			break;
 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
@@ -448,7 +458,7 @@ void psci_do_state_coordination(unsigned int end_pwrlvl,
 	 * We update the requested power state from state_info and then
 	 * set the target state as RUN.
 	 */
-	for (lvl = lvl + 1; lvl <= end_pwrlvl; lvl++) {
+	for (lvl = lvl + 1U; lvl <= end_pwrlvl; lvl++) {
 		psci_set_req_local_pwr_state(lvl, cpu_idx,
 					     state_info->pwr_domain_state[lvl]);
 		state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
@@ -486,7 +496,7 @@ int psci_validate_suspend_req(const psci_power_state_t *state_info,
 	/* All power domain levels are in a RUN state to begin with */
 	deepest_state_type = STATE_TYPE_RUN;
-	for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) {
+	for (i = (int) target_lvl; i >= (int) PSCI_CPU_PWR_LVL; i--) {
 		state = state_info->pwr_domain_state[i];
 		req_state_type = find_local_state_type(state);
@@ -515,8 +525,9 @@
 	 * has to be invalid and max retention level has to be a valid power
 	 * level.
 	 */
-	if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_PWR_LVL ||
-			max_retn_lvl == PSCI_INVALID_PWR_LVL))
+	if ((is_power_down_state == 0U) &&
+			((max_off_lvl != PSCI_INVALID_PWR_LVL) ||
+			(max_retn_lvl == PSCI_INVALID_PWR_LVL)))
 		return PSCI_E_INVALID_PARAMS;
 	return PSCI_E_SUCCESS;
@@ -530,9 +541,9 @@ unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
 {
 	int i;
-	for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
-		if (is_local_state_off(state_info->pwr_domain_state[i]))
-			return i;
+	for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
+		if (is_local_state_off(state_info->pwr_domain_state[i]) != 0)
+			return (unsigned int) i;
 	}
 	return PSCI_INVALID_PWR_LVL;
@@ -546,9 +557,9 @@ unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
 {
 	int i;
-	for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
-		if (!is_local_state_run(state_info->pwr_domain_state[i]))
-			return i;
+	for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
+		if (is_local_state_run(state_info->pwr_domain_state[i]) == 0)
+			return (unsigned int) i;
 	}
 	return PSCI_INVALID_PWR_LVL;
@@ -559,14 +570,13 @@ unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
 * tree that the operation should be applied to. It picks up locks in order of
 * increasing power domain level in the range specified.
 ******************************************************************************/
-void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
-				   unsigned int cpu_idx)
+void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl, int cpu_idx)
 {
 	unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
 	unsigned int level;
 	/* No locking required for level 0. Hence start locking from level 1 */
-	for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
+	for (level = PSCI_CPU_PWR_LVL + 1U; level <= end_pwrlvl; level++) {
 		psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
 	}
@@ -577,18 +587,17 @@ void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
 * tree that the operation should be applied to. It releases the locks in order
 * of decreasing power domain level in the range specified.
 ******************************************************************************/
-void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
-				   unsigned int cpu_idx)
+void psci_release_pwr_domain_locks(unsigned int end_pwrlvl, int cpu_idx)
 {
 	unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0};
-	int level;
+	unsigned int level;
 	/* Get the parent nodes */
 	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
 	/* Unlock top down. No unlocking required for level 0. */
-	for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1; level--) {
-		parent_idx = parent_nodes[level - 1];
+	for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1U; level--) {
+		parent_idx = parent_nodes[level - 1U];
 		psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
 	}
 }
@@ -664,11 +673,12 @@ static int psci_get_ns_ep_info(entry_point_info_t *ep,
 	u_register_t ns_scr_el3 = read_scr_el3();
 	u_register_t ns_sctlr_el1 = read_sctlr_el1();
-	sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
+	sctlr = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
+		read_sctlr_el2() : ns_sctlr_el1;
 	ee = 0;
 	ep_attr = NON_SECURE | EP_ST_DISABLE;
-	if (sctlr & SCTLR_EE_BIT) {
+	if ((sctlr & SCTLR_EE_BIT) != 0U) {
 		ep_attr |= EP_EE_BIG;
 		ee = 1;
 	}
@@ -682,21 +692,22 @@ static int psci_get_ns_ep_info(entry_point_info_t *ep,
 	 * Figure out whether the cpu enters the non-secure address space
 	 * in aarch32 or aarch64
 	 */
-	if (ns_scr_el3 & SCR_RW_BIT) {
+	if ((ns_scr_el3 & SCR_RW_BIT) != 0U) {
 		/*
 		 * Check whether a Thumb entry point has been provided for an
 		 * aarch64 EL
 		 */
-		if (entrypoint & 0x1)
+		if ((entrypoint & 0x1UL) != 0UL)
 			return PSCI_E_INVALID_ADDRESS;
-		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;
+		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ? MODE_EL2 : MODE_EL1;
 		ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
 	} else {
-		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
+		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
+			MODE32_hyp : MODE32_svc;
 		/*
 		 * TODO: Choose async. exception bits if HYP mode is not
@@ -723,7 +734,7 @@ int psci_validate_entry_point(entry_point_info_t *ep,
 	int rc;
 	/* Validate the entrypoint using platform psci_ops */
-	if (psci_plat_pm_ops->validate_ns_entrypoint) {
+	if (psci_plat_pm_ops->validate_ns_entrypoint != NULL) {
 		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
 		if (rc != PSCI_E_SUCCESS)
 			return PSCI_E_INVALID_ADDRESS;
@@ -749,7 +760,8 @@ int psci_validate_entry_point(entry_point_info_t *ep,
 ******************************************************************************/
 void psci_warmboot_entrypoint(void)
 {
-	unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
+	unsigned int end_pwrlvl;
+	int cpu_idx = (int) plat_my_core_pos();
 	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
 	/*
@@ -772,8 +784,7 @@ void psci_warmboot_entrypoint(void)
 	 * that by the time all locks are taken, the system topology is snapshot
 	 * and state management can be done safely.
 	 */
-	psci_acquire_pwr_domain_locks(end_pwrlvl,
-				      cpu_idx);
+	psci_acquire_pwr_domain_locks(end_pwrlvl, cpu_idx);
 	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
@@ -818,8 +829,7 @@ void psci_warmboot_entrypoint(void)
 	 * This loop releases the lock corresponding to each power level
 	 * in the reverse order to which they were acquired.
 	 */
-	psci_release_pwr_domain_locks(end_pwrlvl,
-				      cpu_idx);
+	psci_release_pwr_domain_locks(end_pwrlvl, cpu_idx);
 }
 /*******************************************************************************
@@ -829,13 +839,13 @@ void psci_warmboot_entrypoint(void)
 ******************************************************************************/
 void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
 {
-	assert(pm);
+	assert(pm != NULL);
 	psci_spd_pm = pm;
-	if (pm->svc_migrate)
+	if (pm->svc_migrate != NULL)
 		psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);
-	if (pm->svc_migrate_info)
+	if (pm->svc_migrate_info != NULL)
 		psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
 			| define_psci_cap(PSCI_MIG_INFO_TYPE);
 }
@@ -851,13 +861,13 @@ int psci_spd_migrate_info(u_register_t *mpidr)
 {
 	int rc;
-	if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info)
+	if ((psci_spd_pm == NULL) || (psci_spd_pm->svc_migrate_info == NULL))
 		return PSCI_E_NOT_SUPPORTED;
 	rc = psci_spd_pm->svc_migrate_info(mpidr);
-	assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP \
-	       || rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED);
+	assert((rc == PSCI_TOS_UP_MIG_CAP) || (rc == PSCI_TOS_NOT_UP_MIG_CAP) ||
+	       (rc == PSCI_TOS_NOT_PRESENT_MP) || (rc == PSCI_E_NOT_SUPPORTED));
 	return rc;
 }
@@ -870,7 +880,7 @@ int psci_spd_migrate_info(u_register_t *mpidr)
 void psci_print_power_domain_map(void)
 {
 #if LOG_LEVEL >= LOG_LEVEL_INFO
-	unsigned int idx;
+	int idx;
 	plat_local_state_t state;
 	plat_local_state_type_t state_type;
@@ -916,16 +926,16 @@ void psci_print_power_domain_map(void)
 *****************************************************************************/
 int psci_secondaries_brought_up(void)
 {
-	unsigned int idx, n_valid = 0;
+	unsigned int idx, n_valid = 0U;
-	for (idx = 0; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) {
+	for (idx = 0U; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) {
 		if (psci_cpu_pd_nodes[idx].mpidr != PSCI_INVALID_MPIDR)
 			n_valid++;
 	}
-	assert(n_valid);
-	return (n_valid > 1);
+	assert(n_valid > 0U);
+	return (n_valid > 1U) ? 1 : 0;
 }
 #if ENABLE_PLAT_COMPAT
@@ -972,8 +982,8 @@ int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
 		return PSCI_INVALID_DATA;
 	/* Sanity check to verify that the CPU is in CPU_SUSPEND */
-	if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON &&
-	    !is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx)))
+	if ((psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON) &&
+	    (!is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx))))
 		return psci_get_pstate_id(psci_power_state_compat[cpu_idx]);
 	return PSCI_INVALID_DATA;

@@ -82,8 +82,8 @@ int psci_cpu_suspend(unsigned int power_state,
 	}
 	/* Fast path for CPU standby.*/
-	if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) {
-		if (!psci_plat_pm_ops->cpu_standby)
+	if (is_cpu_standby_req(is_power_down_state, target_pwrlvl) != 0) {
+		if (psci_plat_pm_ops->cpu_standby == NULL)
 			return PSCI_E_INVALID_PARAMS;
 		/*
@@ -128,7 +128,7 @@ int psci_cpu_suspend(unsigned int power_state,
 	 * If a power down state has been requested, we need to verify entry
 	 * point and program entry information.
 	 */
-	if (is_power_down_state) {
+	if (is_power_down_state != 0U) {
 		rc = psci_validate_entry_point(&ep, entrypoint, context_id);
 		if (rc != PSCI_E_SUCCESS)
 			return rc;
@@ -156,7 +156,7 @@ int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id)
 	entry_point_info_t ep;
 	/* Check if the current CPU is the last ON CPU in the system */
-	if (!psci_is_last_on_cpu())
+	if (psci_is_last_on_cpu() == 0U)
 		return PSCI_E_DENIED;
 	/* Validate the entry point and get the entry_point_info */
@@ -171,7 +171,8 @@ int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id)
 	assert(psci_find_target_suspend_lvl(&state_info) == PLAT_MAX_PWR_LVL);
 	assert(psci_validate_suspend_req(&state_info, PSTATE_TYPE_POWERDOWN)
 						== PSCI_E_SUCCESS);
-	assert(is_local_state_off(state_info.pwr_domain_state[PLAT_MAX_PWR_LVL]));
+	assert(is_local_state_off(
+		state_info.pwr_domain_state[PLAT_MAX_PWR_LVL]) != 0);
 	/*
 	 * Do what is needed to enter the system suspend state. This function
@@ -236,7 +237,8 @@ int psci_affinity_info(u_register_t target_affinity,
 	 * target CPUs shutdown was not seen by the current CPU's cluster. And
 	 * so the cache may contain stale data for the target CPU.
 	 */
-	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+	flush_cpu_data_by_index((unsigned int)target_idx,
+				psci_svc_cpu_data.aff_info_state);
 	return psci_get_aff_info_state_by_idx(target_idx);
 }
@@ -263,10 +265,10 @@ int psci_migrate(u_register_t target_cpu)
 	if (rc != PSCI_E_SUCCESS)
 		return PSCI_E_INVALID_PARAMS;
-	assert(psci_spd_pm && psci_spd_pm->svc_migrate);
+	assert((psci_spd_pm != NULL) && (psci_spd_pm->svc_migrate != NULL));
 	rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu);
-	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
+	assert((rc == PSCI_E_SUCCESS) || (rc == PSCI_E_INTERN_FAIL));
 	return rc;
 }
@@ -278,7 +280,7 @@ int psci_migrate_info_type(void)
 	return psci_spd_migrate_info(&resident_cpu_mpidr);
 }
-long psci_migrate_info_up_cpu(void)
+u_register_t psci_migrate_info_up_cpu(void)
 {
 	u_register_t resident_cpu_mpidr;
 	int rc;
@@ -288,8 +290,8 @@ long psci_migrate_info_up_cpu(void)
 	 * psci_spd_migrate_info() returns.
 	 */
 	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
-	if (rc != PSCI_TOS_NOT_UP_MIG_CAP && rc != PSCI_TOS_UP_MIG_CAP)
-		return PSCI_E_INVALID_PARAMS;
+	if ((rc != PSCI_TOS_NOT_UP_MIG_CAP) && (rc != PSCI_TOS_UP_MIG_CAP))
+		return (u_register_t)(register_t) PSCI_E_INVALID_PARAMS;
 	return resident_cpu_mpidr;
 }
@@ -312,10 +314,11 @@ int psci_node_hw_state(u_register_t target_cpu,
 	 * Dispatch this call to platform to query power controller, and pass on
 	 * to the caller what it returns
 	 */
-	assert(psci_plat_pm_ops->get_node_hw_state);
+	assert(psci_plat_pm_ops->get_node_hw_state != NULL);
 	rc = psci_plat_pm_ops->get_node_hw_state(target_cpu, power_level);
-	assert((rc >= HW_ON && rc <= HW_STANDBY) || rc == PSCI_E_NOT_SUPPORTED
-	       || rc == PSCI_E_INVALID_PARAMS);
+	assert(((rc >= HW_ON) && (rc <= HW_STANDBY))
+	       || (rc == PSCI_E_NOT_SUPPORTED)
+	       || (rc == PSCI_E_INVALID_PARAMS));
 	return rc;
 }
@@ -337,17 +340,19 @@ int psci_features(unsigned int psci_fid)
 	/* Check if the psci fid is supported or not */
-	if (!(local_caps & define_psci_cap(psci_fid)))
+	if ((local_caps & define_psci_cap(psci_fid)) == 0U)
 		return PSCI_E_NOT_SUPPORTED;
 	/* Format the feature flags */
-	if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 ||
-	    psci_fid == PSCI_CPU_SUSPEND_AARCH64) {
+	if ((psci_fid == PSCI_CPU_SUSPEND_AARCH32) ||
+	    (psci_fid == PSCI_CPU_SUSPEND_AARCH64)) {
 		/*
 		 * The trusted firmware does not support OS Initiated Mode.
 		 */
-		return (FF_PSTATE << FF_PSTATE_SHIFT) |
-			((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT);
+		unsigned int ret = ((FF_PSTATE << FF_PSTATE_SHIFT) |
+			(((FF_SUPPORTS_OS_INIT_MODE == 1U) ? 0U : 1U)
+			<< FF_MODE_SUPPORT_SHIFT));
+		return (int) ret;
 	}
 	/* Return 0 for all other fid's */
@@ -366,50 +371,62 @@ u_register_t psci_smc_handler(uint32_t smc_fid,
 			      void *handle,
 			      u_register_t flags)
 {
+	u_register_t ret;
 	if (is_caller_secure(flags))
-		return SMC_UNK;
+		return (u_register_t)SMC_UNK;
 	/* Check the fid against the capabilities */
-	if (!(psci_caps & define_psci_cap(smc_fid)))
-		return SMC_UNK;
+	if ((psci_caps & define_psci_cap(smc_fid)) == 0U)
+		return (u_register_t)SMC_UNK;
 	if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
 		/* 32-bit PSCI function, clear top parameter bits */
-		x1 = (uint32_t)x1;
-		x2 = (uint32_t)x2;
-		x3 = (uint32_t)x3;
+		uint32_t r1 = (uint32_t)x1;
+		uint32_t r2 = (uint32_t)x2;
+		uint32_t r3 = (uint32_t)x3;
 		switch (smc_fid) {
 		case PSCI_VERSION:
-			return psci_version();
+			ret = (u_register_t)psci_version();
+			break;
 		case PSCI_CPU_OFF:
-			return psci_cpu_off();
+			ret = (u_register_t)psci_cpu_off();
+			break;
 		case PSCI_CPU_SUSPEND_AARCH32:
-			return psci_cpu_suspend(x1, x2, x3);
+			ret = (u_register_t)psci_cpu_suspend(r1, r2, r3);
+			break;
 		case PSCI_CPU_ON_AARCH32:
-			return psci_cpu_on(x1, x2, x3);
+			ret = (u_register_t)psci_cpu_on(r1, r2, r3);
+			break;
 		case PSCI_AFFINITY_INFO_AARCH32:
-			return psci_affinity_info(x1, x2);
+			ret = (u_register_t)psci_affinity_info(r1, r2);
+			break;
 		case PSCI_MIG_AARCH32:
-			return psci_migrate(x1);
+			ret = (u_register_t)psci_migrate(r1);
+			break;
 		case PSCI_MIG_INFO_TYPE:
-			return psci_migrate_info_type();
+			ret = (u_register_t)psci_migrate_info_type();
+			break;
 		case PSCI_MIG_INFO_UP_CPU_AARCH32:
-			return psci_migrate_info_up_cpu();
+			ret = psci_migrate_info_up_cpu();
+			break;
 		case PSCI_NODE_HW_STATE_AARCH32:
-			return psci_node_hw_state(x1, x2);
+			ret = (u_register_t)psci_node_hw_state(r1, r2);
+			break;
 		case PSCI_SYSTEM_SUSPEND_AARCH32:
-			return psci_system_suspend(x1, x2);
+			ret = (u_register_t)psci_system_suspend(r1, r2);
+			break;
 		case PSCI_SYSTEM_OFF:
 			psci_system_off();
@@ -422,26 +439,34 @@ u_register_t psci_smc_handler(uint32_t smc_fid,
 			break;
 		case PSCI_FEATURES:
-			return psci_features(x1);
+			ret = (u_register_t)psci_features(r1);
+			break;
 #if ENABLE_PSCI_STAT
 		case PSCI_STAT_RESIDENCY_AARCH32:
-			return psci_stat_residency(x1, x2);
+			ret = psci_stat_residency(r1, r2);
+			break;
 		case PSCI_STAT_COUNT_AARCH32:
-			return psci_stat_count(x1, x2);
+			ret = psci_stat_count(r1, r2);
+			break;
 #endif
 		case PSCI_MEM_PROTECT:
-			return psci_mem_protect(x1);
+			ret = psci_mem_protect(r1);
+			break;
 		case PSCI_MEM_CHK_RANGE_AARCH32:
-			return psci_mem_chk_range(x1, x2);
+			ret = psci_mem_chk_range(r1, r2);
+			break;
 		case PSCI_SYSTEM_RESET2_AARCH32:
 			/* We should never return from psci_system_reset2() */
-			return psci_system_reset2(x1, x2);
+			ret = psci_system_reset2(r1, r2);
+			break;
 		default:
+			WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid);
+			ret = (u_register_t)SMC_UNK;
 			break;
 		}
 	} else {
@@ -449,46 +474,61 @@ u_register_t psci_smc_handler(uint32_t smc_fid,
 		switch (smc_fid) {
 		case PSCI_CPU_SUSPEND_AARCH64:
-			return psci_cpu_suspend(x1, x2, x3);
+			ret = (u_register_t)
+				psci_cpu_suspend((unsigned int)x1, x2, x3);
+			break;
 		case PSCI_CPU_ON_AARCH64:
-			return psci_cpu_on(x1, x2, x3);
+			ret = (u_register_t)psci_cpu_on(x1, x2, x3);
+			break;
 		case PSCI_AFFINITY_INFO_AARCH64:
-			return psci_affinity_info(x1, x2);
+			ret = (u_register_t)
+				psci_affinity_info(x1, (unsigned int)x2);
+			break;
 		case PSCI_MIG_AARCH64:
-			return psci_migrate(x1);
+			ret = (u_register_t)psci_migrate(x1);
+			break;
 		case PSCI_MIG_INFO_UP_CPU_AARCH64:
-			return psci_migrate_info_up_cpu();
+			ret = psci_migrate_info_up_cpu();
+			break;
 		case PSCI_NODE_HW_STATE_AARCH64:
-			return psci_node_hw_state(x1, x2);
+			ret = (u_register_t)psci_node_hw_state(
+					x1, (unsigned int) x2);
+			break;
 		case PSCI_SYSTEM_SUSPEND_AARCH64:
-			return psci_system_suspend(x1, x2);
+			ret = (u_register_t)psci_system_suspend(x1, x2);
+			break;
 #if ENABLE_PSCI_STAT
 		case PSCI_STAT_RESIDENCY_AARCH64:
-			return psci_stat_residency(x1, x2);
+			ret = psci_stat_residency(x1, (unsigned int) x2);
+			break;
 		case PSCI_STAT_COUNT_AARCH64:
-			return psci_stat_count(x1, x2);
+			ret = psci_stat_count(x1, (unsigned int) x2);
+			break;
 #endif
 		case PSCI_MEM_CHK_RANGE_AARCH64:
-			return psci_mem_chk_range(x1, x2);
+			ret = psci_mem_chk_range(x1, x2);
+			break;
 		case PSCI_SYSTEM_RESET2_AARCH64:
 			/* We should never return from psci_system_reset2() */
-			return psci_system_reset2(x1, x2);
+			ret = psci_system_reset2((uint32_t) x1, x2);
+			break;
 		default:
+			WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid);
+			ret = (u_register_t)SMC_UNK;
 			break;
 		}
 	}
-	WARN("Unimplemented PSCI Call: 0x%x \n", smc_fid);
-	return SMC_UNK;
+	return ret;
 }

@@ -267,15 +267,13 @@ void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
 				      psci_power_state_t *target_state);
 int psci_validate_entry_point(entry_point_info_t *ep,
 			uintptr_t entrypoint, u_register_t context_id);
-void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
+void psci_get_parent_pwr_domain_nodes(int cpu_idx,
 				      unsigned int end_lvl,
-				      unsigned int node_index[]);
+				      unsigned int *node_index);
 void psci_do_state_coordination(unsigned int end_pwrlvl,
 				psci_power_state_t *state_info);
-void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
-				   unsigned int cpu_idx);
-void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
-				   unsigned int cpu_idx);
+void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl, int cpu_idx);
+void psci_release_pwr_domain_locks(unsigned int end_pwrlvl, int cpu_idx);
 int psci_validate_suspend_req(const psci_power_state_t *state_info,
 			      unsigned int is_power_down_state);
 unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -32,9 +32,9 @@ unsigned int psci_caps;
 * Function which initializes the 'psci_non_cpu_pd_nodes' or the
 * 'psci_cpu_pd_nodes' corresponding to the power level.
 ******************************************************************************/
-static void psci_init_pwr_domain_node(unsigned int node_idx,
+static void psci_init_pwr_domain_node(unsigned char node_idx,
 				      unsigned int parent_idx,
-				      unsigned int level)
+				      unsigned char level)
 {
 	if (level > PSCI_CPU_PWR_LVL) {
 		psci_non_cpu_pd_nodes[node_idx].level = level;
@@ -82,15 +82,15 @@ static void psci_init_pwr_domain_node(unsigned int node_idx,
 *******************************************************************************/
 static void psci_update_pwrlvl_limits(void)
 {
-	int j;
+	int j, cpu_idx;
 	unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
-	unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;
+	unsigned int temp_index[PLAT_MAX_PWR_LVL];
 	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
 		psci_get_parent_pwr_domain_nodes(cpu_idx,
-						 PLAT_MAX_PWR_LVL,
+						 (unsigned int)PLAT_MAX_PWR_LVL,
						 temp_index);
-		for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
+		for (j = (int) PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
 			if (temp_index[j] != nodes_idx[j]) {
 				nodes_idx[j] = temp_index[j];
 				psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
@@ -109,9 +109,10 @@ static void psci_update_pwrlvl_limits(void)
 ******************************************************************************/
 static void populate_power_domain_tree(const unsigned char *topology)
 {
-	unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
-	unsigned int node_index = 0, parent_node_index = 0, num_children;
-	int level = PLAT_MAX_PWR_LVL;
+	unsigned int i, j = 0U, num_nodes_at_lvl = 1U, num_nodes_at_next_lvl;
+	unsigned int node_index = 0U, num_children;
+	int parent_node_index = 0;
+	int level = (int) PLAT_MAX_PWR_LVL;
 	/*
 	 * For each level the inputs are:
@@ -122,8 +123,8 @@ static void populate_power_domain_tree(const unsigned char *topology)
 	 *  - Index of first free entry in psci_non_cpu_pd_nodes[] or
 	 *    psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
	 */
-	while (level >= PSCI_CPU_PWR_LVL) {
-		num_nodes_at_next_lvl = 0;
+	while (level >= (int) PSCI_CPU_PWR_LVL) {
+		num_nodes_at_next_lvl = 0U;
 		/*
 		 * For each entry (parent node) at this level in the plat_array:
 		 * - Find the number of children
@@ -132,16 +133,16 @@ static void populate_power_domain_tree(const unsigned char *topology)
 		 * - Increment parent_node_index to point to the next parent
 		 * - Accumulate the number of children at next level.
		 */
-		for (i = 0; i < num_nodes_at_lvl; i++) {
+		for (i = 0U; i < num_nodes_at_lvl; i++) {
 			assert(parent_node_index <=
 					PSCI_NUM_NON_CPU_PWR_DOMAINS);
 			num_children = topology[parent_node_index];
 			for (j = node_index;
-				j < node_index + num_children; j++)
-				psci_init_pwr_domain_node(j,
+				j < (node_index + num_children); j++)
+				psci_init_pwr_domain_node((unsigned char)j,
							  parent_node_index - 1,
-							  level);
+							  (unsigned char)level);
 			node_index = j;
 			num_nodes_at_next_lvl += num_children;
@@ -152,12 +153,12 @@ static void populate_power_domain_tree(const unsigned char *topology)
 		level--;
 		/* Reset the index for the cpu power domain array */
-		if (level == PSCI_CPU_PWR_LVL)
+		if (level == (int) PSCI_CPU_PWR_LVL)
 			node_index = 0;
 	}
 	/* Validate the sanity of array exported by the platform */
-	assert(j == PLATFORM_CORE_COUNT);
+	assert((int) j == PLATFORM_CORE_COUNT);
 }
 /*******************************************************************************
@@ -213,8 +214,9 @@ int psci_setup(const psci_lib_args_t *lib_args)
	 */
 	psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
-	plat_setup_psci_ops((uintptr_t)lib_args->mailbox_ep, &psci_plat_pm_ops);
-	assert(psci_plat_pm_ops);
+	(void) plat_setup_psci_ops((uintptr_t)lib_args->mailbox_ep,
+				   &psci_plat_pm_ops);
+	assert(psci_plat_pm_ops != NULL);
 	/*
 	 * Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs
@@ -226,29 +228,29 @@ int psci_setup(const psci_lib_args_t *lib_args)
 	/* Initialize the psci capability */
 	psci_caps = PSCI_GENERIC_CAP;
-	if (psci_plat_pm_ops->pwr_domain_off)
+	if (psci_plat_pm_ops->pwr_domain_off != NULL)
 		psci_caps |= define_psci_cap(PSCI_CPU_OFF);
-	if (psci_plat_pm_ops->pwr_domain_on &&
-	    psci_plat_pm_ops->pwr_domain_on_finish)
+	if ((psci_plat_pm_ops->pwr_domain_on != NULL) &&
+	    (psci_plat_pm_ops->pwr_domain_on_finish != NULL))
 		psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
-	if (psci_plat_pm_ops->pwr_domain_suspend &&
-	    psci_plat_pm_ops->pwr_domain_suspend_finish) {
+	if ((psci_plat_pm_ops->pwr_domain_suspend != NULL) &&
+	    (psci_plat_pm_ops->pwr_domain_suspend_finish != NULL)) {
 		psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
-		if (psci_plat_pm_ops->get_sys_suspend_power_state)
+		if (psci_plat_pm_ops->get_sys_suspend_power_state != NULL)
 			psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
 	}
-	if (psci_plat_pm_ops->system_off)
+	if (psci_plat_pm_ops->system_off != NULL)
 		psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF);
-	if (psci_plat_pm_ops->system_reset)
+	if (psci_plat_pm_ops->system_reset != NULL)
 		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET);
-	if (psci_plat_pm_ops->get_node_hw_state)
+	if (psci_plat_pm_ops->get_node_hw_state != NULL)
 		psci_caps |= define_psci_cap(PSCI_NODE_HW_STATE_AARCH64);
-	if (psci_plat_pm_ops->read_mem_protect &&
-	    psci_plat_pm_ops->write_mem_protect)
+	if ((psci_plat_pm_ops->read_mem_protect != NULL) &&
+	    (psci_plat_pm_ops->write_mem_protect != NULL))
 		psci_caps |= define_psci_cap(PSCI_MEM_PROTECT);
-	if (psci_plat_pm_ops->mem_protect_chk)
+	if (psci_plat_pm_ops->mem_protect_chk != NULL)
 		psci_caps |= define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64);
-	if (psci_plat_pm_ops->system_reset2)
+	if (psci_plat_pm_ops->system_reset2 != NULL)
 		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64);
 #if ENABLE_PSCI_STAT
@@ -266,7 +268,7 @@ int psci_setup(const psci_lib_args_t *lib_args)
 ******************************************************************************/
 void psci_arch_setup(void)
 {
-#if ARM_ARCH_MAJOR > 7 || defined(ARMV7_SUPPORTS_GENERIC_TIMER)
+#if (ARM_ARCH_MAJOR > 7) || defined(ARMV7_SUPPORTS_GENERIC_TIMER)
 	/* Program the counter frequency */
 	write_cntfrq_el0(plat_get_syscnt_freq2());
 #endif