PSCI: Rework generic code to conform to coding guidelines

This patch reworks the PSCI generic implementation to conform to ARM
Trusted Firmware coding guidelines as described here:
https://github.com/ARM-software/arm-trusted-firmware/wiki

This patch also reviews the use of signed data types within the PSCI
generic code and replaces them with their unsigned counterparts wherever
a signed type is not appropriate. The PSCI_INVALID_DATA macro, which was
defined as -1, is replaced by the PSCI_INVALID_PWR_LVL macro, which is
defined as PLAT_MAX_PWR_LVL + 1.

Change-Id: Iaea422d0e46fc314e0b173c2b4c16e0d56b2515a
Authored by Soby Mathew on 2015-07-29 17:05:03 +01:00; committed by Achin Gupta
parent 58523c076a
commit 9d070b9928
10 changed files with 115 additions and 99 deletions
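
For illustration, a minimal sketch of the two changes described above. The
PLAT_MAX_PWR_LVL value and the pick_target_pwrlvl() helper are hypothetical
and only stand in for the real platform definition and the generic code; the
sentinel macro and the fallback to PLAT_MAX_PWR_LVL mirror the diff below.

/* Illustrative value only; each platform defines PLAT_MAX_PWR_LVL itself. */
#define PLAT_MAX_PWR_LVL	2

/* The old sentinel was negative, forcing signed types everywhere:
 *   #define PSCI_INVALID_DATA  -1
 * The new sentinel is one past the highest valid level, so power levels can
 * be held in unsigned variables. */
#define PSCI_INVALID_PWR_LVL	(PLAT_MAX_PWR_LVL + 1)

/* Hypothetical helper mirroring get_power_on_target_pwrlvl() below: an
 * invalid suspend level means "power down to the maximum level". */
static unsigned int pick_target_pwrlvl(unsigned int suspend_pwrlvl)
{
	if (suspend_pwrlvl == PSCI_INVALID_PWR_LVL)
		return PLAT_MAX_PWR_LVL;
	return suspend_pwrlvl;
}

/* Register-width arguments likewise move from unsigned long to
 * u_register_t / uintptr_t, for example (taken from the diff):
 *   int psci_cpu_on(u_register_t target_cpu, uintptr_t entrypoint,
 *                   u_register_t context_id);
 */

Keeping the sentinel one above every valid level means comparisons such as
pwrlvl <= PLAT_MAX_PWR_LVL remain plain unsigned arithmetic, with no mixed
signed/unsigned comparisons for the compiler to warn about.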


@ -167,7 +167,7 @@
#define PSCI_E_DISABLED -8
#define PSCI_E_INVALID_ADDRESS -9
#define PSCI_INVALID_MPIDR ~(0ULL)
#define PSCI_INVALID_MPIDR ~((u_register_t)0)
#ifndef __ASSEMBLY__
@ -188,7 +188,7 @@ typedef enum {
/*
* Macro to represent invalid affinity level within PSCI.
*/
#define PSCI_INVALID_DATA -1
#define PSCI_INVALID_PWR_LVL (PLAT_MAX_PWR_LVL + 1)
/*
* Type for representing the local power state at a particular level.
@ -242,11 +242,13 @@ typedef struct psci_power_state {
typedef struct psci_cpu_data {
/* State as seen by PSCI Affinity Info API */
aff_info_state_t aff_info_state;
/*
* Highest power level which takes part in a power management
* operation.
*/
int8_t target_pwrlvl;
unsigned char target_pwrlvl;
/* The local power state of this CPU */
plat_local_state_t local_state;
#if !USE_COHERENT_MEM
@ -270,7 +272,7 @@ typedef struct plat_psci_ops {
void (*system_reset)(void) __dead2;
int (*validate_power_state)(unsigned int power_state,
psci_power_state_t *req_state);
int (*validate_ns_entrypoint)(unsigned long ns_entrypoint);
int (*validate_ns_entrypoint)(uintptr_t ns_entrypoint);
void (*get_sys_suspend_power_state)(
psci_power_state_t *req_state);
} plat_psci_ops_t;
@ -297,17 +299,23 @@ typedef struct spd_pm_ops {
* Function & Data prototypes
******************************************************************************/
unsigned int psci_version(void);
int psci_affinity_info(unsigned long, unsigned int);
int psci_migrate(unsigned long);
int psci_cpu_on(u_register_t target_cpu,
uintptr_t entrypoint,
u_register_t context_id);
int psci_cpu_suspend(unsigned int power_state,
uintptr_t entrypoint,
u_register_t context_id);
int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id);
int psci_cpu_off(void);
int psci_affinity_info(u_register_t target_affinity,
unsigned int lowest_affinity_level);
int psci_migrate(u_register_t target_cpu);
int psci_migrate_info_type(void);
long psci_migrate_info_up_cpu(void);
int psci_cpu_on(unsigned long,
unsigned long,
unsigned long);
int psci_features(unsigned int psci_fid);
void __dead2 psci_power_down_wfi(void);
void psci_entrypoint(void);
void psci_register_spd_pm_hook(const spd_pm_ops_t *);
uint64_t psci_smc_handler(uint32_t smc_fid,
uint64_t x1,
uint64_t x2,
@ -318,7 +326,7 @@ uint64_t psci_smc_handler(uint32_t smc_fid,
uint64_t flags);
/* PSCI setup function */
int32_t psci_setup(void);
int psci_setup(void);
#endif /*__ASSEMBLY__*/


@ -65,6 +65,10 @@
#define PLAT_MAX_RET_STATE 1
#define PLAT_MAX_OFF_STATE 2
/*
* Macro to represent invalid affinity level within PSCI.
*/
#define PSCI_INVALID_DATA -1
#define psci_get_pstate_afflvl(pstate) psci_get_pstate_pwrlvl(pstate)


@ -189,9 +189,9 @@ unsigned int psci_is_last_on_cpu(void)
* been physically powered up. It is expected to be called immediately after
* reset from assembler code.
******************************************************************************/
static int get_power_on_target_pwrlvl(void)
static unsigned int get_power_on_target_pwrlvl(void)
{
int pwrlvl;
unsigned int pwrlvl;
/*
* Assume that this cpu was suspended and retrieve its target power
@ -200,7 +200,7 @@ static int get_power_on_target_pwrlvl(void)
* cpu can be turned off to.
*/
pwrlvl = psci_get_suspend_pwrlvl();
if (pwrlvl == PSCI_INVALID_DATA)
if (pwrlvl == PSCI_INVALID_PWR_LVL)
pwrlvl = PLAT_MAX_PWR_LVL;
return pwrlvl;
}
@ -236,8 +236,8 @@ void psci_init_req_local_pwr_states(void)
* target state for this power domain during psci state coordination. An
* assertion is added to prevent us from accessing the CPU power level.
*****************************************************************************/
static plat_local_state_t *psci_get_req_local_pwr_states(int pwrlvl,
int cpu_idx)
static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
unsigned int cpu_idx)
{
assert(pwrlvl > PSCI_CPU_PWR_LVL);
@ -250,11 +250,10 @@ static plat_local_state_t *psci_get_req_local_pwr_states(int pwrlvl,
* function will be called after a cpu is powered on to find the local state
* each power domain has emerged from.
*****************************************************************************/
static void psci_get_target_local_pwr_states(uint32_t end_pwrlvl,
static void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
psci_power_state_t *target_state)
{
int lvl;
unsigned int parent_idx;
unsigned int parent_idx, lvl;
plat_local_state_t *pd_state = target_state->pwr_domain_state;
pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
@ -270,7 +269,7 @@ static void psci_get_target_local_pwr_states(uint32_t end_pwrlvl,
* code runs before caches are enabled.
*/
flush_dcache_range(
(uint64_t)&psci_non_cpu_pd_nodes[parent_idx],
(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
pd_state[lvl] = psci_non_cpu_pd_nodes[parent_idx].local_state;
@ -288,11 +287,10 @@ static void psci_get_target_local_pwr_states(uint32_t end_pwrlvl,
* enter. This function will be called after coordination of requested power
* states has been done for each power level.
*****************************************************************************/
static void psci_set_target_local_pwr_states(uint32_t end_pwrlvl,
static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
const psci_power_state_t *target_state)
{
int lvl;
unsigned int parent_idx;
unsigned int parent_idx, lvl;
const plat_local_state_t *pd_state = target_state->pwr_domain_state;
psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);
@ -310,8 +308,8 @@ static void psci_set_target_local_pwr_states(uint32_t end_pwrlvl,
psci_non_cpu_pd_nodes[parent_idx].local_state = pd_state[lvl];
#if !USE_COHERENT_MEM
flush_dcache_range(
(uint64_t)&psci_non_cpu_pd_nodes[parent_idx],
sizeof(psci_non_cpu_pd_nodes[parent_idx]));
(uintptr_t)&psci_non_cpu_pd_nodes[parent_idx],
sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
}
@ -322,7 +320,7 @@ static void psci_set_target_local_pwr_states(uint32_t end_pwrlvl,
* PSCI helper function to get the parent nodes corresponding to a cpu_index.
******************************************************************************/
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
int end_lvl,
unsigned int end_lvl,
unsigned int node_index[])
{
unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
@ -339,10 +337,9 @@ void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
* affinity info state, target power state and requested power state for the
* current CPU and all its ancestor power domains to RUN.
*****************************************************************************/
void psci_set_pwr_domains_to_run(uint32_t end_pwrlvl)
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
{
int lvl;
unsigned int parent_idx, cpu_idx = plat_my_core_pos();
unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;
parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
/* Reset the local_state to RUN for the non cpu power domains. */
@ -351,7 +348,7 @@ void psci_set_pwr_domains_to_run(uint32_t end_pwrlvl)
PSCI_LOCAL_STATE_RUN;
#if !USE_COHERENT_MEM
flush_dcache_range(
(uint64_t)&psci_non_cpu_pd_nodes[parent_idx],
(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
psci_set_req_local_pwr_state(lvl,
@ -387,7 +384,8 @@ void psci_set_pwr_domains_to_run(uint32_t end_pwrlvl)
* This function will only be invoked with data cache enabled and while
* powering down a core.
*****************************************************************************/
void psci_do_state_coordination(int end_pwrlvl, psci_power_state_t *state_info)
void psci_do_state_coordination(unsigned int end_pwrlvl,
psci_power_state_t *state_info)
{
unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
unsigned int start_idx, ncpus;
@ -463,7 +461,7 @@ int psci_validate_suspend_req(const psci_power_state_t *state_info,
/* Find the target suspend power level */
target_lvl = psci_find_target_suspend_lvl(state_info);
if (target_lvl == PSCI_INVALID_DATA)
if (target_lvl == PSCI_INVALID_PWR_LVL)
return PSCI_E_INVALID_PARAMS;
/* All power domain levels are in a RUN state to begin with */
@ -489,7 +487,7 @@ int psci_validate_suspend_req(const psci_power_state_t *state_info,
max_off_lvl = psci_find_max_off_lvl(state_info);
/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
max_retn_lvl = PSCI_INVALID_DATA;
max_retn_lvl = PSCI_INVALID_PWR_LVL;
if (target_lvl != max_off_lvl)
max_retn_lvl = target_lvl;
@ -498,8 +496,8 @@ int psci_validate_suspend_req(const psci_power_state_t *state_info,
* has to be invalid and max retention level has to be a valid power
* level.
*/
if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_DATA ||
max_retn_lvl == PSCI_INVALID_DATA))
if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_PWR_LVL ||
max_retn_lvl == PSCI_INVALID_PWR_LVL))
return PSCI_E_INVALID_PARAMS;
return PSCI_E_SUCCESS;
@ -518,7 +516,7 @@ unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
return i;
}
return PSCI_INVALID_DATA;
return PSCI_INVALID_PWR_LVL;
}
/******************************************************************************
@ -534,7 +532,7 @@ unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
return i;
}
return PSCI_INVALID_DATA;
return PSCI_INVALID_PWR_LVL;
}
/*******************************************************************************
@ -542,10 +540,11 @@ unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
* tree that the operation should be applied to. It picks up locks in order of
* increasing power domain level in the range specified.
******************************************************************************/
void psci_acquire_pwr_domain_locks(int end_pwrlvl, unsigned int cpu_idx)
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
unsigned int cpu_idx)
{
unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
int level;
unsigned int level;
/* No locking required for level 0. Hence start locking from level 1 */
for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
@ -559,7 +558,8 @@ void psci_acquire_pwr_domain_locks(int end_pwrlvl, unsigned int cpu_idx)
* tree that the operation should be applied to. It releases the locks in order
* of decreasing power domain level in the range specified.
******************************************************************************/
void psci_release_pwr_domain_locks(int end_pwrlvl, unsigned int cpu_idx)
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
unsigned int cpu_idx)
{
unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0};
int level;
@ -577,7 +577,7 @@ void psci_release_pwr_domain_locks(int end_pwrlvl, unsigned int cpu_idx)
/*******************************************************************************
* Simple routine to determine whether a mpidr is valid or not.
******************************************************************************/
int psci_validate_mpidr(unsigned long mpidr)
int psci_validate_mpidr(u_register_t mpidr)
{
if (plat_core_pos_by_mpidr(mpidr) < 0)
return PSCI_E_INVALID_PARAMS;
@ -590,11 +590,13 @@ int psci_validate_mpidr(unsigned long mpidr)
* PSCI entrypoint on power on/resume and returns it.
******************************************************************************/
static int psci_get_ns_ep_info(entry_point_info_t *ep,
uint64_t entrypoint, uint64_t context_id)
uintptr_t entrypoint,
u_register_t context_id)
{
uint32_t ep_attr, mode, sctlr, daif, ee;
uint32_t ns_scr_el3 = read_scr_el3();
uint32_t ns_sctlr_el1 = read_sctlr_el1();
unsigned long ep_attr, sctlr;
unsigned int daif, ee, mode;
unsigned long ns_scr_el3 = read_scr_el3();
unsigned long ns_sctlr_el1 = read_sctlr_el1();
sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
ee = 0;
@ -648,7 +650,8 @@ static int psci_get_ns_ep_info(entry_point_info_t *ep,
* 'entry_point_info'.
******************************************************************************/
int psci_validate_entry_point(entry_point_info_t *ep,
uint64_t entrypoint, uint64_t context_id)
uintptr_t entrypoint,
u_register_t context_id)
{
int rc;
@ -679,9 +682,8 @@ int psci_validate_entry_point(entry_point_info_t *ep,
******************************************************************************/
void psci_power_up_finish(void)
{
unsigned int cpu_idx = plat_my_core_pos();
unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
int end_pwrlvl;
/*
* Verify that we have been explicitly turned ON or resumed from
@ -764,7 +766,7 @@ void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
* is resident through the mpidr parameter. Else the value of the parameter on
* return is undefined.
******************************************************************************/
int psci_spd_migrate_info(uint64_t *mpidr)
int psci_spd_migrate_info(u_register_t *mpidr)
{
int rc;


@ -37,7 +37,7 @@
.globl psci_do_pwrup_cache_maintenance
/* -----------------------------------------------------------------------
* void psci_do_pwrdown_cache_maintenance(uint32_t power level);
* void psci_do_pwrdown_cache_maintenance(unsigned int power level);
*
* This function performs cache maintenance for the specified power
* level. The levels of cache affected are determined by the power
@ -66,7 +66,7 @@ func psci_do_pwrdown_cache_maintenance
* platform.
* ---------------------------------------------
*/
cmp x0, #PSCI_CPU_PWR_LVL
cmp w0, #PSCI_CPU_PWR_LVL
b.eq do_core_pwr_dwn
bl prepare_cluster_pwr_dwn
b do_stack_maintenance


@ -41,9 +41,9 @@
/*******************************************************************************
* PSCI frontend api for servicing SMCs. Described in the PSCI spec.
******************************************************************************/
int psci_cpu_on(unsigned long target_cpu,
unsigned long entrypoint,
unsigned long context_id)
int psci_cpu_on(u_register_t target_cpu,
uintptr_t entrypoint,
u_register_t context_id)
{
int rc;
@ -77,8 +77,8 @@ unsigned int psci_version(void)
}
int psci_cpu_suspend(unsigned int power_state,
unsigned long entrypoint,
unsigned long context_id)
uintptr_t entrypoint,
u_register_t context_id)
{
int rc;
unsigned int target_pwrlvl, is_power_down_state;
@ -147,8 +147,8 @@ int psci_cpu_suspend(unsigned int power_state,
return PSCI_E_SUCCESS;
}
int psci_system_suspend(unsigned long entrypoint,
unsigned long context_id)
int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id)
{
int rc;
psci_power_state_t state_info;
@ -188,7 +188,7 @@ int psci_system_suspend(unsigned long entrypoint,
int psci_cpu_off(void)
{
int rc;
int target_pwrlvl = PLAT_MAX_PWR_LVL;
unsigned int target_pwrlvl = PLAT_MAX_PWR_LVL;
/*
* Do what is needed to power off this CPU and possible higher power
@ -206,7 +206,7 @@ int psci_cpu_off(void)
return rc;
}
int psci_affinity_info(unsigned long target_affinity,
int psci_affinity_info(u_register_t target_affinity,
unsigned int lowest_affinity_level)
{
unsigned int target_idx;
@ -223,10 +223,10 @@ int psci_affinity_info(unsigned long target_affinity,
return psci_get_aff_info_state_by_idx(target_idx);
}
int psci_migrate(unsigned long target_cpu)
int psci_migrate(u_register_t target_cpu)
{
int rc;
unsigned long resident_cpu_mpidr;
u_register_t resident_cpu_mpidr;
rc = psci_spd_migrate_info(&resident_cpu_mpidr);
if (rc != PSCI_TOS_UP_MIG_CAP)
@ -255,14 +255,14 @@ int psci_migrate(unsigned long target_cpu)
int psci_migrate_info_type(void)
{
unsigned long resident_cpu_mpidr;
u_register_t resident_cpu_mpidr;
return psci_spd_migrate_info(&resident_cpu_mpidr);
}
long psci_migrate_info_up_cpu(void)
{
unsigned long resident_cpu_mpidr;
u_register_t resident_cpu_mpidr;
int rc;
/*
@ -278,7 +278,7 @@ long psci_migrate_info_up_cpu(void)
int psci_features(unsigned int psci_fid)
{
uint32_t local_caps = psci_caps;
unsigned int local_caps = psci_caps;
/* Check if it is a 64 bit function */
if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)


@ -60,7 +60,7 @@ static void psci_set_power_off_state(psci_power_state_t *state_info)
* interconnect level if the cpu is the last in the cluster and also the
* program the power controller.
******************************************************************************/
int psci_do_cpu_off(int end_pwrlvl)
int psci_do_cpu_off(unsigned int end_pwrlvl)
{
int rc, idx = plat_my_core_pos();
psci_power_state_t state_info;


@ -84,9 +84,9 @@ static void psci_set_aff_info_state_by_idx(unsigned int cpu_idx,
* The state of all the relevant power domains are changed after calling the
* platform handler as it can return error.
******************************************************************************/
int psci_cpu_on_start(unsigned long target_cpu,
int psci_cpu_on_start(u_register_t target_cpu,
entry_point_info_t *ep,
int end_pwrlvl)
unsigned int end_pwrlvl)
{
int rc;
unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu);
@ -130,7 +130,7 @@ int psci_cpu_on_start(unsigned long target_cpu,
* of the target cpu to allow it to perform the necessary
* steps to power on.
*/
rc = psci_plat_pm_ops->pwr_domain_on((u_register_t)target_cpu);
rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
if (rc == PSCI_E_SUCCESS)


@ -149,7 +149,7 @@ typedef struct non_cpu_pwr_domain_node {
} non_cpu_pd_node_t;
typedef struct cpu_pwr_domain_node {
unsigned long mpidr;
u_register_t mpidr;
/*
* Index of the parent power domain node.
@ -172,7 +172,7 @@ typedef struct cpu_pwr_domain_node {
extern const plat_psci_ops_t *psci_plat_pm_ops;
extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
extern uint32_t psci_caps;
extern unsigned int psci_caps;
/*******************************************************************************
* SPD's power management hooks registered with PSCI
@ -186,43 +186,43 @@ extern const spd_pm_ops_t *psci_spd_pm;
int psci_validate_power_state(unsigned int power_state,
psci_power_state_t *state_info);
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
int psci_validate_mpidr(unsigned long mpidr);
int psci_validate_mpidr(u_register_t mpidr);
void psci_init_req_local_pwr_states(void);
void psci_power_up_finish(void);
int psci_validate_entry_point(entry_point_info_t *ep,
uint64_t entrypoint, uint64_t context_id);
uintptr_t entrypoint, u_register_t context_id);
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
int end_lvl,
unsigned int end_lvl,
unsigned int node_index[]);
void psci_do_state_coordination(int end_pwrlvl,
void psci_do_state_coordination(unsigned int end_pwrlvl,
psci_power_state_t *state_info);
void psci_acquire_pwr_domain_locks(int end_pwrlvl,
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
unsigned int cpu_idx);
void psci_release_pwr_domain_locks(int end_pwrlvl,
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
unsigned int cpu_idx);
int psci_validate_suspend_req(const psci_power_state_t *state_info,
unsigned int is_power_down_state_req);
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
void psci_set_pwr_domains_to_run(uint32_t end_pwrlvl);
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl);
void psci_print_power_domain_map(void);
unsigned int psci_is_last_on_cpu(void);
int psci_spd_migrate_info(uint64_t *mpidr);
int psci_spd_migrate_info(u_register_t *mpidr);
/* Private exported functions from psci_on.c */
int psci_cpu_on_start(unsigned long target_cpu,
entry_point_info_t *ep,
int end_pwrlvl);
unsigned int end_pwrlvl);
void psci_cpu_on_finish(unsigned int cpu_idx,
psci_power_state_t *state_info);
/* Private exported functions from psci_cpu_off.c */
int psci_do_cpu_off(int end_pwrlvl);
int psci_do_cpu_off(unsigned int end_pwrlvl);
/* Private exported functions from psci_pwrlvl_suspend.c */
void psci_cpu_suspend_start(entry_point_info_t *ep,
int end_pwrlvl,
unsigned int end_pwrlvl,
psci_power_state_t *state_info,
unsigned int is_power_down_state_req);
@ -230,7 +230,7 @@ void psci_cpu_suspend_finish(unsigned int cpu_idx,
psci_power_state_t *state_info);
/* Private exported functions from psci_helpers.S */
void psci_do_pwrdown_cache_maintenance(uint32_t pwr_level);
void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
void psci_do_pwrup_cache_maintenance(void);
/* Private exported functions from psci_system_off.c */


@ -49,13 +49,15 @@ static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];
/******************************************************************************
* Define the psci capability variable.
*****************************************************************************/
uint32_t psci_caps;
unsigned int psci_caps;
/*******************************************************************************
* Function which initializes the 'psci_non_cpu_pd_nodes' or the
* 'psci_cpu_pd_nodes' corresponding to the power level.
******************************************************************************/
static void psci_init_pwr_domain_node(int node_idx, int parent_idx, int level)
static void psci_init_pwr_domain_node(unsigned int node_idx,
unsigned int parent_idx,
unsigned int level)
{
if (level > PSCI_CPU_PWR_LVL) {
psci_non_cpu_pd_nodes[node_idx].level = level;
@ -78,12 +80,12 @@ static void psci_init_pwr_domain_node(int node_idx, int parent_idx, int level)
svc_cpu_data->aff_info_state = AFF_STATE_OFF;
/* Invalidate the suspend level for the cpu */
svc_cpu_data->target_pwrlvl = PSCI_INVALID_DATA;
svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;
/* Set the power state to OFF state */
svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
flush_dcache_range((uint64_t)svc_cpu_data,
flush_dcache_range((uintptr_t)svc_cpu_data,
sizeof(*svc_cpu_data));
cm_set_context_by_index(node_idx,
@ -103,9 +105,9 @@ static void psci_init_pwr_domain_node(int node_idx, int parent_idx, int level)
*******************************************************************************/
static void psci_update_pwrlvl_limits(void)
{
int cpu_idx, j;
int j;
unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
unsigned int temp_index[PLAT_MAX_PWR_LVL];
unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;
for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
psci_get_parent_pwr_domain_nodes(cpu_idx,
@ -182,7 +184,7 @@ static void populate_power_domain_tree(const unsigned char *topology)
#if !USE_COHERENT_MEM
/* Flush the non CPU power domain data to memory */
flush_dcache_range((uint64_t) &psci_non_cpu_pd_nodes,
flush_dcache_range((uintptr_t) &psci_non_cpu_pd_nodes,
sizeof(psci_non_cpu_pd_nodes));
#endif
}
@ -208,7 +210,7 @@ static void populate_power_domain_tree(const unsigned char *topology)
* | CPU 0 | CPU 1 | CPU 2 | CPU 3 |
* ------------------------------------------------
******************************************************************************/
int32_t psci_setup(void)
int psci_setup(void)
{
const unsigned char *topology_tree;
@ -230,11 +232,11 @@ int32_t psci_setup(void)
* The psci_non_cpu_pd_nodes only needs flushing when it's not allocated in
* coherent memory.
*/
flush_dcache_range((uint64_t) &psci_non_cpu_pd_nodes,
flush_dcache_range((uintptr_t) &psci_non_cpu_pd_nodes,
sizeof(psci_non_cpu_pd_nodes));
#endif
flush_dcache_range((uint64_t) &psci_cpu_pd_nodes,
flush_dcache_range((uintptr_t) &psci_cpu_pd_nodes,
sizeof(psci_cpu_pd_nodes));
psci_init_req_local_pwr_states();


@ -72,7 +72,7 @@ static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
* This function does generic and platform specific suspend to power down
* operations.
******************************************************************************/
static void psci_suspend_to_pwrdown_start(int end_pwrlvl,
static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
entry_point_info_t *ep,
psci_power_state_t *state_info)
{
@ -127,7 +127,7 @@ static void psci_suspend_to_pwrdown_start(int end_pwrlvl,
* not possible to undo any of the actions taken beyond that point.
******************************************************************************/
void psci_cpu_suspend_start(entry_point_info_t *ep,
int end_pwrlvl,
unsigned int end_pwrlvl,
psci_power_state_t *state_info,
unsigned int is_power_down_state)
{
@ -212,8 +212,8 @@ exit:
void psci_cpu_suspend_finish(unsigned int cpu_idx,
psci_power_state_t *state_info)
{
int32_t suspend_level;
uint64_t counter_freq;
unsigned long long counter_freq;
unsigned int suspend_level;
/* Ensure we have been woken up from a suspended state */
assert(psci_get_aff_info_state() == AFF_STATE_ON && is_local_state_off(\
@ -246,12 +246,12 @@ void psci_cpu_suspend_finish(unsigned int cpu_idx,
*/
if (psci_spd_pm && psci_spd_pm->svc_suspend) {
suspend_level = psci_get_suspend_pwrlvl();
assert (suspend_level != PSCI_INVALID_DATA);
assert (suspend_level != PSCI_INVALID_PWR_LVL);
psci_spd_pm->svc_suspend_finish(suspend_level);
}
/* Invalidate the suspend level for the cpu */
psci_set_suspend_pwrlvl(PSCI_INVALID_DATA);
psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);
/*
* Generic management: Now we just need to retrieve the