Merge pull request #511 from soby-mathew/sm/psci_on_race_v2

Fix PSCI CPU ON race when setting state to ON_PENDING
danh-arm 2016-02-01 19:10:02 +00:00
commit dbc807179f
3 changed files with 29 additions and 20 deletions
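The race being fixed: psci_cpu_on_start() on the calling CPU updates the target's aff_info_state to ON_PENDING through the data cache, while the target CPU, completing CPU_OFF with its data cache disabled, writes AFF_STATE_OFF straight to main memory and then performs cache maintenance on the same line. If the target's invalidation lands after the caller's update, the ON_PENDING value can be discarded and a later read returns OFF. A schematic interleaving of the failing case (illustrative only, not code from the tree):

    /*
     * Caller CPU (caches on),                Target CPU (caches off),
     * psci_cpu_on_start():                   psci_do_cpu_off():
     *
     *   aff_info_state = ON_PENDING (cached)
     *   clean/flush the line to memory
     *                                          aff_info_state = OFF
     *                                          (store goes straight to memory)
     *                                          invalidate the cache line
     *   later read of aff_info_state
     *   observes OFF: ON_PENDING is lost
     */

The commit closes the window from both sides: the CPU_OFF path orders its memory update before the line invalidation with a dsbish(), and the CPU_ON path re-reads the state after flushing and retries the update once if it was discarded.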


@@ -129,10 +129,13 @@ exit:
* Set the affinity info state to OFF. This writes directly to
* main memory as caches are disabled, so cache maintenance is
* required to ensure that later cached reads of aff_info_state
* return AFF_STATE_OFF.
* return AFF_STATE_OFF. A dsbish() ensures ordering of the
* update to the affinity info state prior to cache line
* invalidation.
*/
flush_cpu_data(psci_svc_cpu_data.aff_info_state);
psci_set_aff_info_state(AFF_STATE_OFF);
dsbish();
inv_cpu_data(psci_svc_cpu_data.aff_info_state);
/*
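On the powering-down side the ordering of the three operations is what matters: the store bypasses the disabled data cache, the dsbish() makes sure that store has completed before any further cache maintenance, and the line is invalidated rather than cleaned so that a stale cached copy cannot mask the value now in main memory. A minimal annotated restatement of the sequence above, using the same helpers that appear in this hunk (a sketch for readability, not new API):

    psci_set_aff_info_state(AFF_STATE_OFF);         /* caches are off: the store lands in main memory */
    dsbish();                                       /* order the store before the maintenance below
                                                       (DSB, Inner Shareable) */
    inv_cpu_data(psci_svc_cpu_data.aff_info_state); /* drop any stale cached copy so later cached
                                                       reads of aff_info_state return AFF_STATE_OFF */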


@@ -56,24 +56,6 @@ static int cpu_on_validate_state(aff_info_state_t aff_state)
return PSCI_E_SUCCESS;
}
/*******************************************************************************
* This function sets the aff_info_state in the per-cpu data of the CPU
* specified by cpu_idx
******************************************************************************/
static void psci_set_aff_info_state_by_idx(unsigned int cpu_idx,
aff_info_state_t aff_state)
{
set_cpu_data_by_index(cpu_idx,
psci_svc_cpu_data.aff_info_state,
aff_state);
/*
* Flush aff_info_state as it will be accessed with caches turned OFF.
*/
flush_cpu_data_by_index(cpu_idx, psci_svc_cpu_data.aff_info_state);
}
/*******************************************************************************
* Generic handler which is called to physically power on a cpu identified by
* its mpidr. It performs the generic, architectural, platform setup and state
@@ -90,6 +72,7 @@ int psci_cpu_on_start(u_register_t target_cpu,
{
int rc;
unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu);
aff_info_state_t target_aff_state;
/*
* This function must only be called on platforms where the
@@ -119,8 +102,26 @@ int psci_cpu_on_start(u_register_t target_cpu,
/*
* Set the Affinity info state of the target cpu to ON_PENDING.
* Flush aff_info_state as it will be accessed with caches
* turned OFF.
*/
psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
/*
* The cache line invalidation by the target CPU after setting the
* state to OFF (see psci_do_cpu_off()), could cause the update to
* aff_info_state to be invalidated. Retry the update if the target
* CPU aff_info_state is not ON_PENDING.
*/
target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
if (target_aff_state != AFF_STATE_ON_PENDING) {
assert(target_aff_state == AFF_STATE_OFF);
psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
assert(psci_get_aff_info_state_by_idx(target_idx) == AFF_STATE_ON_PENDING);
}
/*
* Perform generic, architecture and platform specific handling.
@@ -136,9 +137,11 @@ int psci_cpu_on_start(u_register_t target_cpu,
if (rc == PSCI_E_SUCCESS)
/* Store the re-entry information for the non-secure world. */
cm_init_context_by_index(target_idx, ep);
else
else {
/* Restore the state on error. */
psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
}
exit:
psci_spin_unlock_cpu(target_idx);
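The check-and-retry added above relies on the target performing its OFF-write-plus-invalidate sequence only once on the way down: if the caller reads back OFF after its own flush, repeating the update is expected to stick, which is what the final assert checks. Condensed, with the same helpers used in the hunk (restated here only for readability):

    /* Publish ON_PENDING; the explicit flush is now the caller's job, since the
     * setter was reduced to a plain macro (see the psci_private.h hunk below). */
    psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
    flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);

    /* Re-read: if the target's invalidation in psci_do_cpu_off() discarded the
     * update, the state reads back as OFF. Write and flush once more and check
     * that ON_PENDING stuck. */
    aff_info_state_t target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
    if (target_aff_state != AFF_STATE_ON_PENDING) {
        assert(target_aff_state == AFF_STATE_OFF);
        psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
        flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
        assert(psci_get_aff_info_state_by_idx(target_idx) == AFF_STATE_ON_PENDING);
    }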


@@ -78,6 +78,9 @@
get_cpu_data(psci_svc_cpu_data.aff_info_state)
#define psci_get_aff_info_state_by_idx(idx) \
get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state)
#define psci_set_aff_info_state_by_idx(idx, aff_state) \
set_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state,\
aff_state)
#define psci_get_suspend_pwrlvl() \
get_cpu_data(psci_svc_cpu_data.target_pwrlvl)
#define psci_set_suspend_pwrlvl(target_lvl) \