Tegra186: mce: fix MISRA defects

Main fixes:

* Added explicit unsigned suffixes/casts (e.g. 0U) to integer constants so
  that they match the type of the operation they are used in [Rule 10.1]
* Forced operands of an operator into the same essential type category
  [Rule 10.4]
* Added curly braces ({}) around if/while bodies to make them compound
  statements [Rule 15.6]
* Added parentheses to make operator precedence explicit [Rule 12.1]
* Voided calls to non-C-library functions whose return values are not
  used [Rule 17.7]
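
A minimal, hypothetical sketch (not taken from this patch; the names
extract_field, FIELD_MASK and FIELD_SHIFT are invented for illustration)
showing the shape of these fixes:

    #include <stdint.h>
    #include <stdio.h>

    /* unsigned literals keep both operands in the same type category [10.1, 10.4] */
    #define FIELD_MASK   0xFFU
    #define FIELD_SHIFT  8U

    static int32_t extract_field(uint32_t reg, uint32_t *out)
    {
        int32_t ret = 0;

        if (out == NULL) {              /* braces make the body compound [15.6] */
            ret = -1;
        } else {
            /* parentheses make the shift-then-mask order explicit [12.1] */
            *out = ((reg >> FIELD_SHIFT) & FIELD_MASK);
        }

        return ret;
    }

    int main(void)
    {
        uint32_t field = 0U;

        /* a return value that is deliberately ignored is cast to void [17.7] */
        (void)extract_field(0x1234U, &field);

        printf("field = 0x%x\n", (unsigned int)field);

        return 0;
    }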

Change-Id: I91404edec2e2194b1ce2672d2a3fc6a1f5bf41f1
Signed-off-by: Anthony Zhou <anzhou@nvidia.com>
Signed-off-by: Varun Wadekar <vwadekar@nvidia.com>
Anthony Zhou 2017-03-06 16:06:45 +08:00 committed by Varun Wadekar
parent 3436089d67
commit ab712fd86b
5 changed files with 604 additions and 553 deletions


@@ -63,14 +63,14 @@ typedef struct mce_cstate_info {
} mce_cstate_info_t;
/* public interfaces */
int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
int mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
uint64_t arg2);
int mce_update_reset_vector(void);
int mce_update_gsc_videomem(void);
int mce_update_gsc_tzdram(void);
int mce_update_gsc_tzram(void);
__dead2 void mce_enter_ccplex_state(uint32_t state_idx);
void mce_update_cstate_info(mce_cstate_info_t *cstate);
void mce_update_cstate_info(const mce_cstate_info_t *cstate);
void mce_verify_firmware_version(void);
#endif /* __MCE_H__ */


@@ -14,127 +14,68 @@
* Macros to prepare CSTATE info request
******************************************************************************/
/* Description of the parameters for UPDATE_CSTATE_INFO request */
#define CLUSTER_CSTATE_MASK 0x7ULL
#define CLUSTER_CSTATE_SHIFT 0U
#define CLUSTER_CSTATE_UPDATE_BIT (1ULL << 7)
#define CCPLEX_CSTATE_MASK 0x3ULL
#define CCPLEX_CSTATE_SHIFT 8ULL
#define CCPLEX_CSTATE_UPDATE_BIT (1ULL << 15)
#define SYSTEM_CSTATE_MASK 0xFULL
#define SYSTEM_CSTATE_SHIFT 16ULL
#define SYSTEM_CSTATE_FORCE_UPDATE_SHIFT 22ULL
#define SYSTEM_CSTATE_FORCE_UPDATE_BIT (1ULL << 22)
#define SYSTEM_CSTATE_UPDATE_BIT (1ULL << 23)
#define CSTATE_WAKE_MASK_UPDATE_BIT (1ULL << 31)
#define CSTATE_WAKE_MASK_SHIFT 32ULL
#define CSTATE_WAKE_MASK_CLEAR 0xFFFFFFFFU
#define CLUSTER_CSTATE_MASK ULL(0x7)
#define CLUSTER_CSTATE_SHIFT U(0)
#define CLUSTER_CSTATE_UPDATE_BIT (ULL(1) << 7)
#define CCPLEX_CSTATE_MASK ULL(0x3)
#define CCPLEX_CSTATE_SHIFT ULL(8)
#define CCPLEX_CSTATE_UPDATE_BIT (ULL(1) << 15)
#define SYSTEM_CSTATE_MASK ULL(0xF)
#define SYSTEM_CSTATE_SHIFT ULL(16)
#define SYSTEM_CSTATE_FORCE_UPDATE_SHIFT ULL(22)
#define SYSTEM_CSTATE_FORCE_UPDATE_BIT (ULL(1) << 22)
#define SYSTEM_CSTATE_UPDATE_BIT (ULL(1) << 23)
#define CSTATE_WAKE_MASK_UPDATE_BIT (ULL(1) << 31)
#define CSTATE_WAKE_MASK_SHIFT ULL(32)
#define CSTATE_WAKE_MASK_CLEAR U(0xFFFFFFFF)
/*******************************************************************************
* Auto-CC3 control macros
******************************************************************************/
#define MCE_AUTO_CC3_FREQ_MASK 0x1FFU
#define MCE_AUTO_CC3_FREQ_SHIFT 0U
#define MCE_AUTO_CC3_VTG_MASK 0x7FU
#define MCE_AUTO_CC3_VTG_SHIFT 16U
#define MCE_AUTO_CC3_ENABLE_BIT (1U << 31)
#define MCE_AUTO_CC3_FREQ_MASK U(0x1FF)
#define MCE_AUTO_CC3_FREQ_SHIFT U(0)
#define MCE_AUTO_CC3_VTG_MASK U(0x7F)
#define MCE_AUTO_CC3_VTG_SHIFT U(16)
#define MCE_AUTO_CC3_ENABLE_BIT (U(1) << 31)
/*******************************************************************************
* Macros for the 'IS_SC7_ALLOWED' command
******************************************************************************/
#define MCE_SC7_ALLOWED_MASK 0x7U
#define MCE_SC7_WAKE_TIME_SHIFT 32U
#define MCE_SC7_ALLOWED_MASK U(0x7)
#define MCE_SC7_WAKE_TIME_SHIFT U(32)
/*******************************************************************************
* Macros for 'read/write ctats' commands
******************************************************************************/
#define MCE_CSTATE_STATS_TYPE_SHIFT 32ULL
#define MCE_CSTATE_WRITE_DATA_LO_MASK 0xFU
#define MCE_CSTATE_STATS_TYPE_SHIFT ULL(32)
#define MCE_CSTATE_WRITE_DATA_LO_MASK U(0xF)
/*******************************************************************************
* Macros for 'update crossover threshold' command
******************************************************************************/
#define MCE_CROSSOVER_THRESHOLD_TIME_SHIFT 32U
#define MCE_CROSSOVER_THRESHOLD_TIME_SHIFT U(32)
/*******************************************************************************
* MCA command struct
* MCA argument macros
******************************************************************************/
typedef union mca_cmd {
struct command {
uint8_t cmd;
uint8_t idx;
uint8_t subidx;
} command;
struct input {
uint32_t low;
uint32_t high;
} input;
uint64_t data;
} mca_cmd_t;
/*******************************************************************************
* MCA argument struct
******************************************************************************/
typedef union mca_arg {
struct err {
uint32_t error:8;
uint32_t unused:24;
uint32_t unused2:24;
uint32_t finish:8;
} err;
struct arg {
uint32_t low;
uint32_t high;
} arg;
uint64_t data;
} mca_arg_t;
#define MCA_ARG_ERROR_MASK U(0xFF)
#define MCA_ARG_FINISH_SHIFT U(24)
#define MCA_ARG_FINISH_MASK U(0xFF)
/*******************************************************************************
* Uncore PERFMON ARI struct
******************************************************************************/
typedef union uncore_perfmon_req {
struct perfmon_command {
/*
* Commands: 0 = READ, 1 = WRITE
*/
uint32_t cmd:8;
/*
* The unit group: L2=0, L3=1, ROC=2, MC=3, IOB=4
*/
uint32_t grp:4;
/*
* Unit selector: Selects the unit instance, with 0 = Unit
* = (number of units in group) - 1.
*/
uint32_t unit:4;
/*
* Selects the uncore perfmon register to access
*/
uint32_t reg:8;
/*
* Counter number. Selects which counter to use for
* registers NV_PMEVCNTR and NV_PMEVTYPER.
*/
uint32_t counter:8;
} perfmon_command;
struct perfmon_status {
/*
* Resulting command status
*/
uint32_t val:8;
uint32_t unused:24;
} perfmon_status;
uint64_t data;
} uncore_perfmon_req_t;
#define UNCORE_PERFMON_CMD_READ U(0)
#define UNCORE_PERFMON_CMD_WRITE U(1)
#define UNCORE_PERFMON_CMD_READ 0U
#define UNCORE_PERFMON_CMD_WRITE 1U
#define UNCORE_PERFMON_CMD_MASK 0xFFU
#define UNCORE_PERFMON_UNIT_GRP_MASK 0xFU
#define UNCORE_PERFMON_SELECTOR_MASK 0xFU
#define UNCORE_PERFMON_REG_MASK 0xFFU
#define UNCORE_PERFMON_CTR_MASK 0xFFU
#define UNCORE_PERFMON_RESP_STATUS_MASK 0xFFU
#define UNCORE_PERFMON_CMD_MASK U(0xFF)
#define UNCORE_PERFMON_CMD_SHIFT U(24)
#define UNCORE_PERFMON_UNIT_GRP_MASK U(0xF)
#define UNCORE_PERFMON_SELECTOR_MASK U(0xF)
#define UNCORE_PERFMON_REG_MASK U(0xFF)
#define UNCORE_PERFMON_CTR_MASK U(0xFF)
#define UNCORE_PERFMON_RESP_STATUS_MASK U(0xFF)
#define UNCORE_PERFMON_RESP_STATUS_SHIFT U(24)
/*******************************************************************************
* Structure populated by arch specific code to export routines which perform
@@ -146,13 +87,13 @@ typedef struct arch_mce_ops {
* of STANDBYWFI, update the core power state and expected wake time,
* then determine the proper power state to enter.
*/
int (*enter_cstate)(uint32_t ari_base, uint32_t state,
int32_t (*enter_cstate)(uint32_t ari_base, uint32_t state,
uint32_t wake_time);
/*
* This ARI request allows updating of the CLUSTER_CSTATE,
* CCPLEX_CSTATE, and SYSTEM_CSTATE register values.
*/
int (*update_cstate_info)(uint32_t ari_base,
int32_t (*update_cstate_info)(uint32_t ari_base,
uint32_t cluster,
uint32_t ccplex,
uint32_t system,
@@ -164,7 +105,7 @@ typedef struct arch_mce_ops {
* threshold times. An index value specifies which crossover
* state is being updated.
*/
int (*update_crossover_time)(uint32_t ari_base,
int32_t (*update_crossover_time)(uint32_t ari_base,
uint32_t type,
uint32_t time);
/*
@@ -177,7 +118,7 @@ typedef struct arch_mce_ops {
* This ARI request allows write access to statistical information
* related to power states.
*/
int (*write_cstate_stats)(uint32_t ari_base,
int32_t (*write_cstate_stats)(uint32_t ari_base,
uint32_t state,
uint32_t stats);
/*
@@ -193,7 +134,7 @@ typedef struct arch_mce_ops {
* must be entered. If the CCx state is not allowed, the response
* indicates CC6/CC7 can't be entered
*/
int (*is_ccx_allowed)(uint32_t ari_base, uint32_t state,
int32_t (*is_ccx_allowed)(uint32_t ari_base, uint32_t state,
uint32_t wake_time);
/*
* This ARI request allows querying the CCPLEX to determine if
@@ -203,19 +144,19 @@ typedef struct arch_mce_ops {
* indicates SC7 must be entered. If the SC7 state is not allowed,
* the response indicates SC7 can't be entered
*/
int (*is_sc7_allowed)(uint32_t ari_base, uint32_t state,
int32_t (*is_sc7_allowed)(uint32_t ari_base, uint32_t state,
uint32_t wake_time);
/*
* This ARI request allows a core to bring another offlined core
* back online to the C0 state. Note that a core is offlined by
* entering a C-state where the WAKE_MASK is all 0.
*/
int (*online_core)(uint32_t ari_base, uint32_t cpuid);
int32_t (*online_core)(uint32_t ari_base, uint32_t cpuid);
/*
* This ARI request allows the CPU to enable/disable Auto-CC3 idle
* state.
*/
int (*cc3_ctrl)(uint32_t ari_base,
int32_t (*cc3_ctrl)(uint32_t ari_base,
uint32_t freq,
uint32_t volt,
uint8_t enable);
@@ -223,30 +164,30 @@ typedef struct arch_mce_ops {
* This ARI request allows updating the reset vector register for
* D15 and A57 CPUs.
*/
int (*update_reset_vector)(uint32_t ari_base);
int32_t (*update_reset_vector)(uint32_t ari_base);
/*
* This ARI request instructs the ROC to flush A57 data caches in
* order to maintain coherency with the Denver cluster.
*/
int (*roc_flush_cache)(uint32_t ari_base);
int32_t (*roc_flush_cache)(uint32_t ari_base);
/*
* This ARI request instructs the ROC to flush A57 data caches along
* with the caches covering ARM code in order to maintain coherency
* with the Denver cluster.
*/
int (*roc_flush_cache_trbits)(uint32_t ari_base);
int32_t (*roc_flush_cache_trbits)(uint32_t ari_base);
/*
* This ARI request instructs the ROC to clean A57 data caches along
* with the caches covering ARM code in order to maintain coherency
* with the Denver cluster.
*/
int (*roc_clean_cache)(uint32_t ari_base);
int32_t (*roc_clean_cache)(uint32_t ari_base);
/*
* This ARI request reads/writes the Machine Check Arch. (MCA)
* registers.
*/
uint64_t (*read_write_mca)(uint32_t ari_base,
mca_cmd_t cmd,
uint64_t cmd,
uint64_t *data);
/*
* Some MC GSC (General Security Carveout) register values are
@@ -258,7 +199,7 @@ typedef struct arch_mce_ops {
* register value. This ARI request allows updating the GSC register
* value for a certain carveout in the CCPLEX.
*/
int (*update_ccplex_gsc)(uint32_t ari_base, uint32_t gsc_idx);
int32_t (*update_ccplex_gsc)(uint32_t ari_base, uint32_t gsc_idx);
/*
* This ARI request instructs the CCPLEX to either shutdown or
* reset the entire system
@@ -268,8 +209,8 @@ typedef struct arch_mce_ops {
* This ARI request reads/writes data from/to Uncore PERFMON
* registers
*/
int (*read_write_uncore_perfmon)(uint32_t ari_base,
uncore_perfmon_req_t req, uint64_t *data);
int32_t (*read_write_uncore_perfmon)(uint32_t ari_base,
uint64_t req, uint64_t *data);
/*
* This ARI implements ARI_MISC_CCPLEX commands. This can be
* used to enable/disable coresight clock gating.
@@ -279,39 +220,42 @@ typedef struct arch_mce_ops {
} arch_mce_ops_t;
/* declarations for ARI/NVG handler functions */
int ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time);
int ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
int32_t ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time);
int32_t ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
uint8_t update_wake_mask);
int ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time);
int32_t ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time);
uint64_t ari_read_cstate_stats(uint32_t ari_base, uint32_t state);
int ari_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats);
int32_t ari_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats);
uint64_t ari_enumeration_misc(uint32_t ari_base, uint32_t cmd, uint32_t data);
int ari_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
int ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
int ari_online_core(uint32_t ari_base, uint32_t core);
int ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable);
int ari_reset_vector_update(uint32_t ari_base);
int ari_roc_flush_cache_trbits(uint32_t ari_base);
int ari_roc_flush_cache(uint32_t ari_base);
int ari_roc_clean_cache(uint32_t ari_base);
uint64_t ari_read_write_mca(uint32_t ari_base, mca_cmd_t cmd, uint64_t *data);
int ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx);
int32_t ari_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
int32_t ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
int32_t ari_online_core(uint32_t ari_base, uint32_t core);
int32_t ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable);
int32_t ari_reset_vector_update(uint32_t ari_base);
int32_t ari_roc_flush_cache_trbits(uint32_t ari_base);
int32_t ari_roc_flush_cache(uint32_t ari_base);
int32_t ari_roc_clean_cache(uint32_t ari_base);
uint64_t ari_read_write_mca(uint32_t ari_base, uint64_t cmd, uint64_t *data);
int32_t ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx);
void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx);
int ari_read_write_uncore_perfmon(uint32_t ari_base,
uncore_perfmon_req_t req, uint64_t *data);
int32_t ari_read_write_uncore_perfmon(uint32_t ari_base,
uint64_t req, uint64_t *data);
void ari_misc_ccplex(uint32_t ari_base, uint32_t index, uint32_t value);
int nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time);
int nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
int32_t nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time);
int32_t nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
uint8_t update_wake_mask);
int nvg_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time);
int32_t nvg_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time);
uint64_t nvg_read_cstate_stats(uint32_t ari_base, uint32_t state);
int nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t val);
int nvg_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
int nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
int nvg_online_core(uint32_t ari_base, uint32_t core);
int nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable);
int32_t nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats);
int32_t nvg_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
int32_t nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
int32_t nvg_online_core(uint32_t ari_base, uint32_t core);
int32_t nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable);
extern void nvg_set_request_data(uint64_t req, uint64_t data);
extern void nvg_set_request(uint64_t req);
extern uint64_t nvg_get_result(void);
#endif /* __MCE_PRIVATE_H__ */


@@ -19,13 +19,13 @@
/*******************************************************************************
* Register offsets for ARI request/results
******************************************************************************/
#define ARI_REQUEST 0x0
#define ARI_REQUEST_EVENT_MASK 0x4
#define ARI_STATUS 0x8
#define ARI_REQUEST_DATA_LO 0xC
#define ARI_REQUEST_DATA_HI 0x10
#define ARI_RESPONSE_DATA_LO 0x14
#define ARI_RESPONSE_DATA_HI 0x18
#define ARI_REQUEST 0x0U
#define ARI_REQUEST_EVENT_MASK 0x4U
#define ARI_STATUS 0x8U
#define ARI_REQUEST_DATA_LO 0xCU
#define ARI_REQUEST_DATA_HI 0x10U
#define ARI_RESPONSE_DATA_LO 0x14U
#define ARI_RESPONSE_DATA_HI 0x18U
/* Status values for the current request */
#define ARI_REQ_PENDING 1U
@@ -41,12 +41,12 @@
******************************************************************************/
static inline uint32_t ari_read_32(uint32_t ari_base, uint32_t reg)
{
return mmio_read_32(ari_base + reg);
return mmio_read_32((uint64_t)ari_base + (uint64_t)reg);
}
static inline void ari_write_32(uint32_t ari_base, uint32_t val, uint32_t reg)
{
mmio_write_32(ari_base + reg, val);
mmio_write_32((uint64_t)ari_base + (uint64_t)reg, val);
}
static inline uint32_t ari_get_request_low(uint32_t ari_base)
@@ -75,11 +75,12 @@ static inline void ari_clobber_response(uint32_t ari_base)
ari_write_32(ari_base, 0, ARI_RESPONSE_DATA_HI);
}
static int ari_request_wait(uint32_t ari_base, uint32_t evt_mask, uint32_t req,
static int32_t ari_request_wait(uint32_t ari_base, uint32_t evt_mask, uint32_t req,
uint32_t lo, uint32_t hi)
{
uint32_t retries = ARI_MAX_RETRY_COUNT;
uint32_t status;
int32_t ret = 0;
/* program the request, event_mask, hi and lo registers */
ari_write_32(ari_base, lo, ARI_REQUEST_DATA_LO);
@@ -92,236 +93,270 @@ static int ari_request_wait(uint32_t ari_base, uint32_t evt_mask, uint32_t req,
* ARI_STATUS polling, since MCE is waiting for SW to trigger
* the event.
*/
if (evt_mask)
return 0;
if (evt_mask != 0U) {
ret = 0;
} else {
/* For shutdown/reboot commands, we dont have to check for timeouts */
if ((req == (uint32_t)TEGRA_ARI_MISC_CCPLEX) &&
((lo == (uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) ||
(lo == (uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT))) {
ret = 0;
} else {
/*
* Wait for the command response for not more than the timeout
*/
while (retries != 0U) {
/* For shutdown/reboot commands, we dont have to check for timeouts */
if ((req == (uint32_t)TEGRA_ARI_MISC_CCPLEX) &&
((lo == (uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) ||
(lo == (uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT))) {
return 0;
/* read the command status */
status = ari_read_32(ari_base, ARI_STATUS);
if ((status & (ARI_REQ_ONGOING | ARI_REQ_PENDING)) == 0U) {
break;
}
/* delay 1 ms */
mdelay(1);
/* decrement the retry count */
retries--;
}
/* assert if the command timed out */
if (retries == 0U) {
ERROR("ARI request timed out: req %d on CPU %d\n",
req, plat_my_core_pos());
assert(retries != 0U);
}
}
}
/*
* Wait for the command response for not more than the timeout
*/
while (retries != 0U) {
/* read the command status */
status = ari_read_32(ari_base, ARI_STATUS);
if ((status & (ARI_REQ_ONGOING | ARI_REQ_PENDING)) == 0U)
break;
/* delay 1 ms */
mdelay(1);
/* decrement the retry count */
retries--;
}
/* assert if the command timed out */
if (retries == 0U) {
ERROR("ARI request timed out: req %d on CPU %d\n",
req, plat_my_core_pos());
assert(retries != 0U);
}
return 0;
return ret;
}
int ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
int32_t ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
{
int32_t ret = 0;
/* check for allowed power state */
if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
if ((state != TEGRA_ARI_CORE_C0) &&
(state != TEGRA_ARI_CORE_C1) &&
(state != TEGRA_ARI_CORE_C6) &&
(state != TEGRA_ARI_CORE_C7)) {
ERROR("%s: unknown cstate (%d)\n", __func__, state);
return EINVAL;
ret = EINVAL;
} else {
/* clean the previous response state */
ari_clobber_response(ari_base);
/* Enter the cstate, to be woken up after wake_time (TSC ticks) */
ret = ari_request_wait(ari_base, ARI_EVT_MASK_STANDBYWFI_BIT,
TEGRA_ARI_ENTER_CSTATE, state, wake_time);
}
/* clean the previous response state */
ari_clobber_response(ari_base);
/* Enter the cstate, to be woken up after wake_time (TSC ticks) */
return ari_request_wait(ari_base, ARI_EVT_MASK_STANDBYWFI_BIT,
TEGRA_ARI_ENTER_CSTATE, state, wake_time);
return ret;
}
int ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
int32_t ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
uint8_t update_wake_mask)
{
uint32_t val = 0;
uint32_t val = 0U;
/* clean the previous response state */
ari_clobber_response(ari_base);
/* update CLUSTER_CSTATE? */
if (cluster)
val |= (cluster & CLUSTER_CSTATE_MASK) |
CLUSTER_CSTATE_UPDATE_BIT;
if (cluster != 0U) {
val |= (cluster & (uint32_t)CLUSTER_CSTATE_MASK) |
(uint32_t)CLUSTER_CSTATE_UPDATE_BIT;
}
/* update CCPLEX_CSTATE? */
if (ccplex)
val |= (ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT |
CCPLEX_CSTATE_UPDATE_BIT;
if (ccplex != 0U) {
val |= ((ccplex & (uint32_t)CCPLEX_CSTATE_MASK) << (uint32_t)CCPLEX_CSTATE_SHIFT) |
(uint32_t)CCPLEX_CSTATE_UPDATE_BIT;
}
/* update SYSTEM_CSTATE? */
if (system)
val |= ((system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
((sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
SYSTEM_CSTATE_UPDATE_BIT);
if (system != 0U) {
val |= ((system & (uint32_t)SYSTEM_CSTATE_MASK) << (uint32_t)SYSTEM_CSTATE_SHIFT) |
(((uint32_t)sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
(uint32_t)SYSTEM_CSTATE_UPDATE_BIT);
}
/* update wake mask value? */
if (update_wake_mask)
val |= CSTATE_WAKE_MASK_UPDATE_BIT;
if (update_wake_mask != 0U) {
val |= (uint32_t)CSTATE_WAKE_MASK_UPDATE_BIT;
}
/* set the updated cstate info */
return ari_request_wait(ari_base, 0, TEGRA_ARI_UPDATE_CSTATE_INFO, val,
return ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CSTATE_INFO, val,
wake_mask);
}
int ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
int32_t ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
{
int32_t ret = 0;
/* sanity check crossover type */
if ((type == TEGRA_ARI_CROSSOVER_C1_C6) ||
(type > TEGRA_ARI_CROSSOVER_CCP3_SC1))
return EINVAL;
(type > TEGRA_ARI_CROSSOVER_CCP3_SC1)) {
ret = EINVAL;
} else {
/* clean the previous response state */
ari_clobber_response(ari_base);
/* clean the previous response state */
ari_clobber_response(ari_base);
/* update crossover threshold time */
return ari_request_wait(ari_base, 0, TEGRA_ARI_UPDATE_CROSSOVER,
/* update crossover threshold time */
ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CROSSOVER,
type, time);
}
return ret;
}
uint64_t ari_read_cstate_stats(uint32_t ari_base, uint32_t state)
{
int ret;
int32_t ret;
uint64_t result;
/* sanity check crossover type */
if (state == 0)
return EINVAL;
if (state == 0U) {
result = EINVAL;
} else {
/* clean the previous response state */
ari_clobber_response(ari_base);
/* clean the previous response state */
ari_clobber_response(ari_base);
ret = ari_request_wait(ari_base, 0, TEGRA_ARI_CSTATE_STATS, state, 0);
if (ret != 0)
return EINVAL;
return (uint64_t)ari_get_response_low(ari_base);
ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_CSTATE_STATS, state, 0U);
if (ret != 0) {
result = EINVAL;
} else {
result = (uint64_t)ari_get_response_low(ari_base);
}
}
return result;
}
int ari_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
int32_t ari_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
{
/* clean the previous response state */
ari_clobber_response(ari_base);
/* write the cstate stats */
return ari_request_wait(ari_base, 0, TEGRA_ARI_WRITE_CSTATE_STATS, state,
return ari_request_wait(ari_base, 0U, TEGRA_ARI_WRITE_CSTATE_STATS, state,
stats);
}
uint64_t ari_enumeration_misc(uint32_t ari_base, uint32_t cmd, uint32_t data)
{
uint64_t resp;
int ret;
int32_t ret;
uint32_t local_data = data;
/* clean the previous response state */
ari_clobber_response(ari_base);
/* ARI_REQUEST_DATA_HI is reserved for commands other than 'ECHO' */
if (cmd != TEGRA_ARI_MISC_ECHO)
data = 0;
if (cmd != TEGRA_ARI_MISC_ECHO) {
local_data = 0U;
}
ret = ari_request_wait(ari_base, 0, TEGRA_ARI_MISC, cmd, data);
if (ret)
return (uint64_t)ret;
/* get the command response */
resp = ari_get_response_low(ari_base);
resp |= ((uint64_t)ari_get_response_high(ari_base) << 32);
ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC, cmd, local_data);
if (ret != 0) {
resp = (uint64_t)ret;
} else {
/* get the command response */
resp = ari_get_response_low(ari_base);
resp |= ((uint64_t)ari_get_response_high(ari_base) << 32);
}
return resp;
}
int ari_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
int32_t ari_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
{
int ret;
int32_t ret;
uint32_t result;
/* clean the previous response state */
ari_clobber_response(ari_base);
ret = ari_request_wait(ari_base, 0, TEGRA_ARI_IS_CCX_ALLOWED, state & 0x7,
ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_IS_CCX_ALLOWED, state & 0x7U,
wake_time);
if (ret) {
if (ret != 0) {
ERROR("%s: failed (%d)\n", __func__, ret);
return 0;
result = 0U;
} else {
result = ari_get_response_low(ari_base) & 0x1U;
}
/* 1 = CCx allowed, 0 = CCx not allowed */
return (ari_get_response_low(ari_base) & 0x1);
return (int32_t)result;
}
int ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
int32_t ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
{
int ret;
int32_t ret, result;
/* check for allowed power state */
if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
if ((state != TEGRA_ARI_CORE_C0) &&
(state != TEGRA_ARI_CORE_C1) &&
(state != TEGRA_ARI_CORE_C6) &&
(state != TEGRA_ARI_CORE_C7)) {
ERROR("%s: unknown cstate (%d)\n", __func__, state);
return EINVAL;
result = EINVAL;
} else {
/* clean the previous response state */
ari_clobber_response(ari_base);
ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_IS_SC7_ALLOWED, state,
wake_time);
if (ret != 0) {
ERROR("%s: failed (%d)\n", __func__, ret);
result = 0;
} else {
/* 1 = SC7 allowed, 0 = SC7 not allowed */
result = (ari_get_response_low(ari_base) != 0U) ? 1 : 0;
}
}
/* clean the previous response state */
ari_clobber_response(ari_base);
ret = ari_request_wait(ari_base, 0, TEGRA_ARI_IS_SC7_ALLOWED, state,
wake_time);
if (ret) {
ERROR("%s: failed (%d)\n", __func__, ret);
return 0;
}
/* 1 = SC7 allowed, 0 = SC7 not allowed */
return !!ari_get_response_low(ari_base);
return result;
}
int ari_online_core(uint32_t ari_base, uint32_t core)
int32_t ari_online_core(uint32_t ari_base, uint32_t core)
{
uint32_t cpu = read_mpidr() & MPIDR_CPU_MASK;
int cluster = (read_mpidr() & MPIDR_CLUSTER_MASK) >>
MPIDR_AFFINITY_BITS;
int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
uint64_t cpu = read_mpidr() & (uint64_t)(MPIDR_CPU_MASK);
uint64_t cluster = (read_mpidr() & (uint64_t)(MPIDR_CLUSTER_MASK)) >>
(uint64_t)(MPIDR_AFFINITY_BITS);
uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
int32_t ret;
/* construct the current CPU # */
cpu |= (cluster << 2);
/* sanity check target core id */
if ((core >= (uint32_t)MCE_CORE_ID_MAX) || (cpu == core)) {
if ((core >= MCE_CORE_ID_MAX) || (cpu == (uint64_t)core)) {
ERROR("%s: unsupported core id (%d)\n", __func__, core);
return EINVAL;
ret = EINVAL;
} else {
/*
* The Denver cluster has 2 CPUs only - 0, 1.
*/
if ((impl == (uint32_t)DENVER_IMPL) &&
((core == 2U) || (core == 3U))) {
ERROR("%s: unknown core id (%d)\n", __func__, core);
ret = EINVAL;
} else {
/* clean the previous response state */
ari_clobber_response(ari_base);
ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_ONLINE_CORE, core, 0U);
}
}
/*
* The Denver cluster has 2 CPUs only - 0, 1.
*/
if (impl == DENVER_IMPL && ((core == 2) || (core == 3))) {
ERROR("%s: unknown core id (%d)\n", __func__, core);
return EINVAL;
}
/* clean the previous response state */
ari_clobber_response(ari_base);
return ari_request_wait(ari_base, 0, TEGRA_ARI_ONLINE_CORE, core, 0);
return ret;
}
int ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
int32_t ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
{
int val;
uint32_t val;
/* clean the previous response state */
ari_clobber_response(ari_base);
@@ -338,12 +373,12 @@ int ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable
*/
val = (((freq & MCE_AUTO_CC3_FREQ_MASK) << MCE_AUTO_CC3_FREQ_SHIFT) |\
((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |\
(enable ? MCE_AUTO_CC3_ENABLE_BIT : 0));
((enable != 0U) ? MCE_AUTO_CC3_ENABLE_BIT : 0U));
return ari_request_wait(ari_base, 0, TEGRA_ARI_CC3_CTRL, val, 0);
return ari_request_wait(ari_base, 0U, TEGRA_ARI_CC3_CTRL, val, 0U);
}
int ari_reset_vector_update(uint32_t ari_base)
int32_t ari_reset_vector_update(uint32_t ari_base)
{
/* clean the previous response state */
ari_clobber_response(ari_base);
@@ -352,85 +387,97 @@ int ari_reset_vector_update(uint32_t ari_base)
* Need to program the CPU reset vector one time during cold boot
* and SC7 exit
*/
ari_request_wait(ari_base, 0, TEGRA_ARI_COPY_MISCREG_AA64_RST, 0, 0);
(void)ari_request_wait(ari_base, 0U, TEGRA_ARI_COPY_MISCREG_AA64_RST, 0U, 0U);
return 0;
}
int ari_roc_flush_cache_trbits(uint32_t ari_base)
int32_t ari_roc_flush_cache_trbits(uint32_t ari_base)
{
/* clean the previous response state */
ari_clobber_response(ari_base);
return ari_request_wait(ari_base, 0, TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS,
0, 0);
return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS,
0U, 0U);
}
int ari_roc_flush_cache(uint32_t ari_base)
int32_t ari_roc_flush_cache(uint32_t ari_base)
{
/* clean the previous response state */
ari_clobber_response(ari_base);
return ari_request_wait(ari_base, 0, TEGRA_ARI_ROC_FLUSH_CACHE_ONLY,
0, 0);
return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_FLUSH_CACHE_ONLY,
0U, 0U);
}
int ari_roc_clean_cache(uint32_t ari_base)
int32_t ari_roc_clean_cache(uint32_t ari_base)
{
/* clean the previous response state */
ari_clobber_response(ari_base);
return ari_request_wait(ari_base, 0, TEGRA_ARI_ROC_CLEAN_CACHE_ONLY,
0, 0);
return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_CLEAN_CACHE_ONLY,
0U, 0U);
}
uint64_t ari_read_write_mca(uint32_t ari_base, mca_cmd_t cmd, uint64_t *data)
uint64_t ari_read_write_mca(uint32_t ari_base, uint64_t cmd, uint64_t *data)
{
mca_arg_t mca_arg;
int ret;
uint64_t mca_arg_data, result = 0;
uint32_t resp_lo, resp_hi;
uint32_t mca_arg_err, mca_arg_finish;
int32_t ret;
/* Set data (write) */
mca_arg.data = data ? *data : 0ull;
mca_arg_data = (data != NULL) ? *data : 0ULL;
/* Set command */
ari_write_32(ari_base, cmd.input.low, ARI_RESPONSE_DATA_LO);
ari_write_32(ari_base, cmd.input.high, ARI_RESPONSE_DATA_HI);
ari_write_32(ari_base, (uint32_t)cmd, ARI_RESPONSE_DATA_LO);
ari_write_32(ari_base, (uint32_t)(cmd >> 32U), ARI_RESPONSE_DATA_HI);
ret = ari_request_wait(ari_base, 0, TEGRA_ARI_MCA, mca_arg.arg.low,
mca_arg.arg.high);
if (!ret) {
mca_arg.arg.low = ari_get_response_low(ari_base);
mca_arg.arg.high = ari_get_response_high(ari_base);
if (!mca_arg.err.finish)
return (uint64_t)mca_arg.err.error;
ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_MCA,
(uint32_t)mca_arg_data,
(uint32_t)(mca_arg_data >> 32UL));
if (ret == 0) {
resp_lo = ari_get_response_low(ari_base);
resp_hi = ari_get_response_high(ari_base);
if (data) {
mca_arg.arg.low = ari_get_request_low(ari_base);
mca_arg.arg.high = ari_get_request_high(ari_base);
*data = mca_arg.data;
mca_arg_err = resp_lo & MCA_ARG_ERROR_MASK;
mca_arg_finish = (resp_hi >> MCA_ARG_FINISH_SHIFT) &
MCA_ARG_FINISH_MASK;
if (mca_arg_finish == 0U) {
result = (uint64_t)mca_arg_err;
} else {
if (data != NULL) {
resp_lo = ari_get_request_low(ari_base);
resp_hi = ari_get_request_high(ari_base);
*data = ((uint64_t)resp_hi << 32UL) |
(uint64_t)resp_lo;
}
}
}
return 0;
return result;
}
int ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx)
int32_t ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx)
{
int32_t ret = 0;
/* sanity check GSC ID */
if (gsc_idx > TEGRA_ARI_GSC_VPR_IDX)
return EINVAL;
if (gsc_idx > (uint32_t)TEGRA_ARI_GSC_VPR_IDX) {
ret = EINVAL;
} else {
/* clean the previous response state */
ari_clobber_response(ari_base);
/* clean the previous response state */
ari_clobber_response(ari_base);
/*
* The MCE code will read the GSC carveout value, corrseponding to
* the ID, from the MC registers and update the internal GSC registers
* of the CCPLEX.
*/
(void)ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CCPLEX_GSC, gsc_idx, 0U);
}
/*
* The MCE code will read the GSC carveout value, corrseponding to
* the ID, from the MC registers and update the internal GSC registers
* of the CCPLEX.
*/
ari_request_wait(ari_base, 0, TEGRA_ARI_UPDATE_CCPLEX_GSC, gsc_idx, 0);
return 0;
return ret;
}
void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx)
@@ -441,48 +488,55 @@ void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx)
/*
* The MCE will shutdown or restart the entire system
*/
(void)ari_request_wait(ari_base, 0, TEGRA_ARI_MISC_CCPLEX, state_idx, 0);
(void)ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC_CCPLEX, state_idx, 0U);
}
int ari_read_write_uncore_perfmon(uint32_t ari_base,
uncore_perfmon_req_t req, uint64_t *data)
int32_t ari_read_write_uncore_perfmon(uint32_t ari_base, uint64_t req,
uint64_t *data)
{
int ret;
int32_t ret, result;
uint32_t val;
uint8_t req_cmd, req_status;
req_cmd = (uint8_t)(req >> UNCORE_PERFMON_CMD_SHIFT);
/* clean the previous response state */
ari_clobber_response(ari_base);
/* sanity check input parameters */
if (req.perfmon_command.cmd == UNCORE_PERFMON_CMD_READ && !data) {
if ((req_cmd == UNCORE_PERFMON_CMD_READ) && (data == NULL)) {
ERROR("invalid parameters\n");
return EINVAL;
result = EINVAL;
} else {
/*
* For "write" commands get the value that has to be written
* to the uncore perfmon registers
*/
val = (req_cmd == UNCORE_PERFMON_CMD_WRITE) ?
(uint32_t)*data : 0UL;
ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_PERFMON, val,
(uint32_t)req);
if (ret != 0) {
result = ret;
} else {
/* read the command status value */
req_status = (uint8_t)ari_get_response_high(ari_base) &
UNCORE_PERFMON_RESP_STATUS_MASK;
/*
* For "read" commands get the data from the uncore
* perfmon registers
*/
req_status >>= UNCORE_PERFMON_RESP_STATUS_SHIFT;
if ((req_status == 0U) && (req_cmd == UNCORE_PERFMON_CMD_READ)) {
*data = ari_get_response_low(ari_base);
}
result = (int32_t)req_status;
}
}
/*
* For "write" commands get the value that has to be written
* to the uncore perfmon registers
*/
val = (req.perfmon_command.cmd == UNCORE_PERFMON_CMD_WRITE) ?
*data : 0;
ret = ari_request_wait(ari_base, 0, TEGRA_ARI_PERFMON, val, req.data);
if (ret)
return ret;
/* read the command status value */
req.perfmon_status.val = ari_get_response_high(ari_base) &
UNCORE_PERFMON_RESP_STATUS_MASK;
/*
* For "read" commands get the data from the uncore
* perfmon registers
*/
if ((req.perfmon_status.val == 0) && (req.perfmon_command.cmd ==
UNCORE_PERFMON_CMD_READ))
*data = ari_get_response_low(ari_base);
return (int)req.perfmon_status.val;
return result;
}
void ari_misc_ccplex(uint32_t ari_base, uint32_t index, uint32_t value)
@@ -494,12 +548,11 @@ void ari_misc_ccplex(uint32_t ari_base, uint32_t index, uint32_t value)
if ((index > TEGRA_ARI_MISC_CCPLEX_EDBGREQ) ||
((index == TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL) &&
(value > 1))) {
(value > 1U))) {
ERROR("%s: invalid parameters \n", __func__);
return;
} else {
/* clean the previous response state */
ari_clobber_response(ari_base);
(void)ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC_CCPLEX, index, value);
}
/* clean the previous response state */
ari_clobber_response(ari_base);
(void)ari_request_wait(ari_base, 0, TEGRA_ARI_MISC_CCPLEX, index, value);
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -67,7 +67,7 @@ static arch_mce_ops_t ari_mce_ops = {
.misc_ccplex = ari_misc_ccplex
};
typedef struct mce_config {
typedef struct {
uint32_t ari_base;
arch_mce_ops_t *ops;
} mce_config_t;
@@ -108,9 +108,9 @@ static mce_config_t mce_cfg_table[MCE_ARI_APERTURES_MAX] = {
static uint32_t mce_get_curr_cpu_ari_base(void)
{
uint32_t mpidr = read_mpidr();
int cpuid = mpidr & MPIDR_CPU_MASK;
int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
uint64_t mpidr = read_mpidr();
uint64_t cpuid = mpidr & (uint64_t)MPIDR_CPU_MASK;
uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
/*
* T186 has 2 CPU clusters, one with Denver CPUs and the other with
@@ -119,17 +119,19 @@ static uint32_t mce_get_curr_cpu_ari_base(void)
* struct, we have to convert the Denver CPU ids to the corresponding
* indices in the mce_ops_table array.
*/
if (impl == DENVER_IMPL)
cpuid |= 0x4;
if (impl == DENVER_IMPL) {
cpuid |= 0x4U;
}
return mce_cfg_table[cpuid].ari_base;
}
static arch_mce_ops_t *mce_get_curr_cpu_ops(void)
{
uint32_t mpidr = read_mpidr();
int cpuid = mpidr & MPIDR_CPU_MASK;
int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
uint64_t mpidr = read_mpidr();
uint64_t cpuid = mpidr & (uint64_t)MPIDR_CPU_MASK;
uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) &
(uint64_t)MIDR_IMPL_MASK;
/*
* T186 has 2 CPU clusters, one with Denver CPUs and the other with
@@ -138,8 +140,9 @@ static arch_mce_ops_t *mce_get_curr_cpu_ops(void)
* struct, we have to convert the Denver CPU ids to the corresponding
* indices in the mce_ops_table array.
*/
if (impl == DENVER_IMPL)
cpuid |= 0x4;
if (impl == DENVER_IMPL) {
cpuid |= 0x4U;
}
return mce_cfg_table[cpuid].ops;
}
@@ -147,20 +150,16 @@ static arch_mce_ops_t *mce_get_curr_cpu_ops(void)
/*******************************************************************************
* Common handler for all MCE commands
******************************************************************************/
int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
uint64_t arg2)
{
arch_mce_ops_t *ops;
const arch_mce_ops_t *ops;
gp_regs_t *gp_regs = get_gpregs_ctx(cm_get_context(NON_SECURE));
uint32_t cpu_ari_base;
uint64_t ret64 = 0, arg3, arg4, arg5;
int ret = 0;
mca_cmd_t mca_cmd;
uncore_perfmon_req_t req;
cpu_context_t *ctx = cm_get_context(NON_SECURE);
gp_regs_t *gp_regs = get_gpregs_ctx(ctx);
int32_t ret = 0;
assert(ctx);
assert(gp_regs);
assert(gp_regs != NULL);
/* get a pointer to the CPU's arch_mce_ops_t struct */
ops = mce_get_curr_cpu_ops();
@@ -171,8 +170,9 @@ int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
switch (cmd) {
case MCE_CMD_ENTER_CSTATE:
ret = ops->enter_cstate(cpu_ari_base, arg0, arg1);
if (ret < 0)
if (ret < 0) {
ERROR("%s: enter_cstate failed(%d)\n", __func__, ret);
}
break;
@@ -181,28 +181,30 @@ int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
* get the parameters required for the update cstate info
* command
*/
arg3 = read_ctx_reg(gp_regs, CTX_GPREG_X4);
arg4 = read_ctx_reg(gp_regs, CTX_GPREG_X5);
arg5 = read_ctx_reg(gp_regs, CTX_GPREG_X6);
arg3 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X4));
arg4 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X5));
arg5 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X6));
ret = ops->update_cstate_info(cpu_ari_base, (uint32_t)arg0,
(uint32_t)arg1, (uint32_t)arg2, (uint8_t)arg3,
(uint32_t)arg4, (uint8_t)arg5);
if (ret < 0)
if (ret < 0) {
ERROR("%s: update_cstate_info failed(%d)\n",
__func__, ret);
}
write_ctx_reg(gp_regs, CTX_GPREG_X4, 0);
write_ctx_reg(gp_regs, CTX_GPREG_X5, 0);
write_ctx_reg(gp_regs, CTX_GPREG_X6, 0);
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X4), (0));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X5), (0));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X6), (0));
break;
case MCE_CMD_UPDATE_CROSSOVER_TIME:
ret = ops->update_crossover_time(cpu_ari_base, arg0, arg1);
if (ret < 0)
if (ret < 0) {
ERROR("%s: update_crossover_time failed(%d)\n",
__func__, ret);
}
break;
@@ -210,16 +212,17 @@ int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
ret64 = ops->read_cstate_stats(cpu_ari_base, arg0);
/* update context to return cstate stats value */
write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
write_ctx_reg(gp_regs, CTX_GPREG_X2, ret64);
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), (ret64));
break;
case MCE_CMD_WRITE_CSTATE_STATS:
ret = ops->write_cstate_stats(cpu_ari_base, arg0, arg1);
if (ret < 0)
if (ret < 0) {
ERROR("%s: write_cstate_stats failed(%d)\n",
__func__, ret);
}
break;
@@ -231,7 +234,8 @@ int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
}
/* update context to return CCx status value */
write_ctx_reg(gp_regs, CTX_GPREG_X1, ret);
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
(uint64_t)(ret));
break;
@@ -243,22 +247,26 @@ int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
}
/* update context to return SC7 status value */
write_ctx_reg(gp_regs, CTX_GPREG_X1, ret);
write_ctx_reg(gp_regs, CTX_GPREG_X3, ret);
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
(uint64_t)(ret));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3),
(uint64_t)(ret));
break;
case MCE_CMD_ONLINE_CORE:
ret = ops->online_core(cpu_ari_base, arg0);
if (ret < 0)
if (ret < 0) {
ERROR("%s: online_core failed(%d)\n", __func__, ret);
}
break;
case MCE_CMD_CC3_CTRL:
ret = ops->cc3_ctrl(cpu_ari_base, arg0, arg1, arg2);
if (ret < 0)
if (ret < 0) {
ERROR("%s: cc3_ctrl failed(%d)\n", __func__, ret);
}
break;
@@ -267,8 +275,10 @@ int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
arg0);
/* update context to return if echo'd data matched source */
write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64 == arg0);
write_ctx_reg(gp_regs, CTX_GPREG_X2, ret64 == arg0);
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
((ret64 == arg0) ? 1ULL : 0ULL));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2),
((ret64 == arg0) ? 1ULL : 0ULL));
break;
@@ -280,8 +290,10 @@ int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
* version = minor(63:32) | major(31:0). Update context
* to return major and minor version number.
*/
write_ctx_reg(gp_regs, CTX_GPREG_X1, (uint32_t)ret64);
write_ctx_reg(gp_regs, CTX_GPREG_X2, (uint32_t)(ret64 >> 32));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
(ret64));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2),
(ret64 >> 32ULL));
break;
@@ -290,50 +302,51 @@ int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
TEGRA_ARI_MISC_FEATURE_LEAF_0, arg0);
/* update context to return features value */
write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
break;
case MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
ret = ops->roc_flush_cache_trbits(cpu_ari_base);
if (ret < 0)
if (ret < 0) {
ERROR("%s: flush cache_trbits failed(%d)\n", __func__,
ret);
}
break;
case MCE_CMD_ROC_FLUSH_CACHE:
ret = ops->roc_flush_cache(cpu_ari_base);
if (ret < 0)
if (ret < 0) {
ERROR("%s: flush cache failed(%d)\n", __func__, ret);
}
break;
case MCE_CMD_ROC_CLEAN_CACHE:
ret = ops->roc_clean_cache(cpu_ari_base);
if (ret < 0)
if (ret < 0) {
ERROR("%s: clean cache failed(%d)\n", __func__, ret);
}
break;
case MCE_CMD_ENUM_READ_MCA:
memcpy(&mca_cmd, &arg0, sizeof(arg0));
ret64 = ops->read_write_mca(cpu_ari_base, mca_cmd, &arg1);
ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);
/* update context to return MCA data/error */
write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
write_ctx_reg(gp_regs, CTX_GPREG_X2, arg1);
write_ctx_reg(gp_regs, CTX_GPREG_X3, ret64);
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), (arg1));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3), (ret64));
break;
case MCE_CMD_ENUM_WRITE_MCA:
memcpy(&mca_cmd, &arg0, sizeof(arg0));
ret64 = ops->read_write_mca(cpu_ari_base, mca_cmd, &arg1);
ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);
/* update context to return MCA error */
write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
write_ctx_reg(gp_regs, CTX_GPREG_X3, ret64);
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3), (ret64));
break;
@@ -357,11 +370,10 @@ int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
#endif
case MCE_CMD_UNCORE_PERFMON_REQ:
memcpy(&req, &arg0, sizeof(arg0));
ret = ops->read_write_uncore_perfmon(cpu_ari_base, req, &arg1);
ret = ops->read_write_uncore_perfmon(cpu_ari_base, arg0, &arg1);
/* update context to return data */
write_ctx_reg(gp_regs, CTX_GPREG_X1, arg1);
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (arg1));
break;
case MCE_CMD_MISC_CCPLEX:
@@ -370,8 +382,9 @@ int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
break;
default:
ERROR("unknown MCE command (%d)\n", cmd);
return EINVAL;
ERROR("unknown MCE command (%lu)\n", cmd);
ret = EINVAL;
break;
}
return ret;
@@ -380,18 +393,18 @@ int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
/*******************************************************************************
* Handler to update the reset vector for CPUs
******************************************************************************/
int mce_update_reset_vector(void)
int32_t mce_update_reset_vector(void)
{
arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
ops->update_reset_vector(mce_get_curr_cpu_ari_base());
return 0;
}
static int mce_update_ccplex_gsc(tegra_ari_gsc_index_t gsc_idx)
static int32_t mce_update_ccplex_gsc(tegra_ari_gsc_index_t gsc_idx)
{
arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
ops->update_ccplex_gsc(mce_get_curr_cpu_ari_base(), gsc_idx);
@@ -401,7 +414,7 @@ static int mce_update_ccplex_gsc(tegra_ari_gsc_index_t gsc_idx)
/*******************************************************************************
* Handler to update carveout values for Video Memory Carveout region
******************************************************************************/
int mce_update_gsc_videomem(void)
int32_t mce_update_gsc_videomem(void)
{
return mce_update_ccplex_gsc(TEGRA_ARI_GSC_VPR_IDX);
}
@@ -409,7 +422,7 @@ int mce_update_gsc_videomem(void)
/*******************************************************************************
* Handler to update carveout values for TZDRAM aperture
******************************************************************************/
int mce_update_gsc_tzdram(void)
int32_t mce_update_gsc_tzdram(void)
{
return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZ_DRAM_IDX);
}
@@ -417,7 +430,7 @@ int mce_update_gsc_tzdram(void)
/*******************************************************************************
* Handler to update carveout values for TZ SysRAM aperture
******************************************************************************/
int mce_update_gsc_tzram(void)
int32_t mce_update_gsc_tzram(void)
{
return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZRAM);
}
@@ -427,28 +440,29 @@ int mce_update_gsc_tzram(void)
******************************************************************************/
__dead2 void mce_enter_ccplex_state(uint32_t state_idx)
{
arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
/* sanity check state value */
if (state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF &&
state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT)
if ((state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) &&
(state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT)) {
panic();
}
ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(), state_idx);
/* wait till the CCPLEX powers down */
for (;;)
for (;;) {
;
}
panic();
}
/*******************************************************************************
* Handler to issue the UPDATE_CSTATE_INFO request
******************************************************************************/
void mce_update_cstate_info(mce_cstate_info_t *cstate)
void mce_update_cstate_info(const mce_cstate_info_t *cstate)
{
arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
/* issue the UPDATE_CSTATE_INFO request */
ops->update_cstate_info(mce_get_curr_cpu_ari_base(), cstate->cluster,
@ -462,7 +476,7 @@ void mce_update_cstate_info(mce_cstate_info_t *cstate)
******************************************************************************/
void mce_verify_firmware_version(void)
{
arch_mce_ops_t *ops;
const arch_mce_ops_t *ops;
uint32_t cpu_ari_base;
uint64_t version;
uint32_t major, minor;
@@ -470,37 +484,40 @@ void mce_verify_firmware_version(void)
/*
* MCE firmware is not supported on simulation platforms.
*/
if (tegra_platform_is_emulation())
return;
if (tegra_platform_is_emulation()) {
/* get a pointer to the CPU's arch_mce_ops_t struct */
ops = mce_get_curr_cpu_ops();
INFO("MCE firmware is not supported\n");
/* get the CPU's ARI base address */
cpu_ari_base = mce_get_curr_cpu_ari_base();
} else {
/* get a pointer to the CPU's arch_mce_ops_t struct */
ops = mce_get_curr_cpu_ops();
/*
* Read the MCE firmware version and extract the major and minor
* version fields
*/
version = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION, 0);
major = (uint32_t)version;
minor = (uint32_t)(version >> 32);
/* get the CPU's ARI base address */
cpu_ari_base = mce_get_curr_cpu_ari_base();
INFO("MCE Version - HW=%d:%d, SW=%d:%d\n", major, minor,
TEGRA_ARI_VERSION_MAJOR, TEGRA_ARI_VERSION_MINOR);
/*
* Read the MCE firmware version and extract the major and minor
* version fields
*/
version = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION, 0);
major = (uint32_t)version;
minor = (uint32_t)(version >> 32);
/*
* Verify that the MCE firmware version and the interface header
* match
*/
if (major != TEGRA_ARI_VERSION_MAJOR) {
ERROR("ARI major version mismatch\n");
panic();
}
INFO("MCE Version - HW=%d:%d, SW=%d:%d\n", major, minor,
TEGRA_ARI_VERSION_MAJOR, TEGRA_ARI_VERSION_MINOR);
if (minor < TEGRA_ARI_VERSION_MINOR) {
ERROR("ARI minor version mismatch\n");
panic();
/*
* Verify that the MCE firmware version and the interface header
* match
*/
if (major != TEGRA_ARI_VERSION_MAJOR) {
ERROR("ARI major version mismatch\n");
panic();
}
if (minor < TEGRA_ARI_VERSION_MINOR) {
ERROR("ARI minor version mismatch\n");
panic();
}
}
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -13,57 +13,63 @@
#include <sys/errno.h>
#include <t18x_ari.h>
extern void nvg_set_request_data(uint64_t req, uint64_t data);
extern void nvg_set_request(uint64_t req);
extern uint64_t nvg_get_result(void);
int nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
int32_t nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
{
int32_t ret = 0;
(void)ari_base;
/* check for allowed power state */
if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
if ((state != TEGRA_ARI_CORE_C0) && (state != TEGRA_ARI_CORE_C1) &&
(state != TEGRA_ARI_CORE_C6) && (state != TEGRA_ARI_CORE_C7)) {
ERROR("%s: unknown cstate (%d)\n", __func__, state);
return EINVAL;
ret = EINVAL;
} else {
/* time (TSC ticks) until the core is expected to get a wake event */
nvg_set_request_data(TEGRA_NVG_CHANNEL_WAKE_TIME, wake_time);
/* set the core cstate */
write_actlr_el1(state);
}
/* time (TSC ticks) until the core is expected to get a wake event */
nvg_set_request_data(TEGRA_NVG_CHANNEL_WAKE_TIME, wake_time);
/* set the core cstate */
write_actlr_el1(state);
return 0;
return ret;
}
/*
* This request allows updating of CLUSTER_CSTATE, CCPLEX_CSTATE and
* SYSTEM_CSTATE values.
*/
int nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
int32_t nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
uint8_t update_wake_mask)
{
uint64_t val = 0;
uint64_t val = 0ULL;
(void)ari_base;
/* update CLUSTER_CSTATE? */
if (cluster)
val |= (cluster & CLUSTER_CSTATE_MASK) |
if (cluster != 0U) {
val |= ((uint64_t)cluster & CLUSTER_CSTATE_MASK) |
CLUSTER_CSTATE_UPDATE_BIT;
}
/* update CCPLEX_CSTATE? */
if (ccplex)
val |= (ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT |
if (ccplex != 0U) {
val |= (((uint64_t)ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT) |
CCPLEX_CSTATE_UPDATE_BIT;
}
/* update SYSTEM_CSTATE? */
if (system)
val |= ((system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
((sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
if (system != 0U) {
val |= (((uint64_t)system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
(((uint64_t)sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
SYSTEM_CSTATE_UPDATE_BIT);
}
/* update wake mask value? */
if (update_wake_mask)
if (update_wake_mask != 0U) {
val |= CSTATE_WAKE_MASK_UPDATE_BIT;
}
/* set the wake mask */
val &= CSTATE_WAKE_MASK_CLEAR;
@@ -75,46 +81,60 @@ int nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
return 0;
}
int nvg_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
int32_t nvg_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
{
int32_t ret = 0;
(void)ari_base;
/* sanity check crossover type */
if (type > TEGRA_ARI_CROSSOVER_CCP3_SC1)
return EINVAL;
if (type > TEGRA_ARI_CROSSOVER_CCP3_SC1) {
ret = EINVAL;
} else {
/*
* The crossover threshold limit types start from
* TEGRA_CROSSOVER_TYPE_C1_C6 to TEGRA_CROSSOVER_TYPE_CCP3_SC7.
* The command indices for updating the threshold be generated
* by adding the type to the NVG_SET_THRESHOLD_CROSSOVER_C1_C6
* command index.
*/
nvg_set_request_data((TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6 +
(uint64_t)type), (uint64_t)time);
}
/*
* The crossover threshold limit types start from
* TEGRA_CROSSOVER_TYPE_C1_C6 to TEGRA_CROSSOVER_TYPE_CCP3_SC7. The
* command indices for updating the threshold can be generated
* by adding the type to the NVG_SET_THRESHOLD_CROSSOVER_C1_C6
* command index.
*/
nvg_set_request_data(TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6 + type,
(uint64_t)time);
return 0;
return ret;
}
uint64_t nvg_read_cstate_stats(uint32_t ari_base, uint32_t state)
{
uint64_t ret;
(void)ari_base;
/* sanity check state */
if (state == 0)
return EINVAL;
if (state == 0U) {
ret = EINVAL;
} else {
/*
* The cstate types start from NVG_READ_CSTATE_STATS_SC7_ENTRIES
* to NVG_GET_LAST_CSTATE_ENTRY_A57_3. The command indices for
* reading the threshold can be generated by adding the type to
* the NVG_CLEAR_CSTATE_STATS command index.
*/
nvg_set_request((TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR +
(uint64_t)state));
ret = nvg_get_result();
}
/*
* The cstate types start from NVG_READ_CSTATE_STATS_SC7_ENTRIES
* to NVG_GET_LAST_CSTATE_ENTRY_A57_3. The command indices for
* reading the threshold can be generated by adding the type to
* the NVG_CLEAR_CSTATE_STATS command index.
*/
nvg_set_request(TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR + state);
return (int64_t)nvg_get_result();
return ret;
}
int nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
int32_t nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
{
uint64_t val;
(void)ari_base;
/*
* The only difference between a CSTATE_STATS_WRITE and
* CSTATE_STATS_READ is the usage of the 63:32 in the request.
@@ -129,71 +149,88 @@ int nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
* reading the threshold can be generated by adding the type to
* the NVG_CLEAR_CSTATE_STATS command index.
*/
nvg_set_request_data(TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR + state, val);
nvg_set_request_data((TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR +
(uint64_t)state), val);
return 0;
}
int nvg_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
int32_t nvg_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
{
(void)ari_base;
(void)state;
(void)wake_time;
/* This does not apply to the Denver cluster */
return 0;
}
int nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
int32_t nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
{
uint64_t val;
int32_t ret;
(void)ari_base;
/* check for allowed power state */
if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
if ((state != TEGRA_ARI_CORE_C0) && (state != TEGRA_ARI_CORE_C1) &&
(state != TEGRA_ARI_CORE_C6) && (state != TEGRA_ARI_CORE_C7)) {
ERROR("%s: unknown cstate (%d)\n", __func__, state);
return EINVAL;
ret = EINVAL;
} else {
/*
* Request format -
* 63:32 = wake time
* 31:0 = C-state for this core
*/
val = ((uint64_t)wake_time << MCE_SC7_WAKE_TIME_SHIFT) |
((uint64_t)state & MCE_SC7_ALLOWED_MASK);
/* issue command to check if SC7 is allowed */
nvg_set_request_data(TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED, val);
/* 1 = SC7 allowed, 0 = SC7 not allowed */
ret = (nvg_get_result() != 0ULL) ? 1 : 0;
}
/*
* Request format -
* 63:32 = wake time
* 31:0 = C-state for this core
*/
val = ((uint64_t)wake_time << MCE_SC7_WAKE_TIME_SHIFT) |
(state & MCE_SC7_ALLOWED_MASK);
/* issue command to check if SC7 is allowed */
nvg_set_request_data(TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED, val);
/* 1 = SC7 allowed, 0 = SC7 not allowed */
return !!nvg_get_result();
return ret;
}
int nvg_online_core(uint32_t ari_base, uint32_t core)
int32_t nvg_online_core(uint32_t ari_base, uint32_t core)
{
uint32_t cpu = read_mpidr() & MPIDR_CPU_MASK;
int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
uint64_t cpu = read_mpidr() & (uint64_t)MPIDR_CPU_MASK;
uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) &
(uint64_t)MIDR_IMPL_MASK;
int32_t ret = 0;
(void)ari_base;
/* sanity check code id */
if ((core >= (uint32_t)MCE_CORE_ID_MAX) || (cpu == core)) {
ERROR("%s: unsupported core id (%d)\n", __func__, core);
return EINVAL;
ret = EINVAL;
} else {
/*
* The Denver cluster has 2 CPUs only - 0, 1.
*/
if ((impl == DENVER_IMPL) && ((core == 2U) || (core == 3U))) {
ERROR("%s: unknown core id (%d)\n", __func__, core);
ret = EINVAL;
} else {
/* get a core online */
nvg_set_request_data(TEGRA_NVG_CHANNEL_ONLINE_CORE,
((uint64_t)core & MCE_CORE_ID_MASK));
}
}
/*
* The Denver cluster has 2 CPUs only - 0, 1.
*/
if (impl == DENVER_IMPL && ((core == 2) || (core == 3))) {
ERROR("%s: unknown core id (%d)\n", __func__, core);
return EINVAL;
}
/* get a core online */
nvg_set_request_data(TEGRA_NVG_CHANNEL_ONLINE_CORE, core & MCE_CORE_ID_MASK);
return 0;
return ret;
}
int nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
int32_t nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
{
int val;
uint32_t val;
(void)ari_base;
/*
* If the enable bit is cleared, Auto-CC3 will be disabled by setting
@@ -207,9 +244,9 @@ int nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable
*/
val = (((freq & MCE_AUTO_CC3_FREQ_MASK) << MCE_AUTO_CC3_FREQ_SHIFT) |\
((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |\
(enable ? MCE_AUTO_CC3_ENABLE_BIT : 0));
((enable != 0U) ? MCE_AUTO_CC3_ENABLE_BIT : 0U));
nvg_set_request_data(TEGRA_NVG_CHANNEL_CC3_CTRL, val);
nvg_set_request_data(TEGRA_NVG_CHANNEL_CC3_CTRL, (uint64_t)val);
return 0;
}