Merge pull request #1764 from vwadekar/tf2.0-tegra-downstream-rebase-1.7.19

Tf2.0 tegra downstream rebase 1.7.19
Antonio Niño Díaz 2019-01-21 14:02:32 +00:00 committed by GitHub
commit c40c88f81b
42 changed files with 2407 additions and 749 deletions


@ -82,6 +82,8 @@ uint64_t tzdram_base;
int uart_id;
/* L2 ECC parity protection disable flag */
int l2_ecc_parity_prot_dis;
/* SHMEM base address for storing the boot logs */
uint64_t boot_profiler_shmem_base;
} plat_params_from_bl2_t;
Power Management


@ -18,16 +18,21 @@
/*******************************************************************************
* Implementation defined ACTLR_EL3 bit definitions
******************************************************************************/
#define ACTLR_EL3_L2ACTLR_BIT (1 << 6)
#define ACTLR_EL3_L2ECTLR_BIT (1 << 5)
#define ACTLR_EL3_L2CTLR_BIT (1 << 4)
#define ACTLR_EL3_CPUECTLR_BIT (1 << 1)
#define ACTLR_EL3_CPUACTLR_BIT (1 << 0)
#define ACTLR_EL3_L2ACTLR_BIT (U(1) << 6)
#define ACTLR_EL3_L2ECTLR_BIT (U(1) << 5)
#define ACTLR_EL3_L2CTLR_BIT (U(1) << 4)
#define ACTLR_EL3_CPUECTLR_BIT (U(1) << 1)
#define ACTLR_EL3_CPUACTLR_BIT (U(1) << 0)
#define ACTLR_EL3_ENABLE_ALL_MASK (ACTLR_EL3_L2ACTLR_BIT | \
ACTLR_EL3_L2ECTLR_BIT | \
ACTLR_EL3_L2CTLR_BIT | \
ACTLR_EL3_CPUECTLR_BIT | \
ACTLR_EL3_CPUACTLR_BIT)
#define ACTLR_EL3_ENABLE_ALL_ACCESS (ACTLR_EL3_L2ACTLR_BIT | \
ACTLR_EL3_L2ECTLR_BIT | \
ACTLR_EL3_L2CTLR_BIT | \
ACTLR_EL3_CPUECTLR_BIT | \
ACTLR_EL3_CPUACTLR_BIT)
ACTLR_EL3_L2ECTLR_BIT | \
ACTLR_EL3_L2CTLR_BIT | \
ACTLR_EL3_CPUECTLR_BIT | \
ACTLR_EL3_CPUACTLR_BIT)
/* Global functions */
.globl plat_is_my_cpu_primary
@ -87,8 +92,17 @@
* Enable L2 and CPU ECTLR RW access from non-secure world
* -------------------------------------------------------
*/
mov x0, #ACTLR_EL3_ENABLE_ALL_ACCESS
mrs x0, actlr_el3
mov x1, #ACTLR_EL3_ENABLE_ALL_MASK
bic x0, x0, x1
mov x1, #ACTLR_EL3_ENABLE_ALL_ACCESS
orr x0, x0, x1
msr actlr_el3, x0
mrs x0, actlr_el2
mov x1, #ACTLR_EL3_ENABLE_ALL_MASK
bic x0, x0, x1
mov x1, #ACTLR_EL3_ENABLE_ALL_ACCESS
orr x0, x0, x1
msr actlr_el2, x0
isb
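
The change above replaces a blind write of ACTLR_EL3/ACTLR_EL2 with a read-modify-write, so bits outside the access-control field are preserved. A minimal C sketch of the same pattern, using only the mask/access macros defined in this hunk (the helper name is hypothetical):

```c
static inline uint64_t actlr_enable_ns_access(uint64_t actlr)
{
        /* clear the access-control bits first ("bic x0, x0, x1") ... */
        actlr &= ~(uint64_t)ACTLR_EL3_ENABLE_ALL_MASK;
        /* ... then set only the bits we want ("orr x0, x0, x1") */
        actlr |= (uint64_t)ACTLR_EL3_ENABLE_ALL_ACCESS;
        return actlr;
}
```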
@ -130,17 +144,20 @@ func plat_is_my_cpu_primary
ret
endfunc plat_is_my_cpu_primary
/* -----------------------------------------------------
/* ----------------------------------------------------------
* unsigned int plat_my_core_pos(void);
*
* result: CorePos = CoreId + (ClusterId << 2)
* -----------------------------------------------------
* result: CorePos = CoreId + (ClusterId * cpus per cluster)
* ----------------------------------------------------------
*/
func plat_my_core_pos
mrs x0, mpidr_el1
and x1, x0, #MPIDR_CPU_MASK
and x0, x0, #MPIDR_CLUSTER_MASK
add x0, x1, x0, LSR #6
lsr x0, x0, #MPIDR_AFFINITY_BITS
mov x2, #PLATFORM_MAX_CPUS_PER_CLUSTER
mul x0, x0, x2
add x0, x1, x0
ret
endfunc plat_my_core_pos
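
The reworked position calculation no longer hard-codes a shift; it scales the cluster ID by the CPUs-per-cluster count. A hedged C equivalent of the assembly above, assuming the same MPIDR_* and PLATFORM_MAX_CPUS_PER_CLUSTER definitions (the function name is illustrative):

```c
static unsigned int core_pos_from_mpidr(uint64_t mpidr)
{
        uint64_t cpu_id = mpidr & MPIDR_CPU_MASK;
        uint64_t cluster_id = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

        /* CorePos = CoreId + (ClusterId * cpus per cluster) */
        return (unsigned int)(cpu_id +
                        (cluster_id * PLATFORM_MAX_CPUS_PER_CLUSTER));
}
```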
@ -162,14 +179,17 @@ endfunc plat_get_my_entrypoint
/* -----------------------------------------------------
* int platform_get_core_pos(int mpidr);
*
* With this function: CorePos = (ClusterId * 4) +
* CoreId
* result: CorePos = (ClusterId * cpus per cluster) +
* CoreId
* -----------------------------------------------------
*/
func platform_get_core_pos
and x1, x0, #MPIDR_CPU_MASK
and x0, x0, #MPIDR_CLUSTER_MASK
add x0, x1, x0, LSR #6
lsr x0, x0, #MPIDR_AFFINITY_BITS
mov x2, #PLATFORM_MAX_CPUS_PER_CLUSTER
mul x0, x0, x2
add x0, x1, x0
ret
endfunc platform_get_core_pos
@ -400,31 +420,6 @@ restore_oslock:
mov x0, #1
msr oslar_el1, x0
cpu_init_common
/* ---------------------------------------------------------------------
* The initial state of the Architectural feature trap register
* (CPTR_EL3) is unknown and it must be set to a known state. All
* feature traps are disabled. Some bits in this register are marked as
* Reserved and should not be modified.
*
* CPTR_EL3.TCPAC: This causes a direct access to the CPACR_EL1 from EL1
* or the CPTR_EL2 from EL2 to trap to EL3 unless it is trapped at EL2.
* CPTR_EL3.TTA: This causes access to the Trace functionality to trap
* to EL3 when executed from EL0, EL1, EL2, or EL3. If system register
* access to trace functionality is not supported, this bit is RES0.
* CPTR_EL3.TFP: This causes instructions that access the registers
* associated with Floating Point and Advanced SIMD execution to trap
* to EL3 when executed from any exception level, unless trapped to EL1
* or EL2.
* ---------------------------------------------------------------------
*/
mrs x1, cptr_el3
bic w1, w1, #TCPAC_BIT
bic w1, w1, #TTA_BIT
bic w1, w1, #TFP_BIT
msr cptr_el3, x1
/* --------------------------------------------------
* Get secure world's entry point and jump to it
* --------------------------------------------------


@ -16,7 +16,7 @@
#include <string.h>
#include <tegra_def.h>
#define BPMP_TIMEOUT_10US 10
#define BPMP_TIMEOUT 2
static uint32_t channel_base[NR_CHANNELS];
static uint32_t bpmp_init_state = BPMP_INIT_PENDING;
@ -58,15 +58,15 @@ int32_t tegra_bpmp_send_receive_atomic(int mrq, const void *ob_data, int ob_sz,
if (bpmp_init_state == BPMP_INIT_COMPLETE) {
/* loop until BPMP is free */
for (timeout = 0; timeout < BPMP_TIMEOUT_10US; timeout++) {
for (timeout = 0; timeout < BPMP_TIMEOUT; timeout++) {
if (master_free(ch) == true) {
break;
}
udelay(1);
mdelay(1);
}
if (timeout != BPMP_TIMEOUT_10US) {
if (timeout != BPMP_TIMEOUT) {
/* generate the command struct */
p->code = mrq;
@ -76,18 +76,18 @@ int32_t tegra_bpmp_send_receive_atomic(int mrq, const void *ob_data, int ob_sz,
/* signal command ready to the BPMP */
signal_slave(ch);
mmio_write_32(TEGRA_PRI_ICTLR_BASE + CPU_IEP_FIR_SET,
(1UL << INT_SHR_SEM_OUTBOX_FULL));
(1U << INT_SHR_SEM_OUTBOX_FULL));
/* loop until the command is executed */
for (timeout = 0; timeout < BPMP_TIMEOUT_10US; timeout++) {
for (timeout = 0; timeout < BPMP_TIMEOUT; timeout++) {
if (master_acked(ch) == true) {
break;
}
udelay(1);
mdelay(1);
}
if (timeout != BPMP_TIMEOUT_10US) {
if (timeout != BPMP_TIMEOUT) {
/* get the command response */
(void)memcpy(ib_data, (const void *)p->data,
@ -106,8 +106,8 @@ int32_t tegra_bpmp_send_receive_atomic(int mrq, const void *ob_data, int ob_sz,
ret = -EINVAL;
}
if (timeout == BPMP_TIMEOUT_10US) {
ERROR("Timed out waiting for bpmp's response");
if (timeout == BPMP_TIMEOUT) {
ERROR("Timed out waiting for bpmp's response\n");
}
return ret;
@ -154,7 +154,7 @@ int tegra_bpmp_init(void)
channel_base[ch] = mmio_read_32(base);
/* increment result register offset */
base += 4UL;
base += 4U;
}
/* mark state as "initialized" */
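
The hunks above change the BPMP polling from ten 1 us polls to two 1 ms polls; the loop counter doubles as the timeout indicator. A small sketch of that poll-with-timeout idiom, assuming a caller-supplied predicate (everything except mdelay() and BPMP_TIMEOUT is hypothetical):

```c
static bool wait_for_bpmp(bool (*ready)(unsigned int ch), unsigned int ch)
{
        uint32_t timeout;

        for (timeout = 0U; timeout < BPMP_TIMEOUT; timeout++) {
                if (ready(ch)) {
                        break;
                }
                mdelay(1);      /* poll once per millisecond */
        }

        /* timeout == BPMP_TIMEOUT means the condition never became true */
        return (timeout != BPMP_TIMEOUT);
}
```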


@ -0,0 +1,188 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch_helpers.h>
#include <common/debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <gpcdma.h>
#include <mmio.h>
#include <platform_def.h>
#include <stdbool.h>
#include <tegra_def.h>
#include <utils_def.h>
/* DMA channel registers */
#define DMA_CH_CSR U(0x0)
#define DMA_CH_CSR_WEIGHT_SHIFT U(10)
#define DMA_CH_CSR_XFER_MODE_SHIFT U(21)
#define DMA_CH_CSR_DMA_MODE_MEM2MEM U(4)
#define DMA_CH_CSR_DMA_MODE_FIXEDPATTERN U(6)
#define DMA_CH_CSR_IRQ_MASK_ENABLE (U(1) << 15)
#define DMA_CH_CSR_RUN_ONCE (U(1) << 27)
#define DMA_CH_CSR_ENABLE (U(1) << 31)
#define DMA_CH_STAT U(0x4)
#define DMA_CH_STAT_BUSY (U(1) << 31)
#define DMA_CH_SRC_PTR U(0xC)
#define DMA_CH_DST_PTR U(0x10)
#define DMA_CH_HI_ADR_PTR U(0x14)
#define DMA_CH_HI_ADR_PTR_SRC_MASK U(0xFF)
#define DMA_CH_HI_ADR_PTR_DST_SHIFT U(16)
#define DMA_CH_HI_ADR_PTR_DST_MASK U(0xFF)
#define DMA_CH_MC_SEQ U(0x18)
#define DMA_CH_MC_SEQ_REQ_CNT_SHIFT U(25)
#define DMA_CH_MC_SEQ_REQ_CNT_VAL U(0x10)
#define DMA_CH_MC_SEQ_BURST_SHIFT U(23)
#define DMA_CH_MC_SEQ_BURST_16_WORDS U(0x3)
#define DMA_CH_WORD_COUNT U(0x20)
#define DMA_CH_FIXED_PATTERN U(0x34)
#define DMA_CH_TZ U(0x38)
#define DMA_CH_TZ_ACCESS_ENABLE U(0)
#define DMA_CH_TZ_ACCESS_DISABLE U(3)
#define MAX_TRANSFER_SIZE (1U*1024U*1024U*1024U) /* 1GB */
#define GPCDMA_TIMEOUT_MS U(100)
#define GPCDMA_RESET_BIT (U(1) << 1)
static bool init_done;
static void tegra_gpcdma_write32(uint32_t offset, uint32_t val)
{
mmio_write_32(TEGRA_GPCDMA_BASE + offset, val);
}
static uint32_t tegra_gpcdma_read32(uint32_t offset)
{
return mmio_read_32(TEGRA_GPCDMA_BASE + offset);
}
static void tegra_gpcdma_init(void)
{
/* assert reset for DMA engine */
mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_GPCDMA_RST_SET_REG_OFFSET,
GPCDMA_RESET_BIT);
udelay(2);
/* de-assert reset for DMA engine */
mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_GPCDMA_RST_CLR_REG_OFFSET,
GPCDMA_RESET_BIT);
}
static void tegra_gpcdma_memcpy_priv(uint64_t dst_addr, uint64_t src_addr,
uint32_t num_bytes, uint32_t mode)
{
uint32_t val, timeout = 0;
int32_t ret = 0;
/* sanity check byte count */
if ((num_bytes > MAX_TRANSFER_SIZE) || ((num_bytes & 0x3U) != U(0))) {
ret = -EINVAL;
}
/* initialise GPCDMA block */
if (!init_done) {
tegra_gpcdma_init();
init_done = true;
}
/* make sure channel isn't busy */
val = tegra_gpcdma_read32(DMA_CH_STAT);
if ((val & DMA_CH_STAT_BUSY) == DMA_CH_STAT_BUSY) {
ERROR("DMA channel is busy\n");
ret = -EBUSY;
}
if (ret == 0) {
/* disable any DMA transfers */
tegra_gpcdma_write32(DMA_CH_CSR, 0);
/* enable DMA access to TZDRAM */
tegra_gpcdma_write32(DMA_CH_TZ, DMA_CH_TZ_ACCESS_ENABLE);
/* configure MC sequencer */
val = (DMA_CH_MC_SEQ_REQ_CNT_VAL << DMA_CH_MC_SEQ_REQ_CNT_SHIFT) |
(DMA_CH_MC_SEQ_BURST_16_WORDS << DMA_CH_MC_SEQ_BURST_SHIFT);
tegra_gpcdma_write32(DMA_CH_MC_SEQ, val);
/* reset fixed pattern */
tegra_gpcdma_write32(DMA_CH_FIXED_PATTERN, 0);
/* populate src and dst address registers */
tegra_gpcdma_write32(DMA_CH_SRC_PTR, (uint32_t)src_addr);
tegra_gpcdma_write32(DMA_CH_DST_PTR, (uint32_t)dst_addr);
val = (uint32_t)((src_addr >> 32) & DMA_CH_HI_ADR_PTR_SRC_MASK);
val |= (uint32_t)(((dst_addr >> 32) & DMA_CH_HI_ADR_PTR_DST_MASK) <<
DMA_CH_HI_ADR_PTR_DST_SHIFT);
tegra_gpcdma_write32(DMA_CH_HI_ADR_PTR, val);
/* transfer size (in words) */
tegra_gpcdma_write32(DMA_CH_WORD_COUNT, ((num_bytes >> 2) - 1U));
/* populate value for CSR */
val = (mode << DMA_CH_CSR_XFER_MODE_SHIFT) |
DMA_CH_CSR_RUN_ONCE | (U(1) << DMA_CH_CSR_WEIGHT_SHIFT) |
DMA_CH_CSR_IRQ_MASK_ENABLE;
tegra_gpcdma_write32(DMA_CH_CSR, val);
/* enable transfer */
val = tegra_gpcdma_read32(DMA_CH_CSR);
val |= DMA_CH_CSR_ENABLE;
tegra_gpcdma_write32(DMA_CH_CSR, val);
/* wait till transfer completes */
do {
/* read the status */
val = tegra_gpcdma_read32(DMA_CH_STAT);
if ((val & DMA_CH_STAT_BUSY) != DMA_CH_STAT_BUSY) {
break;
}
mdelay(1);
timeout++;
} while (timeout < GPCDMA_TIMEOUT_MS);
/* flag timeout error */
if (timeout == GPCDMA_TIMEOUT_MS) {
ERROR("DMA transfer timed out\n");
}
dsbsy();
/* disable DMA access to TZDRAM */
tegra_gpcdma_write32(DMA_CH_TZ, DMA_CH_TZ_ACCESS_DISABLE);
isb();
}
}
/*******************************************************************************
* Memcpy using GPCDMA block (Mem2Mem copy)
******************************************************************************/
void tegra_gpcdma_memcpy(uint64_t dst_addr, uint64_t src_addr,
uint32_t num_bytes)
{
tegra_gpcdma_memcpy_priv(dst_addr, src_addr, num_bytes,
DMA_CH_CSR_DMA_MODE_MEM2MEM);
}
/*******************************************************************************
* Memset using GPCDMA block (Fixed pattern write)
******************************************************************************/
void tegra_gpcdma_zeromem(uint64_t dst_addr, uint32_t num_bytes)
{
tegra_gpcdma_memcpy_priv(dst_addr, 0, num_bytes,
DMA_CH_CSR_DMA_MODE_FIXEDPATTERN);
}
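
The new GPCDMA driver exposes just the two helpers declared in gpcdma.h. A hypothetical usage sketch (addresses and sizes are illustrative; the driver expects word-aligned sizes no larger than MAX_TRANSFER_SIZE):

```c
#include <gpcdma.h>

static void relocate_and_scrub(uint64_t dst, uint64_t src, uint32_t num_bytes)
{
        /* Mem2Mem copy through the DMA engine */
        tegra_gpcdma_memcpy(dst, src, num_bytes);

        /* fixed-pattern (zero) write over the original location */
        tegra_gpcdma_zeromem(src, num_bytes);
}
```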


@ -109,13 +109,16 @@ void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes)
static void tegra_clear_videomem(uintptr_t non_overlap_area_start,
unsigned long long non_overlap_area_size)
{
int ret;
/*
* Map the NS memory first, clean it and then unmap it.
*/
mmap_add_dynamic_region(non_overlap_area_start, /* PA */
ret = mmap_add_dynamic_region(non_overlap_area_start, /* PA */
non_overlap_area_start, /* VA */
non_overlap_area_size, /* size */
MT_NS | MT_RW | MT_EXECUTE_NEVER); /* attrs */
assert(ret == 0);
zeromem((void *)non_overlap_area_start, non_overlap_area_size);
flush_dcache_range(non_overlap_area_start, non_overlap_area_size);
@ -206,3 +209,16 @@ void tegra_memctrl_disable_ahb_redirection(void)
/* lock the aperture registers */
tegra_mc_write_32(MC_IRAM_REG_CTRL, MC_DISABLE_IRAM_CFG_WRITES);
}
void tegra_memctrl_clear_pending_interrupts(void)
{
uint32_t mcerr;
/* check if there are any pending interrupts */
mcerr = mmio_read_32(TEGRA_MC_BASE + MC_INTSTATUS);
if (mcerr != (uint32_t)0U) { /* should not see error here */
WARN("MC_INTSTATUS = 0x%x (should be zero)\n", mcerr);
mmio_write_32((TEGRA_MC_BASE + MC_INTSTATUS), mcerr);
}
}
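
The fix above checks the return value of mmap_add_dynamic_region() before touching the mapping. A condensed sketch of the map-clean-unmap idiom this function uses, with the same xlat_tables_v2 calls and attributes as in the diff (helper name hypothetical):

```c
static void clear_ns_region(uintptr_t base, size_t size)
{
        int ret;

        ret = mmap_add_dynamic_region(base, base, size,
                                      MT_NS | MT_RW | MT_EXECUTE_NEVER);
        assert(ret == 0);

        zeromem((void *)base, size);
        flush_dcache_range(base, size);

        (void)mmap_remove_dynamic_region(base, size);
}
```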


@ -287,13 +287,13 @@ static void tegra_memctrl_reconfig_mss_clients(void)
static void tegra_memctrl_set_overrides(void)
{
tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings();
const tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings();
const mc_txn_override_cfg_t *mc_txn_override_cfgs;
uint32_t num_txn_override_cfgs;
uint32_t i, val;
/* Get the settings from the platform */
assert(plat_mc_settings);
assert(plat_mc_settings != NULL);
mc_txn_override_cfgs = plat_mc_settings->txn_override_cfg;
num_txn_override_cfgs = plat_mc_settings->num_txn_override_cfgs;
@ -302,24 +302,24 @@ static void tegra_memctrl_set_overrides(void)
*/
if ((tegra_chipid_is_t186()) &&
(!tegra_platform_is_silicon() ||
(tegra_platform_is_silicon() && (tegra_get_chipid_minor() == 1)))) {
(tegra_platform_is_silicon() && (tegra_get_chipid_minor() == 1U)))) {
/*
* GPU and NVENC settings for Tegra186 simulation and
* Silicon rev. A01
*/
val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR);
val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
val &= (uint32_t)~MC_TXN_OVERRIDE_CGID_TAG_MASK;
tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR,
val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);
val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2);
val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
val &= (uint32_t)~MC_TXN_OVERRIDE_CGID_TAG_MASK;
tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2,
val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);
val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR);
val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
val &= (uint32_t)~MC_TXN_OVERRIDE_CGID_TAG_MASK;
tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR,
val | MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID);
@ -330,7 +330,7 @@ static void tegra_memctrl_set_overrides(void)
*/
for (i = 0; i < num_txn_override_cfgs; i++) {
val = tegra_mc_read_32(mc_txn_override_cfgs[i].offset);
val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
val &= (uint32_t)~MC_TXN_OVERRIDE_CGID_TAG_MASK;
tegra_mc_write_32(mc_txn_override_cfgs[i].offset,
val | mc_txn_override_cfgs[i].cgid_tag);
}
@ -347,7 +347,7 @@ void tegra_memctrl_setup(void)
uint32_t num_streamid_override_regs;
const mc_streamid_security_cfg_t *mc_streamid_sec_cfgs;
uint32_t num_streamid_sec_cfgs;
tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings();
const tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings();
uint32_t i;
INFO("Tegra Memory Controller (v2)\n");
@ -357,7 +357,7 @@ void tegra_memctrl_setup(void)
tegra_smmu_init();
#endif
/* Get the settings from the platform */
assert(plat_mc_settings);
assert(plat_mc_settings != NULL);
mc_streamid_override_regs = plat_mc_settings->streamid_override_cfg;
num_streamid_override_regs = plat_mc_settings->num_streamid_override_cfgs;
mc_streamid_sec_cfgs = plat_mc_settings->streamid_security_cfg;
@ -421,7 +421,7 @@ void tegra_memctrl_restore_settings(void)
tegra_memctrl_set_overrides();
/* video memory carveout region */
if (video_mem_base) {
if (video_mem_base != 0ULL) {
tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO,
(uint32_t)video_mem_base);
tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI,
@ -444,6 +444,8 @@ void tegra_memctrl_restore_settings(void)
*/
void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes)
{
uint32_t val;
/*
* Setup the Memory controller to allow only secure accesses to
* the TZDRAM carveout
@ -458,15 +460,20 @@ void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes)
* When TZ encryption enabled,
* We need setup TZDRAM before CPU to access TZ Carveout,
* otherwise CPU will fetch non-decrypted data.
* So save TZDRAM setting for retore by SC7 resume FW.
* So save TZDRAM setting for restore by SC7 resume FW.
* Scratch registers map:
* RSV55_0 = CFG1[12:0] | CFG0[31:20]
* RSV55_1 = CFG3[1:0]
*/
mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV55_LO,
tegra_mc_read_32(MC_SECURITY_CFG0_0));
mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV55_HI,
tegra_mc_read_32(MC_SECURITY_CFG3_0));
mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV54_HI,
tegra_mc_read_32(MC_SECURITY_CFG1_0));
val = tegra_mc_read_32(MC_SECURITY_CFG1_0) & MC_SECURITY_SIZE_MB_MASK;
mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV54_HI, val);
val |= tegra_mc_read_32(MC_SECURITY_CFG0_0) & MC_SECURITY_BOM_MASK;
mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV55_LO, val);
val = tegra_mc_read_32(MC_SECURITY_CFG3_0) & MC_SECURITY_BOM_HI_MASK;
mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV55_HI, val);
/*
* MCE propagates the security configuration values across the
@ -525,7 +532,7 @@ void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes)
* at all.
*/
val = tegra_mc_read_32(MC_TZRAM_CARVEOUT_CFG);
val &= ~MC_GSC_ENABLE_TZ_LOCK_BIT;
val &= (uint32_t)~MC_GSC_ENABLE_TZ_LOCK_BIT;
val |= MC_GSC_LOCK_CFG_SETTINGS_BIT;
tegra_mc_write_32(MC_TZRAM_CARVEOUT_CFG, val);
@ -600,18 +607,21 @@ static void tegra_unlock_videomem_nonoverlap(void)
static void tegra_clear_videomem(uintptr_t non_overlap_area_start,
unsigned long long non_overlap_area_size)
{
int ret;
/*
* Map the NS memory first, clean it and then unmap it.
*/
mmap_add_dynamic_region(non_overlap_area_start, /* PA */
ret = mmap_add_dynamic_region(non_overlap_area_start, /* PA */
non_overlap_area_start, /* VA */
non_overlap_area_size, /* size */
MT_NS | MT_RW | MT_EXECUTE_NEVER); /* attrs */
assert(ret == 0);
zero_normalmem((void *)non_overlap_area_start, non_overlap_area_size);
flush_dcache_range(non_overlap_area_start, non_overlap_area_size);
mmap_remove_dynamic_region(non_overlap_area_start,
(void)mmap_remove_dynamic_region(non_overlap_area_start,
non_overlap_area_size);
}
@ -658,17 +668,19 @@ void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes)
*/
INFO("Cleaning previous Video Memory Carveout\n");
if (phys_base > vmem_end_old || video_mem_base > vmem_end_new) {
if ((phys_base > vmem_end_old) || (video_mem_base > vmem_end_new)) {
tegra_clear_videomem(video_mem_base,
(uint64_t)video_mem_size_mb << 20);
(uint32_t)video_mem_size_mb << 20U);
} else {
if (video_mem_base < phys_base) {
non_overlap_area_size = phys_base - video_mem_base;
tegra_clear_videomem(video_mem_base, non_overlap_area_size);
tegra_clear_videomem(video_mem_base,
(uint32_t)non_overlap_area_size);
}
if (vmem_end_old > vmem_end_new) {
non_overlap_area_size = vmem_end_old - vmem_end_new;
tegra_clear_videomem(vmem_end_new, non_overlap_area_size);
tegra_clear_videomem(vmem_end_new,
(uint32_t)non_overlap_area_size);
}
}
@ -700,3 +712,8 @@ void tegra_memctrl_disable_ahb_redirection(void)
{
; /* do nothing */
}
void tegra_memctrl_clear_pending_interrupts(void)
{
; /* do nothing */
}


@ -19,47 +19,55 @@ extern void memcpy16(void *dest, const void *src, unsigned int length);
/* SMMU IDs currently supported by the driver */
enum {
TEGRA_SMMU0,
TEGRA_SMMU0 = 0U,
TEGRA_SMMU1,
TEGRA_SMMU2
};
static uint32_t tegra_smmu_read_32(uint32_t smmu_id, uint32_t off)
{
uint32_t ret = 0U;
#if defined(TEGRA_SMMU0_BASE)
if (smmu_id == TEGRA_SMMU0)
return mmio_read_32(TEGRA_SMMU0_BASE + off);
if (smmu_id == TEGRA_SMMU0) {
ret = mmio_read_32(TEGRA_SMMU0_BASE + (uint64_t)off);
}
#endif
#if defined(TEGRA_SMMU1_BASE)
if (smmu_id == TEGRA_SMMU1)
return mmio_read_32(TEGRA_SMMU1_BASE + off);
if (smmu_id == TEGRA_SMMU1) {
ret = mmio_read_32(TEGRA_SMMU1_BASE + (uint64_t)off);
}
#endif
#if defined(TEGRA_SMMU2_BASE)
if (smmu_id == TEGRA_SMMU2)
return mmio_read_32(TEGRA_SMMU2_BASE + off);
if (smmu_id == TEGRA_SMMU2) {
ret = mmio_read_32(TEGRA_SMMU2_BASE + (uint64_t)off);
}
#endif
return 0;
return ret;
}
static void tegra_smmu_write_32(uint32_t smmu_id,
uint32_t off, uint32_t val)
{
#if defined(TEGRA_SMMU0_BASE)
if (smmu_id == TEGRA_SMMU0)
mmio_write_32(TEGRA_SMMU0_BASE + off, val);
if (smmu_id == TEGRA_SMMU0) {
mmio_write_32(TEGRA_SMMU0_BASE + (uint64_t)off, val);
}
#endif
#if defined(TEGRA_SMMU1_BASE)
if (smmu_id == TEGRA_SMMU1)
mmio_write_32(TEGRA_SMMU1_BASE + off, val);
if (smmu_id == TEGRA_SMMU1) {
mmio_write_32(TEGRA_SMMU1_BASE + (uint64_t)off, val);
}
#endif
#if defined(TEGRA_SMMU2_BASE)
if (smmu_id == TEGRA_SMMU2)
mmio_write_32(TEGRA_SMMU2_BASE + off, val);
if (smmu_id == TEGRA_SMMU2) {
mmio_write_32(TEGRA_SMMU2_BASE + (uint64_t)off, val);
}
#endif
}
@ -70,23 +78,23 @@ void tegra_smmu_save_context(uint64_t smmu_ctx_addr)
{
uint32_t i, num_entries = 0;
smmu_regs_t *smmu_ctx_regs;
plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
uint64_t tzdram_base = params_from_bl2->tzdram_base;
uint64_t tzdram_end = tzdram_base + params_from_bl2->tzdram_size;
uint32_t reg_id1, pgshift, cb_size;
/* sanity check SMMU settings */
reg_id1 = mmio_read_32((TEGRA_SMMU0_BASE + SMMU_GNSR0_IDR1));
pgshift = (reg_id1 & ID1_PAGESIZE) ? 16 : 12;
cb_size = (2 << pgshift) * \
(1 << (((reg_id1 >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1));
pgshift = ((reg_id1 & ID1_PAGESIZE) != 0U) ? 16U : 12U;
cb_size = ((uint32_t)2 << pgshift) * \
((uint32_t)1 << (((reg_id1 >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1U));
assert(!((pgshift != PGSHIFT) || (cb_size != CB_SIZE)));
assert((smmu_ctx_addr >= tzdram_base) && (smmu_ctx_addr <= tzdram_end));
/* get SMMU context table */
smmu_ctx_regs = plat_get_smmu_ctx();
assert(smmu_ctx_regs);
assert(smmu_ctx_regs != NULL);
/*
* smmu_ctx_regs[0].val contains the size of the context table minus
@ -98,19 +106,21 @@ void tegra_smmu_save_context(uint64_t smmu_ctx_addr)
}
/* panic if the sizes do not match */
if (num_entries != smmu_ctx_regs[0].val)
if (num_entries != smmu_ctx_regs[0].val) {
panic();
}
/* save SMMU register values */
for (i = 1; i < num_entries; i++)
for (i = 1U; i < num_entries; i++) {
smmu_ctx_regs[i].val = mmio_read_32(smmu_ctx_regs[i].reg);
}
/* increment by 1 to take care of the last entry */
num_entries++;
/* Save SMMU config settings */
memcpy16((void *)(uintptr_t)smmu_ctx_addr, (void *)smmu_ctx_regs,
(sizeof(smmu_regs_t) * num_entries));
(void)memcpy16((uint8_t *)smmu_ctx_addr, (uint8_t *)smmu_ctx_regs,
(sizeof(smmu_regs_t) * num_entries));
/* save the SMMU table address */
mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV11_LO,
@ -128,17 +138,18 @@ void tegra_smmu_save_context(uint64_t smmu_ctx_addr)
void tegra_smmu_init(void)
{
uint32_t val, cb_idx, smmu_id, ctx_base;
uint32_t smmu_counter = plat_get_num_smmu_devices();
for (smmu_id = 0; smmu_id < NUM_SMMU_DEVICES; smmu_id++) {
for (smmu_id = 0U; smmu_id < smmu_counter; smmu_id++) {
/* Program the SMMU pagesize and reset CACHE_LOCK bit */
val = tegra_smmu_read_32(smmu_id, SMMU_GSR0_SECURE_ACR);
val |= SMMU_GSR0_PGSIZE_64K;
val &= ~SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
val &= (uint32_t)~SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
tegra_smmu_write_32(smmu_id, SMMU_GSR0_SECURE_ACR, val);
/* reset CACHE LOCK bit for NS Aux. Config. Register */
val = tegra_smmu_read_32(smmu_id, SMMU_GNSR_ACR);
val &= ~SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
val &= (uint32_t)~SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
tegra_smmu_write_32(smmu_id, SMMU_GNSR_ACR, val);
/* disable TCU prefetch for all contexts */
@ -147,19 +158,19 @@ void tegra_smmu_init(void)
for (cb_idx = 0; cb_idx < SMMU_CONTEXT_BANK_MAX_IDX; cb_idx++) {
val = tegra_smmu_read_32(smmu_id,
ctx_base + (SMMU_GSR0_PGSIZE_64K * cb_idx));
val &= ~SMMU_CBn_ACTLR_CPRE_BIT;
val &= (uint32_t)~SMMU_CBn_ACTLR_CPRE_BIT;
tegra_smmu_write_32(smmu_id, ctx_base +
(SMMU_GSR0_PGSIZE_64K * cb_idx), val);
}
/* set CACHE LOCK bit for NS Aux. Config. Register */
val = tegra_smmu_read_32(smmu_id, SMMU_GNSR_ACR);
val |= SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
val |= (uint32_t)SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
tegra_smmu_write_32(smmu_id, SMMU_GNSR_ACR, val);
/* set CACHE LOCK bit for S Aux. Config. Register */
val = tegra_smmu_read_32(smmu_id, SMMU_GSR0_SECURE_ACR);
val |= SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
val |= (uint32_t)SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
tegra_smmu_write_32(smmu_id, SMMU_GSR0_SECURE_ACR, val);
}
}
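
The sanity check in tegra_smmu_save_context() now uses explicitly unsigned arithmetic. Spelled out as a hedged helper (name hypothetical, macros as in the diff), the context-bank space size is derived from SMMU IDR1 as cb_size = (2 << pgshift) * (1 << (NUMPAGENDXB + 1)):

```c
static uint32_t smmu_cb_size(uint32_t reg_id1)
{
        uint32_t pgshift, numpagendxb;

        /* 64 KB pages if IDR1.PAGESIZE is set, 4 KB otherwise */
        pgshift = ((reg_id1 & ID1_PAGESIZE) != 0U) ? 16U : 12U;
        numpagendxb = (reg_id1 >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK;

        return ((uint32_t)2 << pgshift) * ((uint32_t)1 << (numpagendxb + 1U));
}
```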


@ -0,0 +1,146 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*******************************************************************************
* The profiler stores the timestamps captured during cold boot to the shared
* memory for the non-secure world. The non-secure world driver parses the
* shared memory block and writes the contents to a file on the device, which
* can be later extracted for analysis.
*
* Profiler memory map
*
* TOP --------------------------- ---
* Trusted OS timestamps 3KB
* --------------------------- ---
* Trusted Firmware timestamps 1KB
* BASE --------------------------- ---
*
******************************************************************************/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <mmio.h>
#include <profiler.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <utils_def.h>
#include <xlat_tables_v2.h>
static uint64_t shmem_base_addr;
#define MAX_PROFILER_RECORDS U(16)
#define TAG_LEN_BYTES U(56)
/*******************************************************************************
* Profiler entry format
******************************************************************************/
typedef struct {
/* text explaining the timestamp location in code */
uint8_t tag[TAG_LEN_BYTES];
/* timestamp value */
uint64_t timestamp;
} profiler_rec_t;
static profiler_rec_t *head, *cur, *tail;
static uint32_t tmr;
static bool is_shmem_buf_mapped;
/*******************************************************************************
* Initialise the profiling library
******************************************************************************/
void boot_profiler_init(uint64_t shmem_base, uint32_t tmr_base)
{
uint64_t shmem_end_base;
assert(shmem_base != ULL(0));
assert(tmr_base != U(0));
/* store the buffer address */
shmem_base_addr = shmem_base;
/* calculate the base address of the last record */
shmem_end_base = shmem_base + (sizeof(profiler_rec_t) *
(MAX_PROFILER_RECORDS - U(1)));
/* calculate the head, tail and cur values */
head = (profiler_rec_t *)shmem_base;
tail = (profiler_rec_t *)shmem_end_base;
cur = head;
/* timer used to get the current timestamp */
tmr = tmr_base;
}
/*******************************************************************************
* Add tag and timestamp to profiler
******************************************************************************/
void boot_profiler_add_record(const char *str)
{
unsigned int len;
/* calculate the length of the tag */
if (((unsigned int)strlen(str) + U(1)) > TAG_LEN_BYTES) {
len = TAG_LEN_BYTES;
} else {
len = (unsigned int)strlen(str) + U(1);
}
if (head != NULL) {
/*
* The profiler runs with/without MMU enabled. Check
* if MMU is enabled and memmap the shmem buffer, in
* case it is.
*/
if ((!is_shmem_buf_mapped) &&
((read_sctlr_el3() & SCTLR_M_BIT) != U(0))) {
(void)mmap_add_dynamic_region(shmem_base_addr,
shmem_base_addr,
PROFILER_SIZE_BYTES,
(MT_NS | MT_RW | MT_EXECUTE_NEVER));
is_shmem_buf_mapped = true;
}
/* write the tag and timestamp to buffer */
(void)snprintf((char *)cur->tag, len, "%s", str);
cur->timestamp = mmio_read_32(tmr);
/* start from head if we reached the end */
if (cur == tail) {
cur = head;
} else {
cur++;
}
}
}
/*******************************************************************************
* Deinit the profiler
******************************************************************************/
void boot_profiler_deinit(void)
{
if (shmem_base_addr != ULL(0)) {
/* clean up resources */
cur = NULL;
head = NULL;
tail = NULL;
/* flush the shmem for it to be visible to the NS world */
flush_dcache_range(shmem_base_addr, PROFILER_SIZE_BYTES);
/* unmap the shmem buffer */
if (is_shmem_buf_mapped) {
(void)mmap_remove_dynamic_region(shmem_base_addr,
PROFILER_SIZE_BYTES);
}
}
}
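
A hypothetical call sequence for the new profiler library, mirroring how tegra_bl31_setup.c uses it later in this commit: initialise with the shared-memory base passed by the previous bootloader, record tagged timestamps, then flush and unmap before leaving BL31.

```c
static void profile_boot_example(uint64_t shmem_base)
{
        boot_profiler_init(shmem_base, TEGRA_TMRUS_BASE);

        boot_profiler_add_record("[TF] example checkpoint");

        /* ... cold boot work ... */

        boot_profiler_deinit();
}
```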


@ -26,6 +26,7 @@
#include <plat/common/platform.h>
#include <memctrl.h>
#include <profiler.h>
#include <tegra_def.h>
#include <tegra_platform.h>
#include <tegra_private.h>
@ -40,20 +41,19 @@ extern void memcpy16(void *dest, const void *src, unsigned int length);
* of trusted SRAM
******************************************************************************/
IMPORT_SYM(unsigned long, __RW_START__, BL31_RW_START);
IMPORT_SYM(unsigned long, __RW_END__, BL31_RW_END);
IMPORT_SYM(unsigned long, __RODATA_START__, BL31_RODATA_BASE);
IMPORT_SYM(unsigned long, __RODATA_END__, BL31_RODATA_END);
IMPORT_SYM(unsigned long, __TEXT_START__, TEXT_START);
IMPORT_SYM(unsigned long, __TEXT_END__, TEXT_END);
IMPORT_SYM(uint64_t, __RW_START__, BL31_RW_START);
IMPORT_SYM(uint64_t, __RW_END__, BL31_RW_END);
IMPORT_SYM(uint64_t, __RODATA_START__, BL31_RODATA_BASE);
IMPORT_SYM(uint64_t, __RODATA_END__, BL31_RODATA_END);
IMPORT_SYM(uint64_t, __TEXT_START__, TEXT_START);
IMPORT_SYM(uint64_t, __TEXT_END__, TEXT_END);
extern uint64_t tegra_bl31_phys_base;
extern uint64_t tegra_console_base;
static entry_point_info_t bl33_image_ep_info, bl32_image_ep_info;
static plat_params_from_bl2_t plat_bl31_params_from_bl2 = {
.tzdram_size = (uint64_t)TZDRAM_SIZE
.tzdram_size = TZDRAM_SIZE
};
static unsigned long bl32_mem_size;
static unsigned long bl32_boot_params;
@ -93,14 +93,16 @@ plat_params_from_bl2_t *plat_get_bl31_plat_params(void)
******************************************************************************/
entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
{
if (type == NON_SECURE)
return &bl33_image_ep_info;
entry_point_info_t *ep = NULL;
/* return BL32 entry point info if it is valid */
if (type == SECURE && bl32_image_ep_info.pc)
return &bl32_image_ep_info;
if (type == NON_SECURE) {
ep = &bl33_image_ep_info;
} else if ((type == SECURE) && (bl32_image_ep_info.pc != 0U)) {
ep = &bl32_image_ep_info;
}
return NULL;
return ep;
}
/*******************************************************************************
@ -124,6 +126,7 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
image_info_t bl32_img_info = { {0} };
uint64_t tzdram_start, tzdram_end, bl32_start, bl32_end;
uint32_t console_clock;
int32_t ret;
/*
* For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
@ -131,20 +134,22 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
* might use custom ways to get arguments, so provide handlers which
* they can override.
*/
if (arg_from_bl2 == NULL)
if (arg_from_bl2 == NULL) {
arg_from_bl2 = plat_get_bl31_params();
if (plat_params == NULL)
}
if (plat_params == NULL) {
plat_params = plat_get_bl31_plat_params();
}
/*
* Copy BL3-3, BL3-2 entry point information.
* They are stored in Secure RAM, in BL2's address space.
*/
assert(arg_from_bl2);
assert(arg_from_bl2->bl33_ep_info);
assert(arg_from_bl2 != NULL);
assert(arg_from_bl2->bl33_ep_info != NULL);
bl33_image_ep_info = *arg_from_bl2->bl33_ep_info;
if (arg_from_bl2->bl32_ep_info) {
if (arg_from_bl2->bl32_ep_info != NULL) {
bl32_image_ep_info = *arg_from_bl2->bl32_ep_info;
bl32_mem_size = arg_from_bl2->bl32_ep_info->args.arg0;
bl32_boot_params = arg_from_bl2->bl32_ep_info->args.arg2;
@ -153,7 +158,7 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
/*
* Parse platform specific parameters - TZDRAM aperture base and size
*/
assert(plat_params);
assert(plat_params != NULL);
plat_bl31_params_from_bl2.tzdram_base = plat_params->tzdram_base;
plat_bl31_params_from_bl2.tzdram_size = plat_params->tzdram_size;
plat_bl31_params_from_bl2.uart_id = plat_params->uart_id;
@ -163,14 +168,15 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
* It is very important that we run either from TZDRAM or TZSRAM base.
* Add an explicit check here.
*/
if ((plat_bl31_params_from_bl2.tzdram_base != BL31_BASE) &&
(TEGRA_TZRAM_BASE != BL31_BASE))
if ((plat_bl31_params_from_bl2.tzdram_base != (uint64_t)BL31_BASE) &&
(TEGRA_TZRAM_BASE != BL31_BASE)) {
panic();
}
/*
* Reference clock used by the FPGAs is a lot slower.
*/
if (tegra_platform_is_fpga() == 1U) {
if (tegra_platform_is_fpga()) {
console_clock = TEGRA_BOOT_UART_CLK_13_MHZ;
} else {
console_clock = TEGRA_BOOT_UART_CLK_408_MHZ;
@ -182,14 +188,40 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
*/
tegra_console_base = plat_get_console_from_id(plat_params->uart_id);
if (tegra_console_base != (uint64_t)0) {
if (tegra_console_base != 0U) {
/*
* Configure the UART port to be used as the console
*/
console_init(tegra_console_base, console_clock,
(void)console_init(tegra_console_base, console_clock,
TEGRA_CONSOLE_BAUDRATE);
}
/*
* The previous bootloader passes the base address of the shared memory
* location to store the boot profiler logs. Sanity check the
* address and initilise the profiler library, if it looks ok.
*/
if (plat_params->boot_profiler_shmem_base != 0ULL) {
ret = bl31_check_ns_address(plat_params->boot_profiler_shmem_base,
PROFILER_SIZE_BYTES);
if (ret == (int32_t)0) {
/* store the membase for the profiler lib */
plat_bl31_params_from_bl2.boot_profiler_shmem_base =
plat_params->boot_profiler_shmem_base;
/* initialise the profiler library */
boot_profiler_init(plat_params->boot_profiler_shmem_base,
TEGRA_TMRUS_BASE);
}
}
/*
* Add timestamp for platform early setup entry.
*/
boot_profiler_add_record("[TF] early setup entry");
/*
* Initialize delay timer
*/
@ -199,14 +231,14 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
* Do initial security configuration to allow DRAM/device access.
*/
tegra_memctrl_tzdram_setup(plat_bl31_params_from_bl2.tzdram_base,
plat_bl31_params_from_bl2.tzdram_size);
(uint32_t)plat_bl31_params_from_bl2.tzdram_size);
/*
* The previous bootloader might not have placed the BL32 image
* inside the TZDRAM. We check the BL32 image info to find out
* the base/PC values and relocate the image if necessary.
*/
if (arg_from_bl2->bl32_image_info) {
if (arg_from_bl2->bl32_image_info != NULL) {
bl32_img_info = *arg_from_bl2->bl32_image_info;
@ -223,11 +255,11 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
assert(bl32_image_ep_info.pc < tzdram_end);
/* relocate BL32 */
if (bl32_start >= tzdram_end || bl32_end <= tzdram_start) {
if ((bl32_start >= tzdram_end) || (bl32_end <= tzdram_start)) {
INFO("Relocate BL32 to TZDRAM\n");
memcpy16((void *)(uintptr_t)bl32_image_ep_info.pc,
(void)memcpy16((void *)(uintptr_t)bl32_image_ep_info.pc,
(void *)(uintptr_t)bl32_start,
bl32_img_info.image_size);
@ -240,6 +272,11 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
/* Early platform setup for Tegra SoCs */
plat_early_platform_setup();
/*
* Add timestamp for platform early setup exit.
*/
boot_profiler_add_record("[TF] early setup exit");
INFO("BL3-1: Boot CPU: %s Processor [%lx]\n",
(((read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK)
== DENVER_IMPL) ? "Denver" : "ARM", read_mpidr());
@ -256,6 +293,9 @@ void plat_trusty_set_boot_args(aapcs64_params_t *args)
if (args->arg4 != 0U) {
args->arg2 = args->arg4;
}
/* Profiler Carveout Base */
args->arg3 = args->arg5;
}
#endif
@ -264,7 +304,10 @@ void plat_trusty_set_boot_args(aapcs64_params_t *args)
******************************************************************************/
void bl31_platform_setup(void)
{
uint32_t tmp_reg;
/*
* Add timestamp for platform setup entry.
*/
boot_profiler_add_record("[TF] plat setup entry");
/* Initialize the gic cpu and distributor interfaces */
plat_gic_setup();
@ -285,9 +328,10 @@ void bl31_platform_setup(void)
*/
tegra_memctrl_tzram_setup(TEGRA_TZRAM_BASE, TEGRA_TZRAM_SIZE);
/* Set the next EL to be AArch64 */
tmp_reg = SCR_RES1_BITS | SCR_RW_BIT;
write_scr(tmp_reg);
/*
* Add timestamp for platform setup exit.
*/
boot_profiler_add_record("[TF] plat setup exit");
INFO("BL3-1: Tegra platform setup complete\n");
}
@ -297,6 +341,15 @@ void bl31_platform_setup(void)
******************************************************************************/
void bl31_plat_runtime_setup(void)
{
/*
* During cold boot, it is observed that the arbitration
* bit is set in the Memory controller leading to false
* error interrupts in the non-secure world. To avoid
* this, clean the interrupt status register before
* booting into the non-secure world
*/
tegra_memctrl_clear_pending_interrupts();
/*
* During boot, USB3 and flash media (SDMMC/SATA) devices need
* access to IRAM. Because these clients connect to the MC and
@ -307,6 +360,12 @@ void bl31_plat_runtime_setup(void)
* disabled before we jump to the non-secure world.
*/
tegra_memctrl_disable_ahb_redirection();
/*
* Add final timestamp before exiting BL31.
*/
boot_profiler_add_record("[TF] bl31 exit");
boot_profiler_deinit();
}
/*******************************************************************************
@ -315,17 +374,22 @@ void bl31_plat_runtime_setup(void)
******************************************************************************/
void bl31_plat_arch_setup(void)
{
unsigned long rw_start = BL31_RW_START;
unsigned long rw_size = BL31_RW_END - BL31_RW_START;
unsigned long rodata_start = BL31_RODATA_BASE;
unsigned long rodata_size = BL31_RODATA_END - BL31_RODATA_BASE;
unsigned long code_base = TEXT_START;
unsigned long code_size = TEXT_END - TEXT_START;
uint64_t rw_start = BL31_RW_START;
uint64_t rw_size = BL31_RW_END - BL31_RW_START;
uint64_t rodata_start = BL31_RODATA_BASE;
uint64_t rodata_size = BL31_RODATA_END - BL31_RODATA_BASE;
uint64_t code_base = TEXT_START;
uint64_t code_size = TEXT_END - TEXT_START;
const mmap_region_t *plat_mmio_map = NULL;
#if USE_COHERENT_MEM
unsigned long coh_start, coh_size;
uint32_t coh_start, coh_size;
#endif
plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
/*
* Add timestamp for arch setup entry.
*/
boot_profiler_add_record("[TF] arch setup entry");
/* add memory regions */
mmap_add_region(rw_start, rw_start,
@ -352,21 +416,22 @@ void bl31_plat_arch_setup(void)
mmap_add_region(coh_start, coh_start,
coh_size,
MT_DEVICE | MT_RW | MT_SECURE);
(uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE);
#endif
/* map on-chip free running uS timer */
mmap_add_region(page_align((uint64_t)TEGRA_TMRUS_BASE, 0),
page_align((uint64_t)TEGRA_TMRUS_BASE, 0),
(uint64_t)TEGRA_TMRUS_SIZE,
MT_DEVICE | MT_RO | MT_SECURE);
mmap_add_region(page_align(TEGRA_TMRUS_BASE, 0),
page_align(TEGRA_TMRUS_BASE, 0),
TEGRA_TMRUS_SIZE,
(uint8_t)MT_DEVICE | (uint8_t)MT_RO | (uint8_t)MT_SECURE);
/* add MMIO space */
plat_mmio_map = plat_get_mmio_map();
if (plat_mmio_map)
if (plat_mmio_map != NULL) {
mmap_add(plat_mmio_map);
else
} else {
WARN("MMIO map not available\n");
}
/* set up translation tables */
init_xlat_tables();
@ -374,33 +439,41 @@ void bl31_plat_arch_setup(void)
/* enable the MMU */
enable_mmu_el3(0);
/*
* Add timestamp for arch setup exit.
*/
boot_profiler_add_record("[TF] arch setup exit");
INFO("BL3-1: Tegra: MMU enabled\n");
}
/*******************************************************************************
* Check if the given NS DRAM range is valid
******************************************************************************/
int bl31_check_ns_address(uint64_t base, uint64_t size_in_bytes)
int32_t bl31_check_ns_address(uint64_t base, uint64_t size_in_bytes)
{
uint64_t end = base + size_in_bytes;
uint64_t end = base + size_in_bytes - U(1);
int32_t ret = 0;
/*
* Check if the NS DRAM address is valid
*/
if ((base < TEGRA_DRAM_BASE) || (end > TEGRA_DRAM_END)) {
if ((base < TEGRA_DRAM_BASE) || (base >= TEGRA_DRAM_END) ||
(end > TEGRA_DRAM_END)) {
ERROR("NS address is out-of-bounds!\n");
return -EFAULT;
ret = -EFAULT;
}
/*
* TZDRAM aperture contains the BL31 and BL32 images, so we need
* to check if the NS DRAM range overlaps the TZDRAM aperture.
*/
if ((base < TZDRAM_END) && (end > tegra_bl31_phys_base)) {
if ((base < (uint64_t)TZDRAM_END) && (end > tegra_bl31_phys_base)) {
ERROR("NS address overlaps TZDRAM!\n");
return -ENOTSUP;
ret = -ENOTSUP;
}
/* valid NS address */
return 0;
return ret;
}
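
The range check now computes an inclusive end address (base + size - 1) and rejects bases at or beyond TEGRA_DRAM_END. The TZDRAM test is a standard interval-overlap check; a generic sketch with hypothetical names, mirroring the condition in the diff:

```c
static bool ranges_overlap(uint64_t a_base, uint64_t a_end_incl,
                           uint64_t b_base, uint64_t b_end_excl)
{
        /* mirrors: (base < TZDRAM_END) && (end > tegra_bl31_phys_base) */
        return (a_base < b_end_excl) && (a_end_incl > b_base);
}
```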


@ -5,6 +5,7 @@
#
PLAT_INCLUDES := -Iplat/nvidia/tegra/include/drivers \
-Iplat/nvidia/tegra/include/lib \
-Iplat/nvidia/tegra/include \
-Iplat/nvidia/tegra/include/${TARGET_SOC}
@ -25,6 +26,7 @@ BL31_SOURCES += drivers/console/aarch64/console.S \
${TEGRA_GICv2_SOURCES} \
${COMMON_DIR}/aarch64/tegra_helpers.S \
${COMMON_DIR}/drivers/pmc/pmc.c \
${COMMON_DIR}/lib/debug/profiler.c \
${COMMON_DIR}/tegra_bl31_setup.c \
${COMMON_DIR}/tegra_delay_timer.c \
${COMMON_DIR}/tegra_fiq_glue.c \


@ -65,7 +65,7 @@ static uint64_t tegra_fiq_interrupt_handler(uint32_t id,
* Set the new ELR to continue execution in the NS world using the
* FIQ handler registered earlier.
*/
assert(ns_fiq_handler_addr);
assert(ns_fiq_handler_addr != 0ULL);
write_ctx_reg((el3state_ctx), (uint32_t)(CTX_ELR_EL3), (ns_fiq_handler_addr));
/*


@ -15,7 +15,7 @@
* Tegra platforms
******************************************************************************/
typedef enum tegra_platform {
TEGRA_PLATFORM_SILICON = 0,
TEGRA_PLATFORM_SILICON = 0U,
TEGRA_PLATFORM_QT,
TEGRA_PLATFORM_FPGA,
TEGRA_PLATFORM_EMULATION,
@ -83,7 +83,7 @@ bool tegra_chipid_is_t132(void)
{
uint32_t chip_id = ((tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK);
return (chip_id == (uint32_t)TEGRA_CHIPID_TEGRA13);
return (chip_id == TEGRA_CHIPID_TEGRA13);
}
bool tegra_chipid_is_t186(void)
@ -97,12 +97,12 @@ bool tegra_chipid_is_t210(void)
{
uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
return (chip_id == (uint32_t)TEGRA_CHIPID_TEGRA21);
return (chip_id == TEGRA_CHIPID_TEGRA21);
}
bool tegra_chipid_is_t210_b01(void)
{
return (tegra_chipid_is_t210() && (tegra_get_chipid_major() == 0x2UL));
return (tegra_chipid_is_t210() && (tegra_get_chipid_major() == 0x2U));
}
/*


@ -50,37 +50,42 @@ uint8_t tegra_fake_system_suspend;
#pragma weak tegra_soc_prepare_system_off
#pragma weak tegra_soc_get_target_pwr_state
int tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
{
return PSCI_E_NOT_SUPPORTED;
}
int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
(void)target_state;
return PSCI_E_NOT_SUPPORTED;
}
int tegra_soc_pwr_domain_on(u_register_t mpidr)
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
(void)mpidr;
return PSCI_E_SUCCESS;
}
int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
(void)target_state;
return PSCI_E_SUCCESS;
}
int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
(void)target_state;
return PSCI_E_SUCCESS;
}
int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
(void)target_state;
return PSCI_E_SUCCESS;
}
int tegra_soc_prepare_system_reset(void)
int32_t tegra_soc_prepare_system_reset(void)
{
return PSCI_E_SUCCESS;
}
@ -91,19 +96,26 @@ __dead2 void tegra_soc_prepare_system_off(void)
panic();
}
plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
const plat_local_state_t *states,
unsigned int ncpu)
uint32_t ncpu)
{
plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
uint32_t num_cpu = ncpu;
const plat_local_state_t *local_state = states;
assert(ncpu);
(void)lvl;
assert(ncpu != 0U);
do {
temp = *states++;
if ((temp < target))
temp = *local_state;
if ((temp < target)) {
target = temp;
} while (--ncpu);
}
--num_cpu;
local_state++;
} while (num_cpu != 0U);
return target;
}
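
A plain-C restatement of the coordination logic in the rewritten tegra_soc_get_target_pwr_state(): the composite state is the numerically smallest (shallowest) local state requested by any CPU, so a single running CPU keeps the whole domain on. The helper name is illustrative:

```c
static plat_local_state_t min_local_state(const plat_local_state_t *states,
                                          uint32_t ncpu)
{
        plat_local_state_t target = PLAT_MAX_OFF_STATE;
        uint32_t i;

        for (i = 0U; i < ncpu; i++) {
                if (states[i] < target) {
                        target = states[i];
                }
        }

        return target;
}
```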
@ -117,8 +129,9 @@ plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
void tegra_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
/* all affinities use system suspend state id */
for (uint32_t i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
for (uint32_t i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++) {
req_state->pwr_domain_state[i] = PSTATE_ID_SOC_POWERDN;
}
}
/*******************************************************************************
@ -126,6 +139,8 @@ void tegra_get_sys_suspend_power_state(psci_power_state_t *req_state)
******************************************************************************/
void tegra_cpu_standby(plat_local_state_t cpu_state)
{
(void)cpu_state;
/*
* Enter standby state
* dsb is good practice before using wfi to enter low power states
@ -138,7 +153,7 @@ void tegra_cpu_standby(plat_local_state_t cpu_state)
* Handler called when an affinity instance is about to be turned on. The
* level and mpidr determine the affinity instance.
******************************************************************************/
int tegra_pwr_domain_on(u_register_t mpidr)
int32_t tegra_pwr_domain_on(u_register_t mpidr)
{
return tegra_soc_pwr_domain_on(mpidr);
}
@ -149,7 +164,7 @@ int tegra_pwr_domain_on(u_register_t mpidr)
******************************************************************************/
void tegra_pwr_domain_off(const psci_power_state_t *target_state)
{
tegra_soc_pwr_domain_off(target_state);
(void)tegra_soc_pwr_domain_off(target_state);
}
/*******************************************************************************
@ -169,12 +184,13 @@ void tegra_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_sta
******************************************************************************/
void tegra_pwr_domain_suspend(const psci_power_state_t *target_state)
{
tegra_soc_pwr_domain_suspend(target_state);
(void)tegra_soc_pwr_domain_suspend(target_state);
/* Disable console if we are entering deep sleep. */
if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] ==
PSTATE_ID_SOC_POWERDN)
console_uninit();
PSTATE_ID_SOC_POWERDN) {
(void)console_uninit();
}
/* disable GICC */
tegra_gic_cpuif_deactivate();
@ -191,7 +207,7 @@ __dead2 void tegra_pwr_domain_power_down_wfi(const psci_power_state_t
uint64_t rmr_el3 = 0;
/* call the chip's power down handler */
tegra_soc_pwr_domain_power_down_wfi(target_state);
(void)tegra_soc_pwr_domain_power_down_wfi(target_state);
/*
* If we are in fake system suspend mode, ensure we start doing
@ -222,7 +238,7 @@ __dead2 void tegra_pwr_domain_power_down_wfi(const psci_power_state_t
******************************************************************************/
void tegra_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
plat_params_from_bl2_t *plat_params;
const plat_params_from_bl2_t *plat_params;
uint32_t console_clock;
/*
@ -239,15 +255,15 @@ void tegra_pwr_domain_on_finish(const psci_power_state_t *target_state)
/*
* Reference clock used by the FPGAs is a lot slower.
*/
if (tegra_platform_is_fpga() == 1U) {
if (tegra_platform_is_fpga()) {
console_clock = TEGRA_BOOT_UART_CLK_13_MHZ;
} else {
console_clock = TEGRA_BOOT_UART_CLK_408_MHZ;
}
/* Initialize the runtime console */
if (tegra_console_base != (uint64_t)0) {
console_init(tegra_console_base, console_clock,
if (tegra_console_base != 0ULL) {
(void)console_init(tegra_console_base, console_clock,
TEGRA_CONSOLE_BAUDRATE);
}
@ -262,7 +278,7 @@ void tegra_pwr_domain_on_finish(const psci_power_state_t *target_state)
*/
plat_params = bl31_get_plat_params();
tegra_memctrl_tzdram_setup(plat_params->tzdram_base,
plat_params->tzdram_size);
(uint32_t)plat_params->tzdram_size);
/*
* Set up the TZRAM memory aperture to allow only secure world
@ -274,7 +290,7 @@ void tegra_pwr_domain_on_finish(const psci_power_state_t *target_state)
/*
* Reset hardware settings.
*/
tegra_soc_pwr_domain_on_finish(target_state);
(void)tegra_soc_pwr_domain_on_finish(target_state);
}
/*******************************************************************************
@ -305,7 +321,7 @@ __dead2 void tegra_system_reset(void)
INFO("Restarting system...\n");
/* per-SoC system reset handler */
tegra_soc_prepare_system_reset();
(void)tegra_soc_prepare_system_reset();
/*
* Program the PMC in order to restart the system.
@ -316,10 +332,10 @@ __dead2 void tegra_system_reset(void)
/*******************************************************************************
* Handler called to check the validity of the power state parameter.
******************************************************************************/
int32_t tegra_validate_power_state(unsigned int power_state,
int32_t tegra_validate_power_state(uint32_t power_state,
psci_power_state_t *req_state)
{
assert(req_state);
assert(req_state != NULL);
return tegra_soc_validate_power_state(power_state, req_state);
}
@ -327,16 +343,19 @@ int32_t tegra_validate_power_state(unsigned int power_state,
/*******************************************************************************
* Platform handler called to check the validity of the non secure entrypoint.
******************************************************************************/
int tegra_validate_ns_entrypoint(uintptr_t entrypoint)
int32_t tegra_validate_ns_entrypoint(uintptr_t entrypoint)
{
int32_t ret = PSCI_E_INVALID_ADDRESS;
/*
* Check if the non secure entrypoint lies within the non
* secure DRAM.
*/
if ((entrypoint >= TEGRA_DRAM_BASE) && (entrypoint <= TEGRA_DRAM_END))
return PSCI_E_SUCCESS;
if ((entrypoint >= TEGRA_DRAM_BASE) && (entrypoint <= TEGRA_DRAM_END)) {
ret = PSCI_E_SUCCESS;
}
return PSCI_E_INVALID_ADDRESS;
return ret;
}
/*******************************************************************************
@ -376,7 +395,7 @@ int plat_setup_psci_ops(uintptr_t sec_entrypoint,
/*
* Reset hardware settings.
*/
tegra_soc_pwr_domain_on_finish(&target_state);
(void)tegra_soc_pwr_domain_on_finish(&target_state);
/*
* Initialize PSCI ops struct


@ -69,7 +69,7 @@ uintptr_t tegra_sip_handler(uint32_t smc_fid,
void *handle,
u_register_t flags)
{
uint32_t regval;
uint32_t regval, local_x2_32 = (uint32_t)x2;
int32_t err;
/* Check if this is a SoC specific SiP */
@ -84,14 +84,11 @@ uintptr_t tegra_sip_handler(uint32_t smc_fid,
case TEGRA_SIP_NEW_VIDEOMEM_REGION:
/* clean up the high bits */
x2 = (uint32_t)x2;
/*
* Check if Video Memory overlaps TZDRAM (contains bl31/bl32)
* or falls outside of the valid DRAM range
*/
err = bl31_check_ns_address(x1, x2);
err = bl31_check_ns_address(x1, local_x2_32);
if (err != 0) {
SMC_RET1(handle, (uint64_t)err);
}
@ -99,9 +96,9 @@ uintptr_t tegra_sip_handler(uint32_t smc_fid,
/*
* Check if Video Memory is aligned to 1MB.
*/
if (((x1 & 0xFFFFFU) != 0U) || ((x2 & 0xFFFFFU) != 0U)) {
if (((x1 & 0xFFFFFU) != 0U) || ((local_x2_32 & 0xFFFFFU) != 0U)) {
ERROR("Unaligned Video Memory base address!\n");
SMC_RET1(handle, -ENOTSUP);
SMC_RET1(handle, (uint64_t)-ENOTSUP);
}
/*
@ -111,13 +108,13 @@ uintptr_t tegra_sip_handler(uint32_t smc_fid,
*/
regval = mmio_read_32(TEGRA_CAR_RESET_BASE +
TEGRA_GPU_RESET_REG_OFFSET);
if ((regval & GPU_RESET_BIT) == 0UL) {
if ((regval & GPU_RESET_BIT) == 0U) {
ERROR("GPU not in reset! Video Memory setup failed\n");
SMC_RET1(handle, -ENOTSUP);
SMC_RET1(handle, (uint64_t)-ENOTSUP);
}
/* new video memory carveout settings */
tegra_memctrl_videomem_setup(x1, (uint32_t)x2);
tegra_memctrl_videomem_setup(x1, local_x2_32);
SMC_RET1(handle, 0);


@ -23,10 +23,14 @@ int32_t plat_core_pos_by_mpidr(u_register_t mpidr)
u_register_t cluster_id, cpu_id;
int32_t result;
cluster_id = (mpidr >> (u_register_t)MPIDR_AFF1_SHIFT) & (u_register_t)MPIDR_AFFLVL_MASK;
cpu_id = (mpidr >> (u_register_t)MPIDR_AFF0_SHIFT) & (u_register_t)MPIDR_AFFLVL_MASK;
cluster_id = (mpidr >> (u_register_t)MPIDR_AFF1_SHIFT) &
(u_register_t)MPIDR_AFFLVL_MASK;
cpu_id = (mpidr >> (u_register_t)MPIDR_AFF0_SHIFT) &
(u_register_t)MPIDR_AFFLVL_MASK;
result = (int32_t)cpu_id + ((int32_t)cluster_id * 4);
/* CorePos = CoreId + (ClusterId * cpus per cluster) */
result = (int32_t)cpu_id + ((int32_t)cluster_id *
PLATFORM_MAX_CPUS_PER_CLUSTER);
if (cluster_id >= (u_register_t)PLATFORM_CLUSTER_COUNT) {
result = PSCI_E_NOT_PRESENT;


@ -10,25 +10,25 @@
#include <stdint.h>
/* macro to enable clock to the Atomics block */
#define CAR_ENABLE_ATOMICS (1UL << 16)
#define CAR_ENABLE_ATOMICS (1U << 16)
/* command to get the channel base addresses from bpmp */
#define ATOMIC_CMD_GET 4UL
#define ATOMIC_CMD_GET 4U
/* Hardware IRQ # used to signal bpmp of an incoming command */
#define INT_SHR_SEM_OUTBOX_FULL 6UL
#define INT_SHR_SEM_OUTBOX_FULL 6U
/* macros to decode the bpmp's state */
#define CH_MASK(ch) (0x3UL << ((ch) * 2UL))
#define MA_FREE(ch) (0x2UL << ((ch) * 2UL))
#define MA_ACKD(ch) (0x3UL << ((ch) * 2UL))
#define CH_MASK(ch) ((uint32_t)0x3 << ((ch) * 2U))
#define MA_FREE(ch) ((uint32_t)0x2 << ((ch) * 2U))
#define MA_ACKD(ch) ((uint32_t)0x3 << ((ch) * 2U))
/* response from bpmp to indicate it has powered up */
#define SIGN_OF_LIFE 0xAAAAAAAAUL
#define SIGN_OF_LIFE 0xAAAAAAAAU
/* flags to indicate bpmp driver's state */
#define BPMP_INIT_COMPLETE 0xBEEFF00DUL
#define BPMP_INIT_PENDING 0xDEADBEEFUL
#define BPMP_INIT_COMPLETE 0xBEEFF00DU
#define BPMP_INIT_PENDING 0xDEADBEEFU
/* requests serviced by the bpmp */
#define MRQ_PING 0
@ -64,14 +64,14 @@
#define TEGRA_PM_SC7 23
/* flag to indicate if entry into a CCx power state is allowed */
#define BPMP_CCx_ALLOWED 0UL
#define BPMP_CCx_ALLOWED 0U
/* number of communication channels to interact with the bpmp */
#define NR_CHANNELS 4U
/* flag to ask bpmp to acknowledge command packet */
#define NO_ACK (0UL << 0UL)
#define DO_ACK (1UL << 0UL)
#define NO_ACK (0U << 0U)
#define DO_ACK (1U << 0U)
/* size of the command/response data */
#define MSG_DATA_MAX_SZ 120U
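
The channel-state macros above encode a 2-bit state per BPMP channel. Illustrative helpers (names hypothetical) showing how a status word would be decoded with CH_MASK()/MA_FREE()/MA_ACKD(); this is an assumption about how the driver's free/acked checks work, since their bodies are not shown in this diff:

```c
static inline bool channel_is_free(uint32_t status, uint32_t ch)
{
        return (status & CH_MASK(ch)) == MA_FREE(ch);
}

static inline bool channel_is_acked(uint32_t status, uint32_t ch)
{
        return (status & CH_MASK(ch)) == MA_ACKD(ch);
}
```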


@ -0,0 +1,16 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __GPCDMA_H__
#define __GPCDMA_H__
#include <stdint.h>
void tegra_gpcdma_memcpy(uint64_t dst_addr, uint64_t src_addr,
uint32_t num_bytes);
void tegra_gpcdma_zeromem(uint64_t dst_addr, uint32_t num_bytes);
#endif /* __GPCDMA_H__ */


@ -13,5 +13,6 @@ void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes);
void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes);
void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes);
void tegra_memctrl_disable_ahb_redirection(void);
void tegra_memctrl_clear_pending_interrupts(void);
#endif /* MEMCTRL_H */


@ -17,177 +17,177 @@
* StreamID to indicate no SMMU translations (requests to be steered on the
* SMMU bypass path)
******************************************************************************/
#define MC_STREAM_ID_MAX 0x7F
#define MC_STREAM_ID_MAX 0x7FU
/*******************************************************************************
* Stream ID Override Config registers
******************************************************************************/
#define MC_STREAMID_OVERRIDE_CFG_PTCR 0x000
#define MC_STREAMID_OVERRIDE_CFG_AFIR 0x070
#define MC_STREAMID_OVERRIDE_CFG_HDAR 0x0A8
#define MC_STREAMID_OVERRIDE_CFG_HOST1XDMAR 0x0B0
#define MC_STREAMID_OVERRIDE_CFG_NVENCSRD 0x0E0
#define MC_STREAMID_OVERRIDE_CFG_SATAR 0x0F8
#define MC_STREAMID_OVERRIDE_CFG_MPCORER 0x138
#define MC_STREAMID_OVERRIDE_CFG_NVENCSWR 0x158
#define MC_STREAMID_OVERRIDE_CFG_AFIW 0x188
#define MC_STREAMID_OVERRIDE_CFG_HDAW 0x1A8
#define MC_STREAMID_OVERRIDE_CFG_MPCOREW 0x1C8
#define MC_STREAMID_OVERRIDE_CFG_SATAW 0x1E8
#define MC_STREAMID_OVERRIDE_CFG_ISPRA 0x220
#define MC_STREAMID_OVERRIDE_CFG_ISPWA 0x230
#define MC_STREAMID_OVERRIDE_CFG_ISPWB 0x238
#define MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTR 0x250
#define MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTW 0x258
#define MC_STREAMID_OVERRIDE_CFG_XUSB_DEVR 0x260
#define MC_STREAMID_OVERRIDE_CFG_XUSB_DEVW 0x268
#define MC_STREAMID_OVERRIDE_CFG_TSECSRD 0x2A0
#define MC_STREAMID_OVERRIDE_CFG_TSECSWR 0x2A8
#define MC_STREAMID_OVERRIDE_CFG_GPUSRD 0x2C0
#define MC_STREAMID_OVERRIDE_CFG_GPUSWR 0x2C8
#define MC_STREAMID_OVERRIDE_CFG_SDMMCRA 0x300
#define MC_STREAMID_OVERRIDE_CFG_SDMMCRAA 0x308
#define MC_STREAMID_OVERRIDE_CFG_SDMMCR 0x310
#define MC_STREAMID_OVERRIDE_CFG_SDMMCRAB 0x318
#define MC_STREAMID_OVERRIDE_CFG_SDMMCWA 0x320
#define MC_STREAMID_OVERRIDE_CFG_SDMMCWAA 0x328
#define MC_STREAMID_OVERRIDE_CFG_SDMMCW 0x330
#define MC_STREAMID_OVERRIDE_CFG_SDMMCWAB 0x338
#define MC_STREAMID_OVERRIDE_CFG_VICSRD 0x360
#define MC_STREAMID_OVERRIDE_CFG_VICSWR 0x368
#define MC_STREAMID_OVERRIDE_CFG_VIW 0x390
#define MC_STREAMID_OVERRIDE_CFG_NVDECSRD 0x3C0
#define MC_STREAMID_OVERRIDE_CFG_NVDECSWR 0x3C8
#define MC_STREAMID_OVERRIDE_CFG_APER 0x3D0
#define MC_STREAMID_OVERRIDE_CFG_APEW 0x3D8
#define MC_STREAMID_OVERRIDE_CFG_NVJPGSRD 0x3F0
#define MC_STREAMID_OVERRIDE_CFG_NVJPGSWR 0x3F8
#define MC_STREAMID_OVERRIDE_CFG_SESRD 0x400
#define MC_STREAMID_OVERRIDE_CFG_SESWR 0x408
#define MC_STREAMID_OVERRIDE_CFG_ETRR 0x420
#define MC_STREAMID_OVERRIDE_CFG_ETRW 0x428
#define MC_STREAMID_OVERRIDE_CFG_TSECSRDB 0x430
#define MC_STREAMID_OVERRIDE_CFG_TSECSWRB 0x438
#define MC_STREAMID_OVERRIDE_CFG_GPUSRD2 0x440
#define MC_STREAMID_OVERRIDE_CFG_GPUSWR2 0x448
#define MC_STREAMID_OVERRIDE_CFG_AXISR 0x460
#define MC_STREAMID_OVERRIDE_CFG_AXISW 0x468
#define MC_STREAMID_OVERRIDE_CFG_EQOSR 0x470
#define MC_STREAMID_OVERRIDE_CFG_EQOSW 0x478
#define MC_STREAMID_OVERRIDE_CFG_UFSHCR 0x480
#define MC_STREAMID_OVERRIDE_CFG_UFSHCW 0x488
#define MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR 0x490
#define MC_STREAMID_OVERRIDE_CFG_BPMPR 0x498
#define MC_STREAMID_OVERRIDE_CFG_BPMPW 0x4A0
#define MC_STREAMID_OVERRIDE_CFG_BPMPDMAR 0x4A8
#define MC_STREAMID_OVERRIDE_CFG_BPMPDMAW 0x4B0
#define MC_STREAMID_OVERRIDE_CFG_AONR 0x4B8
#define MC_STREAMID_OVERRIDE_CFG_AONW 0x4C0
#define MC_STREAMID_OVERRIDE_CFG_AONDMAR 0x4C8
#define MC_STREAMID_OVERRIDE_CFG_AONDMAW 0x4D0
#define MC_STREAMID_OVERRIDE_CFG_SCER 0x4D8
#define MC_STREAMID_OVERRIDE_CFG_SCEW 0x4E0
#define MC_STREAMID_OVERRIDE_CFG_SCEDMAR 0x4E8
#define MC_STREAMID_OVERRIDE_CFG_SCEDMAW 0x4F0
#define MC_STREAMID_OVERRIDE_CFG_APEDMAR 0x4F8
#define MC_STREAMID_OVERRIDE_CFG_APEDMAW 0x500
#define MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR1 0x508
#define MC_STREAMID_OVERRIDE_CFG_VICSRD1 0x510
#define MC_STREAMID_OVERRIDE_CFG_NVDECSRD1 0x518
#define MC_STREAMID_OVERRIDE_CFG_PTCR 0x000U
#define MC_STREAMID_OVERRIDE_CFG_AFIR 0x070U
#define MC_STREAMID_OVERRIDE_CFG_HDAR 0x0A8U
#define MC_STREAMID_OVERRIDE_CFG_HOST1XDMAR 0x0B0U
#define MC_STREAMID_OVERRIDE_CFG_NVENCSRD 0x0E0U
#define MC_STREAMID_OVERRIDE_CFG_SATAR 0x0F8U
#define MC_STREAMID_OVERRIDE_CFG_MPCORER 0x138U
#define MC_STREAMID_OVERRIDE_CFG_NVENCSWR 0x158U
#define MC_STREAMID_OVERRIDE_CFG_AFIW 0x188U
#define MC_STREAMID_OVERRIDE_CFG_HDAW 0x1A8U
#define MC_STREAMID_OVERRIDE_CFG_MPCOREW 0x1C8U
#define MC_STREAMID_OVERRIDE_CFG_SATAW 0x1E8U
#define MC_STREAMID_OVERRIDE_CFG_ISPRA 0x220U
#define MC_STREAMID_OVERRIDE_CFG_ISPWA 0x230U
#define MC_STREAMID_OVERRIDE_CFG_ISPWB 0x238U
#define MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTR 0x250U
#define MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTW 0x258U
#define MC_STREAMID_OVERRIDE_CFG_XUSB_DEVR 0x260U
#define MC_STREAMID_OVERRIDE_CFG_XUSB_DEVW 0x268U
#define MC_STREAMID_OVERRIDE_CFG_TSECSRD 0x2A0U
#define MC_STREAMID_OVERRIDE_CFG_TSECSWR 0x2A8U
#define MC_STREAMID_OVERRIDE_CFG_GPUSRD 0x2C0U
#define MC_STREAMID_OVERRIDE_CFG_GPUSWR 0x2C8U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCRA 0x300U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCRAA 0x308U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCR 0x310U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCRAB 0x318U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCWA 0x320U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCWAA 0x328U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCW 0x330U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCWAB 0x338U
#define MC_STREAMID_OVERRIDE_CFG_VICSRD 0x360U
#define MC_STREAMID_OVERRIDE_CFG_VICSWR 0x368U
#define MC_STREAMID_OVERRIDE_CFG_VIW 0x390U
#define MC_STREAMID_OVERRIDE_CFG_NVDECSRD 0x3C0U
#define MC_STREAMID_OVERRIDE_CFG_NVDECSWR 0x3C8U
#define MC_STREAMID_OVERRIDE_CFG_APER 0x3D0U
#define MC_STREAMID_OVERRIDE_CFG_APEW 0x3D8U
#define MC_STREAMID_OVERRIDE_CFG_NVJPGSRD 0x3F0U
#define MC_STREAMID_OVERRIDE_CFG_NVJPGSWR 0x3F8U
#define MC_STREAMID_OVERRIDE_CFG_SESRD 0x400U
#define MC_STREAMID_OVERRIDE_CFG_SESWR 0x408U
#define MC_STREAMID_OVERRIDE_CFG_ETRR 0x420U
#define MC_STREAMID_OVERRIDE_CFG_ETRW 0x428U
#define MC_STREAMID_OVERRIDE_CFG_TSECSRDB 0x430U
#define MC_STREAMID_OVERRIDE_CFG_TSECSWRB 0x438U
#define MC_STREAMID_OVERRIDE_CFG_GPUSRD2 0x440U
#define MC_STREAMID_OVERRIDE_CFG_GPUSWR2 0x448U
#define MC_STREAMID_OVERRIDE_CFG_AXISR 0x460U
#define MC_STREAMID_OVERRIDE_CFG_AXISW 0x468U
#define MC_STREAMID_OVERRIDE_CFG_EQOSR 0x470U
#define MC_STREAMID_OVERRIDE_CFG_EQOSW 0x478U
#define MC_STREAMID_OVERRIDE_CFG_UFSHCR 0x480U
#define MC_STREAMID_OVERRIDE_CFG_UFSHCW 0x488U
#define MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR 0x490U
#define MC_STREAMID_OVERRIDE_CFG_BPMPR 0x498U
#define MC_STREAMID_OVERRIDE_CFG_BPMPW 0x4A0U
#define MC_STREAMID_OVERRIDE_CFG_BPMPDMAR 0x4A8U
#define MC_STREAMID_OVERRIDE_CFG_BPMPDMAW 0x4B0U
#define MC_STREAMID_OVERRIDE_CFG_AONR 0x4B8U
#define MC_STREAMID_OVERRIDE_CFG_AONW 0x4C0U
#define MC_STREAMID_OVERRIDE_CFG_AONDMAR 0x4C8U
#define MC_STREAMID_OVERRIDE_CFG_AONDMAW 0x4D0U
#define MC_STREAMID_OVERRIDE_CFG_SCER 0x4D8U
#define MC_STREAMID_OVERRIDE_CFG_SCEW 0x4E0U
#define MC_STREAMID_OVERRIDE_CFG_SCEDMAR 0x4E8U
#define MC_STREAMID_OVERRIDE_CFG_SCEDMAW 0x4F0U
#define MC_STREAMID_OVERRIDE_CFG_APEDMAR 0x4F8U
#define MC_STREAMID_OVERRIDE_CFG_APEDMAW 0x500U
#define MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR1 0x508U
#define MC_STREAMID_OVERRIDE_CFG_VICSRD1 0x510U
#define MC_STREAMID_OVERRIDE_CFG_NVDECSRD1 0x518U
/*******************************************************************************
* Macro to calculate Security cfg register addr from StreamID Override register
******************************************************************************/
#define MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(addr) (addr + sizeof(uint32_t))
#define MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(addr) ((addr) + (uint32_t)sizeof(uint32_t))
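Since the macro just adds sizeof(uint32_t), each client's security configuration register is taken to be the 32-bit register immediately after its StreamID override register. For example:

/* HDAR: override register at 0x0A8, so its security cfg register is 0x0AC */
uint32_t hdar_sec_cfg_off = MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(MC_STREAMID_OVERRIDE_CFG_HDAR);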
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_NO_OVERRIDE_SO_DEV (0UL << 4)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_NON_COHERENT_SO_DEV (1UL << 4)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SO_DEV (2UL << 4)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SNOOP_SO_DEV (3UL << 4)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_NO_OVERRIDE_SO_DEV (0U << 4)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_NON_COHERENT_SO_DEV (1U << 4)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SO_DEV (2U << 4)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SNOOP_SO_DEV (3U << 4)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_NO_OVERRIDE_NORMAL (0UL << 8)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_NON_COHERENT_NORMAL (1UL << 8)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_NORMAL (2UL << 8)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SNOOP_NORMAL (3UL << 8)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_NO_OVERRIDE_NORMAL (0U << 8)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_NON_COHERENT_NORMAL (1U << 8)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_NORMAL (2U << 8)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SNOOP_NORMAL (3U << 8)
#define MC_TXN_OVERRIDE_CONFIG_CGID_SO_DEV_ZERO (0UL << 12)
#define MC_TXN_OVERRIDE_CONFIG_CGID_SO_DEV_CLIENT_AXI_ID (1UL << 12)
#define MC_TXN_OVERRIDE_CONFIG_CGID_SO_DEV_ZERO (0U << 12)
#define MC_TXN_OVERRIDE_CONFIG_CGID_SO_DEV_CLIENT_AXI_ID (1U << 12)
/*******************************************************************************
* Memory Controller transaction override config registers
******************************************************************************/
#define MC_TXN_OVERRIDE_CONFIG_HDAR 0x10a8
#define MC_TXN_OVERRIDE_CONFIG_BPMPW 0x14a0
#define MC_TXN_OVERRIDE_CONFIG_PTCR 0x1000
#define MC_TXN_OVERRIDE_CONFIG_NVDISPLAYR 0x1490
#define MC_TXN_OVERRIDE_CONFIG_EQOSW 0x1478
#define MC_TXN_OVERRIDE_CONFIG_NVJPGSWR 0x13f8
#define MC_TXN_OVERRIDE_CONFIG_ISPRA 0x1220
#define MC_TXN_OVERRIDE_CONFIG_SDMMCWAA 0x1328
#define MC_TXN_OVERRIDE_CONFIG_VICSRD 0x1360
#define MC_TXN_OVERRIDE_CONFIG_MPCOREW 0x11c8
#define MC_TXN_OVERRIDE_CONFIG_GPUSRD 0x12c0
#define MC_TXN_OVERRIDE_CONFIG_AXISR 0x1460
#define MC_TXN_OVERRIDE_CONFIG_SCEDMAW 0x14f0
#define MC_TXN_OVERRIDE_CONFIG_SDMMCW 0x1330
#define MC_TXN_OVERRIDE_CONFIG_EQOSR 0x1470
#define MC_TXN_OVERRIDE_CONFIG_APEDMAR 0x14f8
#define MC_TXN_OVERRIDE_CONFIG_NVENCSRD 0x10e0
#define MC_TXN_OVERRIDE_CONFIG_SDMMCRAB 0x1318
#define MC_TXN_OVERRIDE_CONFIG_VICSRD1 0x1510
#define MC_TXN_OVERRIDE_CONFIG_BPMPDMAR 0x14a8
#define MC_TXN_OVERRIDE_CONFIG_VIW 0x1390
#define MC_TXN_OVERRIDE_CONFIG_SDMMCRAA 0x1308
#define MC_TXN_OVERRIDE_CONFIG_AXISW 0x1468
#define MC_TXN_OVERRIDE_CONFIG_XUSB_DEVR 0x1260
#define MC_TXN_OVERRIDE_CONFIG_UFSHCR 0x1480
#define MC_TXN_OVERRIDE_CONFIG_TSECSWR 0x12a8
#define MC_TXN_OVERRIDE_CONFIG_GPUSWR 0x12c8
#define MC_TXN_OVERRIDE_CONFIG_SATAR 0x10f8
#define MC_TXN_OVERRIDE_CONFIG_XUSB_HOSTW 0x1258
#define MC_TXN_OVERRIDE_CONFIG_TSECSWRB 0x1438
#define MC_TXN_OVERRIDE_CONFIG_GPUSRD2 0x1440
#define MC_TXN_OVERRIDE_CONFIG_SCEDMAR 0x14e8
#define MC_TXN_OVERRIDE_CONFIG_GPUSWR2 0x1448
#define MC_TXN_OVERRIDE_CONFIG_AONDMAW 0x14d0
#define MC_TXN_OVERRIDE_CONFIG_APEDMAW 0x1500
#define MC_TXN_OVERRIDE_CONFIG_AONW 0x14c0
#define MC_TXN_OVERRIDE_CONFIG_HOST1XDMAR 0x10b0
#define MC_TXN_OVERRIDE_CONFIG_ETRR 0x1420
#define MC_TXN_OVERRIDE_CONFIG_SESWR 0x1408
#define MC_TXN_OVERRIDE_CONFIG_NVJPGSRD 0x13f0
#define MC_TXN_OVERRIDE_CONFIG_NVDECSRD 0x13c0
#define MC_TXN_OVERRIDE_CONFIG_TSECSRDB 0x1430
#define MC_TXN_OVERRIDE_CONFIG_BPMPDMAW 0x14b0
#define MC_TXN_OVERRIDE_CONFIG_APER 0x13d0
#define MC_TXN_OVERRIDE_CONFIG_NVDECSRD1 0x1518
#define MC_TXN_OVERRIDE_CONFIG_XUSB_HOSTR 0x1250
#define MC_TXN_OVERRIDE_CONFIG_ISPWA 0x1230
#define MC_TXN_OVERRIDE_CONFIG_SESRD 0x1400
#define MC_TXN_OVERRIDE_CONFIG_SCER 0x14d8
#define MC_TXN_OVERRIDE_CONFIG_AONR 0x14b8
#define MC_TXN_OVERRIDE_CONFIG_MPCORER 0x1138
#define MC_TXN_OVERRIDE_CONFIG_SDMMCWA 0x1320
#define MC_TXN_OVERRIDE_CONFIG_HDAW 0x11a8
#define MC_TXN_OVERRIDE_CONFIG_NVDECSWR 0x13c8
#define MC_TXN_OVERRIDE_CONFIG_UFSHCW 0x1488
#define MC_TXN_OVERRIDE_CONFIG_AONDMAR 0x14c8
#define MC_TXN_OVERRIDE_CONFIG_SATAW 0x11e8
#define MC_TXN_OVERRIDE_CONFIG_ETRW 0x1428
#define MC_TXN_OVERRIDE_CONFIG_VICSWR 0x1368
#define MC_TXN_OVERRIDE_CONFIG_NVENCSWR 0x1158
#define MC_TXN_OVERRIDE_CONFIG_AFIR 0x1070
#define MC_TXN_OVERRIDE_CONFIG_SDMMCWAB 0x1338
#define MC_TXN_OVERRIDE_CONFIG_SDMMCRA 0x1300
#define MC_TXN_OVERRIDE_CONFIG_NVDISPLAYR1 0x1508
#define MC_TXN_OVERRIDE_CONFIG_ISPWB 0x1238
#define MC_TXN_OVERRIDE_CONFIG_BPMPR 0x1498
#define MC_TXN_OVERRIDE_CONFIG_APEW 0x13d8
#define MC_TXN_OVERRIDE_CONFIG_SDMMCR 0x1310
#define MC_TXN_OVERRIDE_CONFIG_XUSB_DEVW 0x1268
#define MC_TXN_OVERRIDE_CONFIG_TSECSRD 0x12a0
#define MC_TXN_OVERRIDE_CONFIG_AFIW 0x1188
#define MC_TXN_OVERRIDE_CONFIG_SCEW 0x14e0
#define MC_TXN_OVERRIDE_CONFIG_HDAR 0x10a8U
#define MC_TXN_OVERRIDE_CONFIG_BPMPW 0x14a0U
#define MC_TXN_OVERRIDE_CONFIG_PTCR 0x1000U
#define MC_TXN_OVERRIDE_CONFIG_NVDISPLAYR 0x1490U
#define MC_TXN_OVERRIDE_CONFIG_EQOSW 0x1478U
#define MC_TXN_OVERRIDE_CONFIG_NVJPGSWR 0x13f8U
#define MC_TXN_OVERRIDE_CONFIG_ISPRA 0x1220U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCWAA 0x1328U
#define MC_TXN_OVERRIDE_CONFIG_VICSRD 0x1360U
#define MC_TXN_OVERRIDE_CONFIG_MPCOREW 0x11c8U
#define MC_TXN_OVERRIDE_CONFIG_GPUSRD 0x12c0U
#define MC_TXN_OVERRIDE_CONFIG_AXISR 0x1460U
#define MC_TXN_OVERRIDE_CONFIG_SCEDMAW 0x14f0U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCW 0x1330U
#define MC_TXN_OVERRIDE_CONFIG_EQOSR 0x1470U
#define MC_TXN_OVERRIDE_CONFIG_APEDMAR 0x14f8U
#define MC_TXN_OVERRIDE_CONFIG_NVENCSRD 0x10e0U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCRAB 0x1318U
#define MC_TXN_OVERRIDE_CONFIG_VICSRD1 0x1510U
#define MC_TXN_OVERRIDE_CONFIG_BPMPDMAR 0x14a8U
#define MC_TXN_OVERRIDE_CONFIG_VIW 0x1390U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCRAA 0x1308U
#define MC_TXN_OVERRIDE_CONFIG_AXISW 0x1468U
#define MC_TXN_OVERRIDE_CONFIG_XUSB_DEVR 0x1260U
#define MC_TXN_OVERRIDE_CONFIG_UFSHCR 0x1480U
#define MC_TXN_OVERRIDE_CONFIG_TSECSWR 0x12a8U
#define MC_TXN_OVERRIDE_CONFIG_GPUSWR 0x12c8U
#define MC_TXN_OVERRIDE_CONFIG_SATAR 0x10f8U
#define MC_TXN_OVERRIDE_CONFIG_XUSB_HOSTW 0x1258U
#define MC_TXN_OVERRIDE_CONFIG_TSECSWRB 0x1438U
#define MC_TXN_OVERRIDE_CONFIG_GPUSRD2 0x1440U
#define MC_TXN_OVERRIDE_CONFIG_SCEDMAR 0x14e8U
#define MC_TXN_OVERRIDE_CONFIG_GPUSWR2 0x1448U
#define MC_TXN_OVERRIDE_CONFIG_AONDMAW 0x14d0U
#define MC_TXN_OVERRIDE_CONFIG_APEDMAW 0x1500U
#define MC_TXN_OVERRIDE_CONFIG_AONW 0x14c0U
#define MC_TXN_OVERRIDE_CONFIG_HOST1XDMAR 0x10b0U
#define MC_TXN_OVERRIDE_CONFIG_ETRR 0x1420U
#define MC_TXN_OVERRIDE_CONFIG_SESWR 0x1408U
#define MC_TXN_OVERRIDE_CONFIG_NVJPGSRD 0x13f0U
#define MC_TXN_OVERRIDE_CONFIG_NVDECSRD 0x13c0U
#define MC_TXN_OVERRIDE_CONFIG_TSECSRDB 0x1430U
#define MC_TXN_OVERRIDE_CONFIG_BPMPDMAW 0x14b0U
#define MC_TXN_OVERRIDE_CONFIG_APER 0x13d0U
#define MC_TXN_OVERRIDE_CONFIG_NVDECSRD1 0x1518U
#define MC_TXN_OVERRIDE_CONFIG_XUSB_HOSTR 0x1250U
#define MC_TXN_OVERRIDE_CONFIG_ISPWA 0x1230U
#define MC_TXN_OVERRIDE_CONFIG_SESRD 0x1400U
#define MC_TXN_OVERRIDE_CONFIG_SCER 0x14d8U
#define MC_TXN_OVERRIDE_CONFIG_AONR 0x14b8U
#define MC_TXN_OVERRIDE_CONFIG_MPCORER 0x1138U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCWA 0x1320U
#define MC_TXN_OVERRIDE_CONFIG_HDAW 0x11a8U
#define MC_TXN_OVERRIDE_CONFIG_NVDECSWR 0x13c8U
#define MC_TXN_OVERRIDE_CONFIG_UFSHCW 0x1488U
#define MC_TXN_OVERRIDE_CONFIG_AONDMAR 0x14c8U
#define MC_TXN_OVERRIDE_CONFIG_SATAW 0x11e8U
#define MC_TXN_OVERRIDE_CONFIG_ETRW 0x1428U
#define MC_TXN_OVERRIDE_CONFIG_VICSWR 0x1368U
#define MC_TXN_OVERRIDE_CONFIG_NVENCSWR 0x1158U
#define MC_TXN_OVERRIDE_CONFIG_AFIR 0x1070U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCWAB 0x1338U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCRA 0x1300U
#define MC_TXN_OVERRIDE_CONFIG_NVDISPLAYR1 0x1508U
#define MC_TXN_OVERRIDE_CONFIG_ISPWB 0x1238U
#define MC_TXN_OVERRIDE_CONFIG_BPMPR 0x1498U
#define MC_TXN_OVERRIDE_CONFIG_APEW 0x13d8U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCR 0x1310U
#define MC_TXN_OVERRIDE_CONFIG_XUSB_DEVW 0x1268U
#define MC_TXN_OVERRIDE_CONFIG_TSECSRD 0x12a0U
#define MC_TXN_OVERRIDE_CONFIG_AFIW 0x1188U
#define MC_TXN_OVERRIDE_CONFIG_SCEW 0x14e0U
/*******************************************************************************
* Structure to hold the transaction override settings to use to override
@ -223,12 +223,12 @@ typedef struct mc_streamid_security_cfg {
int override_client_ns_flag;
} mc_streamid_security_cfg_t;
#define OVERRIDE_DISABLE 1
#define OVERRIDE_ENABLE 0
#define CLIENT_FLAG_SECURE 0
#define CLIENT_FLAG_NON_SECURE 1
#define CLIENT_INPUTS_OVERRIDE 1
#define CLIENT_INPUTS_NO_OVERRIDE 0
#define OVERRIDE_DISABLE 1U
#define OVERRIDE_ENABLE 0U
#define CLIENT_FLAG_SECURE 0U
#define CLIENT_FLAG_NON_SECURE 1U
#define CLIENT_INPUTS_OVERRIDE 1U
#define CLIENT_INPUTS_NO_OVERRIDE 0U
#define mc_make_sec_cfg(off, ns, ovrrd, access) \
{ \
@ -257,70 +257,70 @@ typedef struct tegra_mc_settings {
/*******************************************************************************
* Memory Controller SMMU Bypass config register
******************************************************************************/
#define MC_SMMU_BYPASS_CONFIG 0x1820
#define MC_SMMU_BYPASS_CTRL_MASK 0x3
#define MC_SMMU_BYPASS_CTRL_SHIFT 0
#define MC_SMMU_CTRL_TBU_BYPASS_ALL (0 << MC_SMMU_BYPASS_CTRL_SHIFT)
#define MC_SMMU_CTRL_TBU_RSVD (1 << MC_SMMU_BYPASS_CTRL_SHIFT)
#define MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID (2 << MC_SMMU_BYPASS_CTRL_SHIFT)
#define MC_SMMU_CTRL_TBU_BYPASS_NONE (3 << MC_SMMU_BYPASS_CTRL_SHIFT)
#define MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT (1 << 31)
#define MC_SMMU_BYPASS_CONFIG 0x1820U
#define MC_SMMU_BYPASS_CTRL_MASK 0x3U
#define MC_SMMU_BYPASS_CTRL_SHIFT 0U
#define MC_SMMU_CTRL_TBU_BYPASS_ALL (0U << MC_SMMU_BYPASS_CTRL_SHIFT)
#define MC_SMMU_CTRL_TBU_RSVD (1U << MC_SMMU_BYPASS_CTRL_SHIFT)
#define MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID (2U << MC_SMMU_BYPASS_CTRL_SHIFT)
#define MC_SMMU_CTRL_TBU_BYPASS_NONE (3U << MC_SMMU_BYPASS_CTRL_SHIFT)
#define MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT (1U << 31)
#define MC_SMMU_BYPASS_CONFIG_SETTINGS (MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT | \
MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID)
#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_CGID (1 << 0)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV (2 << 4)
#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT (1 << 12)
#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_CGID (1U << 0)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV (2U << 4)
#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT (1U << 12)
/*******************************************************************************
* Non-SO_DEV transactions override values for CGID_TAG bitfield for the
* MC_TXN_OVERRIDE_CONFIG_{module} registers
******************************************************************************/
#define MC_TXN_OVERRIDE_CGID_TAG_DEFAULT 0
#define MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID 1
#define MC_TXN_OVERRIDE_CGID_TAG_ZERO 2
#define MC_TXN_OVERRIDE_CGID_TAG_ADR 3
#define MC_TXN_OVERRIDE_CGID_TAG_MASK 3
#define MC_TXN_OVERRIDE_CGID_TAG_DEFAULT 0U
#define MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID 1U
#define MC_TXN_OVERRIDE_CGID_TAG_ZERO 2U
#define MC_TXN_OVERRIDE_CGID_TAG_ADR 3U
#define MC_TXN_OVERRIDE_CGID_TAG_MASK 3U
/*******************************************************************************
* Memory Controller Reset Control registers
******************************************************************************/
#define MC_CLIENT_HOTRESET_CTRL0 0x200
#define MC_CLIENT_HOTRESET_CTRL0_RESET_VAL 0
#define MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB (1 << 0)
#define MC_CLIENT_HOTRESET_CTRL0_HC_FLUSH_ENB (1 << 6)
#define MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB (1 << 7)
#define MC_CLIENT_HOTRESET_CTRL0_ISP2_FLUSH_ENB (1 << 8)
#define MC_CLIENT_HOTRESET_CTRL0_MPCORE_FLUSH_ENB (1 << 9)
#define MC_CLIENT_HOTRESET_CTRL0_NVENC_FLUSH_ENB (1 << 11)
#define MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB (1 << 15)
#define MC_CLIENT_HOTRESET_CTRL0_VI_FLUSH_ENB (1 << 17)
#define MC_CLIENT_HOTRESET_CTRL0_VIC_FLUSH_ENB (1 << 18)
#define MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB (1 << 19)
#define MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB (1 << 20)
#define MC_CLIENT_HOTRESET_CTRL0_TSEC_FLUSH_ENB (1 << 22)
#define MC_CLIENT_HOTRESET_CTRL0_SDMMC1A_FLUSH_ENB (1 << 29)
#define MC_CLIENT_HOTRESET_CTRL0_SDMMC2A_FLUSH_ENB (1 << 30)
#define MC_CLIENT_HOTRESET_CTRL0_SDMMC3A_FLUSH_ENB (1 << 31)
#define MC_CLIENT_HOTRESET_STATUS0 0x204
#define MC_CLIENT_HOTRESET_CTRL1 0x970
#define MC_CLIENT_HOTRESET_CTRL1_RESET_VAL 0
#define MC_CLIENT_HOTRESET_CTRL1_SDMMC4A_FLUSH_ENB (1 << 0)
#define MC_CLIENT_HOTRESET_CTRL1_GPU_FLUSH_ENB (1 << 2)
#define MC_CLIENT_HOTRESET_CTRL1_NVDEC_FLUSH_ENB (1 << 5)
#define MC_CLIENT_HOTRESET_CTRL1_APE_FLUSH_ENB (1 << 6)
#define MC_CLIENT_HOTRESET_CTRL1_SE_FLUSH_ENB (1 << 7)
#define MC_CLIENT_HOTRESET_CTRL1_NVJPG_FLUSH_ENB (1 << 8)
#define MC_CLIENT_HOTRESET_CTRL1_ETR_FLUSH_ENB (1 << 12)
#define MC_CLIENT_HOTRESET_CTRL1_TSECB_FLUSH_ENB (1 << 13)
#define MC_CLIENT_HOTRESET_CTRL1_AXIS_FLUSH_ENB (1 << 18)
#define MC_CLIENT_HOTRESET_CTRL1_EQOS_FLUSH_ENB (1 << 19)
#define MC_CLIENT_HOTRESET_CTRL1_UFSHC_FLUSH_ENB (1 << 20)
#define MC_CLIENT_HOTRESET_CTRL1_NVDISPLAY_FLUSH_ENB (1 << 21)
#define MC_CLIENT_HOTRESET_CTRL1_BPMP_FLUSH_ENB (1 << 22)
#define MC_CLIENT_HOTRESET_CTRL1_AON_FLUSH_ENB (1 << 23)
#define MC_CLIENT_HOTRESET_CTRL1_SCE_FLUSH_ENB (1 << 24)
#define MC_CLIENT_HOTRESET_STATUS1 0x974
#define MC_CLIENT_HOTRESET_CTRL0 0x200U
#define MC_CLIENT_HOTRESET_CTRL0_RESET_VAL 0U
#define MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB (1U << 0)
#define MC_CLIENT_HOTRESET_CTRL0_HC_FLUSH_ENB (1U << 6)
#define MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB (1U << 7)
#define MC_CLIENT_HOTRESET_CTRL0_ISP2_FLUSH_ENB (1U << 8)
#define MC_CLIENT_HOTRESET_CTRL0_MPCORE_FLUSH_ENB (1U << 9)
#define MC_CLIENT_HOTRESET_CTRL0_NVENC_FLUSH_ENB (1U << 11)
#define MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB (1U << 15)
#define MC_CLIENT_HOTRESET_CTRL0_VI_FLUSH_ENB (1U << 17)
#define MC_CLIENT_HOTRESET_CTRL0_VIC_FLUSH_ENB (1U << 18)
#define MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB (1U << 19)
#define MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB (1U << 20)
#define MC_CLIENT_HOTRESET_CTRL0_TSEC_FLUSH_ENB (1U << 22)
#define MC_CLIENT_HOTRESET_CTRL0_SDMMC1A_FLUSH_ENB (1U << 29)
#define MC_CLIENT_HOTRESET_CTRL0_SDMMC2A_FLUSH_ENB (1U << 30)
#define MC_CLIENT_HOTRESET_CTRL0_SDMMC3A_FLUSH_ENB (1U << 31)
#define MC_CLIENT_HOTRESET_STATUS0 0x204U
#define MC_CLIENT_HOTRESET_CTRL1 0x970U
#define MC_CLIENT_HOTRESET_CTRL1_RESET_VAL 0U
#define MC_CLIENT_HOTRESET_CTRL1_SDMMC4A_FLUSH_ENB (1U << 0)
#define MC_CLIENT_HOTRESET_CTRL1_GPU_FLUSH_ENB (1U << 2)
#define MC_CLIENT_HOTRESET_CTRL1_NVDEC_FLUSH_ENB (1U << 5)
#define MC_CLIENT_HOTRESET_CTRL1_APE_FLUSH_ENB (1U << 6)
#define MC_CLIENT_HOTRESET_CTRL1_SE_FLUSH_ENB (1U << 7)
#define MC_CLIENT_HOTRESET_CTRL1_NVJPG_FLUSH_ENB (1U << 8)
#define MC_CLIENT_HOTRESET_CTRL1_ETR_FLUSH_ENB (1U << 12)
#define MC_CLIENT_HOTRESET_CTRL1_TSECB_FLUSH_ENB (1U << 13)
#define MC_CLIENT_HOTRESET_CTRL1_AXIS_FLUSH_ENB (1U << 18)
#define MC_CLIENT_HOTRESET_CTRL1_EQOS_FLUSH_ENB (1U << 19)
#define MC_CLIENT_HOTRESET_CTRL1_UFSHC_FLUSH_ENB (1U << 20)
#define MC_CLIENT_HOTRESET_CTRL1_NVDISPLAY_FLUSH_ENB (1U << 21)
#define MC_CLIENT_HOTRESET_CTRL1_BPMP_FLUSH_ENB (1U << 22)
#define MC_CLIENT_HOTRESET_CTRL1_AON_FLUSH_ENB (1U << 23)
#define MC_CLIENT_HOTRESET_CTRL1_SCE_FLUSH_ENB (1U << 24)
#define MC_CLIENT_HOTRESET_STATUS1 0x974U
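These CTRL/STATUS pairs suggest the usual hot-reset flush handshake: set a client's FLUSH_ENB bit in the control register, then poll the matching status register. A hedged sketch for the AFI client, assuming the tegra_mc_read_32()/tegra_mc_write_32() accessors defined later in this header; the polling is simplified and carries no timeout:

static void mc_flush_afi_example(void)
{
	uint32_t val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0);

	/* request the flush */
	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0,
			  val | MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB);

	/* wait until the flush is reported as done */
	while ((tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0) &
		MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB) == 0U) {
		;
	}
}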
/*******************************************************************************
* Memory Controller's PCFIFO client configuration registers
@ -396,7 +396,7 @@ static inline void tegra_mc_streamid_write_32(uint32_t off, uint32_t val)
}
#define mc_set_pcfifo_unordered_boot_so_mss(id, client) \
(~MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_MASK | \
((uint32_t)~MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_MASK | \
MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_UNORDERED)
#define mc_set_pcfifo_ordered_boot_so_mss(id, client) \
@ -406,8 +406,8 @@ static inline void tegra_mc_streamid_write_32(uint32_t off, uint32_t val)
{ \
mmio_write_32(TEGRA_TSA_BASE + TSA_CONFIG_STATIC0_CSW_##client, \
(TSA_CONFIG_STATIC0_CSW_##client##_RESET & \
~TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK) | \
TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU); \
(uint32_t)~TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK) | \
(uint32_t)TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU); \
}
#define mc_set_txn_override(client, normal_axi_id, so_dev_axi_id, normal_override, so_dev_override) \

View File

@ -38,8 +38,16 @@ typedef struct tegra_se_dev {
tegra_se_io_lst_t *src_ll_buf;
/* pointer to destination linked list buffer */
tegra_se_io_lst_t *dst_ll_buf;
/* LP context buffer pointer */
uint32_t *ctx_save_buf;
} tegra_se_dev_t;
/* PKA1 device structure */
typedef struct tegra_pka_dev {
/* PKA1 base address */
uint64_t pka_base;
} tegra_pka_dev_t;
/*******************************************************************************
* Public interface
******************************************************************************/

View File

@ -586,12 +586,12 @@
/*******************************************************************************
* SMMU Global Aux. Control Register
******************************************************************************/
#define SMMU_CBn_ACTLR_CPRE_BIT (1U << 1)
#define SMMU_CBn_ACTLR_CPRE_BIT (1ULL << 1U)
/*******************************************************************************
* SMMU configuration constants
******************************************************************************/
#define ID1_PAGESIZE (1U << 31)
#define ID1_PAGESIZE (1U << 31U)
#define ID1_NUMPAGENDXB_SHIFT 28U
#define ID1_NUMPAGENDXB_MASK 7U
#define ID1_NUMS2CB_SHIFT 16U
@ -705,5 +705,6 @@ typedef struct smmu_regs {
void tegra_smmu_init(void);
void tegra_smmu_save_context(uint64_t smmu_ctx_addr);
smmu_regs_t *plat_get_smmu_ctx(void);
uint32_t plat_get_num_smmu_devices(void);
#endif /* SMMU_H */
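plat_get_num_smmu_devices() is the new hook here; presumably the common SMMU code uses it to know how many instances to walk when saving or restoring context. A minimal sketch for a single-SMMU platform, with the count as an assumed constant:

/* assumed, platform-specific value -- illustration only */
#define NUM_SMMU_DEVICES	1U

uint32_t plat_get_num_smmu_devices(void)
{
	return NUM_SMMU_DEVICES;
}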

View File

@ -0,0 +1,19 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __PROFILER_H__
#define __PROFILER_H__
/*******************************************************************************
* Number of bytes of memory used by the profiler on Tegra
******************************************************************************/
#define PROFILER_SIZE_BYTES U(0x1000)
void boot_profiler_init(uint64_t shmem_base, uint32_t tmr_base);
void boot_profiler_add_record(const char *str);
void boot_profiler_deinit(void);
#endif /* __PROFILER_H__ */
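Together the three calls bracket the boot path: initialise with the shared-memory base handed over by BL2 plus a timer base for timestamps, drop records at interesting points, then tear down before leaving BL31. A usage sketch with caller-supplied values:

static void profiler_example(uint64_t shmem_base, uint32_t tmr_base)
{
	boot_profiler_init(shmem_base, tmr_base);
	boot_profiler_add_record("bl31 setup start");

	/* ... platform setup work ... */

	boot_profiler_add_record("bl31 setup done");
	boot_profiler_deinit();
}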

View File

@ -83,6 +83,9 @@
******************************************************************************/
#define TEGRA_MC_BASE U(0x70019000)
/* Memory Controller Interrupt Status */
#define MC_INTSTATUS 0x00U
/* TZDRAM carveout configuration registers */
#define MC_SECURITY_CFG0_0 U(0x70)
#define MC_SECURITY_CFG1_0 U(0x74)

View File

@ -112,8 +112,13 @@
#define TSA_CONFIG_STATIC0_CSW_XUSB_HOSTW U(0x15018)
#define TSA_CONFIG_STATIC0_CSW_XUSB_HOSTW_RESET U(0x1100)
#define TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK (U(0x3) << 11)
#define TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU (U(0) << 11)
#define TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK (ULL(0x3) << 11)
#define TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU (ULL(0) << 11)
/*******************************************************************************
* Tegra General Purpose Centralised DMA constants
******************************************************************************/
#define TEGRA_GPCDMA_BASE ULL(0x2610000)
/*******************************************************************************
* Tegra Memory Controller constants
@ -124,7 +129,7 @@
/* General Security Carveout register macros */
#define MC_GSC_CONFIG_REGS_SIZE U(0x40)
#define MC_GSC_LOCK_CFG_SETTINGS_BIT (U(1) << 1)
#define MC_GSC_ENABLE_TZ_LOCK_BIT (U(1) << 0)
#define MC_GSC_ENABLE_TZ_LOCK_BIT (ULL(1) << 0)
#define MC_GSC_SIZE_RANGE_4KB_SHIFT U(27)
#define MC_GSC_BASE_LO_SHIFT U(12)
#define MC_GSC_BASE_LO_MASK U(0xFFFFF)
@ -136,6 +141,10 @@
#define MC_SECURITY_CFG1_0 U(0x74)
#define MC_SECURITY_CFG3_0 U(0x9BC)
#define MC_SECURITY_BOM_MASK (U(0xFFF) << 20)
#define MC_SECURITY_SIZE_MB_MASK (U(0x1FFF) << 0)
#define MC_SECURITY_BOM_HI_MASK (U(0x3) << 0)
/* Video Memory carveout configuration registers */
#define MC_VIDEO_PROTECT_BASE_HI U(0x978)
#define MC_VIDEO_PROTECT_BASE_LO U(0x648)
@ -198,6 +207,8 @@
#define TEGRA_CAR_RESET_BASE U(0x05000000)
#define TEGRA_GPU_RESET_REG_OFFSET U(0x30)
#define GPU_RESET_BIT (U(1) << 0)
#define TEGRA_GPCDMA_RST_SET_REG_OFFSET U(0x6A0004)
#define TEGRA_GPCDMA_RST_CLR_REG_OFFSET U(0x6A0008)
/*******************************************************************************
* Tegra micro-seconds timer constants

View File

@ -89,6 +89,16 @@
#define TEGRA_RST_DEV_CLR_V U(0x434)
#define TEGRA_CLK_ENB_V U(0x440)
/* SE Clock Offsets */
#define TEGRA_RST_DEVICES_V 0x358UL
#define SE_RESET_BIT (0x1UL << 31)
#define TEGRA_RST_DEVICES_W 0x35CUL
#define ENTROPY_CLK_ENB_BIT (0x1UL << 21)
#define TEGRA_CLK_OUT_ENB_V 0x360UL
#define SE_CLK_ENB_BIT (0x1UL << 31)
#define TEGRA_CLK_OUT_ENB_W 0x364UL
#define ENTROPY_RESET_BIT (0x1UL << 21)
/*******************************************************************************
* Tegra Flow Controller constants
******************************************************************************/
@ -124,6 +134,16 @@
#define TEGRA_UARTD_BASE U(0x70006300)
#define TEGRA_UARTE_BASE U(0x70006400)
/*******************************************************************************
* Tegra Fuse Controller related constants
******************************************************************************/
#define TEGRA_FUSE_BASE 0x7000F800UL
#define FUSE_BOOT_SECURITY_INFO 0x268UL
#define FUSE_ATOMIC_SAVE_CARVEOUT_EN (0x1U << 7)
#define FUSE_JTAG_SECUREID_VALID (0x104UL)
#define ECID_VALID (0x1UL)
/*******************************************************************************
* Tegra Power Mgmt Controller constants
******************************************************************************/
@ -143,6 +163,9 @@
******************************************************************************/
#define TEGRA_MC_BASE U(0x70019000)
/* Memory Controller Interrupt Status */
#define MC_INTSTATUS 0x00U
/* TZDRAM carveout configuration registers */
#define MC_SECURITY_CFG0_0 U(0x70)
#define MC_SECURITY_CFG1_0 U(0x74)
@ -153,6 +176,10 @@
#define MC_VIDEO_PROTECT_BASE_LO U(0x648)
#define MC_VIDEO_PROTECT_SIZE_MB U(0x64c)
/* SMMU configuration registers*/
#define MC_SMMU_PPCS_ASID_0 0x270U
#define PPCS_SMMU_ENABLE (0x1U << 31)
/*******************************************************************************
* Tegra SE constants
******************************************************************************/
@ -168,4 +195,10 @@
#define TEGRA_TZRAM_BASE U(0x7C010000)
#define TEGRA_TZRAM_SIZE U(0x10000)
/*******************************************************************************
* Tegra TZRAM carveout constants
******************************************************************************/
#define TEGRA_TZRAM_CARVEOUT_BASE U(0x7C04C000)
#define TEGRA_TZRAM_CARVEOUT_SIZE U(0x4000)
#endif /* TEGRA_DEF_H */
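The fuse constants above are plain offsets from TEGRA_FUSE_BASE, so a read follows the usual mmio pattern. A small sketch; the helper name is invented for illustration:

#include <mmio.h>
#include <stdbool.h>

/* illustrative helper: check whether the SE atomic context-save carveout
 * fuse is set, using the constants defined above */
static bool tegra_atomic_save_carveout_enabled(void)
{
	uint32_t val = mmio_read_32(TEGRA_FUSE_BASE + FUSE_BOOT_SECURITY_INFO);

	return ((val & FUSE_ATOMIC_SAVE_CARVEOUT_EN) != 0U);
}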

View File

@ -46,7 +46,6 @@ bool tegra_chipid_is_t186(void);
bool tegra_chipid_is_t210(void);
bool tegra_chipid_is_t210_b01(void);
/*
* Tegra platform identifiers
*/

View File

@ -22,6 +22,16 @@
#define TEGRA_DRAM_BASE ULL(0x80000000)
#define TEGRA_DRAM_END ULL(0x27FFFFFFF)
/*******************************************************************************
* Implementation defined ACTLR_EL1 bit definitions
******************************************************************************/
#define ACTLR_EL1_PMSTATE_MASK (ULL(0xF) << 0)
/*******************************************************************************
* Implementation defined ACTLR_EL2 bit definitions
******************************************************************************/
#define ACTLR_EL2_PMSTATE_MASK (ULL(0xF) << 0)
/*******************************************************************************
* Struct for parameters received from BL2
******************************************************************************/
@ -31,9 +41,11 @@ typedef struct plat_params_from_bl2 {
/* TZ memory base */
uint64_t tzdram_base;
/* UART port ID */
int uart_id;
int32_t uart_id;
/* L2 ECC parity protection disable flag */
int l2_ecc_parity_prot_dis;
int32_t l2_ecc_parity_prot_dis;
/* SHMEM base address for storing the boot logs */
uint64_t boot_profiler_shmem_base;
} plat_params_from_bl2_t;
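The new boot_profiler_shmem_base field is how BL2 tells BL31 where the profiler log lives. A plausible consumer is sketched below; it assumes TEGRA_TMRUS_BASE is the timestamp source, and the real wiring sits in the platform setup code:

static void setup_boot_profiler(void)
{
	const plat_params_from_bl2_t *params = bl31_get_plat_params();

	if (params->boot_profiler_shmem_base != 0ULL) {
		boot_profiler_init(params->boot_profiler_shmem_base,
				   TEGRA_TMRUS_BASE);
		boot_profiler_add_record("profiler init");
	}
}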
/*******************************************************************************
@ -82,7 +94,30 @@ extern uint8_t tegra_fake_system_suspend;
void tegra_pm_system_suspend_entry(void);
void tegra_pm_system_suspend_exit(void);
int tegra_system_suspended(void);
int32_t tegra_system_suspended(void);
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state);
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr);
int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state);
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state);
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state);
int32_t tegra_soc_prepare_system_reset(void);
__dead2 void tegra_soc_prepare_system_off(void);
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
const plat_local_state_t *states,
uint32_t ncpu);
void tegra_get_sys_suspend_power_state(psci_power_state_t *req_state);
void tegra_cpu_standby(plat_local_state_t cpu_state);
int32_t tegra_pwr_domain_on(u_register_t mpidr);
void tegra_pwr_domain_off(const psci_power_state_t *target_state);
void tegra_pwr_domain_suspend(const psci_power_state_t *target_state);
void __dead2 tegra_pwr_domain_power_down_wfi(const psci_power_state_t *target_state);
void tegra_pwr_domain_on_finish(const psci_power_state_t *target_state);
void tegra_pwr_domain_suspend_finish(const psci_power_state_t *target_state);
__dead2 void tegra_system_off(void);
__dead2 void tegra_system_reset(void);
int32_t tegra_validate_power_state(uint32_t power_state,
psci_power_state_t *req_state);
int32_t tegra_validate_ns_entrypoint(uintptr_t entrypoint);
/* Declarations for tegraXXX_pm.c */
int tegra_prepare_cpu_suspend(unsigned int id, unsigned int afflvl);
@ -90,7 +125,7 @@ int tegra_prepare_cpu_on_finish(unsigned long mpidr);
/* Declarations for tegra_bl31_setup.c */
plat_params_from_bl2_t *bl31_get_plat_params(void);
int bl31_check_ns_address(uint64_t base, uint64_t size_in_bytes);
int32_t bl31_check_ns_address(uint64_t base, uint64_t size_in_bytes);
void plat_early_platform_setup(void);
/* Declarations for tegra_delay_timer.c */

View File

@ -12,6 +12,8 @@ $(eval $(call add_define,CRASH_REPORTING))
# enable assert() for release/debug builds
ENABLE_ASSERTIONS := 1
PLAT_LOG_LEVEL_ASSERT := 40
$(eval $(call add_define,PLAT_LOG_LEVEL_ASSERT))
# enable dynamic memory mapping
PLAT_XLAT_TABLES_DYNAMIC := 1
@ -29,6 +31,9 @@ USE_COHERENT_MEM := 0
# do not enable SVE
ENABLE_SVE_FOR_NS := 0
# enable D-cache early during CPU warmboot
WARMBOOT_ENABLE_DCACHE_EARLY := 1
include plat/nvidia/tegra/common/tegra_common.mk
include ${SOC_DIR}/platform_${TARGET_SOC}.mk

View File

@ -98,19 +98,24 @@ int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
uint64_t val;
tegra_fc_cpu_off(read_mpidr() & MPIDR_CPU_MASK);
/* Disable DCO operations */
denver_disable_dco();
/* Power down the CPU */
write_actlr_el1(DENVER_CPU_STATE_POWER_DOWN);
val = read_actlr_el1() & ~ACTLR_EL1_PMSTATE_MASK;
write_actlr_el1(val | DENVER_CPU_STATE_POWER_DOWN);
return PSCI_E_SUCCESS;
}
int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
uint64_t val;
#if ENABLE_ASSERTIONS
int cpu = read_mpidr() & MPIDR_CPU_MASK;
@ -128,7 +133,8 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
denver_disable_dco();
/* Program the suspend state ID */
write_actlr_el1(target_state->pwr_domain_state[PLAT_MAX_PWR_LVL]);
val = read_actlr_el1() & ~ACTLR_EL1_PMSTATE_MASK;
write_actlr_el1(val | target_state->pwr_domain_state[PLAT_MAX_PWR_LVL]);
return PSCI_E_SUCCESS;
}

View File

@ -151,7 +151,7 @@ int32_t ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
/* Enter the cstate, to be woken up after wake_time (TSC ticks) */
ret = ari_request_wait(ari_base, ARI_EVT_MASK_STANDBYWFI_BIT,
TEGRA_ARI_ENTER_CSTATE, state, wake_time);
(uint32_t)TEGRA_ARI_ENTER_CSTATE, state, wake_time);
}
return ret;
@ -191,7 +191,7 @@ int32_t ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccp
}
/* set the updated cstate info */
return ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CSTATE_INFO,
return ari_request_wait(ari_base, 0U, (uint32_t)TEGRA_ARI_UPDATE_CSTATE_INFO,
(uint32_t)val, wake_mask);
}
@ -208,8 +208,8 @@ int32_t ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t tim
ari_clobber_response(ari_base);
/* update crossover threshold time */
ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CROSSOVER,
type, time);
ret = ari_request_wait(ari_base, 0U,
(uint32_t)TEGRA_ARI_UPDATE_CROSSOVER, type, time);
}
return ret;
@ -227,7 +227,8 @@ uint64_t ari_read_cstate_stats(uint32_t ari_base, uint32_t state)
/* clean the previous response state */
ari_clobber_response(ari_base);
ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_CSTATE_STATS, state, 0U);
ret = ari_request_wait(ari_base, 0U,
(uint32_t)TEGRA_ARI_CSTATE_STATS, state, 0U);
if (ret != 0) {
result = EINVAL;
} else {
@ -243,8 +244,8 @@ int32_t ari_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats
ari_clobber_response(ari_base);
/* write the cstate stats */
return ari_request_wait(ari_base, 0U, TEGRA_ARI_WRITE_CSTATE_STATS, state,
stats);
return ari_request_wait(ari_base, 0U, (uint32_t)TEGRA_ARI_WRITE_CSTATE_STATS,
state, stats);
}
uint64_t ari_enumeration_misc(uint32_t ari_base, uint32_t cmd, uint32_t data)
@ -261,7 +262,7 @@ uint64_t ari_enumeration_misc(uint32_t ari_base, uint32_t cmd, uint32_t data)
local_data = 0U;
}
ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC, cmd, local_data);
ret = ari_request_wait(ari_base, 0U, (uint32_t)TEGRA_ARI_MISC, cmd, local_data);
if (ret != 0) {
resp = (uint64_t)ret;
} else {
@ -281,8 +282,8 @@ int32_t ari_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time
/* clean the previous response state */
ari_clobber_response(ari_base);
ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_IS_CCX_ALLOWED, state & 0x7U,
wake_time);
ret = ari_request_wait(ari_base, 0U, (uint32_t)TEGRA_ARI_IS_CCX_ALLOWED,
state & 0x7U, wake_time);
if (ret != 0) {
ERROR("%s: failed (%d)\n", __func__, ret);
result = 0U;
@ -307,8 +308,8 @@ int32_t ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time
/* clean the previous response state */
ari_clobber_response(ari_base);
ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_IS_SC7_ALLOWED, state,
wake_time);
ret = ari_request_wait(ari_base, 0U,
(uint32_t)TEGRA_ARI_IS_SC7_ALLOWED, state, wake_time);
if (ret != 0) {
ERROR("%s: failed (%d)\n", __func__, ret);
result = 0;
@ -346,7 +347,8 @@ int32_t ari_online_core(uint32_t ari_base, uint32_t core)
} else {
/* clean the previous response state */
ari_clobber_response(ari_base);
ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_ONLINE_CORE, core, 0U);
ret = ari_request_wait(ari_base, 0U,
(uint32_t)TEGRA_ARI_ONLINE_CORE, core, 0U);
}
}
@ -374,7 +376,8 @@ int32_t ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t en
((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |\
((enable != 0U) ? MCE_AUTO_CC3_ENABLE_BIT : 0U));
return ari_request_wait(ari_base, 0U, TEGRA_ARI_CC3_CTRL, val, 0U);
return ari_request_wait(ari_base, 0U,
(uint32_t)TEGRA_ARI_CC3_CTRL, val, 0U);
}
int32_t ari_reset_vector_update(uint32_t ari_base)
@ -386,7 +389,8 @@ int32_t ari_reset_vector_update(uint32_t ari_base)
* Need to program the CPU reset vector one time during cold boot
* and SC7 exit
*/
(void)ari_request_wait(ari_base, 0U, TEGRA_ARI_COPY_MISCREG_AA64_RST, 0U, 0U);
(void)ari_request_wait(ari_base, 0U,
(uint32_t)TEGRA_ARI_COPY_MISCREG_AA64_RST, 0U, 0U);
return 0;
}
@ -396,8 +400,8 @@ int32_t ari_roc_flush_cache_trbits(uint32_t ari_base)
/* clean the previous response state */
ari_clobber_response(ari_base);
return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS,
0U, 0U);
return ari_request_wait(ari_base, 0U,
(uint32_t)TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS, 0U, 0U);
}
int32_t ari_roc_flush_cache(uint32_t ari_base)
@ -405,8 +409,8 @@ int32_t ari_roc_flush_cache(uint32_t ari_base)
/* clean the previous response state */
ari_clobber_response(ari_base);
return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_FLUSH_CACHE_ONLY,
0U, 0U);
return ari_request_wait(ari_base, 0U,
(uint32_t)TEGRA_ARI_ROC_FLUSH_CACHE_ONLY, 0U, 0U);
}
int32_t ari_roc_clean_cache(uint32_t ari_base)
@ -414,8 +418,8 @@ int32_t ari_roc_clean_cache(uint32_t ari_base)
/* clean the previous response state */
ari_clobber_response(ari_base);
return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_CLEAN_CACHE_ONLY,
0U, 0U);
return ari_request_wait(ari_base, 0U,
(uint32_t)TEGRA_ARI_ROC_CLEAN_CACHE_ONLY, 0U, 0U);
}
uint64_t ari_read_write_mca(uint32_t ari_base, uint64_t cmd, uint64_t *data)
@ -432,7 +436,7 @@ uint64_t ari_read_write_mca(uint32_t ari_base, uint64_t cmd, uint64_t *data)
ari_write_32(ari_base, (uint32_t)cmd, ARI_RESPONSE_DATA_LO);
ari_write_32(ari_base, (uint32_t)(cmd >> 32U), ARI_RESPONSE_DATA_HI);
ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_MCA,
ret = ari_request_wait(ari_base, 0U, (uint32_t)TEGRA_ARI_MCA,
(uint32_t)mca_arg_data,
(uint32_t)(mca_arg_data >> 32U));
if (ret == 0) {
@ -473,7 +477,8 @@ int32_t ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx)
* the ID, from the MC registers and update the internal GSC registers
* of the CCPLEX.
*/
(void)ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CCPLEX_GSC, gsc_idx, 0U);
(void)ari_request_wait(ari_base, 0U,
(uint32_t)TEGRA_ARI_UPDATE_CCPLEX_GSC, gsc_idx, 0U);
}
return ret;
@ -487,7 +492,8 @@ void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx)
/*
* The MCE will shutdown or restart the entire system
*/
(void)ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC_CCPLEX, state_idx, 0U);
(void)ari_request_wait(ari_base, 0U,
(uint32_t)TEGRA_ARI_MISC_CCPLEX, state_idx, 0U);
}
int32_t ari_read_write_uncore_perfmon(uint32_t ari_base, uint64_t req,
@ -514,8 +520,8 @@ int32_t ari_read_write_uncore_perfmon(uint32_t ari_base, uint64_t req,
val = (req_cmd == UNCORE_PERFMON_CMD_WRITE) ?
(uint32_t)*data : 0U;
ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_PERFMON, val,
(uint32_t)req);
ret = ari_request_wait(ari_base, 0U,
(uint32_t)TEGRA_ARI_PERFMON, val, (uint32_t)req);
if (ret != 0) {
result = ret;
} else {
@ -552,6 +558,7 @@ void ari_misc_ccplex(uint32_t ari_base, uint32_t index, uint32_t value)
} else {
/* clean the previous response state */
ari_clobber_response(ari_base);
(void)ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC_CCPLEX, index, value);
(void)ari_request_wait(ari_base, 0U,
(uint32_t)TEGRA_ARI_MISC_CCPLEX, index, value);
}
}

View File

@ -170,12 +170,12 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
cpu_ari_base = mce_get_curr_cpu_ari_base();
switch (cmd) {
case MCE_CMD_ENTER_CSTATE:
case (uint64_t)MCE_CMD_ENTER_CSTATE:
ret = ops->enter_cstate(cpu_ari_base, arg0, arg1);
break;
case MCE_CMD_UPDATE_CSTATE_INFO:
case (uint64_t)MCE_CMD_UPDATE_CSTATE_INFO:
/*
* get the parameters required for the update cstate info
* command
@ -194,12 +194,12 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
break;
case MCE_CMD_UPDATE_CROSSOVER_TIME:
case (uint64_t)MCE_CMD_UPDATE_CROSSOVER_TIME:
ret = ops->update_crossover_time(cpu_ari_base, arg0, arg1);
break;
case MCE_CMD_READ_CSTATE_STATS:
case (uint64_t)MCE_CMD_READ_CSTATE_STATS:
ret64 = ops->read_cstate_stats(cpu_ari_base, arg0);
/* update context to return cstate stats value */
@ -208,12 +208,12 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
break;
case MCE_CMD_WRITE_CSTATE_STATS:
case (uint64_t)MCE_CMD_WRITE_CSTATE_STATS:
ret = ops->write_cstate_stats(cpu_ari_base, arg0, arg1);
break;
case MCE_CMD_IS_CCX_ALLOWED:
case (uint64_t)MCE_CMD_IS_CCX_ALLOWED:
ret = ops->is_ccx_allowed(cpu_ari_base, arg0, arg1);
/* update context to return CCx status value */
@ -221,7 +221,7 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
break;
case MCE_CMD_IS_SC7_ALLOWED:
case (uint64_t)MCE_CMD_IS_SC7_ALLOWED:
ret = ops->is_sc7_allowed(cpu_ari_base, arg0, arg1);
/* update context to return SC7 status value */
@ -230,17 +230,17 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
break;
case MCE_CMD_ONLINE_CORE:
case (uint64_t)MCE_CMD_ONLINE_CORE:
ret = ops->online_core(cpu_ari_base, arg0);
break;
case MCE_CMD_CC3_CTRL:
case (uint64_t)MCE_CMD_CC3_CTRL:
ret = ops->cc3_ctrl(cpu_ari_base, arg0, arg1, arg2);
break;
case MCE_CMD_ECHO_DATA:
case (uint64_t)MCE_CMD_ECHO_DATA:
ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_ECHO,
arg0);
@ -252,7 +252,7 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
break;
case MCE_CMD_READ_VERSIONS:
case (uint64_t)MCE_CMD_READ_VERSIONS:
ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION,
arg0);
@ -265,7 +265,7 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
break;
case MCE_CMD_ENUM_FEATURES:
case (uint64_t)MCE_CMD_ENUM_FEATURES:
ret64 = ops->call_enum_misc(cpu_ari_base,
TEGRA_ARI_MISC_FEATURE_LEAF_0, arg0);
@ -274,22 +274,22 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
break;
case MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
case (uint64_t)MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
ret = ops->roc_flush_cache_trbits(cpu_ari_base);
break;
case MCE_CMD_ROC_FLUSH_CACHE:
case (uint64_t)MCE_CMD_ROC_FLUSH_CACHE:
ret = ops->roc_flush_cache(cpu_ari_base);
break;
case MCE_CMD_ROC_CLEAN_CACHE:
case (uint64_t)MCE_CMD_ROC_CLEAN_CACHE:
ret = ops->roc_clean_cache(cpu_ari_base);
break;
case MCE_CMD_ENUM_READ_MCA:
case (uint64_t)MCE_CMD_ENUM_READ_MCA:
ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);
/* update context to return MCA data/error */
@ -299,7 +299,7 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
break;
case MCE_CMD_ENUM_WRITE_MCA:
case (uint64_t)MCE_CMD_ENUM_WRITE_MCA:
ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);
/* update context to return MCA error */
@ -309,7 +309,7 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
break;
#if ENABLE_CHIP_VERIFICATION_HARNESS
case MCE_CMD_ENABLE_LATIC:
case (uint64_t)MCE_CMD_ENABLE_LATIC:
/*
* This call is not for production use. The constant value,
* 0xFFFF0000, is specific to allowing for enabling LATIC on
@ -327,14 +327,14 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
break;
#endif
case MCE_CMD_UNCORE_PERFMON_REQ:
case (uint64_t)MCE_CMD_UNCORE_PERFMON_REQ:
ret = ops->read_write_uncore_perfmon(cpu_ari_base, arg0, &arg1);
/* update context to return data */
write_ctx_reg(gp_regs, CTX_GPREG_X1, (arg1));
break;
case MCE_CMD_MISC_CCPLEX:
case (uint64_t)MCE_CMD_MISC_CCPLEX:
ops->misc_ccplex(cpu_ari_base, arg0, arg1);
break;
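The casts added to each case label keep the command constants in step with the uint64_t switch operand. A hedged example of driving the handler directly, using the four-argument signature shown at the top of this hunk (the echo pattern is arbitrary):

int32_t ret = mce_command_handler((uint64_t)MCE_CMD_ECHO_DATA,
				  0xAAAA5555ULL, 0ULL, 0ULL);
if (ret != 0) {
	ERROR("MCE echo-data command failed (%d)\n", ret);
}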

View File

@ -14,10 +14,12 @@
#include <mce_private.h>
#include <t18x_ari.h>
#include <tegra_private.h>
int32_t nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
{
int32_t ret = 0;
uint64_t val = 0ULL;
(void)ari_base;
@ -28,10 +30,11 @@ int32_t nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
ret = EINVAL;
} else {
/* time (TSC ticks) until the core is expected to get a wake event */
nvg_set_request_data(TEGRA_NVG_CHANNEL_WAKE_TIME, wake_time);
nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_WAKE_TIME, wake_time);
/* set the core cstate */
write_actlr_el1(state);
val = read_actlr_el1() & ~ACTLR_EL1_PMSTATE_MASK;
write_actlr_el1(val | (uint64_t)state);
}
return ret;
@ -78,7 +81,7 @@ int32_t nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccp
val |= ((uint64_t)wake_mask << CSTATE_WAKE_MASK_SHIFT);
/* set the updated cstate info */
nvg_set_request_data(TEGRA_NVG_CHANNEL_CSTATE_INFO, val);
nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_CSTATE_INFO, val);
return 0;
}
@ -189,7 +192,7 @@ int32_t nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time
((uint64_t)state & MCE_SC7_ALLOWED_MASK);
/* issue command to check if SC7 is allowed */
nvg_set_request_data(TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED, val);
nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED, val);
/* 1 = SC7 allowed, 0 = SC7 not allowed */
ret = (nvg_get_result() != 0ULL) ? 1 : 0;
@ -219,7 +222,7 @@ int32_t nvg_online_core(uint32_t ari_base, uint32_t core)
ret = EINVAL;
} else {
/* get a core online */
nvg_set_request_data(TEGRA_NVG_CHANNEL_ONLINE_CORE,
nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_ONLINE_CORE,
((uint64_t)core & MCE_CORE_ID_MASK));
}
}
@ -247,7 +250,7 @@ int32_t nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t en
((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |\
((enable != 0U) ? MCE_AUTO_CC3_ENABLE_BIT : 0U));
nvg_set_request_data(TEGRA_NVG_CHANNEL_CC3_CTRL, (uint64_t)val);
nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_CC3_CTRL, (uint64_t)val);
return 0;
}

View File

@ -206,11 +206,11 @@ const static mc_txn_override_cfg_t tegra186_txn_override_cfgs[] = {
******************************************************************************/
static tegra_mc_settings_t tegra186_mc_settings = {
.streamid_override_cfg = tegra186_streamid_override_regs,
.num_streamid_override_cfgs = ARRAY_SIZE(tegra186_streamid_override_regs),
.num_streamid_override_cfgs = (uint32_t)ARRAY_SIZE(tegra186_streamid_override_regs),
.streamid_security_cfg = tegra186_streamid_sec_cfgs,
.num_streamid_security_cfgs = ARRAY_SIZE(tegra186_streamid_sec_cfgs),
.num_streamid_security_cfgs = (uint32_t)ARRAY_SIZE(tegra186_streamid_sec_cfgs),
.txn_override_cfg = tegra186_txn_override_cfgs,
.num_txn_override_cfgs = ARRAY_SIZE(tegra186_txn_override_cfgs)
.num_txn_override_cfgs = (uint32_t)ARRAY_SIZE(tegra186_txn_override_cfgs)
};
/*******************************************************************************

View File

@ -20,6 +20,7 @@
#include <mce.h>
#include <smmu.h>
#include <stdbool.h>
#include <t18x_ari.h>
#include <tegra_private.h>
@ -27,12 +28,9 @@ extern void memcpy16(void *dest, const void *src, unsigned int length);
extern void prepare_cpu_pwr_dwn(void);
extern void tegra186_cpu_reset_handler(void);
extern uint32_t __tegra186_cpu_reset_handler_end,
extern uint64_t __tegra186_cpu_reset_handler_end,
__tegra186_smmu_context;
/* TZDRAM offset for saving SMMU context */
#define TEGRA186_SMMU_CTX_OFFSET 16UL
/* state id mask */
#define TEGRA186_STATE_ID_MASK 0xFU
/* constants to get power state's wake time */
@ -111,7 +109,7 @@ int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
/* Enter CPU idle/powerdown */
val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
TEGRA_ARI_CORE_C6 : TEGRA_ARI_CORE_C7;
(uint32_t)TEGRA_ARI_CORE_C6 : (uint32_t)TEGRA_ARI_CORE_C7;
(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
tegra_percpu_data[cpu].wake_time, 0U);
@ -132,12 +130,12 @@ int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
/* save SMMU context to TZDRAM */
smmu_ctx_base = params_from_bl2->tzdram_base +
((uintptr_t)&__tegra186_smmu_context -
(uintptr_t)tegra186_cpu_reset_handler);
(uintptr_t)&tegra186_cpu_reset_handler);
tegra_smmu_save_context((uintptr_t)smmu_ctx_base);
/* Prepare for system suspend */
cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
cstate_info.system = TEGRA_ARI_SYSTEM_SC7;
cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC7;
cstate_info.system_state_force = 1;
cstate_info.update_wake_mask = 1;
mce_update_cstate_info(&cstate_info);
@ -145,14 +143,14 @@ int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
do {
val = (uint32_t)mce_command_handler(
(uint64_t)MCE_CMD_IS_SC7_ALLOWED,
TEGRA_ARI_CORE_C7,
(uint64_t)TEGRA_ARI_CORE_C7,
MCE_CORE_SLEEP_TIME_INFINITE,
0U);
} while (val == 0U);
/* Instruct the MCE to enter system suspend state */
(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
(uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
} else {
; /* do nothing */
}
@ -160,6 +158,87 @@ int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
return PSCI_E_SUCCESS;
}
/*******************************************************************************
* Helper function to check if this is the last ON CPU in the cluster
******************************************************************************/
static bool tegra_last_cpu_in_cluster(const plat_local_state_t *states,
uint32_t ncpu)
{
plat_local_state_t target;
bool last_on_cpu = true;
uint32_t num_cpus = ncpu, pos = 0;
do {
target = states[pos];
if (target != PLAT_MAX_OFF_STATE) {
last_on_cpu = false;
}
--num_cpus;
pos++;
} while (num_cpus != 0U);
return last_on_cpu;
}
/*******************************************************************************
* Helper function to get target power state for the cluster
******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
uint32_t ncpu)
{
uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
uint32_t cpu = plat_my_core_pos();
int32_t ret;
plat_local_state_t target = states[core_pos];
mce_cstate_info_t cstate_info = { 0 };
/* CPU suspend */
if (target == PSTATE_ID_CORE_POWERDN) {
/* Program default wake mask */
cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
cstate_info.update_wake_mask = 1;
mce_update_cstate_info(&cstate_info);
/* Check if CCx state is allowed. */
ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
(uint64_t)TEGRA_ARI_CORE_C7,
tegra_percpu_data[cpu].wake_time,
0U);
if (ret == 0) {
target = PSCI_LOCAL_STATE_RUN;
}
}
/* CPU off */
if (target == PLAT_MAX_OFF_STATE) {
/* Enable cluster powerdn from last CPU in the cluster */
if (tegra_last_cpu_in_cluster(states, ncpu)) {
/* Enable CC7 state and turn off wake mask */
cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
cstate_info.update_wake_mask = 1;
mce_update_cstate_info(&cstate_info);
/* Check if CCx state is allowed. */
ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
(uint64_t)TEGRA_ARI_CORE_C7,
MCE_CORE_SLEEP_TIME_INFINITE,
0U);
if (ret == 0) {
target = PSCI_LOCAL_STATE_RUN;
}
} else {
/* Turn off wake_mask */
cstate_info.update_wake_mask = 1;
mce_update_cstate_info(&cstate_info);
target = PSCI_LOCAL_STATE_RUN;
}
}
return target;
}
/*******************************************************************************
* Platform handler to calculate the proper target power level at the
* specified affinity level
@ -168,85 +247,22 @@ plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
const plat_local_state_t *states,
uint32_t ncpu)
{
plat_local_state_t target = *states;
uint32_t pos = 0;
plat_local_state_t result = PSCI_LOCAL_STATE_RUN;
uint32_t cpu = plat_my_core_pos(), num_cpu = ncpu;
int32_t ret, cluster_powerdn = 1;
uint64_t core_pos = read_mpidr() & (uint64_t)MPIDR_CPU_MASK;
mce_cstate_info_t cstate_info = { 0 };
/* get the power state at this level */
if (lvl == (uint32_t)MPIDR_AFFLVL1) {
target = states[core_pos];
}
if (lvl == (uint32_t)MPIDR_AFFLVL2) {
target = states[cpu];
}
/* CPU suspend */
if ((lvl == (uint32_t)MPIDR_AFFLVL1) && (target == PSTATE_ID_CORE_POWERDN)) {
/* Program default wake mask */
cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
cstate_info.update_wake_mask = 1;
mce_update_cstate_info(&cstate_info);
/* Check if CCx state is allowed. */
ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
TEGRA_ARI_CORE_C7, tegra_percpu_data[cpu].wake_time,
0U);
if (ret != 0) {
result = PSTATE_ID_CORE_POWERDN;
}
}
/* CPU off */
if ((lvl == (uint32_t)MPIDR_AFFLVL1) && (target == PLAT_MAX_OFF_STATE)) {
/* find out the number of ON cpus in the cluster */
do {
target = states[pos];
if (target != PLAT_MAX_OFF_STATE) {
cluster_powerdn = 0;
}
--num_cpu;
pos++;
} while (num_cpu != 0U);
/* Enable cluster powerdn from last CPU in the cluster */
if (cluster_powerdn != 0) {
/* Enable CC7 state and turn off wake mask */
cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
cstate_info.update_wake_mask = 1;
mce_update_cstate_info(&cstate_info);
/* Check if CCx state is allowed. */
ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
TEGRA_ARI_CORE_C7,
MCE_CORE_SLEEP_TIME_INFINITE,
0U);
if (ret != 0) {
result = PSTATE_ID_CORE_POWERDN;
}
} else {
/* Turn off wake_mask */
cstate_info.update_wake_mask = 1;
mce_update_cstate_info(&cstate_info);
}
}
plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
uint32_t cpu = plat_my_core_pos();
/* System Suspend */
if (((lvl == (uint32_t)MPIDR_AFFLVL2) || (lvl == (uint32_t)MPIDR_AFFLVL1)) &&
(target == PSTATE_ID_SOC_POWERDN)) {
result = PSTATE_ID_SOC_POWERDN;
if ((lvl == (uint32_t)MPIDR_AFFLVL2) &&
(states[cpu] == PSTATE_ID_SOC_POWERDN)) {
target = PSTATE_ID_SOC_POWERDN;
}
/* default state */
return result;
/* CPU off, CPU suspend */
if (lvl == (uint32_t)MPIDR_AFFLVL1) {
target = tegra_get_afflvl1_pwr_state(states, ncpu);
}
/* target cluster/system state */
return target;
}
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
@ -276,12 +292,12 @@ int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_sta
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
uint32_t target_cpu = mpidr & (uint64_t)MPIDR_CPU_MASK;
uint32_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
(uint64_t)MPIDR_AFFINITY_BITS;
int32_t ret = PSCI_E_SUCCESS;
uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
MPIDR_AFFINITY_BITS;
if (target_cluster > (uint64_t)MPIDR_AFFLVL1) {
if (target_cluster > MPIDR_AFFLVL1) {
ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
ret = PSCI_E_NOT_PRESENT;
@ -304,14 +320,13 @@ int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
uint64_t impl, val;
const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
/*
* Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
* A02p and beyond).
*/
if ((plat_params->l2_ecc_parity_prot_dis != 1) &&
(impl != (uint64_t)DENVER_IMPL)) {
if ((plat_params->l2_ecc_parity_prot_dis != 1) && (impl != DENVER_IMPL)) {
val = read_l2ctlr_el1();
val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
@ -327,7 +342,7 @@ int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
*/
if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) {
cstate_info.cluster = TEGRA_ARI_CLUSTER_CC1;
cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC1;
cstate_info.update_wake_mask = 1;
mce_update_cstate_info(&cstate_info);
}
@ -354,8 +369,8 @@ int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
* and SC7 for SC7 entry which may not be requested by
* non-secure SW which controls idle states.
*/
cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
cstate_info.system = TEGRA_ARI_SYSTEM_SC1;
cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC1;
cstate_info.update_wake_mask = 1;
mce_update_cstate_info(&cstate_info);
}
@ -375,8 +390,8 @@ int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
}
/* Turn off CPU */
(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7,
MCE_CORE_SLEEP_TIME_INFINITE, 0U);
(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
(uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
return PSCI_E_SUCCESS;
}
@ -384,7 +399,7 @@ int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
__dead2 void tegra_soc_prepare_system_off(void)
{
/* power off the entire system */
mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF);
mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF);
wfi();
@ -396,7 +411,7 @@ __dead2 void tegra_soc_prepare_system_off(void)
int32_t tegra_soc_prepare_system_reset(void)
{
mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);
mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);
return PSCI_E_SUCCESS;
}

View File

@ -49,11 +49,10 @@ void plat_secondary_setup(void)
cpu_reset_handler_base = params_from_bl2->tzdram_base;
memcpy16((void *)((uintptr_t)cpu_reset_handler_base),
(void *)(uintptr_t)tegra186_cpu_reset_handler,
(uintptr_t)&__tegra186_cpu_reset_handler_end -
(uintptr_t)tegra186_cpu_reset_handler);
(uintptr_t)&tegra186_cpu_reset_handler);
} else {
cpu_reset_handler_base = (uintptr_t)tegra_secure_entrypoint;
cpu_reset_handler_base = (uintptr_t)&tegra_secure_entrypoint;
}
addr_low = (uint32_t)cpu_reset_handler_base | CPU_RESET_MODE_AA64;

View File

@ -23,7 +23,7 @@
/*******************************************************************************
* Offset to read the ref_clk counter value
******************************************************************************/
#define REF_CLK_OFFSET 4
#define REF_CLK_OFFSET 4ULL
/*******************************************************************************
* Tegra186 SiP SMCs
@ -35,7 +35,7 @@
#define TEGRA_SIP_MCE_CMD_READ_CSTATE_STATS 0xC2FFFF03
#define TEGRA_SIP_MCE_CMD_WRITE_CSTATE_STATS 0xC2FFFF04
#define TEGRA_SIP_MCE_CMD_IS_SC7_ALLOWED 0xC2FFFF05
#define TEGRA_SIP_MCE_CMD_ONLINE_CORE 0xC2FFFF06
#define TEGRA_SIP_MCE_CMD_CC3_CTRL 0xC2FFFF07
#define TEGRA_SIP_MCE_CMD_ECHO_DATA 0xC2FFFF08
#define TEGRA_SIP_MCE_CMD_READ_VERSIONS 0xC2FFFF09
@ -52,7 +52,7 @@
/*******************************************************************************
* This function is responsible for handling all T186 SiP calls
******************************************************************************/
int plat_sip_handler(uint32_t smc_fid,
int32_t plat_sip_handler(uint32_t smc_fid,
uint64_t x1,
uint64_t x2,
uint64_t x3,
@ -61,24 +61,30 @@ int plat_sip_handler(uint32_t smc_fid,
void *handle,
uint64_t flags)
{
int mce_ret;
int impl, cpu;
int32_t mce_ret, ret = 0;
uint32_t impl, cpu;
uint32_t base, core_clk_ctr, ref_clk_ctr;
uint32_t local_smc_fid = smc_fid;
uint64_t local_x1 = x1, local_x2 = x2, local_x3 = x3;
(void)x4;
(void)cookie;
(void)flags;
if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
/* 32-bit function, clear top parameter bits */
x1 = (uint32_t)x1;
x2 = (uint32_t)x2;
x3 = (uint32_t)x3;
local_x1 = (uint32_t)x1;
local_x2 = (uint32_t)x2;
local_x3 = (uint32_t)x3;
}
/*
* Convert SMC FID to SMC64, to support SMC32/SMC64 configurations
*/
smc_fid |= (SMC_64 << FUNCID_CC_SHIFT);
local_smc_fid |= (SMC_64 << FUNCID_CC_SHIFT);
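	/*
	 * Worked example (illustrative): with the generic SMCCC encoding
	 * (FUNCID_CC_SHIFT is 30 and SMC_64 is 1), an SMC32 caller issuing
	 * 0x82FFFF05 ends up with 0x82FFFF05 | (1U << 30) = 0xC2FFFF05,
	 * i.e. TEGRA_SIP_MCE_CMD_IS_SC7_ALLOWED, so the single SMC64 switch
	 * below serves both calling conventions.
	 */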
switch (smc_fid) {
switch (local_smc_fid) {
/*
* Micro Coded Engine (MCE) commands reside in the 0x82FFFF00 -
* 0x82FFFFFF SiP SMC space
@ -103,14 +109,13 @@ int plat_sip_handler(uint32_t smc_fid,
case TEGRA_SIP_MCE_CMD_MISC_CCPLEX:
/* clean up the high bits */
smc_fid &= MCE_CMD_MASK;
local_smc_fid &= MCE_CMD_MASK;
/* execute the command and store the result */
mce_ret = mce_command_handler(smc_fid, x1, x2, x3);
write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X0,
(uint64_t)mce_ret);
return 0;
mce_ret = mce_command_handler(local_smc_fid, local_x1, local_x2, local_x3);
write_ctx_reg(get_gpregs_ctx(handle),
CTX_GPREG_X0, (uint64_t)(mce_ret));
break;
/*
* This function ID reads the Activity monitor's core/ref clock
@ -125,28 +130,30 @@ int plat_sip_handler(uint32_t smc_fid,
impl = ((uint32_t)x2 >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
/* sanity check target CPU number */
if (cpu > PLATFORM_MAX_CPUS_PER_CLUSTER)
return -EINVAL;
if (cpu > (uint32_t)PLATFORM_MAX_CPUS_PER_CLUSTER) {
ret = -EINVAL;
} else {
/* get the base address for the current CPU */
base = (impl == DENVER_IMPL) ? TEGRA_DENVER_ACTMON_CTR_BASE :
TEGRA_ARM_ACTMON_CTR_BASE;
/* get the base address for the current CPU */
base = (impl == DENVER_IMPL) ? TEGRA_DENVER_ACTMON_CTR_BASE :
TEGRA_ARM_ACTMON_CTR_BASE;
/* read the clock counter values */
core_clk_ctr = mmio_read_32(base + (8ULL * cpu));
ref_clk_ctr = mmio_read_32(base + (8ULL * cpu) + REF_CLK_OFFSET);
/* read the clock counter values */
core_clk_ctr = mmio_read_32(base + (8 * cpu));
ref_clk_ctr = mmio_read_32(base + (8 * cpu) + REF_CLK_OFFSET);
/* return the counter values as two different parameters */
write_ctx_reg(get_gpregs_ctx(handle),
CTX_GPREG_X1, (core_clk_ctr));
write_ctx_reg(get_gpregs_ctx(handle),
CTX_GPREG_X2, (ref_clk_ctr));
}
/* return the counter values as two different parameters */
write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X1,
(uint64_t)core_clk_ctr);
write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X2,
(uint64_t)ref_clk_ctr);
return 0;
break;
default:
ret = -ENOTSUP;
break;
}
return -ENOTSUP;
return ret;
}

View File

@ -9,6 +9,8 @@
#include <smmu.h>
#include <tegra_def.h>
#define MAX_NUM_SMMU_DEVICES U(1)
/*******************************************************************************
* Array to hold SMMU context for Tegra186
******************************************************************************/
@ -305,7 +307,15 @@ static __attribute__((aligned(16))) smmu_regs_t tegra186_smmu_context[] = {
smmu_regs_t *plat_get_smmu_ctx(void)
{
/* index of _END_OF_TABLE_ */
tegra186_smmu_context[0].val = ARRAY_SIZE(tegra186_smmu_context) - 1;
tegra186_smmu_context[0].val = (uint32_t)(ARRAY_SIZE(tegra186_smmu_context)) - 1U;
return tegra186_smmu_context;
}
/*******************************************************************************
* Handler to return the number of supported SMMU devices
******************************************************************************/
uint32_t plat_get_num_smmu_devices(void)
{
return MAX_NUM_SMMU_DEVICES;
}
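/*
 * Illustrative sketch (hypothetical caller): this runtime query appears to
 * take over from the NUM_SMMU_DEVICES build define dropped from the platform
 * makefile below, letting a generic driver size its save/restore loop at
 * runtime.
 */
static inline void smmu_ctx_loop_example(void)
{
	uint32_t num = plat_get_num_smmu_devices();
	uint32_t i;

	for (i = 0U; i < num; i++) {
		/* save or restore the context of SMMU instance 'i' here */
	}
}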

View File

@ -20,9 +20,6 @@ $(eval $(call add_define,ENABLE_CHIP_VERIFICATION_HARNESS))
ENABLE_SMMU_DEVICE := 1
$(eval $(call add_define,ENABLE_SMMU_DEVICE))
NUM_SMMU_DEVICES := 1
$(eval $(call add_define,NUM_SMMU_DEVICES))
RESET_TO_BL31 := 1
PROGRAMMABLE_RESET_ADDRESS := 1
@ -50,6 +47,7 @@ PLAT_INCLUDES += -I${SOC_DIR}/drivers/include
BL31_SOURCES += lib/cpus/aarch64/denver.S \
lib/cpus/aarch64/cortex_a57.S \
${COMMON_DIR}/drivers/gpcdma/gpcdma.c \
${COMMON_DIR}/drivers/memctrl/memctrl_v2.c \
${COMMON_DIR}/drivers/smmu/smmu.c \
${SOC_DIR}/drivers/mce/mce.c \
@ -64,3 +62,13 @@ BL31_SOURCES += lib/cpus/aarch64/denver.S \
${SOC_DIR}/plat_smmu.c \
${SOC_DIR}/plat_trampoline.S
# Enable workarounds for selected Cortex-A57 errata.
A57_DISABLE_NON_TEMPORAL_HINT := 1
ERRATA_A57_806969 := 1
ERRATA_A57_813419 := 1
ERRATA_A57_813420 := 1
ERRATA_A57_826974 := 1
ERRATA_A57_826977 := 1
ERRATA_A57_828024 := 1
ERRATA_A57_829520 := 1
ERRATA_A57_833471 := 1

View File

@ -16,14 +16,16 @@
*/
/* Secure scratch registers */
#define PMC_SECURE_SCRATCH4_OFFSET 0xC0U
#define PMC_SECURE_SCRATCH5_OFFSET 0xC4U
#define PMC_SECURE_SCRATCH6_OFFSET 0x224U
#define PMC_SECURE_SCRATCH7_OFFSET 0x228U
#define PMC_SECURE_SCRATCH120_OFFSET 0xB38U
#define PMC_SECURE_SCRATCH121_OFFSET 0xB3CU
#define PMC_SECURE_SCRATCH122_OFFSET 0xB40U
#define PMC_SECURE_SCRATCH123_OFFSET 0xB44U
#define PMC_SECURE_SCRATCH4_OFFSET 0xC0U
#define PMC_SECURE_SCRATCH5_OFFSET 0xC4U
#define PMC_SECURE_SCRATCH6_OFFSET 0x224U
#define PMC_SECURE_SCRATCH7_OFFSET 0x228U
#define PMC_SECURE_SCRATCH116_OFFSET 0xB28U
#define PMC_SECURE_SCRATCH117_OFFSET 0xB2CU
#define PMC_SECURE_SCRATCH120_OFFSET 0xB38U
#define PMC_SECURE_SCRATCH121_OFFSET 0xB3CU
#define PMC_SECURE_SCRATCH122_OFFSET 0xB40U
#define PMC_SECURE_SCRATCH123_OFFSET 0xB44U
/*
* AHB arbitration memory write queue
@ -32,6 +34,12 @@
#define ARAHB_MST_ID_SE2_MASK (0x1U << 13)
#define ARAHB_MST_ID_SE_MASK (0x1U << 14)
/**
* SE registers
*/
#define TEGRA_SE_AES_KEYSLOT_COUNT 16
#define SE_MAX_LAST_BLOCK_SIZE 0xFFFFF
/* SE Status register */
#define SE_STATUS_OFFSET 0x800U
#define SE_STATUS_SHIFT 0
@ -42,8 +50,24 @@
#define SE_STATUS(x) \
((x) & ((0x3U) << SE_STATUS_SHIFT))
#define SE_MEM_INTERFACE_SHIFT 2
#define SE_MEM_INTERFACE_IDLE 0
#define SE_MEM_INTERFACE_BUSY 1
#define SE_MEM_INTERFACE(x) ((x) << SE_STATUS_SHIFT)
/* SE register definitions */
#define SE_SECURITY_REG_OFFSET 0x0
#define SE_SECURITY_TZ_LOCK_SOFT_SHIFT 5
#define SE_SECURE 0x0
#define SE_SECURITY_TZ_LOCK_SOFT(x) ((x) << SE_SECURITY_TZ_LOCK_SOFT_SHIFT)
#define SE_SEC_ENG_DIS_SHIFT 1
#define SE_DISABLE_FALSE 0
#define SE_DISABLE_TRUE 1
#define SE_SEC_ENG_DISABLE(x)((x) << SE_SEC_ENG_DIS_SHIFT)
/* SE config register */
#define SE_CONFIG_REG_OFFSET 0x14U
#define SE_CONFIG_REG_OFFSET 0x14U
#define SE_CONFIG_ENC_ALG_SHIFT 12
#define SE_CONFIG_ENC_ALG_AES_ENC \
((1U) << SE_CONFIG_ENC_ALG_SHIFT)
@ -66,7 +90,7 @@
#define SE_CONFIG_DEC_ALG(x) \
((x) & ((0xFU) << SE_CONFIG_DEC_ALG_SHIFT))
#define SE_CONFIG_DST_SHIFT 2
#define SE_CONFIG_DST_SHIFT 2
#define SE_CONFIG_DST_MEMORY \
((0U) << SE_CONFIG_DST_SHIFT)
#define SE_CONFIG_DST_HASHREG \
@ -80,33 +104,75 @@
#define SE_CONFIG_DST(x) \
((x) & ((0x7U) << SE_CONFIG_DST_SHIFT))
#define SE_CONFIG_ENC_MODE_SHIFT 24
#define SE_CONFIG_ENC_MODE_KEY128 \
((0UL) << SE_CONFIG_ENC_MODE_SHIFT)
#define SE_CONFIG_ENC_MODE_KEY192 \
((1UL) << SE_CONFIG_ENC_MODE_SHIFT)
#define SE_CONFIG_ENC_MODE_KEY256 \
((2UL) << SE_CONFIG_ENC_MODE_SHIFT)
#define SE_CONFIG_ENC_MODE_SHA1 \
((0UL) << SE_CONFIG_ENC_MODE_SHIFT)
#define SE_CONFIG_ENC_MODE_SHA224 \
((4UL) << SE_CONFIG_ENC_MODE_SHIFT)
#define SE_CONFIG_ENC_MODE_SHA256 \
((5UL) << SE_CONFIG_ENC_MODE_SHIFT)
#define SE_CONFIG_ENC_MODE_SHA384 \
((6UL) << SE_CONFIG_ENC_MODE_SHIFT)
#define SE_CONFIG_ENC_MODE_SHA512 \
((7UL) << SE_CONFIG_ENC_MODE_SHIFT)
#define SE_CONFIG_ENC_MODE(x)\
((x) & ((0xFFUL) << SE_CONFIG_ENC_MODE_SHIFT))
#define SE_CONFIG_DEC_MODE_SHIFT 16
#define SE_CONFIG_DEC_MODE_KEY128 \
((0UL) << SE_CONFIG_DEC_MODE_SHIFT)
#define SE_CONFIG_DEC_MODE_KEY192 \
((1UL) << SE_CONFIG_DEC_MODE_SHIFT)
#define SE_CONFIG_DEC_MODE_KEY256 \
((2UL) << SE_CONFIG_DEC_MODE_SHIFT)
#define SE_CONFIG_DEC_MODE_SHA1 \
((0UL) << SE_CONFIG_DEC_MODE_SHIFT)
#define SE_CONFIG_DEC_MODE_SHA224 \
((4UL) << SE_CONFIG_DEC_MODE_SHIFT)
#define SE_CONFIG_DEC_MODE_SHA256 \
((5UL) << SE_CONFIG_DEC_MODE_SHIFT)
#define SE_CONFIG_DEC_MODE_SHA384 \
((6UL) << SE_CONFIG_DEC_MODE_SHIFT)
#define SE_CONFIG_DEC_MODE_SHA512 \
((7UL) << SE_CONFIG_DEC_MODE_SHIFT)
#define SE_CONFIG_DEC_MODE(x)\
((x) & ((0xFFUL) << SE_CONFIG_DEC_MODE_SHIFT))
/* DRBG random number generator config */
#define SE_RNG_CONFIG_REG_OFFSET 0x340
#define DRBG_MODE_SHIFT 0
#define DRBG_MODE_NORMAL \
((0UL) << DRBG_MODE_SHIFT)
((0U) << DRBG_MODE_SHIFT)
#define DRBG_MODE_FORCE_INSTANTION \
((1UL) << DRBG_MODE_SHIFT)
((1U) << DRBG_MODE_SHIFT)
#define DRBG_MODE_FORCE_RESEED \
((2UL) << DRBG_MODE_SHIFT)
((2U) << DRBG_MODE_SHIFT)
#define SE_RNG_CONFIG_MODE(x) \
((x) & ((0x3UL) << DRBG_MODE_SHIFT))
((x) & ((0x3U) << DRBG_MODE_SHIFT))
#define DRBG_SRC_SHIFT 2
#define DRBG_SRC_NONE \
((0UL) << DRBG_SRC_SHIFT)
((0U) << DRBG_SRC_SHIFT)
#define DRBG_SRC_ENTROPY \
((1UL) << DRBG_SRC_SHIFT)
((1U) << DRBG_SRC_SHIFT)
#define DRBG_SRC_LFSR \
((2UL) << DRBG_SRC_SHIFT)
((2U) << DRBG_SRC_SHIFT)
#define SE_RNG_SRC_CONFIG_MODE(x) \
((x) & ((0x3UL) << DRBG_SRC_SHIFT))
((x) & ((0x3U) << DRBG_SRC_SHIFT))
/* DRBG random number generator entropy config */
#define SE_RNG_SRC_CONFIG_REG_OFFSET 0x344U
#define DRBG_RO_ENT_SRC_SHIFT 1
#define DRBG_RO_ENT_SRC_SHIFT 1
#define DRBG_RO_ENT_SRC_ENABLE \
((1U) << DRBG_RO_ENT_SRC_SHIFT)
#define DRBG_RO_ENT_SRC_DISABLE \
@ -114,7 +180,7 @@
#define SE_RNG_SRC_CONFIG_RO_ENT_SRC(x) \
((x) & ((0x1U) << DRBG_RO_ENT_SRC_SHIFT))
#define DRBG_RO_ENT_SRC_LOCK_SHIFT 0
#define DRBG_RO_ENT_SRC_LOCK_SHIFT 0
#define DRBG_RO_ENT_SRC_LOCK_ENABLE \
((1U) << DRBG_RO_ENT_SRC_LOCK_SHIFT)
#define DRBG_RO_ENT_SRC_LOCK_DISABLE \
@ -130,9 +196,97 @@
#define SE_RNG_SRC_CONFIG_RO_ENT_IGNORE_MEM(x) \
((x) & ((0x1U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT))
#define SE_RNG_RESEED_INTERVAL_REG_OFFSET 0x348
/* SE CRYPTO */
#define SE_CRYPTO_REG_OFFSET 0x304
#define SE_CRYPTO_HASH_SHIFT 0
#define SE_CRYPTO_HASH_DISABLE \
((0U) << SE_CRYPTO_HASH_SHIFT)
#define SE_CRYPTO_HASH_ENABLE \
((1U) << SE_CRYPTO_HASH_SHIFT)
#define SE_CRYPTO_XOR_POS_SHIFT 1
#define SE_CRYPTO_XOR_BYPASS \
((0U) << SE_CRYPTO_XOR_POS_SHIFT)
#define SE_CRYPTO_XOR_TOP \
((2U) << SE_CRYPTO_XOR_POS_SHIFT)
#define SE_CRYPTO_XOR_BOTTOM \
((3U) << SE_CRYPTO_XOR_POS_SHIFT)
#define SE_CRYPTO_INPUT_SEL_SHIFT 3
#define SE_CRYPTO_INPUT_AHB \
((0U) << SE_CRYPTO_INPUT_SEL_SHIFT)
#define SE_CRYPTO_INPUT_RANDOM \
((1U) << SE_CRYPTO_INPUT_SEL_SHIFT)
#define SE_CRYPTO_INPUT_AESOUT \
((2U) << SE_CRYPTO_INPUT_SEL_SHIFT)
#define SE_CRYPTO_INPUT_LNR_CTR \
((3U) << SE_CRYPTO_INPUT_SEL_SHIFT)
#define SE_CRYPTO_VCTRAM_SEL_SHIFT 5
#define SE_CRYPTO_VCTRAM_AHB \
((0U) << SE_CRYPTO_VCTRAM_SEL_SHIFT)
#define SE_CRYPTO_VCTRAM_AESOUT \
((2U) << SE_CRYPTO_VCTRAM_SEL_SHIFT)
#define SE_CRYPTO_VCTRAM_PREVAHB \
((3U) << SE_CRYPTO_VCTRAM_SEL_SHIFT)
#define SE_CRYPTO_IV_SEL_SHIFT 7
#define SE_CRYPTO_IV_ORIGINAL \
((0U) << SE_CRYPTO_IV_SEL_SHIFT)
#define SE_CRYPTO_IV_UPDATED \
((1U) << SE_CRYPTO_IV_SEL_SHIFT)
#define SE_CRYPTO_CORE_SEL_SHIFT 8
#define SE_CRYPTO_CORE_DECRYPT \
((0U) << SE_CRYPTO_CORE_SEL_SHIFT)
#define SE_CRYPTO_CORE_ENCRYPT \
((1U) << SE_CRYPTO_CORE_SEL_SHIFT)
#define SE_CRYPTO_KEY_INDEX_SHIFT 24
#define SE_CRYPTO_KEY_INDEX(x) (x << SE_CRYPTO_KEY_INDEX_SHIFT)
#define SE_CRYPTO_MEMIF_AHB \
((0U) << SE_CRYPTO_MEMIF_SHIFT)
#define SE_CRYPTO_MEMIF_MCCIF \
((1U) << SE_CRYPTO_MEMIF_SHIFT)
#define SE_CRYPTO_MEMIF_SHIFT 31
/* KEY TABLE */
#define SE_KEYTABLE_REG_OFFSET 0x31C
/* KEYIV PKT - key slot */
#define SE_KEYTABLE_SLOT_SHIFT 4
#define SE_KEYTABLE_SLOT(x) (x << SE_KEYTABLE_SLOT_SHIFT)
/* KEYIV PKT - KEYIV select */
#define SE_KEYIV_PKT_KEYIV_SEL_SHIFT 3
#define SE_CRYPTO_KEYIV_KEY \
((0U) << SE_KEYIV_PKT_KEYIV_SEL_SHIFT)
#define SE_CRYPTO_KEYIV_IVS \
((1U) << SE_KEYIV_PKT_KEYIV_SEL_SHIFT)
/* KEYIV PKT - IV select */
#define SE_KEYIV_PKT_IV_SEL_SHIFT 2
#define SE_CRYPTO_KEYIV_IVS_OIV \
((0U) << SE_KEYIV_PKT_IV_SEL_SHIFT)
#define SE_CRYPTO_KEYIV_IVS_UIV \
((1U) << SE_KEYIV_PKT_IV_SEL_SHIFT)
/* KEYIV PKT - key word */
#define SE_KEYIV_PKT_KEY_WORD_SHIFT 0
#define SE_KEYIV_PKT_KEY_WORD(x) \
((x) << SE_KEYIV_PKT_KEY_WORD_SHIFT)
/* KEYIV PKT - iv word */
#define SE_KEYIV_PKT_IV_WORD_SHIFT 0
#define SE_KEYIV_PKT_IV_WORD(x) \
((x) << SE_KEYIV_PKT_IV_WORD_SHIFT)
/* SE OPERATION */
#define SE_OPERATION_REG_OFFSET 0x8U
#define SE_OPERATION_SHIFT 0
#define SE_OPERATION_SHIFT 0
#define SE_OP_ABORT \
((0x0U) << SE_OPERATION_SHIFT)
#define SE_OP_START \
@ -146,11 +300,85 @@
#define SE_OPERATION(x) \
((x) & ((0x7U) << SE_OPERATION_SHIFT))
/* SE CONTEXT */
#define SE_CTX_SAVE_CONFIG_REG_OFFSET 0x70
#define SE_CTX_SAVE_WORD_QUAD_SHIFT 0
#define SE_CTX_SAVE_WORD_QUAD(x) \
(x << SE_CTX_SAVE_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_WORD_QUAD_KEYS_0_3 \
((0U) << SE_CTX_SAVE_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_WORD_QUAD_KEYS_4_7 \
((1U) << SE_CTX_SAVE_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_WORD_QUAD_ORIG_IV \
((2U) << SE_CTX_SAVE_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_WORD_QUAD_UPD_IV \
((3U) << SE_CTX_SAVE_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_KEY_INDEX_SHIFT 8
#define SE_CTX_SAVE_KEY_INDEX(x) (x << SE_CTX_SAVE_KEY_INDEX_SHIFT)
#define SE_CTX_SAVE_STICKY_WORD_QUAD_SHIFT 24
#define SE_CTX_SAVE_STICKY_WORD_QUAD_STICKY_0_3 \
((0U) << SE_CTX_SAVE_STICKY_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_STICKY_WORD_QUAD_STICKY_4_7 \
((1U) << SE_CTX_SAVE_STICKY_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_STICKY_WORD_QUAD(x) \
(x << SE_CTX_SAVE_STICKY_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_SRC_SHIFT 29
#define SE_CTX_SAVE_SRC_STICKY_BITS \
((0U) << SE_CTX_SAVE_SRC_SHIFT)
#define SE_CTX_SAVE_SRC_RSA_KEYTABLE \
((1U) << SE_CTX_SAVE_SRC_SHIFT)
#define SE_CTX_SAVE_SRC_AES_KEYTABLE \
((2U) << SE_CTX_SAVE_SRC_SHIFT)
#define SE_CTX_SAVE_SRC_PKA1_STICKY_BITS \
((3U) << SE_CTX_SAVE_SRC_SHIFT)
#define SE_CTX_SAVE_SRC_MEM \
((4U) << SE_CTX_SAVE_SRC_SHIFT)
#define SE_CTX_SAVE_SRC_SRK \
((6U) << SE_CTX_SAVE_SRC_SHIFT)
#define SE_CTX_SAVE_SRC_PKA1_KEYTABLE \
((7U) << SE_CTX_SAVE_SRC_SHIFT)
#define SE_CTX_STICKY_WORD_QUAD_SHIFT 24
#define SE_CTX_STICKY_WORD_QUAD_WORDS_0_3 \
((0U) << SE_CTX_STICKY_WORD_QUAD_SHIFT)
#define SE_CTX_STICKY_WORD_QUAD_WORDS_4_7 \
((1U) << SE_CTX_STICKY_WORD_QUAD_SHIFT)
#define SE_CTX_STICKY_WORD_QUAD(x) (x << SE_CTX_STICKY_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_RSA_KEY_INDEX_SHIFT 16
#define SE_CTX_SAVE_RSA_KEY_INDEX(x) \
(x << SE_CTX_SAVE_RSA_KEY_INDEX_SHIFT)
#define SE_CTX_RSA_WORD_QUAD_SHIFT 12
#define SE_CTX_RSA_WORD_QUAD(x) \
(x << SE_CTX_RSA_WORD_QUAD_SHIFT)
#define SE_CTX_PKA1_WORD_QUAD_L_SHIFT 0
#define SE_CTX_PKA1_WORD_QUAD_L_SIZE \
((true ? 4:0) - \
(false ? 4:0) + 1)
#define SE_CTX_PKA1_WORD_QUAD_L(x)\
(((x) << SE_CTX_PKA1_WORD_QUAD_L_SHIFT) & 0x1f)
#define SE_CTX_PKA1_WORD_QUAD_H_SHIFT 12
#define SE_CTX_PKA1_WORD_QUAD_H(x)\
((((x) >> SE_CTX_PKA1_WORD_QUAD_L_SIZE) & 0xf) \
<< SE_CTX_PKA1_WORD_QUAD_H_SHIFT)
#define SE_RSA_KEY_INDEX_SLOT0_EXP 0
#define SE_RSA_KEY_INDEX_SLOT0_MOD 1
#define SE_RSA_KEY_INDEX_SLOT1_EXP 2
#define SE_RSA_KEY_INDEX_SLOT1_MOD 3
/* SE_CTX_SAVE_AUTO */
#define SE_CTX_SAVE_AUTO_REG_OFFSET 0x74U
/* Enable */
#define SE_CTX_SAVE_AUTO_ENABLE_SHIFT 0
#define SE_CTX_SAVE_AUTO_ENABLE_SHIFT 0
#define SE_CTX_SAVE_AUTO_DIS \
((0U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT)
#define SE_CTX_SAVE_AUTO_EN \
@ -167,20 +395,22 @@
#define SE_CTX_SAVE_AUTO_LOCK(x) \
((x) & ((0x1U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT))
/* Current context save number of blocks */
/* Current context save number of blocks*/
#define SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT 16
#define SE_CTX_SAVE_AUTO_CURR_CNT_MASK 0x3FFU
#define SE_CTX_SAVE_GET_BLK_COUNT(x) \
(((x) >> SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT) & \
SE_CTX_SAVE_AUTO_CURR_CNT_MASK)
#define SE_CTX_SAVE_SIZE_BLOCKS_SE1 133
#define SE_CTX_SAVE_SIZE_BLOCKS_SE2 646
#define SE_CTX_SAVE_SIZE_BLOCKS_SE1 133
#define SE_CTX_SAVE_SIZE_BLOCKS_SE2 646
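/*
 * Worked example (illustrative): with SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT = 16
 * and the 0x3FF mask, SE_CTX_SAVE_GET_BLK_COUNT() of a register value such as
 * 0x00850000 yields 0x85 = 133 blocks, i.e. a complete SE1 context
 * (SE_CTX_SAVE_SIZE_BLOCKS_SE1) has been written out.
 */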
/* SE TZRAM OPERATION - only for SE1 */
#define SE_TZRAM_OPERATION 0x540U
#define SE_TZRAM_OPERATION 0x540U
#define SE_TZRAM_OP_MODE_SHIFT 1
#define SE_TZRAM_OP_MODE_SHIFT 1
#define SE_TZRAM_OP_COMMAND_INIT 1
#define SE_TZRAM_OP_COMMAND_SHIFT 0
#define SE_TZRAM_OP_MODE_SAVE \
((0U) << SE_TZRAM_OP_MODE_SHIFT)
#define SE_TZRAM_OP_MODE_RESTORE \
@ -188,7 +418,7 @@
#define SE_TZRAM_OP_MODE(x) \
((x) & ((0x1U) << SE_TZRAM_OP_MODE_SHIFT))
#define SE_TZRAM_OP_BUSY_SHIFT 2
#define SE_TZRAM_OP_BUSY_SHIFT 2
#define SE_TZRAM_OP_BUSY_OFF \
((0U) << SE_TZRAM_OP_BUSY_SHIFT)
#define SE_TZRAM_OP_BUSY_ON \
@ -196,7 +426,7 @@
#define SE_TZRAM_OP_BUSY(x) \
((x) & ((0x1U) << SE_TZRAM_OP_BUSY_SHIFT))
#define SE_TZRAM_OP_REQ_SHIFT 0
#define SE_TZRAM_OP_REQ_SHIFT 0
#define SE_TZRAM_OP_REQ_IDLE \
((0U) << SE_TZRAM_OP_REQ_SHIFT)
#define SE_TZRAM_OP_REQ_INIT \
@ -206,7 +436,7 @@
/* SE Interrupt */
#define SE_INT_STATUS_REG_OFFSET 0x10U
#define SE_INT_OP_DONE_SHIFT 4
#define SE_INT_OP_DONE_SHIFT 4
#define SE_INT_OP_DONE_CLEAR \
((0U) << SE_INT_OP_DONE_SHIFT)
#define SE_INT_OP_DONE_ACTIVE \
@ -214,19 +444,186 @@
#define SE_INT_OP_DONE(x) \
((x) & ((0x1U) << SE_INT_OP_DONE_SHIFT))
/* SE TZRAM SECURITY */
#define SE_TZRAM_SEC_REG_OFFSET 0x4
#define SE_TZRAM_SEC_SETTING_SHIFT 0
#define SE_TZRAM_SECURE \
((0UL) << SE_TZRAM_SEC_SETTING_SHIFT)
#define SE_TZRAM_NONSECURE \
((1UL) << SE_TZRAM_SEC_SETTING_SHIFT)
#define SE_TZRAM_SEC_SETTING(x) \
((x) & ((0x1UL) << SE_TZRAM_SEC_SETTING_SHIFT))
/* PKA1 KEY SLOTS */
#define TEGRA_SE_PKA1_KEYSLOT_COUNT 4
/* SE error status */
#define SE_ERR_STATUS_REG_OFFSET 0x804U
#define SE_CRYPTO_KEYTABLE_DST_REG_OFFSET 0x330
#define SE_CRYPTO_KEYTABLE_DST_WORD_QUAD_SHIFT 0
#define SE_CRYPTO_KEYTABLE_DST_WORD_QUAD(x) \
(x << SE_CRYPTO_KEYTABLE_DST_WORD_QUAD_SHIFT)
#define SE_KEY_INDEX_SHIFT 8
#define SE_CRYPTO_KEYTABLE_DST_KEY_INDEX(x) (x << SE_KEY_INDEX_SHIFT)
/* SE linked list (LL) register */
#define SE_IN_LL_ADDR_REG_OFFSET 0x18U
#define SE_OUT_LL_ADDR_REG_OFFSET 0x24U
#define SE_BLOCK_COUNT_REG_OFFSET 0x318U
#define SE_OUT_LL_ADDR_REG_OFFSET 0x24U
#define SE_BLOCK_COUNT_REG_OFFSET 0x318U
/* AES data sizes */
#define TEGRA_SE_KEY_256_SIZE 32
#define TEGRA_SE_KEY_192_SIZE 24
#define TEGRA_SE_KEY_128_SIZE 16
#define TEGRA_SE_AES_BLOCK_SIZE 16
#define TEGRA_SE_AES_MIN_KEY_SIZE 16
#define TEGRA_SE_AES_MAX_KEY_SIZE 32
#define TEGRA_SE_AES_IV_SIZE 16
#define TEGRA_SE_AES_MIN_KEY_SIZE 16
#define TEGRA_SE_AES_MAX_KEY_SIZE 32
#define TEGRA_SE_AES_IV_SIZE 16
#define TEGRA_SE_RNG_IV_SIZE 16
#define TEGRA_SE_RNG_DT_SIZE 16
#define TEGRA_SE_RNG_KEY_SIZE 16
#define TEGRA_SE_RNG_SEED_SIZE (TEGRA_SE_RNG_IV_SIZE + \
TEGRA_SE_RNG_KEY_SIZE + \
TEGRA_SE_RNG_DT_SIZE)
#define TEGRA_SE_RSA512_DIGEST_SIZE 64
#define TEGRA_SE_RSA1024_DIGEST_SIZE 128
#define TEGRA_SE_RSA1536_DIGEST_SIZE 192
#define TEGRA_SE_RSA2048_DIGEST_SIZE 256
#define SE_KEY_TABLE_ACCESS_REG_OFFSET 0x284
#define SE_KEY_READ_DISABLE_SHIFT 0
#define SE_CTX_BUFER_SIZE 1072
#define SE_CTX_DRBG_BUFER_SIZE 2112
/* SE blobs size in bytes */
#define SE_CTX_SAVE_RSA_KEY_LENGTH 1024
#define SE_CTX_SAVE_RANDOM_DATA_SIZE 16
#define SE_CTX_SAVE_STICKY_BITS_SIZE 16
#define SE2_CONTEXT_SAVE_PKA1_STICKY_BITS_LENGTH 16
#define SE2_CONTEXT_SAVE_PKA1_KEYS_LENGTH 8192
#define SE_CTX_KNOWN_PATTERN_SIZE 16
#define SE_CTX_KNOWN_PATTERN_SIZE_WORDS (SE_CTX_KNOWN_PATTERN_SIZE/4)
/* SE RSA */
#define TEGRA_SE_RSA_KEYSLOT_COUNT 2
#define SE_RSA_KEY_SIZE_REG_OFFSET 0x404
#define SE_RSA_EXP_SIZE_REG_OFFSET 0x408
#define SE_RSA_MAX_EXP_BIT_SIZE 2048
#define SE_RSA_MAX_EXP_SIZE32 \
(SE_RSA_MAX_EXP_BIT_SIZE >> 5)
#define SE_RSA_MAX_MOD_BIT_SIZE 2048
#define SE_RSA_MAX_MOD_SIZE32 \
(SE_RSA_MAX_MOD_BIT_SIZE >> 5)
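/*
 * Sanity check (illustrative): 2048 bits >> 5 gives 64 32-bit words per
 * exponent/modulus, i.e. 256 bytes, matching TEGRA_SE_RSA2048_DIGEST_SIZE
 * above.
 */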
/* SE_RSA_KEYTABLE_ADDR */
#define SE_RSA_KEYTABLE_ADDR 0x420
#define RSA_KEY_PKT_WORD_ADDR_SHIFT 0
#define RSA_KEY_PKT_EXPMOD_SEL_SHIFT \
((6U) << RSA_KEY_PKT_WORD_ADDR_SHIFT)
#define RSA_KEY_MOD \
((1U) << RSA_KEY_PKT_EXPMOD_SEL_SHIFT)
#define RSA_KEY_EXP \
((0U) << RSA_KEY_PKT_EXPMOD_SEL_SHIFT)
#define RSA_KEY_PKT_SLOT_SHIFT 7
#define RSA_KEY_SLOT_1 \
((0U) << RSA_KEY_PKT_SLOT_SHIFT)
#define RSA_KEY_SLOT_2 \
((1U) << RSA_KEY_PKT_SLOT_SHIFT)
#define RSA_KEY_PKT_INPUT_MODE_SHIFT 8
#define RSA_KEY_REG_INPUT \
((0U) << RSA_KEY_PKT_INPUT_MODE_SHIFT)
#define RSA_KEY_DMA_INPUT \
((1U) << RSA_KEY_PKT_INPUT_MODE_SHIFT)
/* SE_RSA_KEYTABLE_DATA */
#define SE_RSA_KEYTABLE_DATA 0x424
/* SE_RSA_CONFIG register */
#define SE_RSA_CONFIG 0x400
#define RSA_KEY_SLOT_SHIFT 24
#define RSA_KEY_SLOT(x) \
((x) << RSA_KEY_SLOT_SHIFT)
/*******************************************************************************
* Structure definition
******************************************************************************/
/* SE context blob */
#pragma pack(push, 1)
typedef struct tegra_aes_key_slot {
/* 0 - 7 AES key */
uint32_t key[8];
/* 8 - 11 Original IV */
uint32_t oiv[4];
/* 12 - 15 Updated IV */
uint32_t uiv[4];
} tegra_se_aes_key_slot_t;
#pragma pack(pop)
#pragma pack(push, 1)
typedef struct tegra_se_context {
/* random number */
unsigned char rand_data[SE_CTX_SAVE_RANDOM_DATA_SIZE];
/* Sticky bits */
unsigned char sticky_bits[SE_CTX_SAVE_STICKY_BITS_SIZE * 2];
/* AES key slots */
tegra_se_aes_key_slot_t key_slots[TEGRA_SE_AES_KEYSLOT_COUNT];
/* RSA key slots */
unsigned char rsa_keys[SE_CTX_SAVE_RSA_KEY_LENGTH];
} tegra_se_context_t;
#pragma pack(pop)
/* PKA context blob */
#pragma pack(push, 1)
typedef struct tegra_pka_context {
unsigned char sticky_bits[SE2_CONTEXT_SAVE_PKA1_STICKY_BITS_LENGTH];
unsigned char pka_keys[SE2_CONTEXT_SAVE_PKA1_KEYS_LENGTH];
} tegra_pka_context_t;
#pragma pack(pop)
/* SE context blob */
#pragma pack(push, 1)
typedef struct tegra_se_context_blob {
/* SE context */
tegra_se_context_t se_ctx;
/* Known Pattern */
unsigned char known_pattern[SE_CTX_KNOWN_PATTERN_SIZE];
} tegra_se_context_blob_t;
#pragma pack(pop)
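/*
 * Size check (illustrative): with the packed layouts above, tegra_se_context_t
 * is 16 (rand_data) + 32 (sticky_bits) + 16 * 64 (AES key slots) + 1024
 * (rsa_keys) = 2096 bytes, so tegra_se_context_blob_t is 2096 + 16 (known
 * pattern) = 2112 bytes, which matches SE_CTX_DRBG_BUFER_SIZE.
 */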
/* SE2 and PKA1 context blob */
#pragma pack(push, 1)
typedef struct tegra_se2_context_blob {
/* SE2 context */
tegra_se_context_t se_ctx;
/* PKA1 context */
tegra_pka_context_t pka_ctx;
/* Known Pattern */
unsigned char known_pattern[SE_CTX_KNOWN_PATTERN_SIZE];
} tegra_se2_context_blob_t;
#pragma pack(pop)
/* SE AES key type 128bit, 192bit, 256bit */
typedef enum {
SE_AES_KEY128,
SE_AES_KEY192,
SE_AES_KEY256,
} tegra_se_aes_key_type_t;
/* SE RSA key slot */
typedef struct tegra_se_rsa_key_slot {
/* 0 - 63 exponent key */
uint32_t exponent[SE_RSA_MAX_EXP_SIZE32];
/* 64 - 127 modulus key */
uint32_t modulus[SE_RSA_MAX_MOD_SIZE32];
} tegra_se_rsa_key_slot_t;
/*******************************************************************************
* Inline functions definition
@ -242,8 +639,21 @@ static inline void tegra_se_write_32(const tegra_se_dev_t *dev, uint32_t offset,
mmio_write_32(dev->se_base + offset, val);
}
static inline uint32_t tegra_pka_read_32(tegra_pka_dev_t *dev, uint32_t offset)
{
return mmio_read_32(dev->pka_base + offset);
}
static inline void tegra_pka_write_32(tegra_pka_dev_t *dev, uint32_t offset,
uint32_t val)
{
mmio_write_32(dev->pka_base + offset, val);
}
/*******************************************************************************
* Prototypes
******************************************************************************/
int tegra_se_start_normal_operation(const tegra_se_dev_t *, uint32_t);
int tegra_se_start_ctx_save_operation(const tegra_se_dev_t *, uint32_t);
#endif /* SE_PRIVATE_H */

View File

@ -20,7 +20,8 @@
* Constants and Macros
******************************************************************************/
#define TIMEOUT_100MS 100UL // Timeout in 100ms
#define TIMEOUT_100MS 100U // Timeout in 100ms
#define RNG_AES_KEY_INDEX 1
/*******************************************************************************
* Data structure and global variables
@ -67,6 +68,15 @@
* #--------------------------------#
*/
/* Known pattern data */
static const uint32_t se_ctx_known_pattern_data[SE_CTX_KNOWN_PATTERN_SIZE_WORDS] = {
/* 128 bit AES block */
0x0C0D0E0F,
0x08090A0B,
0x04050607,
0x00010203,
};
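/*
 * Layout note (illustrative): stored as little-endian words, the values above
 * lay out in memory as the bytes 0x0F, 0x0E, ..., 0x01, 0x00 - one fixed
 * 16-byte AES block. It is encrypted with the SRK as the last block of the
 * context blob, presumably so the restore path can check that the blob
 * decrypts to a known value.
 */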
/* SE input and output linked list buffers */
static tegra_se_io_lst_t se1_src_ll_buf;
static tegra_se_io_lst_t se1_dst_ll_buf;
@ -78,7 +88,7 @@ static tegra_se_io_lst_t se2_dst_ll_buf;
/* SE1 security engine device handle */
static tegra_se_dev_t se_dev_1 = {
.se_num = 1,
/* setup base address for se */
/* Setup base address for se */
.se_base = TEGRA_SE1_BASE,
/* Setup context size in AES blocks */
.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE1,
@ -86,12 +96,14 @@ static tegra_se_dev_t se_dev_1 = {
.src_ll_buf = &se1_src_ll_buf,
/* Setup DST buffers for SE operations */
.dst_ll_buf = &se1_dst_ll_buf,
/* Setup context save destination */
.ctx_save_buf = (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE),
};
/* SE2 security engine device handle */
static tegra_se_dev_t se_dev_2 = {
.se_num = 2,
/* setup base address for se */
/* Setup base address for se */
.se_base = TEGRA_SE2_BASE,
/* Setup context size in AES blocks */
.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE2,
@ -99,8 +111,12 @@ static tegra_se_dev_t se_dev_2 = {
.src_ll_buf = &se2_src_ll_buf,
/* Setup DST buffers for SE operations */
.dst_ll_buf = &se2_dst_ll_buf,
/* Setup context save destination */
.ctx_save_buf = (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE + 0x1000),
};
static bool ecid_valid;
/*******************************************************************************
* Functions Definition
******************************************************************************/
@ -186,35 +202,15 @@ static int32_t tegra_se_operation_complete(const tegra_se_dev_t *se_dev)
}
/*
* Verify the SE context save auto has been enabled.
* SE_CTX_SAVE_AUTO.ENABLE == ENABLE
* If the SE context save auto is not enabled, then set
* the context save auto enable and lock the setting.
* If the SE context save auto is not enabled and the
* enable setting is locked, then return an error.
* Returns true if the SE engine is configured to perform SE context save in
* hardware.
*/
static inline int32_t tegra_se_ctx_save_auto_enable(const tegra_se_dev_t *se_dev)
static inline bool tegra_se_atomic_save_enabled(const tegra_se_dev_t *se_dev)
{
uint32_t val;
int32_t ret = 0;
val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
if (SE_CTX_SAVE_AUTO_ENABLE(val) == SE_CTX_SAVE_AUTO_DIS) {
if (SE_CTX_SAVE_AUTO_LOCK(val) == SE_CTX_SAVE_AUTO_LOCK_EN) {
ERROR("%s: ERR: Cannot enable atomic. Write locked!\n",
__func__);
ret = -EACCES;
}
/* Program SE_CTX_SAVE_AUTO */
if (ret == 0) {
tegra_se_write_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET,
SE_CTX_SAVE_AUTO_LOCK_EN |
SE_CTX_SAVE_AUTO_EN);
}
}
return ret;
return (SE_CTX_SAVE_AUTO_ENABLE(val) == SE_CTX_SAVE_AUTO_EN);
}
/*
@ -259,14 +255,6 @@ static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
/* Check that previous operation is finalized */
ret = tegra_se_operation_prepare(se_dev);
/* Ensure HW atomic context save has been enabled
* This should have been done at boot time.
* SE_CTX_SAVE_AUTO.ENABLE == ENABLE
*/
if (ret == 0) {
ret = tegra_se_ctx_save_auto_enable(se_dev);
}
/* Read the context save progress counter: block_count
* Ensure no previous context save has been triggered
* SE_CTX_SAVE_AUTO.CURR_CNT == 0
@ -325,7 +313,8 @@ static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
* Security engine primitive operations, including normal operation
* and the context save operation.
*/
static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes,
bool context_save)
{
uint32_t nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE;
int ret = 0;
@ -351,7 +340,10 @@ static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nby
tegra_se_make_data_coherent(se_dev);
/* Start hardware operation */
tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_START);
if (context_save)
tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_CTX_SAVE);
else
tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_START);
/* Wait for operation to finish */
ret = tegra_se_operation_complete(se_dev);
@ -360,6 +352,22 @@ op_error:
return ret;
}
/*
* Normal security engine operations other than the context save
*/
int tegra_se_start_normal_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
{
return tegra_se_perform_operation(se_dev, nbytes, false);
}
/*
* Security engine context save operation
*/
int tegra_se_start_ctx_save_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
{
return tegra_se_perform_operation(se_dev, nbytes, true);
}
/*
* Security Engine sequence to generate SRK
* SE and SE2 will generate different SRKs from different
@ -381,7 +389,10 @@ static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev)
se_dev->dst_ll_buf->last_buff_num = 0;
/* Configure random number generator */
val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_ENTROPY);
if (ecid_valid)
val = (DRBG_MODE_FORCE_INSTANTION | DRBG_SRC_ENTROPY);
else
val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_ENTROPY);
tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);
/* Configure output destination = SRK */
@ -391,25 +402,562 @@ static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev)
tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
/* Perform hardware operation */
ret = tegra_se_perform_operation(se_dev, 0);
ret = tegra_se_start_normal_operation(se_dev, 0);
return ret;
}
/*
* Generate plain text random data to some memory location using
* SE/SE2's SP800-90 random number generator. The random data size
* must be some multiple of the AES block size (16 bytes).
*/
static int tegra_se_lp_generate_random_data(tegra_se_dev_t *se_dev)
{
int ret = 0;
uint32_t val;
/* Set some arbitrary memory location to store the random data */
se_dev->dst_ll_buf->last_buff_num = 0;
if (!se_dev->ctx_save_buf) {
ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
return PSCI_E_NOT_PRESENT;
}
se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
se_dev->ctx_save_buf)->rand_data)));
se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_RANDOM_DATA_SIZE;
/* Configure the following hardware register settings:
* SE_CONFIG.DEC_ALG = NOP
* SE_CONFIG.ENC_ALG = RNG
* SE_CONFIG.ENC_MODE = KEY192
* SE_CONFIG.DST = MEMORY
*/
val = (SE_CONFIG_ENC_ALG_RNG |
SE_CONFIG_DEC_ALG_NOP |
SE_CONFIG_ENC_MODE_KEY192 |
SE_CONFIG_DST_MEMORY);
tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
/* Program the RNG options in SE_CRYPTO_CONFIG as follows:
* XOR_POS = BYPASS
* INPUT_SEL = RANDOM (Entropy or LFSR)
* HASH_ENB = DISABLE
*/
val = (SE_CRYPTO_INPUT_RANDOM |
SE_CRYPTO_XOR_BYPASS |
SE_CRYPTO_CORE_ENCRYPT |
SE_CRYPTO_HASH_DISABLE |
SE_CRYPTO_KEY_INDEX(RNG_AES_KEY_INDEX) |
SE_CRYPTO_IV_ORIGINAL);
tegra_se_write_32(se_dev, SE_CRYPTO_REG_OFFSET, val);
/* Configure RNG */
if (ecid_valid)
val = (DRBG_MODE_FORCE_INSTANTION | DRBG_SRC_LFSR);
else
val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_LFSR);
tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);
/* SE normal operation */
ret = tegra_se_start_normal_operation(se_dev, SE_CTX_SAVE_RANDOM_DATA_SIZE);
return ret;
}
/*
* Encrypt memory blocks with SRK as part of the security engine context.
* The data blocks include: random data and the known pattern data, where
* the random data is the first block and known pattern is the last block.
*/
static int tegra_se_lp_data_context_save(tegra_se_dev_t *se_dev,
uint64_t src_addr, uint64_t dst_addr, uint32_t data_size)
{
int ret = 0;
se_dev->src_ll_buf->last_buff_num = 0;
se_dev->dst_ll_buf->last_buff_num = 0;
se_dev->src_ll_buf->buffer[0].addr = src_addr;
se_dev->src_ll_buf->buffer[0].data_len = data_size;
se_dev->dst_ll_buf->buffer[0].addr = dst_addr;
se_dev->dst_ll_buf->buffer[0].data_len = data_size;
/* By setting the context source from memory and calling the context save
* operation, the SE encrypts the memory data with SRK.
*/
tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, SE_CTX_SAVE_SRC_MEM);
ret = tegra_se_start_ctx_save_operation(se_dev, data_size);
return ret;
}
/*
* Context save the key table access control sticky bits and
* security status of each key-slot. The encrypted sticky-bits are
* 32 bytes (2 AES blocks) and formatted as the following structure:
* { bit in registers bit in context save
* SECURITY_0[4] 158
* SE_RSA_KEYTABLE_ACCESS_1[2:0] 157:155
* SE_RSA_KEYTABLE_ACCESS_0[2:0] 154:152
* SE_RSA_SECURITY_PERKEY_0[1:0] 151:150
* SE_CRYPTO_KEYTABLE_ACCESS_15[7:0] 149:142
* ...,
* SE_CRYPTO_KEYTABLE_ACCESS_0[7:0] 29:22
* SE_CRYPTO_SECURITY_PERKEY_0[15:0] 21:6
* SE_TZRAM_SECURITY_0[1:0] 5:4
* SE_SECURITY_0[16] 3:3
* SE_SECURITY_0[2:0] } 2:0
*/
static int tegra_se_lp_sticky_bits_context_save(tegra_se_dev_t *se_dev)
{
int ret = PSCI_E_INTERN_FAIL;
uint32_t val = 0;
se_dev->dst_ll_buf->last_buff_num = 0;
if (!se_dev->ctx_save_buf) {
ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
return PSCI_E_NOT_PRESENT;
}
se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
se_dev->ctx_save_buf)->sticky_bits)));
se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_STICKY_BITS_SIZE;
/*
* The 1st AES block saves the sticky-bits context 1 - 16 bytes (0 - 3 words).
* The 2nd AES block saves the sticky-bits context 17 - 32 bytes (4 - 7 words).
*/
for (int i = 0; i < 2; i++) {
val = SE_CTX_SAVE_SRC_STICKY_BITS |
SE_CTX_SAVE_STICKY_WORD_QUAD(i);
tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
/* SE context save operation */
ret = tegra_se_start_ctx_save_operation(se_dev,
SE_CTX_SAVE_STICKY_BITS_SIZE);
if (ret)
break;
se_dev->dst_ll_buf->buffer[0].addr += SE_CTX_SAVE_STICKY_BITS_SIZE;
}
return ret;
}
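/*
 * Sanity check (illustrative): the loop above runs twice and saves
 * SE_CTX_SAVE_STICKY_BITS_SIZE (16) bytes per pass, 32 bytes in total,
 * exactly filling the sticky_bits[SE_CTX_SAVE_STICKY_BITS_SIZE * 2] field
 * of tegra_se_context_t.
 */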
static int tegra_se_aeskeytable_context_save(tegra_se_dev_t *se_dev)
{
uint32_t val = 0;
int ret = 0;
se_dev->dst_ll_buf->last_buff_num = 0;
if (!se_dev->ctx_save_buf) {
ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
ret = -EINVAL;
goto aes_keytable_save_err;
}
/* AES key context save */
for (int slot = 0; slot < TEGRA_SE_AES_KEYSLOT_COUNT; slot++) {
se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
((tegra_se_context_t *)se_dev->
ctx_save_buf)->key_slots[slot].key)));
se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;
for (int i = 0; i < 2; i++) {
val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
SE_CTX_SAVE_KEY_INDEX(slot) |
SE_CTX_SAVE_WORD_QUAD(i);
tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
/* SE context save operation */
ret = tegra_se_start_ctx_save_operation(se_dev,
TEGRA_SE_KEY_128_SIZE);
if (ret) {
ERROR("%s: ERR: AES key CTX_SAVE OP failed, "
"slot=%d, word_quad=%d.\n",
__func__, slot, i);
goto aes_keytable_save_err;
}
se_dev->dst_ll_buf->buffer[0].addr += TEGRA_SE_KEY_128_SIZE;
}
/* OIV context save */
se_dev->dst_ll_buf->last_buff_num = 0;
se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
((tegra_se_context_t *)se_dev->
ctx_save_buf)->key_slots[slot].oiv)));
se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE;
val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
SE_CTX_SAVE_KEY_INDEX(slot) |
SE_CTX_SAVE_WORD_QUAD_ORIG_IV;
tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
/* SE context save operation */
ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE);
if (ret) {
ERROR("%s: ERR: OIV CTX_SAVE OP failed, slot=%d.\n",
__func__, slot);
goto aes_keytable_save_err;
}
/* UIV context save */
se_dev->dst_ll_buf->last_buff_num = 0;
se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
((tegra_se_context_t *)se_dev->
ctx_save_buf)->key_slots[slot].uiv)));
se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE;
val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
SE_CTX_SAVE_KEY_INDEX(slot) |
SE_CTX_SAVE_WORD_QUAD_UPD_IV;
tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
/* SE context save operation */
ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE);
if (ret) {
ERROR("%s: ERR: UIV CTX_SAVE OP failed, slot=%d\n",
__func__, slot);
goto aes_keytable_save_err;
}
}
aes_keytable_save_err:
return ret;
}
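/*
 * Sanity check (illustrative): each key slot contributes 2 x 16 bytes of key
 * material plus 16 bytes each for the original and updated IVs, i.e. 64 bytes,
 * so all 16 slots consume 1024 bytes - the size of the key_slots[] array in
 * tegra_se_context_t.
 */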
static int tegra_se_lp_rsakeytable_context_save(tegra_se_dev_t *se_dev)
{
uint32_t val = 0;
int ret = 0;
/* First the modulus and then the exponent must be
* encrypted and saved. This is repeated for SLOT 0
* and SLOT 1. Hence the order:
* SLOT 0 exponent : RSA_KEY_INDEX : 0
* SLOT 0 modulus : RSA_KEY_INDEX : 1
* SLOT 1 exponent : RSA_KEY_INDEX : 2
* SLOT 1 modulus : RSA_KEY_INDEX : 3
*/
const unsigned int key_index_mod[TEGRA_SE_RSA_KEYSLOT_COUNT][2] = {
/* RSA key slot 0 */
{SE_RSA_KEY_INDEX_SLOT0_EXP, SE_RSA_KEY_INDEX_SLOT0_MOD},
/* RSA key slot 1 */
{SE_RSA_KEY_INDEX_SLOT1_EXP, SE_RSA_KEY_INDEX_SLOT1_MOD},
};
se_dev->dst_ll_buf->last_buff_num = 0;
se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
((tegra_se_context_t *)se_dev->
ctx_save_buf)->rsa_keys)));
se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;
for (int slot = 0; slot < TEGRA_SE_RSA_KEYSLOT_COUNT; slot++) {
/* loop for modulus and exponent */
for (int index = 0; index < 2; index++) {
for (int word_quad = 0; word_quad < 16; word_quad++) {
val = SE_CTX_SAVE_SRC_RSA_KEYTABLE |
SE_CTX_SAVE_RSA_KEY_INDEX(
key_index_mod[slot][index]) |
SE_CTX_RSA_WORD_QUAD(word_quad);
tegra_se_write_32(se_dev,
SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
/* SE context save operation */
ret = tegra_se_start_ctx_save_operation(se_dev,
TEGRA_SE_KEY_128_SIZE);
if (ret) {
ERROR("%s: ERR: slot=%d.\n",
__func__, slot);
goto rsa_keytable_save_err;
}
/* Update the pointer to the next word quad */
se_dev->dst_ll_buf->buffer[0].addr +=
TEGRA_SE_KEY_128_SIZE;
}
}
}
rsa_keytable_save_err:
return ret;
}
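/*
 * Sanity check (illustrative): 2 RSA slots x 2 components (exponent, modulus)
 * x 16 word-quads x TEGRA_SE_KEY_128_SIZE (16 bytes) = 1024 bytes, which is
 * exactly SE_CTX_SAVE_RSA_KEY_LENGTH, the size of the rsa_keys[] field.
 */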
static int tegra_se_pkakeytable_sticky_bits_save(tegra_se_dev_t *se_dev)
{
int ret = 0;
se_dev->dst_ll_buf->last_buff_num = 0;
se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
((tegra_se2_context_blob_t *)se_dev->
ctx_save_buf)->pka_ctx.sticky_bits)));
se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_BLOCK_SIZE;
/* PKA1 sticky bits are 1 AES block (16 bytes) */
tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
SE_CTX_SAVE_SRC_PKA1_STICKY_BITS |
SE_CTX_STICKY_WORD_QUAD_WORDS_0_3);
/* SE context save operation */
ret = tegra_se_start_ctx_save_operation(se_dev, 0);
if (ret) {
ERROR("%s: ERR: PKA1 sticky bits CTX_SAVE OP failed\n",
__func__);
goto pka_sticky_bits_save_err;
}
pka_sticky_bits_save_err:
return ret;
}
static int tegra_se_pkakeytable_context_save(tegra_se_dev_t *se_dev)
{
uint32_t val = 0;
int ret = 0;
se_dev->dst_ll_buf->last_buff_num = 0;
se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
((tegra_se2_context_blob_t *)se_dev->
ctx_save_buf)->pka_ctx.pka_keys)));
se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;
/* for each slot, save word quad 0-127 */
for (int slot = 0; slot < TEGRA_SE_PKA1_KEYSLOT_COUNT; slot++) {
for (int word_quad = 0; word_quad < 512/4; word_quad++) {
val = SE_CTX_SAVE_SRC_PKA1_KEYTABLE |
SE_CTX_PKA1_WORD_QUAD_L((slot * 128) +
word_quad) |
SE_CTX_PKA1_WORD_QUAD_H((slot * 128) +
word_quad);
tegra_se_write_32(se_dev,
SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
/* SE context save operation */
ret = tegra_se_start_ctx_save_operation(se_dev,
TEGRA_SE_KEY_128_SIZE);
if (ret) {
ERROR("%s: ERR: pka1 keytable ctx save error\n",
__func__);
goto pka_keytable_save_err;
}
/* Update the pointer to the next word quad */
se_dev->dst_ll_buf->buffer[0].addr +=
TEGRA_SE_KEY_128_SIZE;
}
}
pka_keytable_save_err:
return ret;
}
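/*
 * Sanity check (illustrative): 4 PKA1 key slots x (512 / 4 = 128) word-quads
 * x TEGRA_SE_KEY_128_SIZE (16 bytes) = 8192 bytes, matching
 * SE2_CONTEXT_SAVE_PKA1_KEYS_LENGTH in the pka_keys[] field.
 */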
static int tegra_se_save_SRK(tegra_se_dev_t *se_dev)
{
tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
SE_CTX_SAVE_SRC_SRK);
/* SE context save operation */
return tegra_se_start_ctx_save_operation(se_dev, 0);
}
/*
* Lock both SE instances from non-TZ clients.
*/
static inline void tegra_se_lock(tegra_se_dev_t *se_dev)
{
uint32_t val;
assert(se_dev);
val = tegra_se_read_32(se_dev, SE_SECURITY_REG_OFFSET);
val |= SE_SECURITY_TZ_LOCK_SOFT(SE_SECURE);
tegra_se_write_32(se_dev, SE_SECURITY_REG_OFFSET, val);
}
/*
* Use SRK to encrypt SE state and save to TZRAM carveout
*/
static int tegra_se_context_save_sw(tegra_se_dev_t *se_dev)
{
int err = 0;
assert(se_dev);
/* Lock entire SE/SE2 as TZ protected */
tegra_se_lock(se_dev);
INFO("%s: generate SRK\n", __func__);
/* Generate SRK */
err = tegra_se_generate_srk(se_dev);
if (err) {
ERROR("%s: ERR: SRK generation failed\n", __func__);
return err;
}
INFO("%s: generate random data\n", __func__);
/* Generate random data */
err = tegra_se_lp_generate_random_data(se_dev);
if (err) {
ERROR("%s: ERR: LP random pattern generation failed\n", __func__);
return err;
}
INFO("%s: encrypt random data\n", __func__);
/* Encrypt the random data block */
err = tegra_se_lp_data_context_save(se_dev,
((uint64_t)(&(((tegra_se_context_t *)se_dev->
ctx_save_buf)->rand_data))),
((uint64_t)(&(((tegra_se_context_t *)se_dev->
ctx_save_buf)->rand_data))),
SE_CTX_SAVE_RANDOM_DATA_SIZE);
if (err) {
ERROR("%s: ERR: random pattern encryption failed\n", __func__);
return err;
}
INFO("%s: save SE sticky bits\n", __func__);
/* Save AES sticky bits context */
err = tegra_se_lp_sticky_bits_context_save(se_dev);
if (err) {
ERROR("%s: ERR: sticky bits context save failed\n", __func__);
return err;
}
INFO("%s: save AES keytables\n", __func__);
/* Save AES key table context */
err = tegra_se_aeskeytable_context_save(se_dev);
if (err) {
ERROR("%s: ERR: LP keytable save failed\n", __func__);
return err;
}
/* RSA key slot table context save */
INFO("%s: save RSA keytables\n", __func__);
err = tegra_se_lp_rsakeytable_context_save(se_dev);
if (err) {
ERROR("%s: ERR: rsa key table context save failed\n", __func__);
return err;
}
/* Only SE2 has an interface with PKA1; thus, PKA1's context is saved
* via SE2.
*/
if (se_dev->se_num == 2) {
/* Encrypt PKA1 sticky bits on SE2 only */
INFO("%s: save PKA sticky bits\n", __func__);
err = tegra_se_pkakeytable_sticky_bits_save(se_dev);
if (err) {
ERROR("%s: ERR: PKA sticky bits context save failed\n", __func__);
return err;
}
/* Encrypt PKA1 keyslots on SE2 only */
INFO("%s: save PKA keytables\n", __func__);
err = tegra_se_pkakeytable_context_save(se_dev);
if (err) {
ERROR("%s: ERR: PKA key table context save failed\n", __func__);
return err;
}
}
/* Encrypt known pattern */
if (se_dev->se_num == 1) {
err = tegra_se_lp_data_context_save(se_dev,
((uint64_t)(&se_ctx_known_pattern_data)),
((uint64_t)(&(((tegra_se_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))),
SE_CTX_KNOWN_PATTERN_SIZE);
} else if (se_dev->se_num == 2) {
err = tegra_se_lp_data_context_save(se_dev,
((uint64_t)(&se_ctx_known_pattern_data)),
((uint64_t)(&(((tegra_se2_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))),
SE_CTX_KNOWN_PATTERN_SIZE);
}
if (err) {
ERROR("%s: ERR: save LP known pattern failure\n", __func__);
return err;
}
/* Write lp context buffer address into PMC scratch register */
if (se_dev->se_num == 1) {
/* SE context address */
mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SECURE_SCRATCH117_OFFSET,
((uint64_t)(se_dev->ctx_save_buf)));
} else if (se_dev->se_num == 2) {
/* SE2 & PKA1 context address */
mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SECURE_SCRATCH116_OFFSET,
((uint64_t)(se_dev->ctx_save_buf)));
}
/* Saves SRK to PMC secure scratch registers for BootROM, which
* verifies and restores the security engine context on warm boot.
*/
err = tegra_se_save_SRK(se_dev);
if (err < 0) {
ERROR("%s: ERR: LP SRK save failure\n", __func__);
return err;
}
INFO("%s: SE context save done \n", __func__);
return err;
}
/*
* Initialize the SE engine handle
*/
void tegra_se_init(void)
{
uint32_t val = 0;
INFO("%s: start SE init\n", __func__);
/* Generate random SRK to initialize DRBG */
tegra_se_generate_srk(&se_dev_1);
tegra_se_generate_srk(&se_dev_2);
/* determine if ECID is valid */
val = mmio_read_32(TEGRA_FUSE_BASE + FUSE_JTAG_SECUREID_VALID);
ecid_valid = (val == ECID_VALID);
INFO("%s: SE init done\n", __func__);
}
static void tegra_se_enable_clocks(void)
{
uint32_t val = 0;
/* Enable entropy clock */
val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W);
val |= ENTROPY_CLK_ENB_BIT;
mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val);
/* De-Assert Entropy Reset */
val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W);
val &= ~ENTROPY_RESET_BIT;
mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W, val);
/* Enable SE clock */
val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V);
val |= SE_CLK_ENB_BIT;
mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val);
/* De-Assert SE Reset */
val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V);
val &= ~SE_RESET_BIT;
mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V, val);
}
static void tegra_se_disable_clocks(void)
{
uint32_t val = 0;
/* Disable entropy clock */
val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W);
val &= ~ENTROPY_CLK_ENB_BIT;
mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val);
/* Disable SE clock */
val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V);
val &= ~SE_CLK_ENB_BIT;
mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val);
}
/*
* Security engine power suspend entry point.
* This function is invoked from PSCI power domain suspend handler.
@ -417,20 +965,56 @@ void tegra_se_init(void)
int32_t tegra_se_suspend(void)
{
int32_t ret = 0;
uint32_t val = 0;
/* Atomic context save se2 and pka1 */
INFO("%s: SE2/PKA1 atomic context save\n", __func__);
ret = tegra_se_context_save_atomic(&se_dev_2);
/* SE does not use the SMMU in EL3, so disable it here.
* The kernel re-enables it on resume. */
val = mmio_read_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0);
val &= ~PPCS_SMMU_ENABLE;
mmio_write_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0, val);
/* Atomic context save se */
if (ret == 0) {
INFO("%s: SE1 atomic context save\n", __func__);
ret = tegra_se_context_save_atomic(&se_dev_1);
tegra_se_enable_clocks();
if (tegra_se_atomic_save_enabled(&se_dev_2) &&
tegra_se_atomic_save_enabled(&se_dev_1)) {
/* Atomic context save se2 and pka1 */
INFO("%s: SE2/PKA1 atomic context save\n", __func__);
if (ret == 0) {
ret = tegra_se_context_save_atomic(&se_dev_2);
}
/* Atomic context save se */
if (ret == 0) {
INFO("%s: SE1 atomic context save\n", __func__);
ret = tegra_se_context_save_atomic(&se_dev_1);
}
if (ret == 0) {
INFO("%s: SE atomic context save done\n", __func__);
}
} else if (!tegra_se_atomic_save_enabled(&se_dev_2) &&
!tegra_se_atomic_save_enabled(&se_dev_1)) {
/* SW context save se2 and pka1 */
INFO("%s: SE2/PKA1 legacy(SW) context save\n", __func__);
if (ret == 0) {
ret = tegra_se_context_save_sw(&se_dev_2);
}
/* SW context save se */
if (ret == 0) {
INFO("%s: SE1 legacy(SW) context save\n", __func__);
ret = tegra_se_context_save_sw(&se_dev_1);
}
if (ret == 0) {
INFO("%s: SE SW context save done\n", __func__);
}
} else {
ERROR("%s: One SE set for atomic CTX save, the other is not\n",
__func__);
}
if (ret == 0) {
INFO("%s: SE atomic context save done\n", __func__);
}
tegra_se_disable_clocks();
return ret;
}
@ -445,6 +1029,7 @@ int32_t tegra_se_save_tzram(void)
uint32_t timeout;
INFO("%s: SE TZRAM save start\n", __func__);
tegra_se_enable_clocks();
val = (SE_TZRAM_OP_REQ_INIT | SE_TZRAM_OP_MODE_SAVE);
tegra_se_write_32(&se_dev_1, SE_TZRAM_OPERATION, val);
@ -465,6 +1050,8 @@ int32_t tegra_se_save_tzram(void)
INFO("%s: SE TZRAM save done!\n", __func__);
}
tegra_se_disable_clocks();
return ret;
}
@ -483,12 +1070,6 @@ static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev)
DRBG_RO_ENT_SRC_ENABLE;
tegra_se_write_32(se_dev, SE_RNG_SRC_CONFIG_REG_OFFSET, val);
/* Enable and lock the SE atomic context save setting */
if (tegra_se_ctx_save_auto_enable(se_dev) != 0) {
ERROR("%s: ERR: enable SE%d context save auto failed!\n",
__func__, se_dev->se_num);
}
/* Set a random value to SRK to initialize DRBG */
tegra_se_generate_srk(se_dev);
}

View File

@ -177,16 +177,6 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
if (tegra_se_suspend() != 0) {
ret = PSCI_E_INTERN_FAIL;
}
/* Save tzram contents */
if (tegra_se_save_tzram() != 0) {
ret = PSCI_E_INTERN_FAIL;
}
}
/* enter system suspend */
if (ret == PSCI_E_SUCCESS) {
tegra_fc_soc_powerdn(mpidr);
}
} else if (stateid_afflvl1 == PSTATE_ID_CLUSTER_IDLE) {
@ -217,6 +207,27 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
return ret;
}
int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
u_register_t mpidr = read_mpidr();
const plat_local_state_t *pwr_domain_state =
target_state->pwr_domain_state;
unsigned int stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL];
if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
if (tegra_chipid_is_t210_b01()) {
/* Save tzram contents */
tegra_se_save_tzram();
}
/* enter system suspend */
tegra_fc_soc_powerdn(mpidr);
}
return PSCI_E_SUCCESS;
}
int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();

View File

@ -19,7 +19,7 @@ $(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER))
MAX_XLAT_TABLES := 10
$(eval $(call add_define,MAX_XLAT_TABLES))
MAX_MMAP_REGIONS := 10
MAX_MMAP_REGIONS := 15
$(eval $(call add_define,MAX_MMAP_REGIONS))
PLAT_INCLUDES += -I${SOC_DIR}/drivers/se