Merge pull request #341 from vwadekar/tegra-denver-plat-support-v3

Tegra denver plat support v3
danh-arm committed on 2015-07-24 10:40:18 +01:00
commit 7d4479a383
15 changed files with 755 additions and 37 deletions


@ -1,5 +1,8 @@
Tegra-T210 Overview
====================
Tegra SoCs - Overview
======================
* T210
-------
T210 has Quad ARM® Cortex®-A57 cores in a switched configuration with a
companion set of quad ARM Cortex-A53 cores. The Cortex-A57 and A53 cores
@ -9,6 +12,34 @@ including legacy ARMv7 applications. The Cortex-A57 processors each have
Level 2 unified cache. The Cortex-A53 processors each have 32 KB Instruction
and 32 KB Data Level 1 caches; and have a 512 KB shared Level 2 unified cache.
* T132
-------
Denver is NVIDIA's own custom-designed, 64-bit, dual-core CPU which is
fully ARMv8 architecture compatible. Each of the two Denver cores
implements a 7-way superscalar microarchitecture (up to 7 concurrent
micro-ops can be executed per clock), and includes a 128KB 4-way L1
instruction cache, a 64KB 4-way L1 data cache, and a 2MB 16-way L2
cache, which services both cores.
Denver implements an innovative process called Dynamic Code Optimization,
which optimizes frequently used software routines at runtime into dense,
highly tuned microcode-equivalent routines. These are stored in a
dedicated, 128MB main-memory-based optimization cache. Once the optimized
micro-ops have been read into the instruction cache, they are fetched and
executed from there for as long as they are needed and capacity allows,
effectively removing the need to re-optimize the software routines.
Instead of using hardware to extract the instruction-level parallelism
(ILP) inherent in the code, Denver extracts the ILP once via software
techniques, and then executes those routines repeatedly, thus amortizing
the cost of ILP extraction over the many execution instances.
Denver also features new low latency power-state transitions, in addition
to extensive power-gating and dynamic voltage and clock scaling based on
workloads.
Directory structure
====================
@ -25,9 +56,9 @@ without changing any makefiles.
Preparing the BL31 image to run on Tegra SoCs
===================================================
CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-none-elf- make PLAT=tegra \
TARGET_SOC=<target-soc e.g. t210> BL32=<path-to-trusted-os-binary> \
SPD=<dispatcher e.g. tlkd> all
'CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-none-elf- make PLAT=tegra \
TARGET_SOC=<target-soc e.g. t210|t132> BL32=<path-to-trusted-os-binary> \
SPD=<dispatcher e.g. tlkd> all'
Power Management
================


@ -0,0 +1,40 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DENVER_H__
#define __DENVER_H__
/* MIDR for Denver v1.0 */
#define DENVER_1_0_MIDR 0x4E0F0000
/* CPU state ids - implementation defined */
#define DENVER_CPU_STATE_POWER_DOWN 0x3
#endif /* __DENVER_H__ */
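
The MIDR value above follows the architectural MIDR_EL1 field layout (implementer [31:24], variant [23:20], architecture [19:16], part number [15:4], revision [3:0]); 0x4E is the NVIDIA implementer code ('N'). A small stand-alone sketch of the decode, purely for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t midr = 0x4E0F0000;	/* DENVER_1_0_MIDR */

	printf("implementer  0x%02X\n", (midr >> 24) & 0xFF);	/* 0x4E = 'N', NVIDIA */
	printf("variant      0x%X\n",   (midr >> 20) & 0xF);
	printf("architecture 0x%X\n",   (midr >> 16) & 0xF);
	printf("part number  0x%03X\n", (midr >> 4) & 0xFFF);
	printf("revision     0x%X\n",   midr & 0xF);
	return 0;
}
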

lib/cpus/aarch64/denver.S (new file, 166 lines)

@ -0,0 +1,166 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <denver.h>
#include <cpu_macros.S>
#include <plat_macros.S>
/* ---------------------------------------------
* Disable debug interfaces
* ---------------------------------------------
*/
func denver_disable_ext_debug
mov x0, #1
msr osdlr_el1, x0
isb
dsb sy
ret
endfunc denver_disable_ext_debug
/* ----------------------------------------------------
* Enable dynamic code optimizer (DCO)
* ----------------------------------------------------
*/
func denver_enable_dco
mrs x0, mpidr_el1
and x0, x0, #0xF
mov x1, #1
lsl x1, x1, x0
msr s3_0_c15_c0_2, x1
isb
ret
endfunc denver_enable_dco
/* ----------------------------------------------------
* Disable dynamic code optimizer (DCO)
* ----------------------------------------------------
*/
func denver_disable_dco
/* turn off background work */
mrs x0, mpidr_el1
and x0, x0, #0xF
mov x1, #1
lsl x1, x1, x0
lsl x2, x1, #16
msr s3_0_c15_c0_2, x2
isb
/* wait till the background work turns off */
1: mrs x2, s3_0_c15_c0_2
lsr x2, x2, #32
and w2, w2, 0xFFFF
and x2, x2, x1
cbnz x2, 1b
ret
endfunc denver_disable_dco
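
For readers following the bit arithmetic in the two routines above, here is a C-level sketch of what they do to the implementation-defined DCO control register (s3_0_c15_c0_2), assuming the layout implied by the assembly: per-core enable bits in [15:0], per-core stop requests in [31:16] and per-core busy status in [47:32]. The accessor names are stand-ins for the mrs/msr instructions, not a real API:

#include <stdint.h>

static uint64_t dco_ctrl;	/* models s3_0_c15_c0_2 for illustration only */

static uint64_t read_dco_ctrl(void)      { return dco_ctrl; }
static void write_dco_ctrl(uint64_t val) { dco_ctrl = val; }

/* core = MPIDR_EL1[3:0], as extracted by the assembly */
static void dco_enable(unsigned int core)
{
	write_dco_ctrl(1ULL << core);		/* set this core's enable bit */
}

static void dco_disable(unsigned int core)
{
	uint64_t mask = 1ULL << core;

	write_dco_ctrl(mask << 16);		/* ask background work to stop */
	while ((read_dco_ctrl() >> 32) & 0xFFFF & mask)
		;				/* wait for this core's busy bit to clear */
}
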
/* -------------------------------------------------
* The CPU Ops reset function for Denver.
* -------------------------------------------------
*/
func denver_reset_func
mov x19, x30
/* ----------------------------------------------------
* Enable dynamic code optimizer (DCO)
* ----------------------------------------------------
*/
bl denver_enable_dco
ret x19
endfunc denver_reset_func
/* ----------------------------------------------------
* The CPU Ops core power down function for Denver.
* ----------------------------------------------------
*/
func denver_core_pwr_dwn
mov x19, x30
/* ----------------------------------------------------
* We enter the 'core power gated with ARM state not
* retained' power state during CPU power down. We let
* DCO know that we expect to enter this power state
* by writing to the ACTLR_EL1 register.
* ----------------------------------------------------
*/
mov x0, #DENVER_CPU_STATE_POWER_DOWN
msr actlr_el1, x0
/* ---------------------------------------------
* Force DCO to be quiescent
* ---------------------------------------------
*/
bl denver_disable_dco
/* ---------------------------------------------
* Force the debug interfaces to be quiescent
* ---------------------------------------------
*/
bl denver_disable_ext_debug
ret x19
endfunc denver_core_pwr_dwn
/* -------------------------------------------------------
* The CPU Ops cluster power down function for Denver.
* -------------------------------------------------------
*/
func denver_cluster_pwr_dwn
ret
endfunc denver_cluster_pwr_dwn
/* ---------------------------------------------
* This function provides Denver specific
* register information for crash reporting.
* It needs to return with x6 pointing to
* a list of register names in ascii and
* x8 - x15 having values of registers to be
* reported.
* ---------------------------------------------
*/
.section .rodata.denver_regs, "aS"
denver_regs: /* The ascii list of register names to be reported */
.asciz "actlr_el1", ""
func denver_cpu_reg_dump
adr x6, denver_regs
mrs x8, ACTLR_EL1
ret
endfunc denver_cpu_reg_dump
declare_cpu_ops denver, DENVER_1_0_MIDR


@ -50,8 +50,6 @@ BL31_SOURCES += drivers/arm/gic/gic_v2.c \
drivers/console/console.S \
drivers/delay_timer/delay_timer.c \
drivers/ti/uart/16550_console.S \
lib/cpus/aarch64/cortex_a53.S \
lib/cpus/aarch64/cortex_a57.S \
plat/common/aarch64/platform_mp_stack.S \
${COMMON_DIR}/aarch64/tegra_helpers.S \
${COMMON_DIR}/drivers/memctrl/memctrl.c \


@ -51,27 +51,27 @@ static int system_suspended;
* The following platform setup functions are weakly defined. They
* provide typical implementations that will be overridden by a SoC.
*/
#pragma weak tegra_prepare_cpu_suspend
#pragma weak tegra_prepare_cpu_on
#pragma weak tegra_prepare_cpu_off
#pragma weak tegra_prepare_cpu_on_finish
#pragma weak tegra_soc_prepare_cpu_suspend
#pragma weak tegra_soc_prepare_cpu_on
#pragma weak tegra_soc_prepare_cpu_off
#pragma weak tegra_soc_prepare_cpu_on_finish
int tegra_prepare_cpu_suspend(unsigned int id, unsigned int afflvl)
int tegra_soc_prepare_cpu_suspend(unsigned int id, unsigned int afflvl)
{
return PSCI_E_NOT_SUPPORTED;
}
int tegra_prepare_cpu_on(unsigned long mpidr)
int tegra_soc_prepare_cpu_on(unsigned long mpidr)
{
return PSCI_E_SUCCESS;
}
int tegra_prepare_cpu_off(unsigned long mpidr)
int tegra_soc_prepare_cpu_off(unsigned long mpidr)
{
return PSCI_E_SUCCESS;
}
int tegra_prepare_cpu_on_finish(unsigned long mpidr)
int tegra_soc_prepare_cpu_on_finish(unsigned long mpidr)
{
return PSCI_E_SUCCESS;
}
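
The weak defaults above illustrate the link-time override pattern this patch relies on: the common layer ships a #pragma weak fallback and a SoC port supplies a strong definition with the same name, which the linker then prefers. A schematic sketch, with the two translation units shown back to back and includes elided (this mirrors the pattern, it is not code lifted from the patch):

/* common code: weak fallback used when a SoC does not override it */
#pragma weak tegra_soc_prepare_cpu_off

int tegra_soc_prepare_cpu_off(unsigned long mpidr)
{
	return PSCI_E_SUCCESS;		/* nothing SoC-specific by default */
}

/* SoC port (separate file): strong definition, picked over the weak one */
int tegra_soc_prepare_cpu_off(unsigned long mpidr)
{
	tegra_fc_cpu_off(mpidr & MPIDR_CPU_MASK);	/* SoC-specific power-off */
	return PSCI_E_SUCCESS;
}
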
@ -134,17 +134,7 @@ unsigned int tegra_get_sys_suspend_power_state(void)
******************************************************************************/
int32_t tegra_validate_power_state(unsigned int power_state)
{
/* Sanity check the requested state */
if (psci_get_pstate_type(power_state) == PSTATE_TYPE_STANDBY) {
/*
* It's possible to enter standby only on affinity level 0 i.e.
* a cpu on Tegra. Ignore any other affinity level.
*/
if (psci_get_pstate_afflvl(power_state) != MPIDR_AFFLVL0)
return PSCI_E_INVALID_PARAMS;
}
return PSCI_E_SUCCESS;
return tegra_soc_validate_power_state(power_state);
}
/*******************************************************************************
@ -171,7 +161,7 @@ int tegra_affinst_on(unsigned long mpidr,
sec_entry_point[cpu] = sec_entrypoint;
flush_dcache_range((uint64_t)&sec_entry_point[cpu], sizeof(uint64_t));
return tegra_prepare_cpu_on(mpidr);
return tegra_soc_prepare_cpu_on(mpidr);
}
/*******************************************************************************
@ -194,7 +184,7 @@ void tegra_affinst_off(unsigned int afflvl, unsigned int state)
if (afflvl > MPIDR_AFFLVL0)
return;
tegra_prepare_cpu_off(read_mpidr());
tegra_soc_prepare_cpu_off(read_mpidr());
}
/*******************************************************************************
@ -227,7 +217,7 @@ void tegra_affinst_suspend(unsigned long sec_entrypoint,
sec_entry_point[cpu] = sec_entrypoint;
flush_dcache_range((uint64_t)&sec_entry_point[cpu], sizeof(uint64_t));
tegra_prepare_cpu_suspend(id, afflvl);
tegra_soc_prepare_cpu_suspend(id, afflvl);
/* disable GICC */
tegra_gic_cpuif_deactivate();
@ -280,7 +270,7 @@ void tegra_affinst_on_finish(unsigned int afflvl, unsigned int state)
/*
* Reset hardware settings.
*/
tegra_prepare_cpu_on_finish(read_mpidr());
tegra_soc_prepare_cpu_on_finish(read_mpidr());
}
/*******************************************************************************
@ -338,7 +328,7 @@ int platform_setup_pm(const plat_pm_ops_t **plat_ops)
/*
* Reset hardware settings.
*/
tegra_prepare_cpu_on_finish(read_mpidr());
tegra_soc_prepare_cpu_on_finish(read_mpidr());
/*
* Initialize PM ops struct


@ -28,6 +28,7 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
@ -38,7 +39,21 @@
#include <runtime_svc.h>
#include <tegra_private.h>
#define NS_SWITCH_AARCH32 1
#define SCR_RW_BITPOS __builtin_ctz(SCR_RW_BIT)
/*******************************************************************************
* Tegra SiP SMCs
******************************************************************************/
#define TEGRA_SIP_NEW_VIDEOMEM_REGION 0x82000003
#define TEGRA_SIP_AARCH_SWITCH 0x82000004
/*******************************************************************************
* SPSR settings for AARCH32/AARCH64 modes
******************************************************************************/
#define SPSR32 SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, \
DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT)
#define SPSR64 SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS)
/*******************************************************************************
* This function is responsible for handling all SiP calls from the NS world
@ -64,6 +79,10 @@ uint64_t tegra_sip_handler(uint32_t smc_fid,
case TEGRA_SIP_NEW_VIDEOMEM_REGION:
/* clean up the high bits */
x1 = (uint32_t)x1;
x2 = (uint32_t)x2;
/*
* Check if Video Memory overlaps TZDRAM (contains bl31/bl32)
* or falls outside of the valid DRAM range
@ -84,6 +103,30 @@ uint64_t tegra_sip_handler(uint32_t smc_fid,
tegra_memctrl_videomem_setup(x1, x2);
SMC_RET1(handle, 0);
break;
case TEGRA_SIP_AARCH_SWITCH:
/* clean up the high bits */
x1 = (uint32_t)x1;
x2 = (uint32_t)x2;
if (!x1 || x2 > NS_SWITCH_AARCH32) {
ERROR("%s: invalid parameters\n", __func__);
SMC_RET1(handle, SMC_UNK);
}
/* x1 = ns entry point */
cm_set_elr_spsr_el3(NON_SECURE, x1,
(x2 == NS_SWITCH_AARCH32) ? SPSR32 : SPSR64);
/* switch NS world mode */
cm_write_scr_el3_bit(NON_SECURE, SCR_RW_BITPOS, !x2);
INFO("CPU switched to AARCH%s mode\n",
(x2 == NS_SWITCH_AARCH32) ? "32" : "64");
SMC_RET1(handle, 0);
break;
default:
ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
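
For context, here is a minimal sketch of how a non-secure EL1 client might drive TEGRA_SIP_AARCH_SWITCH over the standard SMC calling convention (function id in x0, arguments in x1/x2, result back in x0). The wrapper name and the value 0 for the AArch64 mode are assumptions for illustration, not part of this patch:

#include <stdint.h>

#define TEGRA_SIP_AARCH_SWITCH	0x82000004
#define NS_SWITCH_AARCH64	0	/* assumed: x2 == 0 keeps SCR_EL3.RW set */
#define NS_SWITCH_AARCH32	1

static uint64_t tegra_sip_aarch_switch(uint64_t ns_entry_point, uint64_t mode)
{
	register uint64_t x0 __asm__("x0") = TEGRA_SIP_AARCH_SWITCH;
	register uint64_t x1 __asm__("x1") = ns_entry_point;
	register uint64_t x2 __asm__("x2") = mode;

	__asm__ volatile("smc #0"
			 : "+r" (x0)
			 : "r" (x1), "r" (x2)
			 : "x3", "memory");

	return x0;	/* 0 on success, SMC_UNK for bad parameters */
}
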


@ -0,0 +1,83 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __TEGRA_DEF_H__
#define __TEGRA_DEF_H__
#include <platform_def.h>
/*******************************************************************************
* This value is used by the PSCI implementation during the `SYSTEM_SUSPEND`
* call as the `state-id` field in the 'power state' parameter.
******************************************************************************/
#define PLAT_SYS_SUSPEND_STATE_ID 0xD
/*******************************************************************************
* GIC memory map
******************************************************************************/
#define TEGRA_GICD_BASE 0x50041000
#define TEGRA_GICC_BASE 0x50042000
/*******************************************************************************
* Tegra micro-seconds timer constants
******************************************************************************/
#define TEGRA_TMRUS_BASE 0x60005010
/*******************************************************************************
* Tegra Clock and Reset Controller constants
******************************************************************************/
#define TEGRA_CAR_RESET_BASE 0x60006000
/*******************************************************************************
* Tegra Flow Controller constants
******************************************************************************/
#define TEGRA_FLOWCTRL_BASE 0x60007000
/*******************************************************************************
* Tegra Secure Boot Controller constants
******************************************************************************/
#define TEGRA_SB_BASE 0x6000C200
/*******************************************************************************
* Tegra Exception Vectors constants
******************************************************************************/
#define TEGRA_EVP_BASE 0x6000F000
/*******************************************************************************
* Tegra Power Mgmt Controller constants
******************************************************************************/
#define TEGRA_PMC_BASE 0x7000E400
/*******************************************************************************
* Tegra Memory Controller constants
******************************************************************************/
#define TEGRA_MC_BASE 0x70019000
#endif /* __TEGRA_DEF_H__ */
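
TEGRA_TMRUS_BASE above points at the SoC's free-running microsecond counter. As an illustration of how such a constant gets used, here is a minimal busy-wait sketch, assuming the counter ticks at 1 MHz and can be read with a single 32-bit load; tegra_udelay is an illustrative name, not an API from this patch:

#include <stdint.h>
#include <mmio.h>		/* mmio_read_32() */

#define TEGRA_TMRUS_BASE	0x60005010

static void tegra_udelay(uint32_t usec)
{
	uint32_t start = mmio_read_32(TEGRA_TMRUS_BASE);

	/* unsigned subtraction handles counter wrap-around */
	while ((mmio_read_32(TEGRA_TMRUS_BASE) - start) < usec)
		;
}
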


@ -45,6 +45,9 @@ typedef struct plat_params_from_bl2 {
uintptr_t bl32_params;
} plat_params_from_bl2_t;
/* Declarations for plat_psci_handlers.c */
int32_t tegra_soc_validate_power_state(unsigned int power_state);
/* Declarations for plat_setup.c */
const mmap_region_t *plat_get_mmio_map(void);
uint64_t plat_get_syscnt_freq(void);


@ -32,3 +32,6 @@ SOC_DIR := plat/nvidia/tegra/soc/${TARGET_SOC}
include plat/nvidia/tegra/common/tegra_common.mk
include ${SOC_DIR}/platform_${TARGET_SOC}.mk
# modify BUILD_PLAT to point to SoC specific build directory
BUILD_PLAT := ${BUILD_BASE}/${PLAT}/${TARGET_SOC}/${BUILD_TYPE}


@ -0,0 +1,123 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <denver.h>
#include <debug.h>
#include <flowctrl.h>
#include <mmio.h>
#include <platform_def.h>
#include <pmc.h>
#include <psci.h>
#include <tegra_def.h>
#include <tegra_private.h>
/*
* Register used to clear CPU reset signals. Each CPU has two reset
* signals: CPU reset (3:0) and Core reset (19:16)
*/
#define CPU_CMPLX_RESET_CLR 0x344
#define CPU_CORE_RESET_MASK 0x10001
static int cpu_powergate_mask[PLATFORM_MAX_CPUS_PER_CLUSTER];
int32_t tegra_soc_validate_power_state(unsigned int power_state)
{
/* Sanity check the requested afflvl */
if (psci_get_pstate_type(power_state) == PSTATE_TYPE_STANDBY) {
/*
* It's possible to enter standby only on affinity level 0 i.e.
* a cpu on Tegra. Ignore any other affinity level.
*/
if (psci_get_pstate_afflvl(power_state) != MPIDR_AFFLVL0)
return PSCI_E_INVALID_PARAMS;
}
/* Sanity check the requested state id */
if (psci_get_pstate_id(power_state) != PLAT_SYS_SUSPEND_STATE_ID) {
ERROR("unsupported state id\n");
return PSCI_E_NOT_SUPPORTED;
}
return PSCI_E_SUCCESS;
}
int tegra_soc_prepare_cpu_on(unsigned long mpidr)
{
int cpu = mpidr & MPIDR_CPU_MASK;
uint32_t mask = CPU_CORE_RESET_MASK << cpu;
if (cpu_powergate_mask[cpu] == 0) {
/* Deassert CPU reset signals */
mmio_write_32(TEGRA_CAR_RESET_BASE + CPU_CMPLX_RESET_CLR, mask);
/* Power on CPU using PMC */
tegra_pmc_cpu_on(cpu);
/* Fill in the CPU powergate mask */
cpu_powergate_mask[cpu] = 1;
} else {
/* Power on CPU using Flow Controller */
tegra_fc_cpu_on(cpu);
}
return PSCI_E_SUCCESS;
}
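
As a concrete instance of the mask arithmetic above, using the register layout described in the comment at the top of this file (CPU reset in bits [3:0], core reset in bits [19:16]):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t cpu = 1;
	uint32_t mask = 0x10001U << cpu;	/* CPU_CORE_RESET_MASK << cpu */

	assert(mask == 0x20002U);	/* bit 1: CPU 1 reset, bit 17: core 1 reset */
	return 0;
}
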
int tegra_soc_prepare_cpu_off(unsigned long mpidr)
{
tegra_fc_cpu_off(mpidr & MPIDR_CPU_MASK);
return PSCI_E_SUCCESS;
}
int tegra_soc_prepare_cpu_suspend(unsigned int id, unsigned int afflvl)
{
/* Nothing to be done for lower affinity levels */
if (afflvl < MPIDR_AFFLVL2)
return PSCI_E_SUCCESS;
/* Enter system suspend state */
tegra_pm_system_suspend_entry();
/* Allow restarting CPU #1 using PMC on suspend exit */
cpu_powergate_mask[1] = 0;
/* Program FC to enter suspend state */
tegra_fc_cpu_idle(read_mpidr());
/* Suspend DCO operations */
write_actlr_el1(id);
return PSCI_E_SUCCESS;
}
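
The validate hook in this file assumes the PSCI v0.2 power_state encoding (StateID in bits [15:0], StateType in bit 16, affinity level in bits [25:24]). A sketch of packing the system-suspend request this SoC accepts; the helper name and the locally re-defined constants are for illustration only:

#include <stdint.h>

#define PLAT_SYS_SUSPEND_STATE_ID	0xD
#define PSTATE_TYPE_POWERDOWN		1
#define MPIDR_AFFLVL2			2

static uint32_t make_sys_suspend_power_state(void)
{
	/* StateID [15:0] | StateType [16] | AffinityLevel [25:24] */
	return PLAT_SYS_SUSPEND_STATE_ID |
	       (PSTATE_TYPE_POWERDOWN << 16) |
	       (MPIDR_AFFLVL2 << 24);
}
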


@ -0,0 +1,97 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <denver.h>
#include <mmio.h>
#include <platform.h>
#include <psci.h>
#include <pmc.h>
#include <tegra_def.h>
#define SB_CSR 0x0
#define SB_CSR_NS_RST_VEC_WR_DIS (1 << 1)
/* AARCH64 CPU reset vector */
#define SB_AA64_RESET_LOW 0x30 /* width = 31:0 */
#define SB_AA64_RESET_HI 0x34 /* width = 11:0 */
/* AARCH32 CPU reset vector */
#define EVP_CPU_RESET_VECTOR 0x100
extern void tegra_secure_entrypoint(void);
/*
* For T132, CPUs reset to AARCH32, so the reset vector is first
* armv8_trampoline which does a warm reset to AARCH64 and starts
* execution at the address in SB_AA64_RESET_LOW/SB_AA64_RESET_HI.
*/
__aligned(8) const uint32_t armv8_trampoline[] = {
	0xE3A00003,	/* mov r0, #3 (RMR.AA64 | RMR.RR) */
	0xEE0C0F50,	/* mcr p15, 0, r0, c12, c0, 2 (write RMR: warm reset to AArch64) */
	0xEAFFFFFE,	/* b . (spin until the warm reset takes effect) */
};
/*******************************************************************************
* Setup secondary CPU vectors
******************************************************************************/
void plat_secondary_setup(void)
{
uint32_t val;
uint64_t reset_addr = (uint64_t)tegra_secure_entrypoint;
/*
* For T132, CPUs reset to AARCH32, so the reset vector is first
* armv8_trampoline, which does a warm reset to AARCH64 and starts
* execution at the address in SCRATCH34/SCRATCH35.
*/
INFO("Setting up T132 CPU boot\n");
/* initial AARCH32 reset address */
tegra_pmc_write_32(PMC_SECURE_SCRATCH22,
(unsigned long)&armv8_trampoline);
/* set AARCH32 exception vector (read to flush) */
mmio_write_32(TEGRA_EVP_BASE + EVP_CPU_RESET_VECTOR,
(unsigned long)&armv8_trampoline);
val = mmio_read_32(TEGRA_EVP_BASE + EVP_CPU_RESET_VECTOR);
/* setup secondary CPU vector */
mmio_write_32(TEGRA_SB_BASE + SB_AA64_RESET_LOW,
(reset_addr & 0xFFFFFFFF) | 1);
val = reset_addr >> 32;
mmio_write_32(TEGRA_SB_BASE + SB_AA64_RESET_HI, val & 0x7FF);
/* configure PMC */
tegra_pmc_cpu_setup(reset_addr);
tegra_pmc_lock_cpu_vectors();
}


@ -0,0 +1,65 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <xlat_tables.h>
#include <tegra_def.h>
/* sets of MMIO ranges setup */
#define MMIO_RANGE_0_ADDR 0x50000000
#define MMIO_RANGE_1_ADDR 0x60000000
#define MMIO_RANGE_2_ADDR 0x70000000
#define MMIO_RANGE_SIZE 0x200000
/*
* Table of regions to map using the MMU.
*/
static const mmap_region_t tegra_mmap[] = {
MAP_REGION_FLAT(MMIO_RANGE_0_ADDR, MMIO_RANGE_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(MMIO_RANGE_1_ADDR, MMIO_RANGE_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(MMIO_RANGE_2_ADDR, MMIO_RANGE_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
{0}
};
/*******************************************************************************
* Set up the pagetables as per the platform memory map & initialize the MMU
******************************************************************************/
const mmap_region_t *plat_get_mmio_map(void)
{
/* MMIO space */
return tegra_mmap;
}
uint64_t plat_get_syscnt_freq(void)
{
return 12000000;
}


@ -0,0 +1,46 @@
#
# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
TEGRA_BOOT_UART_BASE := 0x70006300
$(eval $(call add_define,TEGRA_BOOT_UART_BASE))
TZDRAM_BASE := 0xF1C00000
$(eval $(call add_define,TZDRAM_BASE))
PLATFORM_CLUSTER_COUNT := 1
$(eval $(call add_define,PLATFORM_CLUSTER_COUNT))
PLATFORM_MAX_CPUS_PER_CLUSTER := 2
$(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER))
BL31_SOURCES += lib/cpus/aarch64/denver.S \
${SOC_DIR}/plat_psci_handlers.c \
${SOC_DIR}/plat_setup.c \
${SOC_DIR}/plat_secondary.c


@ -49,7 +49,35 @@
static int cpu_powergate_mask[PLATFORM_MAX_CPUS_PER_CLUSTER];
int tegra_prepare_cpu_suspend(unsigned int id, unsigned int afflvl)
int32_t tegra_soc_validate_power_state(unsigned int power_state)
{
/* Sanity check the requested afflvl */
if (psci_get_pstate_type(power_state) == PSTATE_TYPE_STANDBY) {
/*
* It's possible to enter standby only on affinity level 0 i.e.
* a cpu on Tegra. Ignore any other affinity level.
*/
if (psci_get_pstate_afflvl(power_state) != MPIDR_AFFLVL0)
return PSCI_E_INVALID_PARAMS;
}
/* Sanity check the requested state id */
switch (psci_get_pstate_id(power_state)) {
case PSTATE_ID_CORE_POWERDN:
case PSTATE_ID_CLUSTER_IDLE:
case PSTATE_ID_CLUSTER_POWERDN:
case PSTATE_ID_SOC_POWERDN:
break;
default:
ERROR("unsupported state id\n");
return PSCI_E_NOT_SUPPORTED;
}
return PSCI_E_SUCCESS;
}
int tegra_soc_prepare_cpu_suspend(unsigned int id, unsigned int afflvl)
{
/* There's nothing to be done for affinity level 1 */
if (afflvl == MPIDR_AFFLVL1)
@ -90,7 +118,7 @@ int tegra_prepare_cpu_suspend(unsigned int id, unsigned int afflvl)
return PSCI_E_NOT_SUPPORTED;
}
int tegra_prepare_cpu_on_finish(unsigned long mpidr)
int tegra_soc_prepare_cpu_on_finish(unsigned long mpidr)
{
/*
* Check if we are exiting from SOC_POWERDN.
@ -120,7 +148,7 @@ int tegra_prepare_cpu_on_finish(unsigned long mpidr)
return PSCI_E_SUCCESS;
}
int tegra_prepare_cpu_on(unsigned long mpidr)
int tegra_soc_prepare_cpu_on(unsigned long mpidr)
{
int cpu = mpidr & MPIDR_CPU_MASK;
uint32_t mask = CPU_CORE_RESET_MASK << cpu;
@ -139,7 +167,7 @@ int tegra_prepare_cpu_on(unsigned long mpidr)
return PSCI_E_SUCCESS;
}
int tegra_prepare_cpu_off(unsigned long mpidr)
int tegra_soc_prepare_cpu_off(unsigned long mpidr)
{
tegra_fc_cpu_off(mpidr & MPIDR_CPU_MASK);
return PSCI_E_SUCCESS;


@ -46,8 +46,10 @@ $(eval $(call add_define,PLATFORM_CLUSTER_COUNT))
PLATFORM_MAX_CPUS_PER_CLUSTER := 4
$(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER))
BL31_SOURCES += ${SOC_DIR}/plat_psci_handlers.c \
${SOC_DIR}/plat_setup.c \
BL31_SOURCES += lib/cpus/aarch64/cortex_a53.S \
lib/cpus/aarch64/cortex_a57.S \
${SOC_DIR}/plat_psci_handlers.c \
${SOC_DIR}/plat_setup.c \
${SOC_DIR}/plat_secondary.c
# Enable workarounds for selected Cortex-A53 errata.