Support for the Rockchip family of SoCs

This patch adds support for the RK3368 and RK3399 SoCs.

The RK3368 and RK3399 are multi-core ARM SoCs from the Rockchip family.

This patch adds support for booting the Trusted Firmware on the RK3368 and
RK3399 SoCs, for booting the secondary CPUs, and for entering/exiting core
power states for all CPUs in the slow/fast clusters.

This is the initial version for Rockchip SoCs (RK3368/RK3399 and upcoming SoCs):
* Support ARM GICv2 and GICv3.
* Boot up the secondary CPU cores.
* Add generic CPU helper functions.
* Support suspend/resume.
* Add the system_off and system_reset implementations.
* Add the delay timer platform implementation.
* Support the new porting interface for the PSCI implementation.

Change-Id: I704bb3532d65e8c70dbd99b512c5e6e440ea6f43
Signed-off-by: Tony Xie <tony.xie@rock-chips.com>
Signed-off-by: Caesar Wang <wxt@rock-chips.com>
Signed-off-by: Shengfei xu <xsf@rock-chips.com>
Tony Xie 2016-01-15 17:17:32 +08:00 committed by Caesar Wang
parent c291641795
commit 6fba6e0490
31 changed files with 6326 additions and 0 deletions


@ -0,0 +1,252 @@
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <cortex_a53.h>
#include <cortex_a72.h>
#include <plat_private.h>
#include <platform_def.h>
.globl cpuson_entry_point
.globl cpuson_flags
.globl platform_cpu_warmboot
.globl plat_secondary_cold_boot_setup
.globl plat_report_exception
.globl platform_is_primary_cpu
.globl plat_crash_console_init
.globl plat_crash_console_putc
.globl plat_my_core_pos
.globl plat_reset_handler
#define RK_REVISION(rev) RK_PLAT_CFG##rev
#define RK_HANDLER(rev) plat_reset_handler_rk_r##rev
#define JUMP_TO_HANDLER_IF_RK_R(revision) \
jump_to_handler RK_REVISION(revision), RK_HANDLER(revision)
/*
* Helper macro to jump to the given handler if the platform
* configuration value matches.
* Expects the Rockchip platform configuration value (RK_PLAT_AARCH_CFG) in x0.
*/
.macro jump_to_handler _revision, _handler
cmp x0, #\_revision
b.eq \_handler
.endm
/*
* Helper macro that reads the part number of the current CPU and jumps
* to the given label if it matches the CPU MIDR provided.
*/
.macro jump_if_cpu_midr _cpu_midr, _label
mrs x0, midr_el1
ubfx x0, x0, MIDR_PN_SHIFT, #12
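/* w0 now holds the 12-bit primary part number extracted from MIDR_EL1 */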
cmp w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
b.eq \_label
.endm
/*
* Platform reset handler for rockchip.
* only A53 cores
*/
func RK_HANDLER(0)
ret
endfunc RK_HANDLER(0)
/*
* Platform reset handler for rockchip.
* - Cortex-A53 processor cluster;
* - Cortex-A72 processor cluster.
*
* This handler does the following:
* - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A72
* - Set the L2 Tag RAM latency to 1 (i.e. 2 cycles) for Cortex-A72
*/
func RK_HANDLER(1)
/*
* Nothing to do on Cortex-A53.
*
*/
jump_if_cpu_midr CORTEX_A72_MIDR, A72
ret
A72:
/* Cortex-A72 specific settings */
mov x0, #((2 << L2CTLR_DATA_RAM_LATENCY_SHIFT) | \
(0x1 << 5))
msr L2CTLR_EL1, x0
isb
ret
endfunc RK_HANDLER(1)
/*
* void plat_reset_handler(void);
*
* Determine the SOC type and call the appropriate reset
* handler.
*
*/
func plat_reset_handler
mov x0, RK_PLAT_AARCH_CFG
JUMP_TO_HANDLER_IF_RK_R(0)
JUMP_TO_HANDLER_IF_RK_R(1)
/* SOC type is not supported */
not_supported:
b not_supported
endfunc plat_reset_handler
func plat_my_core_pos
mrs x0, mpidr_el1
and x1, x0, #MPIDR_CPU_MASK
and x0, x0, #MPIDR_CLUSTER_MASK
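/* core_pos = cpu_id + cluster_id * 4: Aff1 (MPIDR bits [15:8]) shifted right by 6 gives cluster_id * 4 */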
add x0, x1, x0, LSR #6
ret
endfunc plat_my_core_pos
/* --------------------------------------------------------------------
* void plat_secondary_cold_boot_setup (void);
*
* This function performs any platform specific actions
* needed for a secondary cpu after a cold reset e.g
* mark the cpu's presence, mechanism to place it in a
* holding pen etc.
* --------------------------------------------------------------------
*/
func plat_secondary_cold_boot_setup
/* rk3368 does not do cold boot for secondary CPU */
cb_panic:
b cb_panic
endfunc plat_secondary_cold_boot_setup
func platform_is_primary_cpu
and x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
cmp x0, #PLAT_RK_PRIMARY_CPU
cset x0, eq
ret
endfunc platform_is_primary_cpu
/* --------------------------------------------------------------------
* int plat_crash_console_init(void)
* Function to initialize the crash console
* without a C Runtime to print crash report.
* Clobber list : x0, x1, x2
* --------------------------------------------------------------------
*/
func plat_crash_console_init
mov_imm x0, PLAT_RK_UART_BASE
mov_imm x1, PLAT_RK_UART_CLOCK
mov_imm x2, PLAT_RK_UART_BAUDRATE
b console_core_init
endfunc plat_crash_console_init
/* --------------------------------------------------------------------
* int plat_crash_console_putc(void)
* Function to print a character on the crash
* console without a C Runtime.
* Clobber list : x1, x2
* --------------------------------------------------------------------
*/
func plat_crash_console_putc
mov_imm x1, PLAT_RK_UART_BASE
b console_core_putc
endfunc plat_crash_console_putc
/* --------------------------------------------------------------------
* void platform_cpu_warmboot (void);
* Entry point for CPU power-on or resume.
* --------------------------------------------------------------------
*/
func platform_cpu_warmboot
mrs x0, MPIDR_EL1
and x1, x0, #MPIDR_CPU_MASK
and x0, x0, #MPIDR_CLUSTER_MASK
/* --------------------------------------------------------------------
* The big cluster id is 1; big core ids are 0-3 and
* little core ids are 4-7.
* --------------------------------------------------------------------
*/
add x0, x1, x0, lsr #6
/* --------------------------------------------------------------------
* Get the per-CPU power-up flag.
* --------------------------------------------------------------------
*/
adr x4, cpuson_flags
add x4, x4, x0, lsl #2
ldr w1, [x4]
/* --------------------------------------------------------------------
* Get the per-CPU boot (entry point) address.
* --------------------------------------------------------------------
*/
adr x5, cpuson_entry_point
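/* cpuson_entry_point entries are 8 bytes (.quad) wide, hence the lsl #3 scaling */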
ldr x2, [x5, x0, lsl #3]
/* --------------------------------------------------------------------
* Check the CPU power-on reason.
* --------------------------------------------------------------------
*/
ldr w3, =PMU_CPU_AUTO_PWRDN
cmp w1, w3
b.eq boot_entry
ldr w3, =PMU_CPU_HOTPLUG
cmp w1, w3
b.eq boot_entry
/* --------------------------------------------------------------------
* If this core's cpuson_flags value is not one of the expected
* power-on reasons, force the core into wfe.
* --------------------------------------------------------------------
*/
wfe_loop:
wfe
b wfe_loop
boot_entry:
mov w0, #0
str w0, [x4]
br x2
endfunc platform_cpu_warmboot
/* --------------------------------------------------------------------
* Per-CPU Secure entry point - resume or power up
* --------------------------------------------------------------------
*/
.section tzfw_coherent_mem, "a"
.align 3
cpuson_entry_point:
.rept PLATFORM_CORE_COUNT
.quad 0
.endr
cpuson_flags:
/* 32-bit entries, matching the uint32_t cpuson_flags[] declaration and the 4-byte stride used above */
.rept PLATFORM_CORE_COUNT
.word 0
.endr


@ -0,0 +1,107 @@
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <arm_gic.h>
#include <bl_common.h>
#include <cci.h>
#include <debug.h>
#include <string.h>
#include <xlat_tables.h>
#include <platform_def.h>
#include <plat_private.h>
#ifdef PLAT_RK_CCI_BASE
static const int cci_map[] = {
PLAT_RK_CCI_CLUSTER0_SL_IFACE_IX,
PLAT_RK_CCI_CLUSTER1_SL_IFACE_IX
};
#endif
/******************************************************************************
* Macro generating the code for the function that sets up the page tables as
* per the platform memory map and initializes the MMU, for the given
* exception level.
******************************************************************************/
#define DEFINE_CONFIGURE_MMU_EL(_el) \
void plat_configure_mmu_el ## _el(unsigned long total_base, \
unsigned long total_size, \
unsigned long ro_start, \
unsigned long ro_limit, \
unsigned long coh_start, \
unsigned long coh_limit) \
{ \
mmap_add_region(total_base, total_base, \
total_size, \
MT_MEMORY | MT_RW | MT_SECURE); \
mmap_add_region(ro_start, ro_start, \
ro_limit - ro_start, \
MT_MEMORY | MT_RO | MT_SECURE); \
mmap_add_region(coh_start, coh_start, \
coh_limit - coh_start, \
MT_DEVICE | MT_RW | MT_SECURE); \
mmap_add(plat_rk_mmap); \
init_xlat_tables(); \
\
enable_mmu_el ## _el(0); \
}
/* Define EL3 variants of the function initialising the MMU */
DEFINE_CONFIGURE_MMU_EL(3)
uint64_t plat_get_syscnt_freq(void)
{
return SYS_COUNTER_FREQ_IN_TICKS;
}
void plat_cci_init(void)
{
#ifdef PLAT_RK_CCI_BASE
/* Initialize CCI driver */
cci_init(PLAT_RK_CCI_BASE, cci_map, ARRAY_SIZE(cci_map));
#endif
}
void plat_cci_enable(void)
{
/*
* Enable CCI coherency for this cluster.
* No need for locks as no other cpu is active at the moment.
*/
#ifdef PLAT_RK_CCI_BASE
cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
#endif
}
void plat_cci_disable(void)
{
#ifdef PLAT_RK_CCI_BASE
cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
#endif
}


@ -0,0 +1,152 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arm_gic.h>
#include <assert.h>
#include <bl_common.h>
#include <console.h>
#include <debug.h>
#include <mmio.h>
#include <platform.h>
#include <plat_private.h>
#include <platform_def.h>
/*******************************************************************************
* Declarations of linker defined symbols which will help us find the layout
* of trusted SRAM
******************************************************************************/
unsigned long __RO_START__;
unsigned long __RO_END__;
unsigned long __COHERENT_RAM_START__;
unsigned long __COHERENT_RAM_END__;
/*
* The next 2 constants identify the extents of the code & RO data region.
* These addresses are used by the MMU setup code and therefore they must be
* page-aligned. It is the responsibility of the linker script to ensure that
* __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
*/
#define BL31_RO_BASE (unsigned long)(&__RO_START__)
#define BL31_RO_LIMIT (unsigned long)(&__RO_END__)
/*
* The next 2 constants identify the extents of the coherent memory region.
* These addresses are used by the MMU setup code and therefore they must be
* page-aligned. It is the responsibility of the linker script to ensure that
* __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols
* refer to page-aligned addresses.
*/
#define BL31_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
#define BL31_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
static entry_point_info_t bl32_ep_info;
static entry_point_info_t bl33_ep_info;
/*******************************************************************************
* Return a pointer to the 'entry_point_info' structure of the next image for
* the security state specified. BL33 corresponds to the non-secure image type
* while BL32 corresponds to the secure image type. A NULL pointer is returned
* if the image does not exist.
******************************************************************************/
entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
{
entry_point_info_t *next_image_info;
next_image_info = (type == NON_SECURE) ? &bl33_ep_info : &bl32_ep_info;
/* None of the images on this platform can have 0x0 as the entrypoint */
if (next_image_info->pc)
return next_image_info;
else
return NULL;
}
/*******************************************************************************
* Perform any BL3-1 early platform setup. Here is an opportunity to copy
* parameters passed by the calling EL (S-EL1 in BL2 & S-EL3 in BL1) before they
* are lost (potentially). This needs to be done before the MMU is initialized
* so that the memory layout can be used while creating page tables.
* BL2 has flushed this information to memory, so we are guaranteed to pick up
* good data.
******************************************************************************/
void bl31_early_platform_setup(bl31_params_t *from_bl2,
void *plat_params_from_bl2)
{
console_init(PLAT_RK_UART_BASE, PLAT_RK_UART_CLOCK,
PLAT_RK_UART_BAUDRATE);
VERBOSE("bl31_setup\n");
/* Passing a NULL context is a critical programming error */
assert(from_bl2);
assert(from_bl2->h.type == PARAM_BL31);
assert(from_bl2->h.version >= VERSION_1);
bl32_ep_info = *from_bl2->bl32_ep_info;
bl33_ep_info = *from_bl2->bl33_ep_info;
/*
* The code for resuming the CPU from suspend must be executed from PMUSRAM.
* Copy that code into PMUSRAM.
*/
plat_rockchip_pmusram_prepare();
}
/*******************************************************************************
* Perform any BL3-1 platform setup code
******************************************************************************/
void bl31_platform_setup(void)
{
plat_delay_timer_init();
plat_rockchip_soc_init();
/* Initialize the gic cpu and distributor interfaces */
plat_rockchip_gic_driver_init();
plat_rockchip_gic_init();
plat_rockchip_pmu_init();
}
/*******************************************************************************
* Perform the very early platform specific architectural setup here. At the
* moment this only initializes the MMU in a quick and dirty way.
******************************************************************************/
void bl31_plat_arch_setup(void)
{
plat_cci_init();
plat_cci_enable();
plat_configure_mmu_el3(BL31_RO_BASE,
(BL31_COHERENT_RAM_LIMIT - BL31_RO_BASE),
BL31_RO_BASE,
BL31_RO_LIMIT,
BL31_COHERENT_RAM_BASE,
BL31_COHERENT_RAM_LIMIT);
}


@ -0,0 +1,122 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PMU_COM_H__
#define __PMU_COM_H__
DEFINE_BAKERY_LOCK(rockchip_pd_lock);
#define rockchip_pd_lock_get() bakery_lock_get(&rockchip_pd_lock)
#define rockchip_pd_lock_rls() bakery_lock_release(&rockchip_pd_lock)
#define rockchip_pd_lock_init() bakery_lock_init(&rockchip_pd_lock)
/*****************************************************************************
* power domain on or off
*****************************************************************************/
enum pmu_pd_state {
pmu_pd_on = 0,
pmu_pd_off = 1
};
#pragma weak plat_ic_get_pending_interrupt_id
#pragma weak pmu_power_domain_ctr
#pragma weak check_cpu_wfie
static inline uint32_t pmu_power_domain_st(uint32_t pd)
{
uint32_t pwrdn_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST) & BIT(pd);
if (pwrdn_st)
return pmu_pd_off;
else
return pmu_pd_on;
}
static int pmu_power_domain_ctr(uint32_t pd, uint32_t pd_state)
{
uint32_t val;
uint32_t loop = 0;
int ret = 0;
rockchip_pd_lock_get();
val = mmio_read_32(PMU_BASE + PMU_PWRDN_CON);
if (pd_state == pmu_pd_off)
val |= BIT(pd);
else
val &= ~BIT(pd);
mmio_write_32(PMU_BASE + PMU_PWRDN_CON, val);
dsb();
while ((pmu_power_domain_st(pd) != pd_state) && (loop < PD_CTR_LOOP)) {
udelay(1);
loop++;
}
if (pmu_power_domain_st(pd) != pd_state) {
WARN("%s: %d, %d, error!\n", __func__, pd, pd_state);
ret = -EINVAL;
}
rockchip_pd_lock_rls();
return ret;
}
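/*
* Example usage from an SoC pmu driver (the power-domain index is
* SoC-specific; PD_CPU_L0 below is a hypothetical name):
*   pmu_power_domain_ctr(PD_CPU_L0, pmu_pd_off);
* This requests the domain to power down and polls PMU_PWRDN_ST until the
* new state is reflected or PD_CTR_LOOP iterations have elapsed.
*/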
static int check_cpu_wfie(uint32_t cpu_id, uint32_t wfie_msk)
{
uint32_t cluster_id, loop = 0;
if (cpu_id >= PLATFORM_CLUSTER0_CORE_COUNT) {
cluster_id = 1;
cpu_id -= PLATFORM_CLUSTER0_CORE_COUNT;
} else {
cluster_id = 0;
}
if (cluster_id)
wfie_msk <<= (clstb_cpu_wfe + cpu_id);
else
wfie_msk <<= (clstl_cpu_wfe + cpu_id);
while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & wfie_msk) &&
(loop < CHK_CPU_LOOP)) {
udelay(1);
loop++;
}
if ((mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & wfie_msk) == 0) {
WARN("%s: %d, %d, %d, error!\n", __func__,
cluster_id, cpu_id, wfie_msk);
return -EINVAL;
}
return 0;
}
#endif /* __PMU_COM_H__ */


@ -0,0 +1,151 @@
/*
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __ROCKCHIP_PLAT_MACROS_S__
#define __ROCKCHIP_PLAT_MACROS_S__
#include <cci.h>
#include <gic_common.h>
#include <gicv2.h>
#include <gicv3.h>
#include <platform_def.h>
.section .rodata.gic_reg_name, "aS"
/* Applicable only to GICv2 and GICv3 with SRE disabled (legacy mode) */
gicc_regs:
.asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""
/* Applicable only to GICv3 with SRE enabled */
icc_regs:
.asciz "icc_hppir0_el1", "icc_hppir1_el1", "icc_ctlr_el3", ""
/* Registers common to both GICv2 and GICv3 */
gicd_pend_reg:
.asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n" \
" Offset:\t\t\tvalue\n"
newline:
.asciz "\n"
spacer:
.asciz ":\t\t0x"
/* ---------------------------------------------
* The below utility macro prints out relevant GIC
* registers whenever an unhandled exception is
* taken in BL31 on ARM standard platforms.
* Expects: GICD base in x16, GICC base in x17
* Clobbers: x0 - x10, sp
* ---------------------------------------------
*/
.macro plat_print_gic_regs
mov_imm x16, PLAT_RK_GICD_BASE
mov_imm x17, PLAT_RK_GICC_BASE
/* Check for GICv3 system register access */
mrs x7, id_aa64pfr0_el1
ubfx x7, x7, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_WIDTH
cmp x7, #1
b.ne print_gicv2
/* Check for SRE enable */
mrs x8, ICC_SRE_EL3
tst x8, #ICC_SRE_SRE_BIT
b.eq print_gicv2
/* Load the icc reg list to x6 */
adr x6, icc_regs
/* Load the icc regs to gp regs used by str_in_crash_buf_print */
mrs x8, ICC_HPPIR0_EL1
mrs x9, ICC_HPPIR1_EL1
mrs x10, ICC_CTLR_EL3
/* Store to the crash buf and print to console */
bl str_in_crash_buf_print
b print_gic_common
print_gicv2:
/* Load the gicc reg list to x6 */
adr x6, gicc_regs
/* Load the gicc regs to gp regs used by str_in_crash_buf_print */
ldr w8, [x17, #GICC_HPPIR]
ldr w9, [x17, #GICC_AHPPIR]
ldr w10, [x17, #GICC_CTLR]
/* Store to the crash buf and print to console */
bl str_in_crash_buf_print
print_gic_common:
/* Print the GICD_ISPENDR regs */
add x7, x16, #GICD_ISPENDR
adr x4, gicd_pend_reg
bl asm_print_str
gicd_ispendr_loop:
sub x4, x7, x16
cmp x4, #0x280
b.eq exit_print_gic_regs
bl asm_print_hex
adr x4, spacer
bl asm_print_str
ldr x4, [x7], #8
bl asm_print_hex
adr x4, newline
bl asm_print_str
b gicd_ispendr_loop
exit_print_gic_regs:
.endm
.section .rodata.cci_reg_name, "aS"
cci_iface_regs:
.asciz "cci_snoop_ctrl_cluster0", "cci_snoop_ctrl_cluster1" , ""
/* ------------------------------------------------
* The below macro prints out relevant interconnect
* registers whenever an unhandled exception is
* taken in BL3-1.
* Clobbers: x0 - x9, sp
* ------------------------------------------------
*/
.macro plat_print_interconnect_regs
#if PLATFORM_CLUSTER_COUNT > 1
adr x6, cci_iface_regs
/* Store in x7 the base address of the first interface */
mov_imm x7, (PLAT_RK_CCI_BASE + SLAVE_IFACE_OFFSET( \
PLAT_RK_CCI_CLUSTER0_SL_IFACE_IX))
ldr w8, [x7, #SNOOP_CTRL_REG]
/* Store in x7 the base address of the second interface */
mov_imm x7, (PLAT_RK_CCI_BASE + SLAVE_IFACE_OFFSET( \
PLAT_RK_CCI_CLUSTER1_SL_IFACE_IX))
ldr w9, [x7, #SNOOP_CTRL_REG]
/* Store to the crash buf and print to console */
bl str_in_crash_buf_print
#endif
.endm
#endif /* __ROCKCHIP_PLAT_MACROS_S__ */


@ -0,0 +1,127 @@
/*
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PLAT_PRIVATE_H__
#define __PLAT_PRIVATE_H__
#ifndef __ASSEMBLY__
#include <mmio.h>
#include <stdint.h>
#include <xlat_tables.h>
/******************************************************************************
* Rockchip SoC power-management (PM) operation callbacks
******************************************************************************/
struct rockchip_pm_ops_cb {
int (*cores_pwr_dm_on)(unsigned long mpidr, uint64_t entrypoint);
int (*cores_pwr_dm_off)(void);
int (*cores_pwr_dm_on_finish)(void);
int (*cores_pwr_dm_suspend)(void);
int (*cores_pwr_dm_resume)(void);
int (*sys_pwr_dm_suspend)(void);
int (*sys_pwr_dm_resume)(void);
void (*sys_gbl_soft_reset)(void) __dead2;
void (*system_off)(void) __dead2;
};
/******************************************************************************
* These registers have write-mask bits: to change a bit, the corresponding
* write-mask bit in the upper 16 bits must be set at the same time.
* The following macros help access such write-masked registers efficiently.
******************************************************************************/
#define REG_MSK_SHIFT 16
#ifndef BIT
#define BIT(nr) (1 << (nr))
#endif
#ifndef WMSK_BIT
#define WMSK_BIT(nr) BIT((nr) + REG_MSK_SHIFT)
#endif
/* set one bit with write mask */
#ifndef BIT_WITH_WMSK
#define BIT_WITH_WMSK(nr) (BIT(nr) | WMSK_BIT(nr))
#endif
#ifndef BITS_SHIFT
#define BITS_SHIFT(bits, shift) ((bits) << (shift))
#endif
#ifndef BITS_WITH_WMASK
#define BITS_WITH_WMASK(msk, bits, shift)\
(BITS_SHIFT(bits, shift) | BITS_SHIFT(msk, (shift + REG_MSK_SHIFT)))
#endif
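/*
* Example: BITS_WITH_WMASK(0x3, 0x1, 4) expands to
* (0x1 << 4) | (0x3 << 20) = 0x00300010; writing this value updates only
* bits [5:4] of the register (to 0b01), because the matching write-mask
* bits [21:20] are set.
*/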
/******************************************************************************
* Function and variable prototypes
*****************************************************************************/
void plat_configure_mmu_el3(unsigned long total_base,
unsigned long total_size,
unsigned long,
unsigned long,
unsigned long,
unsigned long);
void plat_cci_init(void);
void plat_cci_enable(void);
void plat_cci_disable(void);
void plat_delay_timer_init(void);
void plat_rockchip_gic_driver_init(void);
void plat_rockchip_gic_init(void);
void plat_rockchip_gic_cpuif_enable(void);
void plat_rockchip_gic_cpuif_disable(void);
void plat_rockchip_gic_pcpu_init(void);
void plat_rockchip_pmusram_prepare(void);
void plat_rockchip_pmu_init(void);
void plat_rockchip_soc_init(void);
void plat_setup_rockchip_pm_ops(struct rockchip_pm_ops_cb *ops);
extern const unsigned char rockchip_power_domain_tree_desc[];
extern void *pmu_cpuson_entrypoint_start;
extern void *pmu_cpuson_entrypoint_end;
extern uint64_t cpuson_entry_point[PLATFORM_CORE_COUNT];
extern uint32_t cpuson_flags[PLATFORM_CORE_COUNT];
extern const mmap_region_t plat_rk_mmap[];
#endif /* __ASSEMBLY__ */
/* only Cortex-A53 */
#define RK_PLAT_CFG0 0
/* include Cortex-A72 */
#define RK_PLAT_CFG1 1
#endif /* __PLAT_PRIVATE_H__ */


@ -0,0 +1,54 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <delay_timer.h>
#include <platform_def.h>
static uint32_t plat_get_timer_value(void)
{
/*
* Generic delay timer implementation expects the timer to be a down
* counter. We apply bitwise NOT operator to the tick values returned
* by read_cntpct_el0() to simulate the down counter.
*/
return (uint32_t)(~read_cntpct_el0());
}
static const timer_ops_t plat_timer_ops = {
.get_timer_value = plat_get_timer_value,
.clk_mult = 1,
.clk_div = SYS_COUNTER_FREQ_IN_MHZ,
};
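/*
* With clk_mult = 1 and clk_div equal to the counter frequency in MHz, the
* generic delay timer is expected to convert a delay of N microseconds into
* N * SYS_COUNTER_FREQ_IN_MHZ counter ticks.
*/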
void plat_delay_timer_init(void)
{
timer_init(&plat_timer_ops);
}


@ -0,0 +1,286 @@
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <assert.h>
#include <console.h>
#include <errno.h>
#include <debug.h>
#include <psci.h>
#include <delay_timer.h>
#include <platform_def.h>
#include <plat_private.h>
/* Macros to read the rk power domain state */
#define RK_CORE_PWR_STATE(state) \
((state)->pwr_domain_state[MPIDR_AFFLVL0])
#define RK_CLUSTER_PWR_STATE(state) \
((state)->pwr_domain_state[MPIDR_AFFLVL1])
#define RK_SYSTEM_PWR_STATE(state) \
((state)->pwr_domain_state[PLAT_MAX_PWR_LVL])
static uintptr_t rockchip_sec_entrypoint;
static struct rockchip_pm_ops_cb *rockchip_ops;
static void plat_rockchip_sys_pwr_domain_resume(void)
{
plat_rockchip_gic_init();
if (rockchip_ops && rockchip_ops->sys_pwr_dm_resume)
rockchip_ops->sys_pwr_dm_resume();
}
static void plat_rockchip_cores_pwr_domain_resume(void)
{
if (rockchip_ops && rockchip_ops->cores_pwr_dm_resume)
rockchip_ops->cores_pwr_dm_resume();
/* Enable the gic cpu interface */
plat_rockchip_gic_pcpu_init();
/* Program the gic per-cpu distributor or re-distributor interface */
plat_rockchip_gic_cpuif_enable();
}
/*******************************************************************************
* Rockchip standard platform handler called to check the validity of the power
* state parameter.
******************************************************************************/
int rockchip_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state)
{
int pstate = psci_get_pstate_type(power_state);
int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
int i;
assert(req_state);
if (pwr_lvl > PLAT_MAX_PWR_LVL)
return PSCI_E_INVALID_PARAMS;
/* Sanity check the requested state */
if (pstate == PSTATE_TYPE_STANDBY) {
/*
* It's possible to enter standby only at power level 0;
* ignore any other power level.
*/
if (pwr_lvl != MPIDR_AFFLVL0)
return PSCI_E_INVALID_PARAMS;
req_state->pwr_domain_state[MPIDR_AFFLVL0] =
PLAT_MAX_RET_STATE;
} else {
for (i = MPIDR_AFFLVL0; i <= pwr_lvl; i++)
req_state->pwr_domain_state[i] =
PLAT_MAX_OFF_STATE;
}
/* We expect the 'state id' to be zero */
if (psci_get_pstate_id(power_state))
return PSCI_E_INVALID_PARAMS;
return PSCI_E_SUCCESS;
}
void rockchip_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
int i;
for (i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
}
/*******************************************************************************
* RockChip handler called when a CPU is about to enter standby.
******************************************************************************/
void rockchip_cpu_standby(plat_local_state_t cpu_state)
{
unsigned int scr;
assert(cpu_state == PLAT_MAX_RET_STATE);
scr = read_scr_el3();
/* Enable PhysicalIRQ bit for NS world to wake the CPU */
write_scr_el3(scr | SCR_IRQ_BIT);
isb();
dsb();
wfi();
/*
* Restore SCR to the original value; synchronisation of scr_el3 is
* done by the eret in el3_exit to save some execution cycles.
*/
write_scr_el3(scr);
}
/*******************************************************************************
* RockChip handler called when a power domain is about to be turned on. The
* mpidr determines the CPU to be turned on.
******************************************************************************/
int rockchip_pwr_domain_on(u_register_t mpidr)
{
if (rockchip_ops && rockchip_ops->cores_pwr_dm_on)
rockchip_ops->cores_pwr_dm_on(mpidr, rockchip_sec_entrypoint);
return PSCI_E_SUCCESS;
}
/*******************************************************************************
* RockChip handler called when a power domain is about to be turned off. The
* target_state encodes the power state that each level should transition to.
******************************************************************************/
void rockchip_pwr_domain_off(const psci_power_state_t *target_state)
{
assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE);
plat_rockchip_gic_cpuif_disable();
if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
plat_cci_disable();
if (rockchip_ops && rockchip_ops->cores_pwr_dm_off)
rockchip_ops->cores_pwr_dm_off();
}
/*******************************************************************************
* RockChip handler called when a power domain is about to be suspended. The
* target_state encodes the power state that each level should transition to.
******************************************************************************/
void rockchip_pwr_domain_suspend(const psci_power_state_t *target_state)
{
if (RK_CORE_PWR_STATE(target_state) == PLAT_MAX_RET_STATE)
return;
assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE);
if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
if (rockchip_ops && rockchip_ops->sys_pwr_dm_suspend)
rockchip_ops->sys_pwr_dm_suspend();
} else {
if (rockchip_ops && rockchip_ops->cores_pwr_dm_suspend)
rockchip_ops->cores_pwr_dm_suspend();
}
/* Prevent interrupts from spuriously waking up this cpu */
plat_rockchip_gic_cpuif_disable();
/* Perform the common cluster specific operations */
if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
plat_cci_disable();
}
/*******************************************************************************
* RockChip handler called when a power domain has just been powered on after
* being turned off earlier. The target_state encodes the low power state that
* each level has woken up from.
******************************************************************************/
void rockchip_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE);
if (rockchip_ops && rockchip_ops->cores_pwr_dm_on_finish)
rockchip_ops->cores_pwr_dm_on_finish();
/* Perform the common cluster specific operations */
if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
/* Enable coherency if this cluster was off */
plat_cci_enable();
}
/* Enable the gic cpu interface */
plat_rockchip_gic_pcpu_init();
/* Program the gic per-cpu distributor or re-distributor interface */
plat_rockchip_gic_cpuif_enable();
}
/*******************************************************************************
* RockChip handler called when a power domain has just been powered on after
* having been suspended earlier. The target_state encodes the low power state
* that each level has woken up from.
* TODO: At the moment we reuse the on finisher and reinitialize the secure
* context. Need to implement a separate suspend finisher.
******************************************************************************/
void rockchip_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
/* Nothing to be done on waking up from retention from CPU level */
if (RK_CORE_PWR_STATE(target_state) == PLAT_MAX_RET_STATE)
return;
/* Perform system domain restore if woken up from system suspend */
if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
plat_rockchip_sys_pwr_domain_resume();
else
plat_rockchip_cores_pwr_domain_resume();
/* Perform the common cluster specific operations */
if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
/* Enable coherency if this cluster was off */
plat_cci_enable();
}
}
/*******************************************************************************
* RockChip handlers to reboot the system
******************************************************************************/
static void __dead2 rockchip_system_reset(void)
{
assert(rockchip_ops && rockchip_ops->sys_gbl_soft_reset);
rockchip_ops->sys_gbl_soft_reset();
}
/*******************************************************************************
* Export the platform handlers via plat_rockchip_psci_pm_ops. The Rockchip
* standard platform layer will take care of registering the handlers with
* PSCI.
******************************************************************************/
const plat_psci_ops_t plat_rockchip_psci_pm_ops = {
.cpu_standby = rockchip_cpu_standby,
.pwr_domain_on = rockchip_pwr_domain_on,
.pwr_domain_off = rockchip_pwr_domain_off,
.pwr_domain_suspend = rockchip_pwr_domain_suspend,
.pwr_domain_on_finish = rockchip_pwr_domain_on_finish,
.pwr_domain_suspend_finish = rockchip_pwr_domain_suspend_finish,
.system_reset = rockchip_system_reset,
.validate_power_state = rockchip_validate_power_state,
.get_sys_suspend_power_state = rockchip_get_sys_suspend_power_state
};
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
const plat_psci_ops_t **psci_ops)
{
*psci_ops = &plat_rockchip_psci_pm_ops;
rockchip_sec_entrypoint = sec_entrypoint;
return 0;
}
void plat_setup_rockchip_pm_ops(struct rockchip_pm_ops_cb *ops)
{
rockchip_ops = ops;
}


@ -0,0 +1,55 @@
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <platform_def.h>
#include <plat_private.h>
#include <psci.h>
/*******************************************************************************
* This function returns the RockChip default topology tree information.
******************************************************************************/
const unsigned char *plat_get_power_domain_tree_desc(void)
{
return rockchip_power_domain_tree_desc;
}
int plat_core_pos_by_mpidr(u_register_t mpidr)
{
unsigned int cluster_id, cpu_id;
cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
cluster_id = MPIDR_AFFLVL1_VAL(mpidr);
if (cluster_id >= PLATFORM_CLUSTER_COUNT)
return -1;
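/* This mirrors plat_my_core_pos(): core_pos = cluster_id * 4 + cpu_id when PLATFORM_CLUSTER0_CORE_COUNT is 4. */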
return ((cluster_id * PLATFORM_CLUSTER0_CORE_COUNT) + cpu_id);
}


@ -0,0 +1,38 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <platform.h>
/*****************************************************************************
* The PMU SRAM only supports 32-bit accesses
******************************************************************************/
void u32_align_cpy(uint32_t *dst, const uint32_t *src, size_t bytes)
{
uint32_t i;
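/* Note: despite the parameter name, the loop below copies 'bytes' 32-bit words. */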
for (i = 0; i < bytes; i++)
dst[i] = src[i];
}


@ -0,0 +1,91 @@
/* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PMU_SRAM_H__
#define __PMU_SRAM_H__
/*****************************************************************************
* cpu up status
*****************************************************************************/
#define PMU_SYS_SLP_MODE 0xa5
#define PMU_SYS_ON_MODE 0x0
/*****************************************************************************
* define data offset in struct psram_data
*****************************************************************************/
#define PSRAM_DT_SP 0x0
#define PSRAM_DT_DDR_FUNC 0x8
#define PSRAM_DT_DDR_DATA 0x10
#define PSRAM_DT_DDRFLAG 0x18
#define PSRAM_DT_SYS_MODE 0x1c
#define PSRAM_DT_MPIDR 0x20
#define PSRAM_DT_END 0x24
/******************************************************************************
* Allocate data region for struct psram_data_t in pmusram
******************************************************************************/
/* The stack top (sp) must be 16-byte aligned */
#define PSRAM_DT_SIZE (((PSRAM_DT_END + 16) / 16) * 16)
#define PSRAM_DT_BASE ((PMUSRAM_BASE + PMUSRAM_RSIZE) - PSRAM_DT_SIZE)
#define PSRAM_SP_TOP PSRAM_DT_BASE
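/*
* With PSRAM_DT_END = 0x24 (36 bytes), PSRAM_DT_SIZE = ((0x24 + 16) / 16) * 16
* = 0x30 (48 bytes), so the data region occupies the top 48 bytes of PMUSRAM
* and the stack top sits immediately below it.
*/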
#ifndef __ASSEMBLY__
/*
* This struct is used in pmu_cpus_on.S, which accesses its fields
* via the following offsets:
* #define PSRAM_DT_SP 0x0
* #define PSRAM_DT_DDR_FUNC 0x8
* #define PSRAM_DT_DDR_DATA 0x10
* #define PSRAM_DT_DDRFLAG 0x18
* #define PSRAM_DT_SYS_MODE 0x1c
* #define PSRAM_DT_MPIDR 0x20
*/
struct psram_data_t {
uint64_t sp;
uint64_t ddr_func;
uint64_t ddr_data;
uint32_t ddr_flag;
uint32_t sys_mode;
uint32_t boot_mpidr;
};
CASSERT(sizeof(struct psram_data_t) <= PSRAM_DT_SIZE,
assert_psram_dt_size_mismatch);
CASSERT(__builtin_offsetof(struct psram_data_t, sp) == PSRAM_DT_SP,
assert_psram_dt_sp_offset_mistmatch);
CASSERT(__builtin_offsetof(struct psram_data_t, ddr_func) == PSRAM_DT_DDR_FUNC,
assert_psram_dt_ddr_func_offset_mistmatch);
CASSERT(__builtin_offsetof(struct psram_data_t, ddr_data) == PSRAM_DT_DDR_DATA,
assert_psram_dt_ddr_data_offset_mistmatch);
CASSERT(__builtin_offsetof(struct psram_data_t, ddr_flag) == PSRAM_DT_DDRFLAG,
assert_psram_dt_ddr_flag_offset_mistmatch);
CASSERT(__builtin_offsetof(struct psram_data_t, sys_mode) == PSRAM_DT_SYS_MODE,
assert_psram_dt_sys_mode_offset_mistmatch);
CASSERT(__builtin_offsetof(struct psram_data_t, boot_mpidr) == PSRAM_DT_MPIDR,
assert_psram_dt_mpidr_offset_mistmatch);
void u32_align_cpy(uint32_t *dst, const uint32_t *src, size_t bytes);
#endif /* __ASSEMBLY__ */
#endif


@ -0,0 +1,83 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <platform_def.h>
#include <pmu_sram.h>
.globl pmu_cpuson_entrypoint_start
.globl pmu_cpuson_entrypoint_end
func pmu_cpuson_entrypoint
pmu_cpuson_entrypoint_start:
ldr x5, psram_data
ldr w0, [x5, #PSRAM_DT_SYS_MODE]
cmp w0, #PMU_SYS_SLP_MODE
b.eq check_wake_cpus
ldr x6, warm_boot_func
br x6
check_wake_cpus:
mrs x0, MPIDR_EL1
and x1, x0, #MPIDR_CPU_MASK
and x0, x0, #MPIDR_CLUSTER_MASK
orr x0, x0, x1
/* primary_cpu */
ldr w1, [x5, #PSRAM_DT_MPIDR]
cmp w0, w1
b.eq sys_wakeup
/*
* If the core is not the primary cpu,
* force the core into wfe.
*/
wfe_loop:
wfe
b wfe_loop
sys_wakeup:
/* Check the DDR flag to decide whether DDR needs to be resumed */
ldr w2, [x5, #PSRAM_DT_DDRFLAG]
cmp w2, #0x0
b.eq sys_resume
ddr_resume:
ldr x2, [x5, #PSRAM_DT_SP]
mov sp, x2
ldr x1, [x5, #PSRAM_DT_DDR_FUNC]
ldr x0, [x5, #PSRAM_DT_DDR_DATA]
blr x1
sys_resume:
ldr x1, sys_wakeup_entry
br x1
.align 3
psram_data:
.quad PSRAM_DT_BASE
warm_boot_func:
.quad platform_cpu_warmboot
sys_wakeup_entry:
.quad psci_entrypoint
pmu_cpuson_entrypoint_end:
.word 0
endfunc pmu_cpuson_entrypoint


@ -0,0 +1,103 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <bl_common.h>
#include <gicv2.h>
#include <platform_def.h>
/******************************************************************************
* The following functions are defined as weak to allow a platform to override
* the way the GICv2 driver is initialised and used.
*****************************************************************************/
#pragma weak plat_rockchip_gic_driver_init
#pragma weak plat_rockchip_gic_init
#pragma weak plat_rockchip_gic_cpuif_enable
#pragma weak plat_rockchip_gic_cpuif_disable
#pragma weak plat_rockchip_gic_pcpu_init
/******************************************************************************
* On a GICv2 system, the Group 1 secure interrupts are treated as Group 0
* interrupts.
*****************************************************************************/
const unsigned int g0_interrupt_array[] = {
PLAT_RK_G1S_IRQS,
};
/*
* Ideally the `rockchip_gic_data` structure definition should be `const`, but
* it is kept modifiable so that the GICD and GICC base addresses can be
* overridden on platforms with a different memory map.
*/
gicv2_driver_data_t rockchip_gic_data = {
.gicd_base = PLAT_RK_GICD_BASE,
.gicc_base = PLAT_RK_GICC_BASE,
.g0_interrupt_num = ARRAY_SIZE(g0_interrupt_array),
.g0_interrupt_array = g0_interrupt_array,
};
/******************************************************************************
* RockChip common helper to initialize the GICv2 only driver.
*****************************************************************************/
void plat_rockchip_gic_driver_init(void)
{
gicv2_driver_init(&rockchip_gic_data);
}
void plat_rockchip_gic_init(void)
{
gicv2_distif_init();
gicv2_pcpu_distif_init();
gicv2_cpuif_enable();
}
/******************************************************************************
* RockChip common helper to enable the GICv2 CPU interface
*****************************************************************************/
void plat_rockchip_gic_cpuif_enable(void)
{
gicv2_cpuif_enable();
}
/******************************************************************************
* RockChip common helper to disable the GICv2 CPU interface
*****************************************************************************/
void plat_rockchip_gic_cpuif_disable(void)
{
gicv2_cpuif_disable();
}
/******************************************************************************
* RockChip common helper to initialize the per cpu distributor interface
* in GICv2
*****************************************************************************/
void plat_rockchip_gic_pcpu_init(void)
{
gicv2_pcpu_distif_init();
}
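A minimal sketch of the intended call order for these helpers, assuming the common Rockchip BL31 setup and PSCI hooks elsewhere in this patch drive them (the wrapper function names below are illustrative, not part of the patch):
/* Sketch: primary CPU boot path */
static void example_gic_boot(void)
{
	plat_rockchip_gic_driver_init();	/* register the GICv2 driver data */
	plat_rockchip_gic_init();		/* distributor + this CPU's interface */
}
/* Sketch: a secondary CPU finishing PSCI CPU_ON */
static void example_gic_secondary_on(void)
{
	plat_rockchip_gic_pcpu_init();		/* per-cpu distributor setup */
	plat_rockchip_gic_cpuif_enable();	/* then enable its CPU interface */
}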

View File

@ -0,0 +1,123 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <bl_common.h>
#include <gicv3.h>
#include <platform.h>
#include <platform_def.h>
/******************************************************************************
* The following functions are defined as weak to allow a platform to override
* the way the GICv3 driver is initialised and used.
*****************************************************************************/
#pragma weak plat_rockchip_gic_driver_init
#pragma weak plat_rockchip_gic_init
#pragma weak plat_rockchip_gic_cpuif_enable
#pragma weak plat_rockchip_gic_cpuif_disable
#pragma weak plat_rockchip_gic_pcpu_init
/* The GICv3 driver only needs to be initialized in EL3 */
uintptr_t rdistif_base_addrs[PLATFORM_CORE_COUNT];
/* Array of Group1 secure interrupts to be configured by the gic driver */
const unsigned int g1s_interrupt_array[] = {
PLAT_RK_G1S_IRQS
};
/* Array of Group0 interrupts to be configured by the gic driver */
const unsigned int g0_interrupt_array[] = {
PLAT_RK_G0_IRQS
};
static unsigned int plat_rockchip_mpidr_to_core_pos(unsigned long mpidr)
{
return (unsigned int)plat_core_pos_by_mpidr(mpidr);
}
const gicv3_driver_data_t rockchip_gic_data = {
.gicd_base = PLAT_RK_GICD_BASE,
.gicr_base = PLAT_RK_GICR_BASE,
.g0_interrupt_num = ARRAY_SIZE(g0_interrupt_array),
.g1s_interrupt_num = ARRAY_SIZE(g1s_interrupt_array),
.g0_interrupt_array = g0_interrupt_array,
.g1s_interrupt_array = g1s_interrupt_array,
.rdistif_num = PLATFORM_CORE_COUNT,
.rdistif_base_addrs = rdistif_base_addrs,
.mpidr_to_core_pos = plat_rockchip_mpidr_to_core_pos,
};
void plat_rockchip_gic_driver_init(void)
{
/*
* The GICv3 driver is initialized in EL3 and does not need
* to be initialized again in S-EL1. This is because S-EL1 software
* can use GIC system registers to manage interrupts and does
* not need GIC interface base addresses to be configured.
*/
#if IMAGE_BL31
gicv3_driver_init(&rockchip_gic_data);
#endif
}
/******************************************************************************
* RockChip common helper to initialize the GIC. Only invoked
* by BL31
*****************************************************************************/
void plat_rockchip_gic_init(void)
{
gicv3_distif_init();
gicv3_rdistif_init(plat_my_core_pos());
gicv3_cpuif_enable(plat_my_core_pos());
}
/******************************************************************************
* RockChip common helper to enable the GIC CPU interface
*****************************************************************************/
void plat_rockchip_gic_cpuif_enable(void)
{
gicv3_cpuif_enable(plat_my_core_pos());
}
/******************************************************************************
* RockChip common helper to disable the GIC CPU interface
*****************************************************************************/
void plat_rockchip_gic_cpuif_disable(void)
{
gicv3_cpuif_disable(plat_my_core_pos());
}
/******************************************************************************
* RockChip common helper to initialize the per-cpu redistributor interface
* in GICv3
*****************************************************************************/
void plat_rockchip_gic_pcpu_init(void)
{
gicv3_rdistif_init(plat_my_core_pos());
}

View File

@ -0,0 +1,499 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <mmio.h>
#include <ddr_rk3368.h>
#include <debug.h>
#include <stdint.h>
#include <string.h>
#include <platform_def.h>
#include <pmu.h>
#include <rk3368_def.h>
#include <soc.h>
/* GRF_SOC_STATUS0 */
#define DPLL_LOCK (0x1 << 2)
/* GRF_DDRC0_CON0 */
#define GRF_DDR_16BIT_EN (((0x1 << 3) << 16) | (0x1 << 3))
#define GRF_DDR_32BIT_EN (((0x1 << 3) << 16) | (0x0 << 3))
#define GRF_MOBILE_DDR_EN (((0x1 << 4) << 16) | (0x1 << 4))
#define GRF_MOBILE_DDR_DISB (((0x1 << 4) << 16) | (0x0 << 4))
#define GRF_DDR3_EN (((0x1 << 2) << 16) | (0x1 << 2))
#define GRF_LPDDR2_3_EN (((0x1 << 2) << 16) | (0x0 << 2))
/* PMUGRF_SOC_CON0 */
#define ddrphy_bufferen_io_en(n) ((0x1 << (9 + 16)) | (n << 9))
#define ddrphy_bufferen_core_en(n) ((0x1 << (8 + 16)) | (n << 8))
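These encodings follow the usual Rockchip write-enable convention: the upper 16 bits of the 32-bit value select which of the lower 16 bits take effect, so a single mmio_write_32() can change individual bits without a read-modify-write sequence. A minimal sketch of the idea (the helper name is illustrative, not part of this patch):
/* Illustrative only: write `val` into bit `bit` of a write-masked register */
static inline void wmsk_write_bit(uintptr_t addr, uint32_t bit, uint32_t val)
{
	/* upper halfword = write-enable mask, lower halfword = new bit value */
	mmio_write_32(addr, (1U << (bit + 16)) | ((val & 1U) << bit));
}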
struct PCTRL_TIMING_TAG {
uint32_t ddrfreq;
uint32_t TOGCNT1U;
uint32_t TINIT;
uint32_t TRSTH;
uint32_t TOGCNT100N;
uint32_t TREFI;
uint32_t TMRD;
uint32_t TRFC;
uint32_t TRP;
uint32_t TRTW;
uint32_t TAL;
uint32_t TCL;
uint32_t TCWL;
uint32_t TRAS;
uint32_t TRC;
uint32_t TRCD;
uint32_t TRRD;
uint32_t TRTP;
uint32_t TWR;
uint32_t TWTR;
uint32_t TEXSR;
uint32_t TXP;
uint32_t TXPDLL;
uint32_t TZQCS;
uint32_t TZQCSI;
uint32_t TDQS;
uint32_t TCKSRE;
uint32_t TCKSRX;
uint32_t TCKE;
uint32_t TMOD;
uint32_t TRSTL;
uint32_t TZQCL;
uint32_t TMRR;
uint32_t TCKESR;
uint32_t TDPD;
uint32_t TREFI_MEM_DDR3;
};
struct MSCH_SAVE_REG_TAG {
uint32_t ddrconf;
uint32_t ddrtiming;
uint32_t ddrmode;
uint32_t readlatency;
uint32_t activate;
uint32_t devtodev;
};
/* registers that need to be saved across ddr suspend */
struct PCTL_SAVE_REG_TAG {
uint32_t SCFG;
uint32_t CMDTSTATEN;
uint32_t MCFG1;
uint32_t MCFG;
uint32_t PPCFG;
struct PCTRL_TIMING_TAG pctl_timing;
/* DFI Control Registers */
uint32_t DFITCTRLDELAY;
uint32_t DFIODTCFG;
uint32_t DFIODTCFG1;
uint32_t DFIODTRANKMAP;
/* DFI Write Data Registers */
uint32_t DFITPHYWRDATA;
uint32_t DFITPHYWRLAT;
uint32_t DFITPHYWRDATALAT;
/* DFI Read Data Registers */
uint32_t DFITRDDATAEN;
uint32_t DFITPHYRDLAT;
/* DFI Update Registers */
uint32_t DFITPHYUPDTYPE0;
uint32_t DFITPHYUPDTYPE1;
uint32_t DFITPHYUPDTYPE2;
uint32_t DFITPHYUPDTYPE3;
uint32_t DFITCTRLUPDMIN;
uint32_t DFITCTRLUPDMAX;
uint32_t DFITCTRLUPDDLY;
uint32_t DFIUPDCFG;
uint32_t DFITREFMSKI;
uint32_t DFITCTRLUPDI;
/* DFI Status Registers */
uint32_t DFISTCFG0;
uint32_t DFISTCFG1;
uint32_t DFITDRAMCLKEN;
uint32_t DFITDRAMCLKDIS;
uint32_t DFISTCFG2;
/* DFI Low Power Register */
uint32_t DFILPCFG0;
};
struct DDRPHY_SAVE_REG_TAG {
uint32_t PHY_REG0;
uint32_t PHY_REG1;
uint32_t PHY_REGB;
uint32_t PHY_REGC;
uint32_t PHY_REG11;
uint32_t PHY_REG13;
uint32_t PHY_REG14;
uint32_t PHY_REG16;
uint32_t PHY_REG20;
uint32_t PHY_REG21;
uint32_t PHY_REG26;
uint32_t PHY_REG27;
uint32_t PHY_REG28;
uint32_t PHY_REG30;
uint32_t PHY_REG31;
uint32_t PHY_REG36;
uint32_t PHY_REG37;
uint32_t PHY_REG38;
uint32_t PHY_REG40;
uint32_t PHY_REG41;
uint32_t PHY_REG46;
uint32_t PHY_REG47;
uint32_t PHY_REG48;
uint32_t PHY_REG50;
uint32_t PHY_REG51;
uint32_t PHY_REG56;
uint32_t PHY_REG57;
uint32_t PHY_REG58;
uint32_t PHY_REGDLL;
uint32_t PHY_REGEC;
uint32_t PHY_REGED;
uint32_t PHY_REGEE;
uint32_t PHY_REGEF;
uint32_t PHY_REGFB;
uint32_t PHY_REGFC;
uint32_t PHY_REGFD;
uint32_t PHY_REGFE;
};
struct BACKUP_REG_TAG {
uint32_t tag;
uint32_t pctladdr;
struct PCTL_SAVE_REG_TAG pctl;
uint32_t phyaddr;
struct DDRPHY_SAVE_REG_TAG phy;
uint32_t nocaddr;
struct MSCH_SAVE_REG_TAG noc;
uint32_t pllselect;
uint32_t phypllockaddr;
uint32_t phyplllockmask;
uint32_t phyplllockval;
uint32_t pllpdstat;
uint32_t dpllmodeaddr;
uint32_t dpllslowmode;
uint32_t dpllnormalmode;
uint32_t dpllresetaddr;
uint32_t dpllreset;
uint32_t dplldereset;
uint32_t dpllconaddr;
uint32_t dpllcon[4];
uint32_t dplllockaddr;
uint32_t dplllockmask;
uint32_t dplllockval;
uint32_t ddrpllsrcdivaddr;
uint32_t ddrpllsrcdiv;
uint32_t retendisaddr;
uint32_t retendisval;
uint32_t grfregaddr;
uint32_t grfddrcreg;
uint32_t crupctlphysoftrstaddr;
uint32_t cruresetpctlphy;
uint32_t cruderesetphy;
uint32_t cruderesetpctlphy;
uint32_t physoftrstaddr;
uint32_t endtag;
};
static uint32_t ddr_get_phy_pll_freq(void)
{
uint32_t ret = 0;
uint32_t fb_div, pre_div;
fb_div = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEC);
fb_div |= (mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGED) & 0x1) << 8;
pre_div = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEE) & 0xff;
ret = 2 * 24 * fb_div / (4 * pre_div);
return ret;
}
static void ddr_copy(uint32_t *pdest, uint32_t *psrc, uint32_t words)
{
uint32_t i;
for (i = 0; i < words; i++)
pdest[i] = psrc[i];
}
static void ddr_get_dpll_cfg(uint32_t *p)
{
uint32_t nmhz, NO, NF, NR;
nmhz = ddr_get_phy_pll_freq();
if (nmhz <= 150)
NO = 6;
else if (nmhz <= 250)
NO = 4;
else if (nmhz <= 500)
NO = 2;
else
NO = 1;
NR = 1;
NF = 2 * nmhz * NR * NO / 24;
p[0] = SET_NR(NR) | SET_NO(NO);
p[1] = SET_NF(NF);
p[2] = SET_NB(NF / 2);
}
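For reference, ddr_get_phy_pll_freq() above recovers the PHY PLL rate as 2 * 24 * fb_div / (4 * pre_div) = 12 * fb_div / pre_div MHz, and ddr_get_dpll_cfg() inverts that relation when preparing new DPLL settings. A worked example under an assumed 400 MHz DDR clock: 400 falls in the 250..500 range, so NO = 2, NR = 1 and NF = 2 * 400 * 1 * 2 / 24 = 66; by the same relation the DPLL then produces about 24 * 66 / (2 * 1 * 2) = 396 MHz, the closest achievable rate below the request, and p[2] = SET_NB(NF / 2) encodes 33.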
void ddr_reg_save(uint32_t pllpdstat, uint64_t base_addr)
{
struct BACKUP_REG_TAG *p_ddr_reg = (struct BACKUP_REG_TAG *)base_addr;
struct PCTL_SAVE_REG_TAG *pctl_tim = &p_ddr_reg->pctl;
p_ddr_reg->tag = 0x56313031;
p_ddr_reg->pctladdr = DDR_PCTL_BASE;
p_ddr_reg->phyaddr = DDR_PHY_BASE;
p_ddr_reg->nocaddr = SERVICE_BUS_BASE;
/* PCTLR */
ddr_copy((uint32_t *)&pctl_tim->pctl_timing.TOGCNT1U,
(uint32_t *)(DDR_PCTL_BASE + DDR_PCTL_TOGCNT1U), 35);
pctl_tim->pctl_timing.TREFI |= DDR_UPD_REF_ENABLE;
pctl_tim->SCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_SCFG);
pctl_tim->CMDTSTATEN = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_CMDTSTATEN);
pctl_tim->MCFG1 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_MCFG1);
pctl_tim->MCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_MCFG);
pctl_tim->PPCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_PPCFG);
pctl_tim->pctl_timing.ddrfreq = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_TOGCNT1U * 2);
pctl_tim->DFITCTRLDELAY = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFITCTRLDELAY);
pctl_tim->DFIODTCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFIODTCFG);
pctl_tim->DFIODTCFG1 = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFIODTCFG1);
pctl_tim->DFIODTRANKMAP = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFIODTRANKMAP);
pctl_tim->DFITPHYWRDATA = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFITPHYWRDATA);
pctl_tim->DFITPHYWRLAT = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFITPHYWRLAT);
pctl_tim->DFITPHYWRDATALAT = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFITPHYWRDATALAT);
pctl_tim->DFITRDDATAEN = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFITRDDATAEN);
pctl_tim->DFITPHYRDLAT = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFITPHYRDLAT);
pctl_tim->DFITPHYUPDTYPE0 = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFITPHYUPDTYPE0);
pctl_tim->DFITPHYUPDTYPE1 = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFITPHYUPDTYPE1);
pctl_tim->DFITPHYUPDTYPE2 = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFITPHYUPDTYPE2);
pctl_tim->DFITPHYUPDTYPE3 = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFITPHYUPDTYPE3);
pctl_tim->DFITCTRLUPDMIN = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFITCTRLUPDMIN);
pctl_tim->DFITCTRLUPDMAX = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFITCTRLUPDMAX);
pctl_tim->DFITCTRLUPDDLY = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFITCTRLUPDDLY);
pctl_tim->DFIUPDCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFIUPDCFG);
pctl_tim->DFITREFMSKI = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFITREFMSKI);
pctl_tim->DFITCTRLUPDI = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFITCTRLUPDI);
pctl_tim->DFISTCFG0 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFISTCFG0);
pctl_tim->DFISTCFG1 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFISTCFG1);
pctl_tim->DFITDRAMCLKEN = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFITDRAMCLKEN);
pctl_tim->DFITDRAMCLKDIS = mmio_read_32(DDR_PCTL_BASE +
DDR_PCTL_DFITDRAMCLKDIS);
pctl_tim->DFISTCFG2 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFISTCFG2);
pctl_tim->DFILPCFG0 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFILPCFG0);
/* PHY */
p_ddr_reg->phy.PHY_REG0 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG0);
p_ddr_reg->phy.PHY_REG1 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG1);
p_ddr_reg->phy.PHY_REGB = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGB);
p_ddr_reg->phy.PHY_REGC = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGC);
p_ddr_reg->phy.PHY_REG11 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG11);
p_ddr_reg->phy.PHY_REG13 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG13);
p_ddr_reg->phy.PHY_REG14 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG14);
p_ddr_reg->phy.PHY_REG16 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG16);
p_ddr_reg->phy.PHY_REG20 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG20);
p_ddr_reg->phy.PHY_REG21 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG21);
p_ddr_reg->phy.PHY_REG26 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG26);
p_ddr_reg->phy.PHY_REG27 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG27);
p_ddr_reg->phy.PHY_REG28 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG28);
p_ddr_reg->phy.PHY_REG30 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG30);
p_ddr_reg->phy.PHY_REG31 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG31);
p_ddr_reg->phy.PHY_REG36 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG36);
p_ddr_reg->phy.PHY_REG37 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG37);
p_ddr_reg->phy.PHY_REG38 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG38);
p_ddr_reg->phy.PHY_REG40 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG40);
p_ddr_reg->phy.PHY_REG41 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG41);
p_ddr_reg->phy.PHY_REG46 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG46);
p_ddr_reg->phy.PHY_REG47 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG47);
p_ddr_reg->phy.PHY_REG48 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG48);
p_ddr_reg->phy.PHY_REG50 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG50);
p_ddr_reg->phy.PHY_REG51 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG51);
p_ddr_reg->phy.PHY_REG56 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG56);
p_ddr_reg->phy.PHY_REG57 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG57);
p_ddr_reg->phy.PHY_REG58 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG58);
p_ddr_reg->phy.PHY_REGDLL = mmio_read_32(DDR_PHY_BASE +
DDR_PHY_REGDLL);
p_ddr_reg->phy.PHY_REGEC = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEC);
p_ddr_reg->phy.PHY_REGED = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGED);
p_ddr_reg->phy.PHY_REGEE = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEE);
p_ddr_reg->phy.PHY_REGEF = 0;
if (mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG2) & 0x2) {
p_ddr_reg->phy.PHY_REGFB = mmio_read_32(DDR_PHY_BASE +
DDR_PHY_REG2C);
p_ddr_reg->phy.PHY_REGFC = mmio_read_32(DDR_PHY_BASE +
DDR_PHY_REG3C);
p_ddr_reg->phy.PHY_REGFD = mmio_read_32(DDR_PHY_BASE +
DDR_PHY_REG4C);
p_ddr_reg->phy.PHY_REGFE = mmio_read_32(DDR_PHY_BASE +
DDR_PHY_REG5C);
} else {
p_ddr_reg->phy.PHY_REGFB = mmio_read_32(DDR_PHY_BASE +
DDR_PHY_REGFB);
p_ddr_reg->phy.PHY_REGFC = mmio_read_32(DDR_PHY_BASE +
DDR_PHY_REGFC);
p_ddr_reg->phy.PHY_REGFD = mmio_read_32(DDR_PHY_BASE +
DDR_PHY_REGFD);
p_ddr_reg->phy.PHY_REGFE = mmio_read_32(DDR_PHY_BASE +
DDR_PHY_REGFE);
}
/* NOC */
p_ddr_reg->noc.ddrconf = mmio_read_32(SERVICE_BUS_BASE + MSCH_DDRCONF);
p_ddr_reg->noc.ddrtiming = mmio_read_32(SERVICE_BUS_BASE +
MSCH_DDRTIMING);
p_ddr_reg->noc.ddrmode = mmio_read_32(SERVICE_BUS_BASE + MSCH_DDRMODE);
p_ddr_reg->noc.readlatency = mmio_read_32(SERVICE_BUS_BASE +
MSCH_READLATENCY);
p_ddr_reg->noc.activate = mmio_read_32(SERVICE_BUS_BASE +
MSCH_ACTIVATE);
p_ddr_reg->noc.devtodev = mmio_read_32(SERVICE_BUS_BASE +
MSCH_DEVTODEV);
p_ddr_reg->pllselect = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEE) * 0x1;
p_ddr_reg->phypllockaddr = GRF_BASE + GRF_SOC_STATUS0;
p_ddr_reg->phyplllockmask = GRF_DDRPHY_LOCK;
p_ddr_reg->phyplllockval = 0;
/* PLLPD */
p_ddr_reg->pllpdstat = pllpdstat;
/* DPLL */
p_ddr_reg->dpllmodeaddr = CRU_BASE + PLL_CONS(DPLL_ID, 3);
/* slow mode and power on */
p_ddr_reg->dpllslowmode = DPLL_WORK_SLOW_MODE | DPLL_POWER_DOWN;
p_ddr_reg->dpllnormalmode = DPLL_WORK_NORMAL_MODE;
p_ddr_reg->dpllresetaddr = CRU_BASE + PLL_CONS(DPLL_ID, 3);
p_ddr_reg->dpllreset = DPLL_RESET_CONTROL_NORMAL;
p_ddr_reg->dplldereset = DPLL_RESET_CONTROL_RESET;
p_ddr_reg->dpllconaddr = CRU_BASE + PLL_CONS(DPLL_ID, 0);
if (p_ddr_reg->pllselect == 0) {
p_ddr_reg->dpllcon[0] = (mmio_read_32(CRU_BASE +
PLL_CONS(DPLL_ID, 0))
& 0xffff) |
(0xFFFF << 16);
p_ddr_reg->dpllcon[1] = (mmio_read_32(CRU_BASE +
PLL_CONS(DPLL_ID, 1))
& 0xffff);
p_ddr_reg->dpllcon[2] = (mmio_read_32(CRU_BASE +
PLL_CONS(DPLL_ID, 2))
& 0xffff);
p_ddr_reg->dpllcon[3] = (mmio_read_32(CRU_BASE +
PLL_CONS(DPLL_ID, 3))
& 0xffff) |
(0xFFFF << 16);
} else {
ddr_get_dpll_cfg(&p_ddr_reg->dpllcon[0]);
}
p_ddr_reg->pllselect = 0;
p_ddr_reg->dplllockaddr = CRU_BASE + PLL_CONS(DPLL_ID, 1);
p_ddr_reg->dplllockmask = DPLL_STATUS_LOCK;
p_ddr_reg->dplllockval = DPLL_STATUS_LOCK;
/* SET_DDR_PLL_SRC */
p_ddr_reg->ddrpllsrcdivaddr = CRU_BASE + CRU_CLKSELS_CON(13);
p_ddr_reg->ddrpllsrcdiv = (mmio_read_32(CRU_BASE + CRU_CLKSELS_CON(13))
& DDR_PLL_SRC_MASK)
| (DDR_PLL_SRC_MASK << 16);
p_ddr_reg->retendisaddr = PMU_BASE + PMU_PWRMD_COM;
p_ddr_reg->retendisval = PD_PERI_PWRDN_ENABLE;
p_ddr_reg->grfregaddr = GRF_BASE + GRF_DDRC0_CON0;
p_ddr_reg->grfddrcreg = (mmio_read_32(GRF_BASE + GRF_DDRC0_CON0) &
DDR_PLL_SRC_MASK) |
(DDR_PLL_SRC_MASK << 16);
/* pctl phy soft reset */
p_ddr_reg->crupctlphysoftrstaddr = CRU_BASE + CRU_SOFTRSTS_CON(10);
p_ddr_reg->cruresetpctlphy = DDRCTRL0_PSRSTN_REQ(1) |
DDRCTRL0_SRSTN_REQ(1) |
DDRPHY0_PSRSTN_REQ(1) |
DDRPHY0_SRSTN_REQ(1);
p_ddr_reg->cruderesetphy = DDRCTRL0_PSRSTN_REQ(1) |
DDRCTRL0_SRSTN_REQ(1) |
DDRPHY0_PSRSTN_REQ(0) |
DDRPHY0_SRSTN_REQ(0);
p_ddr_reg->cruderesetpctlphy = DDRCTRL0_PSRSTN_REQ(0) |
DDRCTRL0_SRSTN_REQ(0) |
DDRPHY0_PSRSTN_REQ(0) |
DDRPHY0_SRSTN_REQ(0);
p_ddr_reg->physoftrstaddr = DDR_PHY_BASE + DDR_PHY_REG0;
p_ddr_reg->endtag = 0xFFFFFFFF;
}
/*
* "rk3368_ddr_reg_resume_V1.05.bin" is an executable bin which is generated
* by ARM DS5 for resuming ddr controller. If the soc wakes up from system
* suspend, ddr needs to be resumed and the resuming code needs to be run in
* sram. But there is not a way to pointing the resuming code to the PMUSRAM
* when linking .o files of bl31, so we use the
* "rk3368_ddr_reg_resume_V1.05.bin" whose code is position-independent and
* it can be loaded anywhere and run.
*/
static __aligned(4) unsigned int ddr_reg_resume[] = {
#include "rk3368_ddr_reg_resume_V1.05.bin"
};
uint32_t ddr_get_resume_code_size(void)
{
return sizeof(ddr_reg_resume);
}
uint32_t ddr_get_resume_data_size(void)
{
return sizeof(struct BACKUP_REG_TAG);
}
uint32_t *ddr_get_resume_code_base(void)
{
return (unsigned int *)ddr_reg_resume;
}

View File

@ -0,0 +1,267 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DDR_RK3368_H__
#define __DDR_RK3368_H__
#define DDR_PCTL_SCFG 0x0
#define DDR_PCTL_SCTL 0x4
#define DDR_PCTL_STAT 0x8
#define DDR_PCTL_INTRSTAT 0xc
#define DDR_PCTL_MCMD 0x40
#define DDR_PCTL_POWCTL 0x44
#define DDR_PCTL_POWSTAT 0x48
#define DDR_PCTL_CMDTSTAT 0x4c
#define DDR_PCTL_CMDTSTATEN 0x50
#define DDR_PCTL_MRRCFG0 0x60
#define DDR_PCTL_MRRSTAT0 0x64
#define DDR_PCTL_MRRSTAT1 0x68
#define DDR_PCTL_MCFG1 0x7c
#define DDR_PCTL_MCFG 0x80
#define DDR_PCTL_PPCFG 0x84
#define DDR_PCTL_MSTAT 0x88
#define DDR_PCTL_LPDDR2ZQCFG 0x8c
#define DDR_PCTL_DTUPDES 0x94
#define DDR_PCTL_DTUNA 0x98
#define DDR_PCTL_DTUNE 0x9c
#define DDR_PCTL_DTUPRD0 0xa0
#define DDR_PCTL_DTUPRD1 0xa4
#define DDR_PCTL_DTUPRD2 0xa8
#define DDR_PCTL_DTUPRD3 0xac
#define DDR_PCTL_DTUAWDT 0xb0
#define DDR_PCTL_TOGCNT1U 0xc0
#define DDR_PCTL_TINIT 0xc4
#define DDR_PCTL_TRSTH 0xc8
#define DDR_PCTL_TOGCNT100N 0xcc
#define DDR_PCTL_TREFI 0xd0
#define DDR_PCTL_TMRD 0xd4
#define DDR_PCTL_TRFC 0xd8
#define DDR_PCTL_TRP 0xdc
#define DDR_PCTL_TRTW 0xe0
#define DDR_PCTL_TAL 0xe4
#define DDR_PCTL_TCL 0xe8
#define DDR_PCTL_TCWL 0xec
#define DDR_PCTL_TRAS 0xf0
#define DDR_PCTL_TRC 0xf4
#define DDR_PCTL_TRCD 0xf8
#define DDR_PCTL_TRRD 0xfc
#define DDR_PCTL_TRTP 0x100
#define DDR_PCTL_TWR 0x104
#define DDR_PCTL_TWTR 0x108
#define DDR_PCTL_TEXSR 0x10c
#define DDR_PCTL_TXP 0x110
#define DDR_PCTL_TXPDLL 0x114
#define DDR_PCTL_TZQCS 0x118
#define DDR_PCTL_TZQCSI 0x11c
#define DDR_PCTL_TDQS 0x120
#define DDR_PCTL_TCKSRE 0x124
#define DDR_PCTL_TCKSRX 0x128
#define DDR_PCTL_TCKE 0x12c
#define DDR_PCTL_TMOD 0x130
#define DDR_PCTL_TRSTL 0x134
#define DDR_PCTL_TZQCL 0x138
#define DDR_PCTL_TMRR 0x13c
#define DDR_PCTL_TCKESR 0x140
#define DDR_PCTL_TDPD 0x144
#define DDR_PCTL_TREFI_MEM_DDR3 0x148
#define DDR_PCTL_ECCCFG 0x180
#define DDR_PCTL_ECCTST 0x184
#define DDR_PCTL_ECCCLR 0x188
#define DDR_PCTL_ECCLOG 0x18c
#define DDR_PCTL_DTUWACTL 0x200
#define DDR_PCTL_DTURACTL 0x204
#define DDR_PCTL_DTUCFG 0x208
#define DDR_PCTL_DTUECTL 0x20c
#define DDR_PCTL_DTUWD0 0x210
#define DDR_PCTL_DTUWD1 0x214
#define DDR_PCTL_DTUWD2 0x218
#define DDR_PCTL_DTUWD3 0x21c
#define DDR_PCTL_DTUWDM 0x220
#define DDR_PCTL_DTURD0 0x224
#define DDR_PCTL_DTURD1 0x228
#define DDR_PCTL_DTURD2 0x22c
#define DDR_PCTL_DTURD3 0x230
#define DDR_PCTL_DTULFSRWD 0x234
#define DDR_PCTL_DTULFSRRD 0x238
#define DDR_PCTL_DTUEAF 0x23c
#define DDR_PCTL_DFITCTRLDELAY 0x240
#define DDR_PCTL_DFIODTCFG 0x244
#define DDR_PCTL_DFIODTCFG1 0x248
#define DDR_PCTL_DFIODTRANKMAP 0x24c
#define DDR_PCTL_DFITPHYWRDATA 0x250
#define DDR_PCTL_DFITPHYWRLAT 0x254
#define DDR_PCTL_DFITPHYWRDATALAT 0x258
#define DDR_PCTL_DFITRDDATAEN 0x260
#define DDR_PCTL_DFITPHYRDLAT 0x264
#define DDR_PCTL_DFITPHYUPDTYPE0 0x270
#define DDR_PCTL_DFITPHYUPDTYPE1 0x274
#define DDR_PCTL_DFITPHYUPDTYPE2 0x278
#define DDR_PCTL_DFITPHYUPDTYPE3 0x27c
#define DDR_PCTL_DFITCTRLUPDMIN 0x280
#define DDR_PCTL_DFITCTRLUPDMAX 0x284
#define DDR_PCTL_DFITCTRLUPDDLY 0x288
#define DDR_PCTL_DFIUPDCFG 0x290
#define DDR_PCTL_DFITREFMSKI 0x294
#define DDR_PCTL_DFITCTRLUPDI 0x298
#define DDR_PCTL_DFITRCFG0 0x2ac
#define DDR_PCTL_DFITRSTAT0 0x2b0
#define DDR_PCTL_DFITRWRLVLEN 0x2b4
#define DDR_PCTL_DFITRRDLVLEN 0x2b8
#define DDR_PCTL_DFITRRDLVLGATEEN 0x2bc
#define DDR_PCTL_DFISTSTAT0 0x2c0
#define DDR_PCTL_DFISTCFG0 0x2c4
#define DDR_PCTL_DFISTCFG1 0x2c8
#define DDR_PCTL_DFITDRAMCLKEN 0x2d0
#define DDR_PCTL_DFITDRAMCLKDIS 0x2d4
#define DDR_PCTL_DFISTCFG2 0x2d8
#define DDR_PCTL_DFISTPARCLR 0x2dc
#define DDR_PCTL_DFISTPARLOG 0x2e0
#define DDR_PCTL_DFILPCFG0 0x2f0
#define DDR_PCTL_DFITRWRLVLRESP0 0x300
#define DDR_PCTL_DFITRWRLVLRESP1 0x304
#define DDR_PCTL_DFITRWRLVLRESP2 0x308
#define DDR_PCTL_DFITRRDLVLRESP0 0x30c
#define DDR_PCTL_DFITRRDLVLRESP1 0x310
#define DDR_PCTL_DFITRRDLVLRESP2 0x314
#define DDR_PCTL_DFITRWRLVLDELAY0 0x318
#define DDR_PCTL_DFITRWRLVLDELAY1 0x31c
#define DDR_PCTL_DFITRWRLVLDELAY2 0x320
#define DDR_PCTL_DFITRRDLVLDELAY0 0x324
#define DDR_PCTL_DFITRRDLVLDELAY1 0x328
#define DDR_PCTL_DFITRRDLVLDELAY2 0x32c
#define DDR_PCTL_DFITRRDLVLGATEDELAY0 0x330
#define DDR_PCTL_DFITRRDLVLGATEDELAY1 0x334
#define DDR_PCTL_DFITRRDLVLGATEDELAY2 0x338
#define DDR_PCTL_DFITRCMD 0x33c
#define DDR_PCTL_IPVR 0x3f8
#define DDR_PCTL_IPTR 0x3fc
/* DDR PHY REG */
#define DDR_PHY_REG0 0x0
#define DDR_PHY_REG1 0x4
#define DDR_PHY_REG2 0x8
#define DDR_PHY_REG3 0xc
#define DDR_PHY_REG4 0x10
#define DDR_PHY_REG5 0x14
#define DDR_PHY_REG6 0x18
#define DDR_PHY_REGB 0x2c
#define DDR_PHY_REGC 0x30
#define DDR_PHY_REG11 0x44
#define DDR_PHY_REG12 0x48
#define DDR_PHY_REG13 0x4c
#define DDR_PHY_REG14 0x50
#define DDR_PHY_REG16 0x58
#define DDR_PHY_REG20 0x80
#define DDR_PHY_REG21 0x84
#define DDR_PHY_REG26 0x98
#define DDR_PHY_REG27 0x9c
#define DDR_PHY_REG28 0xa0
#define DDR_PHY_REG2C 0xb0
#define DDR_PHY_REG30 0xc0
#define DDR_PHY_REG31 0xc4
#define DDR_PHY_REG36 0xd8
#define DDR_PHY_REG37 0xdc
#define DDR_PHY_REG38 0xe0
#define DDR_PHY_REG3C 0xf0
#define DDR_PHY_REG40 0x100
#define DDR_PHY_REG41 0x104
#define DDR_PHY_REG46 0x118
#define DDR_PHY_REG47 0x11c
#define DDR_PHY_REG48 0x120
#define DDR_PHY_REG4C 0x130
#define DDR_PHY_REG50 0x140
#define DDR_PHY_REG51 0x144
#define DDR_PHY_REG56 0x158
#define DDR_PHY_REG57 0x15c
#define DDR_PHY_REG58 0x160
#define DDR_PHY_REG5C 0x170
#define DDR_PHY_REGDLL 0x290
#define DDR_PHY_REGEC 0x3b0
#define DDR_PHY_REGED 0x3b4
#define DDR_PHY_REGEE 0x3b8
#define DDR_PHY_REGEF 0x3bc
#define DDR_PHY_REGF0 0x3c0
#define DDR_PHY_REGF1 0x3c4
#define DDR_PHY_REGF2 0x3c8
#define DDR_PHY_REGFA 0x3e8
#define DDR_PHY_REGFB 0x3ec
#define DDR_PHY_REGFC 0x3f0
#define DDR_PHY_REGFD 0x3f4
#define DDR_PHY_REGFE 0x3f8
#define DDR_PHY_REGFF 0x3fc
/* MSCH REG define */
#define MSCH_COREID 0x0
#define MSCH_DDRCONF 0x8
#define MSCH_DDRTIMING 0xc
#define MSCH_DDRMODE 0x10
#define MSCH_READLATENCY 0x14
#define MSCH_ACTIVATE 0x38
#define MSCH_DEVTODEV 0x3c
#define SET_NR(n) ((0x3f << (8 + 16)) | ((n - 1) << 8))
#define SET_NO(n) ((0xf << (0 + 16)) | ((n - 1) << 0))
#define SET_NF(n) ((n - 1) & 0x1fff)
#define SET_NB(n) ((n - 1) & 0xfff)
#define PLLMODE(n) ((0x3 << (8 + 16)) | (n << 8))
/* GRF REG define */
#define GRF_SOC_STATUS0 0x480
#define GRF_DDRPHY_LOCK (0x1 << 15)
#define GRF_DDRC0_CON0 0x600
/* CRU softreset ddr pctl, phy */
#define DDRMSCH0_SRSTN_REQ(n) (((0x1 << 10) << 16) | (n << 10))
#define DDRCTRL0_PSRSTN_REQ(n) (((0x1 << 3) << 16) | (n << 3))
#define DDRCTRL0_SRSTN_REQ(n) (((0x1 << 2) << 16) | (n << 2))
#define DDRPHY0_PSRSTN_REQ(n) (((0x1 << 1) << 16) | (n << 1))
#define DDRPHY0_SRSTN_REQ(n) (((0x1 << 0) << 16) | (n << 0))
/* CRU_DPLL_CON2 */
#define DPLL_STATUS_LOCK (1 << 31)
/* CRU_DPLL_CON3 */
#define DPLL_POWER_DOWN ((0x1 << (1 + 16)) | (0 << 1))
#define DPLL_WORK_NORMAL_MODE ((0x3 << (8 + 16)) | (0 << 8))
#define DPLL_WORK_SLOW_MODE ((0x3 << (8 + 16)) | (1 << 8))
#define DPLL_RESET_CONTROL_NORMAL ((0x1 << (5 + 16)) | (0x0 << 5))
#define DPLL_RESET_CONTROL_RESET ((0x1 << (5 + 16)) | (0x1 << 5))
/* PMU_PWRDN_CON */
#define PD_PERI_PWRDN_ENABLE (1 << 13)
#define DDR_PLL_SRC_MASK 0x13
/* DDR_PCTL_TREFI */
#define DDR_UPD_REF_ENABLE (0X1 << 31)
uint32_t ddr_get_resume_code_size(void);
uint32_t ddr_get_resume_data_size(void);
uint32_t *ddr_get_resume_code_base(void);
void ddr_reg_save(uint32_t pllpdstat, uint64_t base_addr);
#endif

View File

@ -0,0 +1,461 @@
0x14000088,
0xd10043ff,
0x5283ffe1,
0x52824902,
0x1b020400,
0x530d7c00,
0xb9000fe0,
0xb9400fe0,
0x340000a0,
0xb9400fe0,
0x51000401,
0xb9000fe1,
0x35ffffa0,
0x910043ff,
0xd65f03c0,
0x340000e2,
0xb9400023,
0xb9000003,
0x91001021,
0x91001000,
0x51000442,
0x35ffff62,
0xd65f03c0,
0xd10043ff,
0xb9400801,
0x12000821,
0xb9000fe1,
0xb9400fe1,
0x7100043f,
0x54000320,
0x52800021,
0x52800082,
0xb9400fe3,
0x34000143,
0x71000c7f,
0x54000100,
0x7100147f,
0x54000161,
0xb9000402,
0xb9400803,
0x12000863,
0x71000c7f,
0x54ffffa1,
0xb9000401,
0xb9400803,
0x12000863,
0x7100047f,
0x54ffffa1,
0xb9400803,
0x12000863,
0xb9000fe3,
0xb9400fe3,
0x7100047f,
0x54fffd61,
0x910043ff,
0xd65f03c0,
0xd10043ff,
0xb9400801,
0x12000821,
0xb9000fe1,
0xb9400fe1,
0x7100143f,
0x54000400,
0x52800021,
0x52800042,
0x52800063,
0xb9400fe4,
0x340000c4,
0x7100049f,
0x54000120,
0x71000c9f,
0x54000180,
0x14000010,
0xb9000401,
0xb9400804,
0x12000884,
0x7100049f,
0x54ffffa1,
0xb9000402,
0xb9400804,
0x12000884,
0x71000c9f,
0x54ffffa1,
0xb9000403,
0xb9400804,
0x12000884,
0x7100149f,
0x54ffffa1,
0xb9400804,
0x12000884,
0xb9000fe4,
0xb9400fe4,
0x7100149f,
0x54fffca1,
0x910043ff,
0xd65f03c0,
0xd10043ff,
0xb9400801,
0x12000821,
0xb9000fe1,
0xb9400fe1,
0x71000c3f,
0x54000400,
0x52800021,
0x52800042,
0x52800083,
0xb9400fe4,
0x34000164,
0x7100049f,
0x540001c0,
0x7100149f,
0x54000221,
0xb9000403,
0xb9400804,
0x12000884,
0x71000c9f,
0x54ffffa1,
0x1400000b,
0xb9000401,
0xb9400804,
0x12000884,
0x7100049f,
0x54ffffa1,
0xb9000402,
0xb9400804,
0x12000884,
0x71000c9f,
0x54ffffa1,
0xb9400804,
0x12000884,
0xb9000fe4,
0xb9400fe4,
0x71000c9f,
0x54fffca1,
0x910043ff,
0xd65f03c0,
0xd10103ff,
0xa9037bfd,
0x9100c3fd,
0xa9025ff6,
0xa90157f4,
0xf90007f3,
0xaa0003f3,
0xb9400674,
0xb9411276,
0xb941c660,
0xb941aa75,
0x7100041f,
0x54000261,
0xb9418e60,
0x321f0000,
0xb903b6c0,
0xb9418a60,
0xb903b2c0,
0xb9419260,
0xb903bac0,
0xb9418e60,
0x121e7800,
0xb903b6c0,
0xb941ca60,
0xb941ce61,
0xb941d262,
0xb9400003,
0xa030023,
0x6b22407f,
0x54ffffa0,
0x1400003b,
0xb941d660,
0x7100041f,
0x54000701,
0xb941da60,
0x3100041f,
0x54000080,
0xb941de61,
0x53007c00,
0xb9000001,
0xb941e660,
0x3100041f,
0x54000080,
0xb941ea61,
0x53007c00,
0xb9000001,
0xb941f260,
0x3100041f,
0x54000120,
0xaa1f03e1,
0x53007c00,
0x9107d262,
0xb8616843,
0xb8216803,
0x91001021,
0xf100203f,
0x54ffff81,
0x52800020,
0x97ffff3f,
0xb941e660,
0x3100041f,
0x54000080,
0xb941ee61,
0x53007c00,
0xb9000001,
0x52800020,
0x97ffff37,
0xb9420660,
0x3100041f,
0x54000100,
0xb9420a61,
0xb9420e62,
0x53007c00,
0xb9400003,
0xa030023,
0x6b22407f,
0x54ffffa1,
0xb9421260,
0x3100041f,
0x54000080,
0xb9421661,
0x53007c00,
0xb9000001,
0xb941da60,
0x3100041f,
0x54000080,
0xb941e261,
0x53007c00,
0xb9000001,
0xb9419660,
0xb903bec0,
0xb9422a60,
0x34000400,
0xb9422e61,
0x53007c17,
0xb90002e1,
0x52800140,
0x97ffff18,
0xb9423260,
0xb90002e0,
0x52800140,
0x97ffff14,
0xb9423660,
0xb90002e0,
0x52800140,
0x97ffff10,
0xb9423a60,
0x34000220,
0x53007c17,
0xb94002e0,
0x121c7400,
0xb90002e0,
0x52800020,
0x97ffff08,
0xb94002e0,
0x321e0000,
0xb90002e0,
0x528000a0,
0x97ffff03,
0xb94002e0,
0x321d0000,
0xb90002e0,
0x52800020,
0x97fffefe,
0xb9412a60,
0xb9004ec0,
0xb9412e60,
0xb90052c0,
0xb9413e60,
0xb9009ac0,
0xb9414260,
0xb9009ec0,
0xb9415260,
0xb900dac0,
0xb9415660,
0xb900dec0,
0xb9416660,
0xb9011ac0,
0xb9416a60,
0xb9011ec0,
0xb9417a60,
0xb9015ac0,
0xb9417e60,
0xb9015ec0,
0xb9418660,
0xb90292c0,
0xb9414660,
0xb900a2c0,
0xb9415a60,
0xb900e2c0,
0xb9416e60,
0xb90122c0,
0xb9418260,
0xb90162c0,
0xb9411660,
0xb90002c0,
0xb9411a60,
0xb90006c0,
0xb9411e60,
0xb9002ec0,
0xb9412260,
0xb90032c0,
0xb9412660,
0xb90046c0,
0xb9413260,
0xb9005ac0,
0xb9413660,
0xb90082c0,
0xb9413a60,
0xb90086c0,
0xb9414a60,
0xb900c2c0,
0xb9414e60,
0xb900c6c0,
0xb9415e60,
0xb90102c0,
0xb9416260,
0xb90106c0,
0xb9417260,
0xb90142c0,
0xb9417660,
0xb90146c0,
0x52800040,
0xb9000ac0,
0xb9411261,
0xb9419a60,
0xb900b020,
0xb9419a60,
0xb900b420,
0xb9419e60,
0xb900f020,
0xb9419e60,
0xb900f420,
0xb941a260,
0xb9013020,
0xb941a260,
0xb9013420,
0xb941a660,
0xb9017020,
0xb941a662,
0xaa1f03e0,
0xb9017422,
0x91008261,
0xb8606822,
0x8b000283,
0xb900c062,
0x91001000,
0xf102301f,
0x54ffff61,
0xb9400a60,
0xb9000280,
0xb9400e60,
0xb9005280,
0xb9401260,
0xb9007e80,
0xb9401660,
0xb9008280,
0xb9401a60,
0xb9008680,
0xb940ae60,
0xb9024280,
0xb940b260,
0xb9024680,
0xb940b660,
0xb9024a80,
0xb940ba60,
0xb9024e80,
0xb940be60,
0xb9025280,
0xb940c260,
0xb9025680,
0xb940c660,
0xb9025a80,
0xb940ca60,
0xb9026280,
0xb940ce60,
0xb9026680,
0xb940d260,
0xb9027280,
0xb940d660,
0xb9027680,
0xb940da60,
0xb9027a80,
0xb940de60,
0xb9027e80,
0xb940e260,
0xb9028280,
0xb940e660,
0xb9028680,
0xb940ea60,
0xb9028a80,
0xb940ee60,
0xb9029280,
0xb940f260,
0xb9029680,
0xb940f660,
0xb9029a80,
0xb940fa60,
0xb902c680,
0xb940fe60,
0xb902ca80,
0xb9410260,
0xb902d280,
0xb9410660,
0xb902d680,
0xb9410a60,
0xb902da80,
0xb9410e60,
0xb902f280,
0xb9422260,
0x3100041f,
0x540000c0,
0xb9422661,
0x53007c00,
0xb9000001,
0x52800020,
0x97fffe65,
0x52800020,
0xb9004680,
0xb9404a80,
0x3607ffe0,
0xb941ae60,
0xb9000aa0,
0xb941b260,
0xb9000ea0,
0xb941b660,
0xb90012a0,
0xb941ba60,
0xb90016a0,
0xb941be60,
0xb9003aa0,
0xb941c260,
0xb9003ea0,
0xb9422260,
0x3100041f,
0x54000080,
0x53007c00,
0x320083e1,
0xb9000001,
0xaa1403e0,
0x97fffe84,
0xb9421a60,
0x3100041f,
0x54000100,
0x53007c00,
0xb9421e61,
0xb9400002,
0x2a010041,
0xb9000001,
0x52800020,
0x97fffe43,
0xaa1403e0,
0x97fffea0,
0xb9422260,
0x3100041f,
0x54000080,
0x53007c00,
0x52a00021,
0xb9000001,
0xf94007f3,
0xa94157f4,
0xa9425ff6,
0xa9437bfd,
0x910103ff,
0xd65f03c0,

View File

@ -0,0 +1,425 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <mmio.h>
#include <platform.h>
#include <platform_def.h>
#include <plat_private.h>
#include <rk3368_def.h>
#include <pmu_sram.h>
#include <soc.h>
#include <pmu.h>
#include <ddr_rk3368.h>
#include <pmu_com.h>
static struct psram_data_t *psram_sleep_cfg =
(struct psram_data_t *)PSRAM_DT_BASE;
void rk3368_flash_l2_b(void)
{
uint32_t wait_cnt = 0;
regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
dsb();
while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)
& BIT(clst_b_l2_flsh_done))) {
wait_cnt++;
if (!(wait_cnt % MAX_WAIT_CONUT))
WARN("%s:reg %x,wait\n", __func__,
mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
}
regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
}
static inline int rk3368_pmu_bus_idle(uint32_t req, uint32_t idle)
{
uint32_t mask = BIT(req);
uint32_t idle_mask = 0;
uint32_t idle_target = 0;
uint32_t val;
uint32_t wait_cnt = 0;
switch (req) {
case bus_ide_req_clst_l:
idle_mask = BIT(pmu_idle_ack_cluster_l);
idle_target = (idle << pmu_idle_ack_cluster_l);
break;
case bus_ide_req_clst_b:
idle_mask = BIT(pmu_idle_ack_cluster_b);
idle_target = (idle << pmu_idle_ack_cluster_b);
break;
case bus_ide_req_cxcs:
idle_mask = BIT(pmu_idle_ack_cxcs);
idle_target = ((!idle) << pmu_idle_ack_cxcs);
break;
case bus_ide_req_cci400:
idle_mask = BIT(pmu_idle_ack_cci400);
idle_target = ((!idle) << pmu_idle_ack_cci400);
break;
case bus_ide_req_gpu:
idle_mask = BIT(pmu_idle_ack_gpu) | BIT(pmu_idle_gpu);
idle_target = (idle << pmu_idle_ack_gpu) |
(idle << pmu_idle_gpu);
break;
case bus_ide_req_core:
idle_mask = BIT(pmu_idle_ack_core) | BIT(pmu_idle_core);
idle_target = (idle << pmu_idle_ack_core) |
(idle << pmu_idle_core);
break;
case bus_ide_req_bus:
idle_mask = BIT(pmu_idle_ack_bus) | BIT(pmu_idle_bus);
idle_target = (idle << pmu_idle_ack_bus) |
(idle << pmu_idle_bus);
break;
case bus_ide_req_dma:
idle_mask = BIT(pmu_idle_ack_dma) | BIT(pmu_idle_dma);
idle_target = (idle << pmu_idle_ack_dma) |
(idle << pmu_idle_dma);
break;
case bus_ide_req_peri:
idle_mask = BIT(pmu_idle_ack_peri) | BIT(pmu_idle_peri);
idle_target = (idle << pmu_idle_ack_peri) |
(idle << pmu_idle_peri);
break;
case bus_ide_req_video:
idle_mask = BIT(pmu_idle_ack_video) | BIT(pmu_idle_video);
idle_target = (idle << pmu_idle_ack_video) |
(idle << pmu_idle_video);
break;
case bus_ide_req_vio:
idle_mask = BIT(pmu_idle_ack_vio) | BIT(pmu_idle_vio);
idle_target = (idle << pmu_idle_ack_vio) |
(idle << pmu_idle_vio);
break;
case bus_ide_req_alive:
idle_mask = BIT(pmu_idle_ack_alive) | BIT(pmu_idle_alive);
idle_target = (idle << pmu_idle_ack_alive) |
(idle << pmu_idle_alive);
break;
case bus_ide_req_pmu:
idle_mask = BIT(pmu_idle_ack_pmu) | BIT(pmu_idle_pmu);
idle_target = (idle << pmu_idle_ack_pmu) |
(idle << pmu_idle_pmu);
break;
case bus_ide_req_msch:
idle_mask = BIT(pmu_idle_ack_msch) | BIT(pmu_idle_msch);
idle_target = (idle << pmu_idle_ack_msch) |
(idle << pmu_idle_msch);
break;
case bus_ide_req_cci:
idle_mask = BIT(pmu_idle_ack_cci) | BIT(pmu_idle_cci);
idle_target = (idle << pmu_idle_ack_cci) |
(idle << pmu_idle_cci);
break;
default:
ERROR("%s: Unsupported the idle request\n", __func__);
break;
}
val = mmio_read_32(PMU_BASE + PMU_BUS_IDE_REQ);
if (idle)
val |= mask;
else
val &= ~mask;
mmio_write_32(PMU_BASE + PMU_BUS_IDE_REQ, val);
while ((mmio_read_32(PMU_BASE +
PMU_BUS_IDE_ST) & idle_mask) != idle_target) {
wait_cnt++;
if (!(wait_cnt % MAX_WAIT_CONUT))
WARN("%s:st=%x(%x)\n", __func__,
mmio_read_32(PMU_BASE + PMU_BUS_IDE_ST),
idle_mask);
}
return 0;
}
void pmu_scu_b_pwrup(void)
{
regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);
rk3368_pmu_bus_idle(bus_ide_req_clst_b, 0);
}
static void pmu_scu_b_pwrdn(void)
{
uint32_t wait_cnt = 0;
if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
PM_PWRDM_CPUSB_MSK) != PM_PWRDM_CPUSB_MSK) {
ERROR("%s: not all cpus is off\n", __func__);
return;
}
rk3368_flash_l2_b();
regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);
while (!(mmio_read_32(PMU_BASE +
PMU_CORE_PWR_ST) & BIT(clst_b_l2_wfi))) {
wait_cnt++;
if (!(wait_cnt % MAX_WAIT_CONUT))
ERROR("%s:wait cluster-b l2(%x)\n", __func__,
mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
}
rk3368_pmu_bus_idle(bus_ide_req_clst_b, 1);
}
static void pmu_sleep_mode_config(void)
{
uint32_t pwrmd_core, pwrmd_com;
pwrmd_core = BIT(pmu_mdcr_cpu0_pd) |
BIT(pmu_mdcr_scu_l_pd) |
BIT(pmu_mdcr_l2_flush) |
BIT(pmu_mdcr_l2_idle) |
BIT(pmu_mdcr_clr_clst_l) |
BIT(pmu_mdcr_clr_core) |
BIT(pmu_mdcr_clr_cci) |
BIT(pmu_mdcr_core_pd);
pwrmd_com = BIT(pmu_mode_en) |
BIT(pmu_mode_sref_enter) |
BIT(pmu_mode_pwr_off);
regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_l_wkup_en);
regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_b_wkup_en);
regs_updata_bit_clr(PMU_BASE + PMU_WKUP_CFG2, pmu_gpio_wkup_en);
mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(2));
mmio_write_32(PMU_BASE + PMU_PLLRST_CNT, CYCL_24M_CNT_US(100));
mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_24M_CNT_MS(2));
mmio_write_32(PMU_BASE + PMU_PWRMD_CORE, pwrmd_core);
mmio_write_32(PMU_BASE + PMU_PWRMD_COM, pwrmd_com);
dsb();
}
static void ddr_suspend_save(void)
{
ddr_reg_save(1, psram_sleep_cfg->ddr_data);
}
static void pmu_set_sleep_mode(void)
{
ddr_suspend_save();
pmu_sleep_mode_config();
soc_sleep_config();
regs_updata_bit_set(PMU_BASE + PMU_PWRMD_CORE, pmu_mdcr_global_int_dis);
regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_glbl_int_dis_b);
pmu_scu_b_pwrdn();
mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
(PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
CPU_BOOT_ADDR_WMASK);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
(PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
CPU_BOOT_ADDR_WMASK);
}
void plat_rockchip_pmusram_prepare(void)
{
uint32_t *sram_dst, *sram_src;
size_t sram_size = 2;
uint32_t code_size, data_size;
/* prepare the pmusram code and data */
sram_dst = (uint32_t *)PMUSRAM_BASE;
sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start;
sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end -
(uint32_t *)sram_src;
u32_align_cpy(sram_dst, sram_src, sram_size);
/* ddr code */
sram_dst += sram_size;
sram_src = ddr_get_resume_code_base();
code_size = ddr_get_resume_code_size();
u32_align_cpy(sram_dst, sram_src, code_size / 4);
psram_sleep_cfg->ddr_func = (uint64_t)sram_dst;
/* ddr data */
sram_dst += (code_size / 4);
data_size = ddr_get_resume_data_size();
psram_sleep_cfg->ddr_data = (uint64_t)sram_dst;
assert((uint64_t)(sram_dst + data_size / 4) < PSRAM_SP_BOTTOM);
psram_sleep_cfg->sp = PSRAM_SP_TOP;
}
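The resulting PMUSRAM layout, as laid out by the copies above (a sketch; the actual sizes depend on the blobs being copied):
PMUSRAM_BASE:   pmu_cpuson_entrypoint code   (warm-boot / resume entry)
next:           ddr resume code              (psram_sleep_cfg->ddr_func)
next:           ddr register backup area     (psram_sleep_cfg->ddr_data, struct BACKUP_REG_TAG)
...
PSRAM_SP_BOTTOM..PSRAM_SP_TOP: stack for the resume code (psram_sleep_cfg->sp)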
static int cpus_id_power_domain(uint32_t cluster,
uint32_t cpu,
uint32_t pd_state,
uint32_t wfie_msk)
{
uint32_t pd;
uint64_t mpidr;
if (cluster)
pd = PD_CPUB0 + cpu;
else
pd = PD_CPUL0 + cpu;
if (pmu_power_domain_st(pd) == pd_state)
return 0;
if (pd_state == pmu_pd_off) {
mpidr = (cluster << MPIDR_AFF1_SHIFT) | cpu;
if (check_cpu_wfie(mpidr, wfie_msk))
return -EINVAL;
}
return pmu_power_domain_ctr(pd, pd_state);
}
static void nonboot_cpus_off(void)
{
uint32_t boot_cpu, boot_cluster, cpu;
boot_cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1());
boot_cluster = MPIDR_AFFLVL1_VAL(read_mpidr_el1());
/* turn off non-boot cpus */
for (cpu = 0; cpu < PLATFORM_CLUSTER0_CORE_COUNT; cpu++) {
if (!boot_cluster && (cpu == boot_cpu))
continue;
cpus_id_power_domain(0, cpu, pmu_pd_off, CKECK_WFEI_MSK);
}
for (cpu = 0; cpu < PLATFORM_CLUSTER1_CORE_COUNT; cpu++) {
if (boot_cluster && (cpu == boot_cpu))
continue;
cpus_id_power_domain(1, cpu, pmu_pd_off, CKECK_WFEI_MSK);
}
}
static int cores_pwr_domain_on(unsigned long mpidr, uint64_t entrypoint)
{
uint32_t cpu, cluster;
uint32_t cpuon_id;
cpu = MPIDR_AFFLVL0_VAL(mpidr);
cluster = MPIDR_AFFLVL1_VAL(mpidr);
/* Make sure the cpu is off before powering it up */
cpus_id_power_domain(cluster, cpu, pmu_pd_off, CKECK_WFEI_MSK);
cpuon_id = (cluster * PLATFORM_CLUSTER0_CORE_COUNT) + cpu;
assert(cpuson_flags[cpuon_id] == 0);
cpuson_flags[cpuon_id] = PMU_CPU_HOTPLUG;
cpuson_entry_point[cpuon_id] = entrypoint;
/* Switch boot addr to pmusram */
mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
(PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
CPU_BOOT_ADDR_WMASK);
dsb();
cpus_id_power_domain(cluster, cpu, pmu_pd_on, CKECK_WFEI_MSK);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
(COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
CPU_BOOT_ADDR_WMASK);
return 0;
}
static int cores_pwr_domain_on_finish(void)
{
uint32_t cpuon_id;
cpuon_id = plat_my_core_pos();
assert(cpuson_flags[cpuon_id] == 0);
cpuson_flags[cpuon_id] = 0x00;
return 0;
}
static int sys_pwr_domain_resume(void)
{
psram_sleep_cfg->sys_mode = PMU_SYS_ON_MODE;
pm_plls_resume();
pmu_scu_b_pwrup();
return 0;
}
static int sys_pwr_domain_suspend(void)
{
nonboot_cpus_off();
pmu_set_sleep_mode();
psram_sleep_cfg->sys_mode = PMU_SYS_SLP_MODE;
psram_sleep_cfg->ddr_flag = 0;
return 0;
}
static struct rockchip_pm_ops_cb pm_ops = {
.cores_pwr_dm_on = cores_pwr_domain_on,
.cores_pwr_dm_on_finish = cores_pwr_domain_on_finish,
.sys_pwr_dm_suspend = sys_pwr_domain_suspend,
.sys_pwr_dm_resume = sys_pwr_domain_resume,
.sys_gbl_soft_reset = soc_sys_global_soft_reset,
};
void plat_rockchip_pmu_init(void)
{
uint32_t cpu;
plat_setup_rockchip_pm_ops(&pm_ops);
for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
cpuson_flags[cpu] = 0;
psram_sleep_cfg->sys_mode = PMU_SYS_ON_MODE;
psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;
nonboot_cpus_off();
INFO("%s(%d): pd status %x\n", __func__, __LINE__,
mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}

View File

@ -0,0 +1,227 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PMU_H__
#define __PMU_H__
/* Allocate the sp region in pmusram */
#define PSRAM_SP_SIZE 0x80
#define PSRAM_SP_BOTTOM (PSRAM_SP_TOP - PSRAM_SP_SIZE)
/*****************************************************************************
* pmu control and status register offsets
*****************************************************************************/
#define PMU_WKUP_CFG0 0x0
#define PMU_WKUP_CFG1 0x4
#define PMU_WKUP_CFG2 0x8
#define PMU_TIMEOUT_CNT 0x7c
#define PMU_PWRDN_CON 0xc
#define PMU_PWRDN_ST 0x10
#define PMU_CORE_PWR_ST 0x38
#define PMU_PWRMD_CORE 0x14
#define PMU_PWRMD_COM 0x18
#define PMU_SFT_CON 0x1c
#define PMU_BUS_IDE_REQ 0x3c
#define PMU_BUS_IDE_ST 0x40
#define PMU_OSC_CNT 0x48
#define PMU_PLLLOCK_CNT 0x4c
#define PMU_PLLRST_CNT 0x50
#define PMU_STABLE_CNT 0x54
#define PMU_DDRIO_PWR_CNT 0x58
#define PMU_WKUPRST_CNT 0x5c
enum pmu_powermode_core {
pmu_mdcr_global_int_dis = 0,
pmu_mdcr_core_src_gt,
pmu_mdcr_clr_cci,
pmu_mdcr_cpu0_pd,
pmu_mdcr_clr_clst_l = 4,
pmu_mdcr_clr_core,
pmu_mdcr_scu_l_pd,
pmu_mdcr_core_pd,
pmu_mdcr_l2_idle = 8,
pmu_mdcr_l2_flush
};
/*
* the shift of bits for cores status
*/
enum pmu_core_pwrst_shift {
clstl_cpu_wfe = 2,
clstl_cpu_wfi = 6,
clstb_cpu_wfe = 12,
clstb_cpu_wfi = 16
};
enum pmu_pdid {
PD_CPUL0 = 0,
PD_CPUL1,
PD_CPUL2,
PD_CPUL3,
PD_SCUL,
PD_CPUB0 = 5,
PD_CPUB1,
PD_CPUB2,
PD_CPUB3,
PD_SCUB = 9,
PD_PERI = 13,
PD_VIDEO,
PD_VIO,
PD_GPU0,
PD_GPU1,
PD_END
};
enum pmu_bus_ide {
bus_ide_req_clst_l = 0,
bus_ide_req_clst_b,
bus_ide_req_gpu,
bus_ide_req_core,
bus_ide_req_bus = 4,
bus_ide_req_dma,
bus_ide_req_peri,
bus_ide_req_video,
bus_ide_req_vio = 8,
bus_ide_req_res0,
bus_ide_req_cxcs,
bus_ide_req_alive,
bus_ide_req_pmu = 12,
bus_ide_req_msch,
bus_ide_req_cci,
bus_ide_req_cci400 = 15,
bus_ide_req_end
};
enum pmu_powermode_common {
pmu_mode_en = 0,
pmu_mode_res0,
pmu_mode_bus_pd,
pmu_mode_wkup_rst,
pmu_mode_pll_pd = 4,
pmu_mode_pwr_off,
pmu_mode_pmu_use_if,
pmu_mode_pmu_alive_use_if,
pmu_mode_osc_dis = 8,
pmu_mode_input_clamp,
pmu_mode_sref_enter,
pmu_mode_ddrc_gt,
pmu_mode_ddrio_ret = 12,
pmu_mode_ddrio_ret_deq,
pmu_mode_clr_pmu,
pmu_mode_clr_alive,
pmu_mode_clr_bus = 16,
pmu_mode_clr_dma,
pmu_mode_clr_msch,
pmu_mode_clr_peri,
pmu_mode_clr_video = 20,
pmu_mode_clr_vio,
pmu_mode_clr_gpu,
pmu_mode_clr_mcu,
pmu_mode_clr_cxcs = 24,
pmu_mode_clr_cci400,
pmu_mode_res1,
pmu_mode_res2,
pmu_mode_res3 = 28,
pmu_mode_mclst
};
enum pmu_core_power_st {
clst_l_cpu_wfe = 2,
clst_l_cpu_wfi = 6,
clst_b_l2_flsh_done = 10,
clst_b_l2_wfi = 11,
clst_b_cpu_wfe = 12,
clst_b_cpu_wfi = 16,
mcu_sleeping = 20,
};
enum pmu_sft_con {
pmu_sft_acinactm_clst_b = 5,
pmu_sft_l2flsh_clst_b,
pmu_sft_glbl_int_dis_b = 9,
pmu_sft_ddrio_ret_cfg = 11,
};
enum pmu_wkup_cfg2 {
pmu_cluster_l_wkup_en = 0,
pmu_cluster_b_wkup_en,
pmu_gpio_wkup_en,
pmu_sdio_wkup_en,
pmu_sdmmc_wkup_en,
pmu_sim_wkup_en,
pmu_timer_wkup_en,
pmu_usbdev_wkup_en,
pmu_sft_wkup_en,
pmu_wdt_mcu_wkup_en,
pmu_timeout_wkup_en,
};
enum pmu_bus_idle_st {
pmu_idle_ack_cluster_l = 0,
pmu_idle_ack_cluster_b,
pmu_idle_ack_gpu,
pmu_idle_ack_core,
pmu_idle_ack_bus,
pmu_idle_ack_dma,
pmu_idle_ack_peri,
pmu_idle_ack_video,
pmu_idle_ack_vio,
pmu_idle_ack_cci = 10,
pmu_idle_ack_msch,
pmu_idle_ack_alive,
pmu_idle_ack_pmu,
pmu_idle_ack_cxcs,
pmu_idle_ack_cci400,
pmu_inactive_cluster_l,
pmu_inactive_cluster_b,
pmu_idle_gpu,
pmu_idle_core,
pmu_idle_bus,
pmu_idle_dma,
pmu_idle_peri,
pmu_idle_video,
pmu_idle_vio,
pmu_idle_cci = 26,
pmu_idle_msch,
pmu_idle_alive,
pmu_idle_pmu,
pmu_active_cxcs,
pmu_active_cci,
};
#define PM_PWRDM_CPUSB_MSK (0xf << 5)
#define CKECK_WFE_MSK 0x1
#define CKECK_WFI_MSK 0x10
#define CKECK_WFEI_MSK 0x11
#define PD_CTR_LOOP 500
#define CHK_CPU_LOOP 500
#define MAX_WAIT_CONUT 1000
#endif /* __PMU_H__ */

View File

@ -0,0 +1,219 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <debug.h>
#include <mmio.h>
#include <platform_def.h>
#include <plat_private.h>
#include <rk3368_def.h>
#include <soc.h>
static uint32_t plls_con[END_PLL_ID][4];
/* Table of regions to map using the MMU. */
const mmap_region_t plat_rk_mmap[] = {
MAP_REGION_FLAT(CCI400_BASE, CCI400_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(GIC400_BASE, GIC400_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(STIME_BASE, STIME_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(SGRF_BASE, SGRF_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(PMUSRAM_BASE, PMUSRAM_SIZE,
MT_MEMORY | MT_RW | MT_SECURE),
MAP_REGION_FLAT(PMU_BASE, PMU_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(UART_DBG_BASE, UART_DBG_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(CRU_BASE, CRU_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(DDR_PCTL_BASE, DDR_PCTL_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(DDR_PHY_BASE, DDR_PHY_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(GRF_BASE, GRF_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(SERVICE_BUS_BASE, SERVICE_BUS_SISE,
MT_DEVICE | MT_RW | MT_SECURE),
{ 0 }
};
/* The RockChip power domain tree descriptor */
const unsigned char rockchip_power_domain_tree_desc[] = {
/* No of root nodes */
PLATFORM_SYSTEM_COUNT,
/* No of children for the root node */
PLATFORM_CLUSTER_COUNT,
/* No of children for the first cluster node */
PLATFORM_CLUSTER0_CORE_COUNT,
/* No of children for the second cluster node */
PLATFORM_CLUSTER1_CORE_COUNT
};
void secure_timer_init(void)
{
mmio_write_32(STIMER1_BASE + TIMER_LOADE_COUNT0, 0xffffffff);
mmio_write_32(STIMER1_BASE + TIMER_LOADE_COUNT1, 0xffffffff);
/* auto reload & enable the timer */
mmio_write_32(STIMER1_BASE + TIMER_CONTROL_REG, TIMER_EN);
}
void sgrf_init(void)
{
/* set all configurable ip blocks to non-secure */
mmio_write_32(SGRF_BASE + SGRF_SOC_CON(5), SGRF_SOC_CON_NS);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON(6), SGRF_SOC_CON7_BITS);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON(7), SGRF_SOC_CON_NS);
/* switch secure dma to non-secure */
mmio_write_32(SGRF_BASE + SGRF_BUSDMAC_CON(0), SGRF_BUSDMAC_CON0_NS);
mmio_write_32(SGRF_BASE + SGRF_BUSDMAC_CON(1), SGRF_BUSDMAC_CON1_NS);
dsb();
/* rst dma1 */
mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(1),
RST_DMA1_MSK | (RST_DMA1_MSK << 16));
/* rst dma2 */
mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(4),
RST_DMA2_MSK | (RST_DMA2_MSK << 16));
dsb();
/* release dma1 rst */
mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(1), (RST_DMA1_MSK << 16));
/* release dma2 rst */
mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(4), (RST_DMA2_MSK << 16));
}
void plat_rockchip_soc_init(void)
{
secure_timer_init();
sgrf_init();
}
void regs_updata_bits(uintptr_t addr, uint32_t val,
uint32_t mask, uint32_t shift)
{
uint32_t tmp, orig;
orig = mmio_read_32(addr);
tmp = orig & ~(mask << shift);
tmp |= (val & mask) << shift;
if (tmp != orig)
mmio_write_32(addr, tmp);
dsb();
}
static void plls_suspend(uint32_t pll_id)
{
plls_con[pll_id][0] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 0));
plls_con[pll_id][1] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 1));
plls_con[pll_id][2] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 2));
plls_con[pll_id][3] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 3));
mmio_write_32(CRU_BASE + PLL_CONS((pll_id), 3), PLL_SLOW_BITS);
mmio_write_32(CRU_BASE + PLL_CONS((pll_id), 3), PLL_BYPASS);
}
static void pm_plls_suspend(void)
{
plls_suspend(NPLL_ID);
plls_suspend(CPLL_ID);
plls_suspend(GPLL_ID);
plls_suspend(ABPLL_ID);
plls_suspend(ALPLL_ID);
}
static inline void plls_resume(void)
{
mmio_write_32(CRU_BASE + PLL_CONS(ABPLL_ID, 3),
plls_con[ABPLL_ID][3] | PLL_BYPASS_W_MSK);
mmio_write_32(CRU_BASE + PLL_CONS(ALPLL_ID, 3),
plls_con[ALPLL_ID][3] | PLL_BYPASS_W_MSK);
mmio_write_32(CRU_BASE + PLL_CONS(GPLL_ID, 3),
plls_con[GPLL_ID][3] | PLL_BYPASS_W_MSK);
mmio_write_32(CRU_BASE + PLL_CONS(CPLL_ID, 3),
plls_con[CPLL_ID][3] | PLL_BYPASS_W_MSK);
mmio_write_32(CRU_BASE + PLL_CONS(NPLL_ID, 3),
plls_con[NPLL_ID][3] | PLL_BYPASS_W_MSK);
}
void soc_sleep_config(void)
{
int i = 0;
for (i = 0; i < CRU_CLKGATES_CON_CNT; i++)
mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i), 0xffff0000);
pm_plls_suspend();
for (i = 0; i < CRU_CLKGATES_CON_CNT; i++)
mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i), 0xffff0000);
}
void pm_plls_resume(void)
{
plls_resume();
mmio_write_32(CRU_BASE + PLL_CONS(ABPLL_ID, 3),
plls_con[ABPLL_ID][3] | PLLS_MODE_WMASK);
mmio_write_32(CRU_BASE + PLL_CONS(ALPLL_ID, 3),
plls_con[ALPLL_ID][3] | PLLS_MODE_WMASK);
mmio_write_32(CRU_BASE + PLL_CONS(GPLL_ID, 3),
plls_con[GPLL_ID][3] | PLLS_MODE_WMASK);
mmio_write_32(CRU_BASE + PLL_CONS(CPLL_ID, 3),
plls_con[CPLL_ID][3] | PLLS_MODE_WMASK);
mmio_write_32(CRU_BASE + PLL_CONS(NPLL_ID, 3),
plls_con[NPLL_ID][3] | PLLS_MODE_WMASK);
}
void __dead2 soc_sys_global_soft_reset(void)
{
uint32_t temp_val;
mmio_write_32(CRU_BASE + PLL_CONS((GPLL_ID), 3), PLL_SLOW_BITS);
mmio_write_32(CRU_BASE + PLL_CONS((CPLL_ID), 3), PLL_SLOW_BITS);
mmio_write_32(CRU_BASE + PLL_CONS((NPLL_ID), 3), PLL_SLOW_BITS);
mmio_write_32(CRU_BASE + PLL_CONS((ABPLL_ID), 3), PLL_SLOW_BITS);
mmio_write_32(CRU_BASE + PLL_CONS((ALPLL_ID), 3), PLL_SLOW_BITS);
temp_val = mmio_read_32(CRU_BASE + CRU_GLB_RST_CON) |
PMU_RST_BY_SECOND_SFT;
mmio_write_32(CRU_BASE + CRU_GLB_RST_CON, temp_val);
mmio_write_32(CRU_BASE + CRU_GLB_SRST_SND, 0xeca8);
/*
* The HW may need some time to reset the system, so we do not
* want the core to execute any further code in the meantime.
*/
while (1)
;
}


@ -0,0 +1,162 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SOC_H__
#define __SOC_H__
enum plls_id {
ABPLL_ID = 0,
ALPLL_ID,
DPLL_ID,
CPLL_ID,
GPLL_ID,
NPLL_ID,
END_PLL_ID,
};
/*****************************************************************************
* secure timer
*****************************************************************************/
#define TIMER_LOADE_COUNT0 0x00
#define TIMER_LOADE_COUNT1 0x04
#define TIMER_CURRENT_VALUE0 0x08
#define TIMER_CURRENT_VALUE1 0x0C
#define TIMER_CONTROL_REG 0x10
#define TIMER_INTSTATUS 0x18
#define TIMER_EN 0x1
#define STIMER1_BASE (STIME_BASE + 0x20)
#define CYCL_24M_CNT_US(us) (24 * us)
#define CYCL_24M_CNT_MS(ms) (ms * CYCL_24M_CNT_US(1000))
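/*
 * Worked example: with the 24MHz source clock, CYCL_24M_CNT_US(10)
 * evaluates to 240 ticks and CYCL_24M_CNT_MS(10) to 240000 ticks.
 */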
/*****************************************************************************
* sgrf reg, offset
*****************************************************************************/
#define SGRF_SOC_CON(n) (0x0 + (n) * 4)
#define SGRF_BUSDMAC_CON(n) (0x100 + (n) * 4)
#define SGRF_SOC_CON_NS 0xffff0000
/*****************************************************************************
* con6[2]: pmusram is secure.
* con6[6]: stimer is secure.
*****************************************************************************/
#define PMUSRAM_S_SHIFT 2
#define PMUSRAM_S 1
#define STIMER_S_SHIFT 6
#define STIMER_S 1
#define SGRF_SOC_CON7_BITS ((0xffff << 16) | \
(PMUSRAM_S << PMUSRAM_S_SHIFT) | \
(STIMER_S << STIMER_S_SHIFT))
#define SGRF_BUSDMAC_CON0_NS 0xfffcfff8
#define SGRF_BUSDMAC_CON1_NS 0xffff0fff
/*
* sgrf_soc_con1~2, mask and offset
*/
#define CPU_BOOT_ADDR_WMASK 0xffff0000
#define CPU_BOOT_ADDR_ALIGN 16
/*****************************************************************************
* cru reg, offset
*****************************************************************************/
#define CRU_SOFTRST_CON 0x300
#define CRU_SOFTRSTS_CON(n) (CRU_SOFTRST_CON + ((n) * 4))
#define CRU_SOFTRSTS_CON_CNT 15
#define SOFTRST_DMA1 0x40004
#define SOFTRST_DMA2 0x10001
#define RST_DMA1_MSK 0x4
#define RST_DMA2_MSK 0x0
#define CRU_CLKSEL_CON 0x100
#define CRU_CLKSELS_CON(i) (CRU_CLKSEL_CON + ((i) * 4))
#define CRU_CLKSEL_CON_CNT 56
#define CRU_CLKGATE_CON 0x200
#define CRU_CLKGATES_CON(i) (CRU_CLKGATE_CON + ((i) * 4))
#define CRU_CLKGATES_CON_CNT 25
#define CRU_GLB_SRST_FST 0x280
#define CRU_GLB_SRST_SND 0x284
#define CRU_GLB_RST_CON 0x388
#define CRU_CONS_GATEID(i) (16 * (i))
#define GATE_ID(reg, bit) ((reg * 16) + bit)
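/*
 * Example: GATE_ID(2, 5) is 37, i.e. gate bit 5 within CRU_CLKGATES_CON(2);
 * every gating register carries 16 gate bits, hence the factor of 16.
 */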
#define PMU_RST_BY_SECOND_SFT (BIT(1) << 2)
#define PMU_RST_NOT_BY_SFT (BIT(1) << 2)
/***************************************************************************
* pll
***************************************************************************/
#define PLL_PWR_DN_MSK (0x1 << 1)
#define PLL_PWR_DN REG_WMSK_BITS(1, 1, 0x1)
#define PLL_PWR_ON REG_WMSK_BITS(0, 1, 0x1)
#define PLL_RESET REG_WMSK_BITS(1, 5, 0x1)
#define PLL_RESET_RESUME REG_WMSK_BITS(0, 5, 0x1)
#define PLL_BYPASS_MSK (0x1 << 0)
#define PLL_BYPASS_W_MSK (PLL_BYPASS_MSK << 16)
#define PLL_BYPASS REG_WMSK_BITS(1, 0, 0x1)
#define PLL_NO_BYPASS REG_WMSK_BITS(0, 0, 0x1)
#define PLL_MODE_SHIFT 8
#define PLL_MODE_MSK 0x3
#define PLLS_MODE_WMASK (PLL_MODE_MSK << (16 + PLL_MODE_SHIFT))
#define PLL_SLOW 0x0
#define PLL_NORM 0x1
#define PLL_DEEP 0x2
#define PLL_SLOW_BITS REG_WMSK_BITS(PLL_SLOW, 8, 0x3)
#define PLL_NORM_BITS REG_WMSK_BITS(PLL_NORM, 8, 0x3)
#define PLL_DEEP_BITS REG_WMSK_BITS(PLL_DEEP, 8, 0x3)
#define PLL_CONS(id, i) ((id) * 0x10 + ((i) * 4))
#define REG_W_MSK(bits_shift, msk) \
((msk) << ((bits_shift) + 16))
#define REG_VAL_CLRBITS(val, bits_shift, msk) \
(val & (~(msk << bits_shift)))
#define REG_SET_BITS(bits, bits_shift, msk) \
(((bits) & (msk)) << (bits_shift))
#define REG_WMSK_BITS(bits, bits_shift, msk) \
(REG_W_MSK(bits_shift, msk) | \
REG_SET_BITS(bits, bits_shift, msk))
#define regs_updata_bit_set(addr, shift) \
regs_updata_bits((addr), 0x1, 0x1, (shift))
#define regs_updata_bit_clr(addr, shift) \
regs_updata_bits((addr), 0x0, 0x1, (shift))
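/*
 * Expansion example: REG_WMSK_BITS(1, 5, 0x1) becomes
 * (0x1 << (5 + 16)) | (0x1 << 5); the upper half-word enables the write of
 * bit 5 and the lower half-word supplies its new value.
 */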
void __dead2 soc_sys_global_soft_reset(void);
void regs_updata_bits(uintptr_t addr, uint32_t val,
uint32_t mask, uint32_t shift);
void soc_sleep_config(void);
void pm_plls_resume(void);
#endif /* __SOC_H__ */


@ -0,0 +1,149 @@
/*
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PLATFORM_DEF_H__
#define __PLATFORM_DEF_H__
#include <arch.h>
#include <common_def.h>
#include <rk3368_def.h>
#define DEBUG_XLAT_TABLE 0
/*******************************************************************************
* Platform binary types for linking
******************************************************************************/
#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
#define PLATFORM_LINKER_ARCH aarch64
/*******************************************************************************
* Generic platform constants
******************************************************************************/
/* Size of cacheable stacks */
#if DEBUG_XLAT_TABLE
#define PLATFORM_STACK_SIZE 0x800
#elif IMAGE_BL1
#define PLATFORM_STACK_SIZE 0x440
#elif IMAGE_BL2
#define PLATFORM_STACK_SIZE 0x400
#elif IMAGE_BL31
#define PLATFORM_STACK_SIZE 0x800
#elif IMAGE_BL32
#define PLATFORM_STACK_SIZE 0x440
#endif
#define FIRMWARE_WELCOME_STR "Booting Trusted Firmware\n"
#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL2
#define PLATFORM_SYSTEM_COUNT 1
#define PLATFORM_CLUSTER_COUNT 2
#define PLATFORM_CLUSTER0_CORE_COUNT 4
#define PLATFORM_CLUSTER1_CORE_COUNT 4
#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER1_CORE_COUNT + \
PLATFORM_CLUSTER0_CORE_COUNT)
#define PLATFORM_MAX_CPUS_PER_CLUSTER 4
#define PLATFORM_NUM_AFFS (PLATFORM_SYSTEM_COUNT + \
PLATFORM_CLUSTER_COUNT + \
PLATFORM_CORE_COUNT)
#define PLAT_MAX_PWR_LVL MPIDR_AFFLVL2
/*
* This macro defines the deepest retention state possible. A higher state
* id will represent an invalid or a power down state.
*/
#define PLAT_MAX_RET_STATE 1
/*
* This macro defines the deepest power down states possible. Any state ID
* higher than this is invalid.
*/
#define PLAT_MAX_OFF_STATE 2
/*******************************************************************************
* Platform memory map related constants
******************************************************************************/
/* TF text, RO and RW data; size: 512KB */
#define TZRAM_BASE (0x0)
#define TZRAM_SIZE (0x80000)
/*******************************************************************************
* BL31 specific defines.
******************************************************************************/
/*
* Put BL3-1 at the top of the Trusted RAM
*/
#define BL31_BASE (TZRAM_BASE + 0x8000)
#define BL31_LIMIT (TZRAM_BASE + TZRAM_SIZE)
/*******************************************************************************
* Platform specific page table and MMU setup constants
******************************************************************************/
#define ADDR_SPACE_SIZE (1ull << 32)
#define MAX_XLAT_TABLES 8
#define MAX_MMAP_REGIONS 16
/*******************************************************************************
* Declarations and constants to access the mailboxes safely. Each mailbox is
* aligned on the biggest cache line size in the platform. This is known only
* to the platform as it might have a combination of integrated and external
* caches. Such alignment ensures that two mailboxes do not sit on the same cache
* line at any cache level. They could belong to different cpus/clusters &
* get written while being protected by different locks causing corruption of
* a valid mailbox address.
******************************************************************************/
#define CACHE_WRITEBACK_SHIFT 6
#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
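/*
 * With CACHE_WRITEBACK_SHIFT of 6 the granule is 64 bytes, the largest cache
 * line expected on this platform, so each mailbox gets a cache line of its
 * own.
 */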
/*
* Define GICD and GICC and GICR base
*/
#define PLAT_RK_GICD_BASE RK3368_GICD_BASE
#define PLAT_RK_GICC_BASE RK3368_GICC_BASE
/*
* Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
* terminology. On a GICv2 system or mode, the lists will be merged and treated
* as Group 0 interrupts.
*/
#define PLAT_RK_G1S_IRQS RK_G1S_IRQS
#define PLAT_RK_UART_BASE RK3368_UART2_BASE
#define PLAT_RK_UART_CLOCK RK3368_UART_CLOCK
#define PLAT_RK_UART_BAUDRATE RK3368_BAUDRATE
#define PLAT_RK_CCI_BASE CCI400_BASE
#define PLAT_RK_PRIMARY_CPU 0x0
#define RK_PLAT_AARCH_CFG RK_PLAT_CFG0
#endif /* __PLATFORM_DEF_H__ */


@ -0,0 +1,74 @@
#
# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
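# ${PLAT} is the platform name given on the make command line (rk3368 for
# this port), so RK_PLAT_SOC resolves to the matching SoC directory below.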
RK_PLAT := plat/rockchip
RK_PLAT_SOC := ${RK_PLAT}/${PLAT}
RK_PLAT_COMMON := ${RK_PLAT}/common
PLAT_INCLUDES := -I${RK_PLAT_COMMON}/ \
-I${RK_PLAT_COMMON}/include/ \
-I${RK_PLAT_COMMON}/pmusram \
-I${RK_PLAT_COMMON}/drivers/pmu/ \
-I${RK_PLAT_SOC}/ \
-I${RK_PLAT_SOC}/drivers/pmu/ \
-I${RK_PLAT_SOC}/drivers/soc/ \
-I${RK_PLAT_SOC}/drivers/ddr/ \
-I${RK_PLAT_SOC}/include/
RK_GIC_SOURCES := drivers/arm/gic/common/gic_common.c \
drivers/arm/gic/v2/gicv2_main.c \
drivers/arm/gic/v2/gicv2_helpers.c \
plat/common/plat_gicv2.c \
${RK_PLAT}/common/rockchip_gicv2.c
PLAT_BL_COMMON_SOURCES := lib/aarch64/xlat_tables.c \
plat/common/aarch64/plat_common.c \
plat/common/aarch64/plat_psci_common.c
BL31_SOURCES += ${RK_GIC_SOURCES} \
drivers/arm/cci/cci.c \
drivers/console/console.S \
drivers/ti/uart/16550_console.S \
drivers/delay_timer/delay_timer.c \
lib/cpus/aarch64/cortex_a53.S \
plat/common/aarch64/platform_mp_stack.S \
${RK_PLAT_COMMON}/aarch64/plat_helpers.S \
${RK_PLAT_COMMON}/bl31_plat_setup.c \
${RK_PLAT_COMMON}/pmusram/pmu_sram_cpus_on.S \
${RK_PLAT_COMMON}/pmusram/pmu_sram.c \
${RK_PLAT_COMMON}/plat_delay_timer.c \
${RK_PLAT_COMMON}/plat_pm.c \
${RK_PLAT_COMMON}/plat_topology.c \
${RK_PLAT_COMMON}/aarch64/platform_common.c \
${RK_PLAT_SOC}/drivers/pmu/pmu.c \
${RK_PLAT_SOC}/drivers/soc/soc.c \
				${RK_PLAT_SOC}/drivers/ddr/ddr_rk3368.c
ENABLE_PLAT_COMPAT := 0


@ -0,0 +1,136 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PLAT_DEF_H__
#define __PLAT_DEF_H__
/* Special value used to verify platform parameters from BL2 to BL3-1 */
#define RK_BL31_PLAT_PARAM_VAL 0x0f1e2d3c4b5a6978ULL
#define CCI400_BASE 0xffb90000
#define CCI400_SIZE 0x10000
#define GIC400_BASE 0xffb70000
#define GIC400_SIZE 0x10000
#define STIME_BASE 0xff830000
#define STIME_SIZE 0x10000
#define CRU_BASE 0xff760000
#define CRU_SIZE 0x10000
#define GRF_BASE 0xff770000
#define GRF_SIZE 0x10000
#define SGRF_BASE 0xff740000
#define SGRF_SIZE 0x10000
#define PMU_BASE 0xff730000
#define PMU_GRF_BASE 0xff738000
#define PMU_SIZE 0x10000
#define RK_INTMEM_BASE 0xff8c0000
#define RK_INTMEM_SIZE 0x10000
#define UART_DBG_BASE 0xff690000
#define UART_DBG_SIZE 0x10000
#define PMUSRAM_BASE 0xff720000
#define PMUSRAM_SIZE 0x10000
#define PMUSRAM_RSIZE 0x1000
#define DDR_PCTL_BASE 0xff610000
#define DDR_PCTL_SIZE 0x10000
#define DDR_PHY_BASE 0xff620000
#define DDR_PHY_SIZE 0x10000
#define SERVICE_BUS_BASE 0xffac0000
#define SERVICE_BUS_SISE 0x50000
#define COLD_BOOT_BASE 0xffff0000
/**************************************************************************
* UART related constants
**************************************************************************/
#define RK3368_UART2_BASE UART_DBG_BASE
#define RK3368_BAUDRATE 115200
#define RK3368_UART_CLOCK 24000000
/******************************************************************************
* System counter frequency related constants
******************************************************************************/
#define SYS_COUNTER_FREQ_IN_TICKS 24000000
#define SYS_COUNTER_FREQ_IN_MHZ 24
/******************************************************************************
* GIC-400 & interrupt handling related constants
******************************************************************************/
/* Base rk_platform compatible GIC memory map */
#define RK3368_GICD_BASE (GIC400_BASE + 0x1000)
#define RK3368_GICC_BASE (GIC400_BASE + 0x2000)
#define RK3368_GICR_BASE 0 /* no GICR in GIC-400 */
/*****************************************************************************
* CCI-400 related constants
******************************************************************************/
#define PLAT_RK_CCI_CLUSTER0_SL_IFACE_IX 3
#define PLAT_RK_CCI_CLUSTER1_SL_IFACE_IX 4
/******************************************************************************
* cpu up status
******************************************************************************/
#define PMU_CPU_HOTPLUG 0xdeadbeaf
#define PMU_CPU_AUTO_PWRDN 0xabcdef12
/******************************************************************************
* sgi, ppi
******************************************************************************/
#define RK_IRQ_SEC_PHY_TIMER 29
#define RK_IRQ_SEC_SGI_0 8
#define RK_IRQ_SEC_SGI_1 9
#define RK_IRQ_SEC_SGI_2 10
#define RK_IRQ_SEC_SGI_3 11
#define RK_IRQ_SEC_SGI_4 12
#define RK_IRQ_SEC_SGI_5 13
#define RK_IRQ_SEC_SGI_6 14
#define RK_IRQ_SEC_SGI_7 15
/*
* Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
* terminology. On a GICv2 system or mode, the lists will be merged and treated
* as Group 0 interrupts.
*/
#define RK_G1S_IRQS (RK_IRQ_SEC_PHY_TIMER)
#endif /* __PLAT_DEF_H__ */


@ -0,0 +1,312 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <assert.h>
#include <bakery_lock.h>
#include <debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <mmio.h>
#include <platform.h>
#include <platform_def.h>
#include <plat_private.h>
#include <rk3399_def.h>
#include <pmu_sram.h>
#include <soc.h>
#include <pmu.h>
#include <pmu_com.h>
static struct psram_data_t *psram_sleep_cfg =
(struct psram_data_t *)PSRAM_DT_BASE;
/*
* There are two ways to power a core on or off:
* 1) Control its power domain directly through the PMU_PWRDN_CON reg;
*    this is the core_pwr_pd mode.
* 2) Enable the core power management in the PMU_CORE_PM_CON reg; the
*    power domain is then switched off automatically once the core enters
*    wfi. This is the core_pwr_wfi or core_pwr_wfi_int mode.
* core_pm_cfg_info records which of the two methods is in use for each core.
*/
static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT]
#if USE_COHERENT_MEM
__attribute__ ((section("tzfw_coherent_mem")))
#endif
;/* coherent */
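/*
 * With USE_COHERENT_MEM the array sits in the coherent section and is seen
 * consistently by cores running with caches disabled; otherwise the writer
 * flushes the updated entry by hand in set_cpus_pwr_domain_cfg_info().
 */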
void plat_rockchip_pmusram_prepare(void)
{
uint32_t *sram_dst, *sram_src;
size_t sram_size = 2;
/*
* pmu sram code and data prepare
*/
sram_dst = (uint32_t *)PMUSRAM_BASE;
sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start;
sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end -
(uint32_t *)sram_src;
u32_align_cpy(sram_dst, sram_src, sram_size);
psram_sleep_cfg->sp = PSRAM_DT_BASE;
}
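/*
 * The copy above places the warm-boot code between
 * pmu_cpuson_entrypoint_start and pmu_cpuson_entrypoint_end into PMUSRAM so
 * that a core released from power down can run it from there, and parks the
 * saved stack pointer at PSRAM_DT_BASE.
 */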
static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
{
return core_pm_cfg_info[cpu_id];
}
static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value)
{
core_pm_cfg_info[cpu_id] = value;
#if !USE_COHERENT_MEM
flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id],
sizeof(uint32_t));
#endif
}
static int cpus_power_domain_on(uint32_t cpu_id)
{
uint32_t cfg_info;
uint32_t cpu_pd = PD_CPUL0 + cpu_id;
/*
* There are two ways to power a core on or off:
* 1) Switch its power domain on or off via the PMU_PWRDN_CON reg.
* 2) Enable the core power management in the PMU_CORE_PM_CON reg; the
*    power domain is then switched off automatically once the core
*    enters wfi.
*/
cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);
if (cfg_info == core_pwr_pd) {
/* disable core_pm cfg */
mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
CORES_PM_DISABLE);
/* if the core is already on, power it off first */
if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0);
pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
}
pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
} else {
if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
WARN("%s: cpu%d is not in off,!\n", __func__, cpu_id);
return -EINVAL;
}
mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
BIT(core_pm_sft_wakeup_en));
}
return 0;
}
static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
uint32_t cpu_pd;
uint32_t core_pm_value;
cpu_pd = PD_CPUL0 + cpu_id;
if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
return 0;
if (pd_cfg == core_pwr_pd) {
if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
return -EINVAL;
/* disable core_pm cfg */
mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
CORES_PM_DISABLE);
set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
} else {
set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
core_pm_value = BIT(core_pm_en);
if (pd_cfg == core_pwr_wfi_int)
core_pm_value |= BIT(core_pm_int_wakeup_en);
mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
core_pm_value);
}
return 0;
}
static void nonboot_cpus_off(void)
{
uint32_t boot_cpu, cpu;
boot_cpu = plat_my_core_pos();
/* turn off the non-boot cpus */
for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
if (cpu == boot_cpu)
continue;
cpus_power_domain_off(cpu, core_pwr_pd);
}
}
static int cores_pwr_domain_on(unsigned long mpidr, uint64_t entrypoint)
{
uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);
assert(cpuson_flags[cpu_id] == 0);
cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
cpuson_entry_point[cpu_id] = entrypoint;
dsb();
cpus_power_domain_on(cpu_id);
return 0;
}
static int cores_pwr_domain_off(void)
{
uint32_t cpu_id = plat_my_core_pos();
cpus_power_domain_off(cpu_id, core_pwr_wfi);
return 0;
}
static int cores_pwr_domain_suspend(void)
{
uint32_t cpu_id = plat_my_core_pos();
assert(cpuson_flags[cpu_id] == 0);
cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
cpuson_entry_point[cpu_id] = (uintptr_t)psci_entrypoint;
dsb();
cpus_power_domain_off(cpu_id, core_pwr_wfi_int);
return 0;
}
static int cores_pwr_domain_on_finish(void)
{
uint32_t cpu_id = plat_my_core_pos();
cpuson_flags[cpu_id] = 0;
cpuson_entry_point[cpu_id] = 0;
/* Disable core_pm */
mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE);
return 0;
}
static int cores_pwr_domain_resume(void)
{
uint32_t cpu_id = plat_my_core_pos();
cpuson_flags[cpu_id] = 0;
cpuson_entry_point[cpu_id] = 0;
/* Disable core_pm */
mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE);
return 0;
}
static void sys_slp_config(void)
{
uint32_t slp_mode_cfg = 0;
slp_mode_cfg = PMU_PWR_MODE_EN |
PMU_CPU0_PD_EN |
PMU_L2_FLUSH_EN |
PMU_L2_IDLE_EN |
PMU_SCU_PD_EN |
PMU_CLK_CORE_SRC_GATE_EN;
mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, PMU_CLUSTER_L_WKUP_EN);
mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, PMU_CLUSTER_B_WKUP_EN);
mmio_clrbits_32(PMU_BASE + PMU_WKUP_CFG4, PMU_GPIO_WKUP_EN);
mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg);
}
static int sys_pwr_domain_suspend(void)
{
sys_slp_config();
plls_suspend();
psram_sleep_cfg->sys_mode = PMU_SYS_SLP_MODE;
pmu_sgrf_rst_hld();
return 0;
}
static int sys_pwr_domain_resume(void)
{
pmu_sgrf_rst_hld_release();
psram_sleep_cfg->sys_mode = PMU_SYS_ON_MODE;
plls_resume();
return 0;
}
static struct rockchip_pm_ops_cb pm_ops = {
.cores_pwr_dm_on = cores_pwr_domain_on,
.cores_pwr_dm_off = cores_pwr_domain_off,
.cores_pwr_dm_on_finish = cores_pwr_domain_on_finish,
.cores_pwr_dm_suspend = cores_pwr_domain_suspend,
.cores_pwr_dm_resume = cores_pwr_domain_resume,
.sys_pwr_dm_suspend = sys_pwr_domain_suspend,
.sys_pwr_dm_resume = sys_pwr_domain_resume,
.sys_gbl_soft_reset = soc_global_soft_reset,
};
void plat_rockchip_pmu_init(void)
{
uint32_t cpu;
rockchip_pd_lock_init();
plat_setup_rockchip_pm_ops(&pm_ops);
for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
cpuson_flags[cpu] = 0;
psram_sleep_cfg->sys_mode = PMU_SYS_ON_MODE;
psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;
/* cpu boot from pmusram */
mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
(PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
CPU_BOOT_ADDR_WMASK);
nonboot_cpus_off();
INFO("%s(%d): pd status %x\n", __func__, __LINE__,
mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}


@ -0,0 +1,709 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PMU_H__
#define __PMU_H__
/* Allocate the sp region in pmusram */
#define PSRAM_SP_SIZE 0x80
#define PSRAM_SP_BOTTOM (PSRAM_SP_TOP - PSRAM_SP_SIZE)
/*****************************************************************************
* Common define for per soc pmu.h
*****************************************************************************/
/* The ways of controlling the core power domains */
enum cores_pm_ctr_mode {
core_pwr_pd = 0,
core_pwr_wfi = 1,
core_pwr_wfi_int = 2
};
/*****************************************************************************
* pmu con,reg
*****************************************************************************/
#define PMU_WKUP_CFG(n) ((n) * 4)
#define PMU_CORE_PM_CON(cpu) (0xc0 + (cpu * 4))
/* the shift of bits for cores status */
enum pmu_core_pwrst_shift {
clstl_cpu_wfe = 2,
clstl_cpu_wfi = 6,
clstb_cpu_wfe = 12,
clstb_cpu_wfi = 16
};
#define CKECK_WFE_MSK 0x1
#define CKECK_WFI_MSK 0x10
#define CKECK_WFEI_MSK 0x11
enum pmu_powerdomain_id {
PD_CPUL0 = 0,
PD_CPUL1,
PD_CPUL2,
PD_CPUL3,
PD_CPUB0,
PD_CPUB1,
PD_SCUL,
PD_SCUB,
PD_TCPD0,
PD_TCPD1,
PD_CCI,
PD_PERILP,
PD_PERIHP,
PD_CENTER,
PD_VIO,
PD_GPU,
PD_VCODEC,
PD_VDU,
PD_RGA,
PD_IEP,
PD_VO,
PD_ISP0 = 22,
PD_ISP1,
PD_HDCP,
PD_GMAC,
PD_EMMC,
PD_USB3,
PD_EDP,
PD_GIC,
PD_SD,
PD_SDIOAUDIO,
PD_END
};
enum powerdomain_state {
PMU_POWER_ON = 0,
PMU_POWER_OFF,
};
enum pmu_bus_id {
BUS_ID_GPU = 0,
BUS_ID_PERILP,
BUS_ID_PERIHP,
BUS_ID_VCODEC,
BUS_ID_VDU,
BUS_ID_RGA,
BUS_ID_IEP,
BUS_ID_VOPB,
BUS_ID_VOPL,
BUS_ID_ISP0,
BUS_ID_ISP1,
BUS_ID_HDCP,
BUS_ID_USB3,
BUS_ID_PERILPM0,
BUS_ID_CENTER,
BUS_ID_CCIM0,
BUS_ID_CCIM1,
BUS_ID_VIO,
BUS_ID_MSCH0,
BUS_ID_MSCH1,
BUS_ID_ALIVE,
BUS_ID_PMU,
BUS_ID_EDP,
BUS_ID_GMAC,
BUS_ID_EMMC,
BUS_ID_CENTER1,
BUS_ID_PMUM0,
BUS_ID_GIC,
BUS_ID_SD,
BUS_ID_SDIOAUDIO,
};
enum pmu_bus_state {
BUS_ACTIVE,
BUS_IDLE,
};
/* pmu_cpuapm bit */
enum pmu_cores_pm_by_wfi {
core_pm_en = 0,
core_pm_int_wakeup_en,
core_pm_resv,
core_pm_sft_wakeup_en
};
enum pmu_wkup_cfg0 {
PMU_GPIO0A_POSE_WKUP_EN = 0,
PMU_GPIO0B_POSE_WKUP_EN = 8,
PMU_GPIO0C_POSE_WKUP_EN = 16,
PMU_GPIO0D_POSE_WKUP_EN = 24,
};
enum pmu_wkup_cfg1 {
PMU_GPIO0A_NEGEDGE_WKUP_EN = 0,
PMU_GPIO0B_NEGEDGE_WKUP_EN = 7,
PMU_GPIO0C_NEGEDGE_WKUP_EN = 16,
PMU_GPIO0D_NEGEDGE_WKUP_EN = 24,
};
enum pmu_wkup_cfg2 {
PMU_GPIO1A_POSE_WKUP_EN = 0,
PMU_GPIO1B_POSE_WKUP_EN = 7,
PMU_GPIO1C_POSE_WKUP_EN = 16,
PMU_GPIO1D_POSE_WKUP_EN = 24,
};
enum pmu_wkup_cfg3 {
PMU_GPIO1A_NEGEDGE_WKUP_EN = 0,
PMU_GPIO1B_NEGEDGE_WKUP_EN = 7,
PMU_GPIO1C_NEGEDGE_WKUP_EN = 16,
PMU_GPIO1D_NEGEDGE_WKUP_EN = 24,
};
/* pmu_wkup_cfg4 */
enum pmu_wkup_cfg4 {
PMU_CLUSTER_L_WKUP_EN = 0,
PMU_CLUSTER_B_WKUP_EN,
PMU_GPIO_WKUP_EN,
PMU_SDIO_WKUP_EN,
PMU_SDMMC_WKUP_EN,
PMU_TIMER_WKUP_EN = 6,
PMU_USBDEV_WKUP_EN,
PMU_SFT_WKUP_EN,
PMU_M0_WDT_WKUP_EN,
PMU_TIMEOUT_WKUP_EN,
PMU_PWM_WKUP_EN,
PMU_PCIE_WKUP_EN = 13,
};
enum pmu_pwrdn_con {
PMU_A53_L0_PWRDWN_EN = 0,
PMU_A53_L1_PWRDWN_EN,
PMU_A53_L2_PWRDWN_EN,
PMU_A53_L3_PWRDWN_EN,
PMU_A72_B0_PWRDWN_EN,
PMU_A72_B1_PWRDWN_EN,
PMU_SCU_L_PWRDWN_EN,
PMU_SCU_B_PWRDWN_EN,
PMU_TCPD0_PWRDWN_EN,
PMU_TCPD1_PWRDWN_EN,
PMU_CCI_PWRDWN_EN,
PMU_PERILP_PWRDWN_EN,
PMU_PERIHP_PWRDWN_EN,
PMU_CENTER_PWRDWN_EN,
PMU_VIO_PWRDWN_EN,
PMU_GPU_PWRDWN_EN,
PMU_VCODEC_PWRDWN_EN,
PMU_VDU_PWRDWN_EN,
PMU_RGA_PWRDWN_EN,
PMU_IEP_PWRDWN_EN,
PMU_VO_PWRDWN_EN,
PMU_ISP0_PWRDWN_EN = 22,
PMU_ISP1_PWRDWN_EN,
PMU_HDCP_PWRDWN_EN,
PMU_GMAC_PWRDWN_EN,
PMU_EMMC_PWRDWN_EN,
PMU_USB3_PWRDWN_EN,
PMU_EDP_PWRDWN_EN,
PMU_GIC_PWRDWN_EN,
PMU_SD_PWRDWN_EN,
PMU_SDIOAUDIO_PWRDWN_EN,
};
enum pmu_pwrdn_st {
PMU_A53_L0_PWRDWN_ST = 0,
PMU_A53_L1_PWRDWN_ST,
PMU_A53_L2_PWRDWN_ST,
PMU_A53_L3_PWRDWN_ST,
PMU_A72_B0_PWRDWN_ST,
PMU_A72_B1_PWRDWN_ST,
PMU_SCU_L_PWRDWN_ST,
PMU_SCU_B_PWRDWN_ST,
PMU_TCPD0_PWRDWN_ST,
PMU_TCPD1_PWRDWN_ST,
PMU_CCI_PWRDWN_ST,
PMU_PERILP_PWRDWN_ST,
PMU_PERIHP_PWRDWN_ST,
PMU_CENTER_PWRDWN_ST,
PMU_VIO_PWRDWN_ST,
PMU_GPU_PWRDWN_ST,
PMU_VCODEC_PWRDWN_ST,
PMU_VDU_PWRDWN_ST,
PMU_RGA_PWRDWN_ST,
PMU_IEP_PWRDWN_ST,
PMU_VO_PWRDWN_ST,
PMU_ISP0_PWRDWN_ST = 22,
PMU_ISP1_PWRDWN_ST,
PMU_HDCP_PWRDWN_ST,
PMU_GMAC_PWRDWN_ST,
PMU_EMMC_PWRDWN_ST,
PMU_USB3_PWRDWN_ST,
PMU_EDP_PWRDWN_ST,
PMU_GIC_PWRDWN_ST,
PMU_SD_PWRDWN_ST,
PMU_SDIOAUDIO_PWRDWN_ST,
};
enum pmu_pll_con {
PMU_PLL_PD_CFG = 0,
PMU_SFT_PLL_PD = 8,
};
enum pmu_pwermode_con {
PMU_PWR_MODE_EN = 0,
PMU_WKUP_RST_EN,
PMU_INPUT_CLAMP_EN,
PMU_OSC_DIS,
PMU_ALIVE_USE_LF,
PMU_PMU_USE_LF,
PMU_POWER_OFF_REQ_CFG,
PMU_CHIP_PD_EN,
PMU_PLL_PD_EN,
PMU_CPU0_PD_EN,
PMU_L2_FLUSH_EN,
PMU_L2_IDLE_EN,
PMU_SCU_PD_EN,
PMU_CCI_PD_EN,
PMU_PERILP_PD_EN,
PMU_CENTER_PD_EN,
PMU_SREF0_ENTER_EN,
PMU_DDRC0_GATING_EN,
PMU_DDRIO0_RET_EN,
PMU_DDRIO0_RET_DE_REQ,
PMU_SREF1_ENTER_EN,
PMU_DDRC1_GATING_EN,
PMU_DDRIO1_RET_EN,
PMU_DDRIO1_RET_DE_REQ,
PMU_CLK_CENTER_SRC_GATE_EN = 26,
PMU_CLK_PERILP_SRC_GATE_EN,
PMU_CLK_CORE_SRC_GATE_EN,
PMU_DDRIO_RET_HW_DE_REQ,
PMU_SLP_OUTPUT_CFG,
PMU_MAIN_CLUSTER,
};
enum pmu_sft_con {
PMU_WKUP_SFT = 0,
PMU_INPUT_CLAMP_CFG,
PMU_OSC_DIS_CFG,
PMU_PMU_LF_EN_CFG,
PMU_ALIVE_LF_EN_CFG,
PMU_24M_EN_CFG,
PMU_DBG_PWRUP_L0_CFG,
PMU_WKUP_SFT_M0,
PMU_DDRCTL0_C_SYSREQ_CFG,
PMU_DDR0_IO_RET_CFG,
PMU_DDRCTL1_C_SYSREQ_CFG = 12,
PMU_DDR1_IO_RET_CFG,
};
enum pmu_int_con {
PMU_PMU_INT_EN = 0,
PMU_PWRMD_WKUP_INT_EN,
PMU_WKUP_GPIO0_NEG_INT_EN,
PMU_WKUP_GPIO0_POS_INT_EN,
PMU_WKUP_GPIO1_NEG_INT_EN,
PMU_WKUP_GPIO1_POS_INT_EN,
};
enum pmu_int_st {
PMU_PWRMD_WKUP_INT_ST = 1,
PMU_WKUP_GPIO0_NEG_INT_ST,
PMU_WKUP_GPIO0_POS_INT_ST,
PMU_WKUP_GPIO1_NEG_INT_ST,
PMU_WKUP_GPIO1_POS_INT_ST,
};
enum pmu_gpio0_pos_int_con {
PMU_GPIO0A_POS_INT_EN = 0,
PMU_GPIO0B_POS_INT_EN = 8,
PMU_GPIO0C_POS_INT_EN = 16,
PMU_GPIO0D_POS_INT_EN = 24,
};
enum pmu_gpio0_neg_int_con {
PMU_GPIO0A_NEG_INT_EN = 0,
PMU_GPIO0B_NEG_INT_EN = 8,
PMU_GPIO0C_NEG_INT_EN = 16,
PMU_GPIO0D_NEG_INT_EN = 24,
};
enum pmu_gpio1_pos_int_con {
PMU_GPIO1A_POS_INT_EN = 0,
PMU_GPIO1B_POS_INT_EN = 8,
PMU_GPIO1C_POS_INT_EN = 16,
PMU_GPIO1D_POS_INT_EN = 24,
};
enum pmu_gpio1_neg_int_con {
PMU_GPIO1A_NEG_INT_EN = 0,
PMU_GPIO1B_NEG_INT_EN = 8,
PMU_GPIO1C_NEG_INT_EN = 16,
PMU_GPIO1D_NEG_INT_EN = 24,
};
enum pmu_gpio0_pos_int_st {
PMU_GPIO0A_POS_INT_ST = 0,
PMU_GPIO0B_POS_INT_ST = 8,
PMU_GPIO0C_POS_INT_ST = 16,
PMU_GPIO0D_POS_INT_ST = 24,
};
enum pmu_gpio0_neg_int_st {
PMU_GPIO0A_NEG_INT_ST = 0,
PMU_GPIO0B_NEG_INT_ST = 8,
PMU_GPIO0C_NEG_INT_ST = 16,
PMU_GPIO0D_NEG_INT_ST = 24,
};
enum pmu_gpio1_pos_int_st {
PMU_GPIO1A_POS_INT_ST = 0,
PMU_GPIO1B_POS_INT_ST = 8,
PMU_GPIO1C_POS_INT_ST = 16,
PMU_GPIO1D_POS_INT_ST = 24,
};
enum pmu_gpio1_neg_int_st {
PMU_GPIO1A_NEG_INT_ST = 0,
PMU_GPIO1B_NEG_INT_ST = 8,
PMU_GPIO1C_NEG_INT_ST = 16,
PMU_GPIO1D_NEG_INT_ST = 24,
};
/* pmu power down configure register 0x0050 */
enum pmu_pwrdn_inten {
PMU_A53_L0_PWR_SWITCH_INT_EN = 0,
PMU_A53_L1_PWR_SWITCH_INT_EN,
PMU_A53_L2_PWR_SWITCH_INT_EN,
PMU_A53_L3_PWR_SWITCH_INT_EN,
PMU_A72_B0_PWR_SWITCH_INT_EN,
PMU_A72_B1_PWR_SWITCH_INT_EN,
PMU_SCU_L_PWR_SWITCH_INT_EN,
PMU_SCU_B_PWR_SWITCH_INT_EN,
PMU_TCPD0_PWR_SWITCH_INT_EN,
PMU_TCPD1_PWR_SWITCH_INT_EN,
PMU_CCI_PWR_SWITCH_INT_EN,
PMU_PERILP_PWR_SWITCH_INT_EN,
PMU_PERIHP_PWR_SWITCH_INT_EN,
PMU_CENTER_PWR_SWITCH_INT_EN,
PMU_VIO_PWR_SWITCH_INT_EN,
PMU_GPU_PWR_SWITCH_INT_EN,
PMU_VCODEC_PWR_SWITCH_INT_EN,
PMU_VDU_PWR_SWITCH_INT_EN,
PMU_RGA_PWR_SWITCH_INT_EN,
PMU_IEP_PWR_SWITCH_INT_EN,
PMU_VO_PWR_SWITCH_INT_EN,
PMU_ISP0_PWR_SWITCH_INT_EN = 22,
PMU_ISP1_PWR_SWITCH_INT_EN,
PMU_HDCP_PWR_SWITCH_INT_EN,
PMU_GMAC_PWR_SWITCH_INT_EN,
PMU_EMMC_PWR_SWITCH_INT_EN,
PMU_USB3_PWR_SWITCH_INT_EN,
PMU_EDP_PWR_SWITCH_INT_EN,
PMU_GIC_PWR_SWITCH_INT_EN,
PMU_SD_PWR_SWITCH_INT_EN,
PMU_SDIOAUDIO_PWR_SWITCH_INT_EN,
};
enum pmu_wkup_status {
PMU_WKUP_BY_CLSTER_L_INT = 0,
PMU_WKUP_BY_CLSTER_b_INT,
PMU_WKUP_BY_GPIO_INT,
PMU_WKUP_BY_SDIO_DET,
PMU_WKUP_BY_SDMMC_DET,
PMU_WKUP_BY_TIMER = 6,
PMU_WKUP_BY_USBDEV_DET,
PMU_WKUP_BY_M0_SFT,
PMU_WKUP_BY_M0_WDT_INT,
PMU_WKUP_BY_TIMEOUT,
PMU_WKUP_BY_PWM,
PMU_WKUP_BY_PCIE = 13,
};
enum pmu_bus_clr {
PMU_CLR_GPU = 0,
PMU_CLR_PERILP,
PMU_CLR_PERIHP,
PMU_CLR_VCODEC,
PMU_CLR_VDU,
PMU_CLR_RGA,
PMU_CLR_IEP,
PMU_CLR_VOPB,
PMU_CLR_VOPL,
PMU_CLR_ISP0,
PMU_CLR_ISP1,
PMU_CLR_HDCP,
PMU_CLR_USB3,
PMU_CLR_PERILPM0,
PMU_CLR_CENTER,
PMU_CLR_CCIM1,
PMU_CLR_CCIM0,
PMU_CLR_VIO,
PMU_CLR_MSCH0,
PMU_CLR_MSCH1,
PMU_CLR_ALIVE,
PMU_CLR_PMU,
PMU_CLR_EDP,
PMU_CLR_GMAC,
PMU_CLR_EMMC,
PMU_CLR_CENTER1,
PMU_CLR_PMUM0,
PMU_CLR_GIC,
PMU_CLR_SD,
PMU_CLR_SDIOAUDIO,
};
/* PMU bus idle request register */
enum pmu_bus_idle_req {
PMU_IDLE_REQ_GPU = 0,
PMU_IDLE_REQ_PERILP,
PMU_IDLE_REQ_PERIHP,
PMU_IDLE_REQ_VCODEC,
PMU_IDLE_REQ_VDU,
PMU_IDLE_REQ_RGA,
PMU_IDLE_REQ_IEP,
PMU_IDLE_REQ_VOPB,
PMU_IDLE_REQ_VOPL,
PMU_IDLE_REQ_ISP0,
PMU_IDLE_REQ_ISP1,
PMU_IDLE_REQ_HDCP,
PMU_IDLE_REQ_USB3,
PMU_IDLE_REQ_PERILPM0,
PMU_IDLE_REQ_CENTER,
PMU_IDLE_REQ_CCIM0,
PMU_IDLE_REQ_CCIM1,
PMU_IDLE_REQ_VIO,
PMU_IDLE_REQ_MSCH0,
PMU_IDLE_REQ_MSCH1,
PMU_IDLE_REQ_ALIVE,
PMU_IDLE_REQ_PMU,
PMU_IDLE_REQ_EDP,
PMU_IDLE_REQ_GMAC,
PMU_IDLE_REQ_EMMC,
PMU_IDLE_REQ_CENTER1,
PMU_IDLE_REQ_PMUM0,
PMU_IDLE_REQ_GIC,
PMU_IDLE_REQ_SD,
PMU_IDLE_REQ_SDIOAUDIO,
};
/* pmu bus idle status register */
enum pmu_bus_idle_st {
PMU_IDLE_ST_GPU = 0,
PMU_IDLE_ST_PERILP,
PMU_IDLE_ST_PERIHP,
PMU_IDLE_ST_VCODEC,
PMU_IDLE_ST_VDU,
PMU_IDLE_ST_RGA,
PMU_IDLE_ST_IEP,
PMU_IDLE_ST_VOPB,
PMU_IDLE_ST_VOPL,
PMU_IDLE_ST_ISP0,
PMU_IDLE_ST_ISP1,
PMU_IDLE_ST_HDCP,
PMU_IDLE_ST_USB3,
PMU_IDLE_ST_PERILPM0,
PMU_IDLE_ST_CENTER,
PMU_IDLE_ST_CCIM0,
PMU_IDLE_ST_CCIM1,
PMU_IDLE_ST_VIO,
PMU_IDLE_ST_MSCH0,
PMU_IDLE_ST_MSCH1,
PMU_IDLE_ST_ALIVE,
PMU_IDLE_ST_PMU,
PMU_IDLE_ST_EDP,
PMU_IDLE_ST_GMAC,
PMU_IDLE_ST_EMMC,
PMU_IDLE_ST_CENTER1,
PMU_IDLE_ST_PMUM0,
PMU_IDLE_ST_GIC,
PMU_IDLE_ST_SD,
PMU_IDLE_ST_SDIOAUDIO,
};
enum pmu_bus_idle_ack {
PMU_IDLE_ACK_GPU = 0,
PMU_IDLE_ACK_PERILP,
PMU_IDLE_ACK_PERIHP,
PMU_IDLE_ACK_VCODEC,
PMU_IDLE_ACK_VDU,
PMU_IDLE_ACK_RGA,
PMU_IDLE_ACK_IEP,
PMU_IDLE_ACK_VOPB,
PMU_IDLE_ACK_VOPL,
PMU_IDLE_ACK_ISP0,
PMU_IDLE_ACK_ISP1,
PMU_IDLE_ACK_HDCP,
PMU_IDLE_ACK_USB3,
PMU_IDLE_ACK_PERILPM0,
PMU_IDLE_ACK_CENTER,
PMU_IDLE_ACK_CCIM0,
PMU_IDLE_ACK_CCIM1,
PMU_IDLE_ACK_VIO,
PMU_IDLE_ACK_MSCH0,
PMU_IDLE_ACK_MSCH1,
PMU_IDLE_ACK_ALIVE,
PMU_IDLE_ACK_PMU,
PMU_IDLE_ACK_EDP,
PMU_IDLE_ACK_GMAC,
PMU_IDLE_ACK_EMMC,
PMU_IDLE_ACK_CENTER1,
PMU_IDLE_ACK_PMUM0,
PMU_IDLE_ACK_GIC,
PMU_IDLE_ACK_SD,
PMU_IDLE_ACK_SDIOAUDIO,
};
enum pmu_pwrdn_con1 {
PMU_VD_SCU_L_PWRDN_EN = 0,
PMU_VD_SCU_B_PWRDN_EN,
PMU_VD_CENTER_PWRDN_EN,
};
#define PMU_WKUP_CFG0 0x00
#define PMU_WKUP_CFG1 0x04
#define PMU_WKUP_CFG2 0x08
#define PMU_WKUP_CFG3 0x0c
#define PMU_WKUP_CFG4 0x10
#define PMU_PWRDN_CON 0x14
#define PMU_PWRDN_ST 0x18
#define PMU_PLL_CON 0x1c
#define PMU_PWRMODE_CON 0x20
#define PMU_SFT_CON 0x24
#define PMU_INT_CON 0x28
#define PMU_INT_ST 0x2c
#define PMU_GPIO0_POS_INT_CON 0x30
#define PMU_GPIO0_NEG_INT_CON 0x34
#define PMU_GPIO1_POS_INT_CON 0x38
#define PMU_GPIO1_NEG_INT_CON 0x3c
#define PMU_GPIO0_POS_INT_ST 0x40
#define PMU_GPIO0_NEG_INT_ST 0x44
#define PMU_GPIO1_POS_INT_ST 0x48
#define PMU_GPIO1_NEG_INT_ST 0x4c
#define PMU_PWRDN_INTEN 0x50
#define PMU_PWRDN_STATUS 0x54
#define PMU_WAKEUP_STATUS 0x58
#define PMU_BUS_CLR 0x5c
#define PMU_BUS_IDLE_REQ 0x60
#define PMU_BUS_IDLE_ST 0x64
#define PMU_BUS_IDLE_ACK 0x68
#define PMU_CCI500_CON 0x6c
#define PMU_ADB400_CON 0x70
#define PMU_ADB400_ST 0x74
#define PMU_POWER_ST 0x78
#define PMU_CORE_PWR_ST 0x7c
#define PMU_OSC_CNT 0x80
#define PMU_PLLLOCK_CNT 0x84
#define PMU_PLLRST_CNT 0x88
#define PMU_STABLE_CNT 0x8c
#define PMU_DDRIO_PWRON_CNT 0x90
#define PMU_WAKEUP_RST_CLR_CNT 0x94
#define PMU_DDR_SREF_ST 0x98
#define PMU_SCU_L_PWRDN_CNT 0x9c
#define PMU_SCU_L_PWRUP_CNT 0xa0
#define PMU_SCU_B_PWRDN_CNT 0xa4
#define PMU_SCU_B_PWRUP_CNT 0xa8
#define PMU_GPU_PWRDN_CNT 0xac
#define PMU_GPU_PWRUP_CNT 0xb0
#define PMU_CENTER_PWRDN_CNT 0xb4
#define PMU_CENTER_PWRUP_CNT 0xb8
#define PMU_TIMEOUT_CNT 0xbc
#define PMU_CPU0APM_CON 0xc0
#define PMU_CPU1APM_CON 0xc4
#define PMU_CPU2APM_CON 0xc8
#define PMU_CPU3APM_CON 0xcc
#define PMU_CPU0BPM_CON 0xd0
#define PMU_CPU1BPM_CON 0xd4
#define PMU_NOC_AUTO_ENA 0xd8
#define PMU_PWRDN_CON1 0xdc
#define CORES_PM_DISABLE 0x0
#define PD_CTR_LOOP 500
#define CHK_CPU_LOOP 500
#endif /* __PMU_H__ */


@ -0,0 +1,348 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <debug.h>
#include <delay_timer.h>
#include <mmio.h>
#include <platform_def.h>
#include <plat_private.h>
#include <rk3399_def.h>
#include <soc.h>
/* Table of regions to map using the MMU. */
const mmap_region_t plat_rk_mmap[] = {
MAP_REGION_FLAT(GIC500_BASE, GIC500_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(CCI500_BASE, CCI500_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(STIME_BASE, STIME_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(CRUS_BASE, CRUS_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(SGRF_BASE, SGRF_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(PMU_BASE, PMU_SIZE,
MT_DEVICE | MT_RW | MT_NS),
MAP_REGION_FLAT(PMUSRAM_BASE, PMUSRAM_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(RK3399_UART2_BASE, RK3399_UART2_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
{ 0 }
};
/* The RockChip power domain tree descriptor */
const unsigned char rockchip_power_domain_tree_desc[] = {
/* No of root nodes */
PLATFORM_SYSTEM_COUNT,
/* No of children for the root node */
PLATFORM_CLUSTER_COUNT,
/* No of children for the first cluster node */
PLATFORM_CLUSTER0_CORE_COUNT,
/* No of children for the second cluster node */
PLATFORM_CLUSTER1_CORE_COUNT
};
void secure_timer_init(void)
{
mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_END_COUNT0, 0xffffffff);
mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_END_COUNT1, 0xffffffff);
mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_INIT_COUNT0, 0x0);
mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_INIT_COUNT1, 0x0);
/* auto reload & enable the timer */
mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_CONTROL_REG,
TIMER_EN | TIMER_FMODE);
}
void sgrf_init(void)
{
/* security config for master */
mmio_write_32(SGRF_BASE + SGRF_SOC_CON3_7(5),
SGRF_SOC_CON_WMSK | SGRF_SOC_ALLMST_NS);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON3_7(6),
SGRF_SOC_CON_WMSK | SGRF_SOC_ALLMST_NS);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON3_7(7),
SGRF_SOC_CON_WMSK | SGRF_SOC_ALLMST_NS);
/* security config for slave */
mmio_write_32(SGRF_BASE + SGRF_PMU_SLV_CON0_1(0),
SGRF_PMU_SLV_S_CFGED |
SGRF_PMU_SLV_CRYPTO1_NS);
mmio_write_32(SGRF_BASE + SGRF_PMU_SLV_CON0_1(1),
SGRF_PMU_SLV_CON1_CFG);
mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(0),
SGRF_SLV_S_WMSK | SGRF_SLV_S_ALL_NS);
mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(1),
SGRF_SLV_S_WMSK | SGRF_SLV_S_ALL_NS);
mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(2),
SGRF_SLV_S_WMSK | SGRF_SLV_S_ALL_NS);
mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(3),
SGRF_SLV_S_WMSK | SGRF_SLV_S_ALL_NS);
mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(4),
SGRF_SLV_S_WMSK | SGRF_SLV_S_ALL_NS);
/* security config for ddr memory */
mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(16),
SGRF_DDR_RGN_BYPS);
}
static void dma_secure_cfg(uint32_t secure)
{
if (secure) {
/* rgn0 secure for dmac0 and dmac1 */
mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON20_34(22),
SGRF_L_MST_S_DDR_RGN(0) | /* dmac0 */
SGRF_H_MST_S_DDR_RGN(0) /* dmac1 */
);
/* set dmac0 boot, under secure state */
mmio_write_32(SGRF_BASE + SGRF_SOC_CON8_15(8),
SGRF_DMAC_CFG_S);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON8_15(9),
SGRF_DMAC_CFG_S);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON8_15(10),
SGRF_DMAC_CFG_S);
/* dmac0 soft reset */
mmio_write_32(CRU_BASE + CRU_SOFTRST_CON(10),
CRU_DMAC0_RST);
udelay(5);
mmio_write_32(CRU_BASE + CRU_SOFTRST_CON(10),
CRU_DMAC0_RST_RLS);
/* set dmac1 boot, under secure state */
mmio_write_32(SGRF_BASE + SGRF_SOC_CON8_15(11),
SGRF_DMAC_CFG_S);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON8_15(12),
SGRF_DMAC_CFG_S);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON8_15(13),
SGRF_DMAC_CFG_S);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON8_15(14),
SGRF_DMAC_CFG_S);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON8_15(15),
SGRF_DMAC_CFG_S);
/* dmac1 soft reset */
mmio_write_32(CRU_BASE + CRU_SOFTRST_CON(10),
CRU_DMAC1_RST);
udelay(5);
mmio_write_32(CRU_BASE + CRU_SOFTRST_CON(10),
CRU_DMAC1_RST_RLS);
} else {
/* rgn non-secure for dmac0 and dmac1 */
mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON20_34(22),
DMAC1_RGN_NS | DMAC0_RGN_NS);
/* set dmac0 boot, under non-secure state */
mmio_write_32(SGRF_BASE + SGRF_SOC_CON8_15(8),
DMAC0_BOOT_CFG_NS);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON8_15(9),
DMAC0_BOOT_PERIPH_NS);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON8_15(10),
DMAC0_BOOT_ADDR_NS);
/* dmac0 soft reset */
mmio_write_32(CRU_BASE + CRU_SOFTRST_CON(10),
CRU_DMAC0_RST);
udelay(5);
mmio_write_32(CRU_BASE + CRU_SOFTRST_CON(10),
CRU_DMAC0_RST_RLS);
/* set dmac1 boot, under non-secure state */
mmio_write_32(SGRF_BASE + SGRF_SOC_CON8_15(11),
DMAC1_BOOT_CFG_NS);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON8_15(12),
DMAC1_BOOT_PERIPH_L_NS);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON8_15(13),
DMAC1_BOOT_ADDR_NS);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON8_15(14),
DMAC1_BOOT_PERIPH_H_NS);
mmio_write_32(SGRF_BASE + SGRF_SOC_CON8_15(15),
DMAC1_BOOT_IRQ_NS);
/* dmac1 soft reset */
mmio_write_32(CRU_BASE + CRU_SOFTRST_CON(10),
CRU_DMAC1_RST);
udelay(5);
mmio_write_32(CRU_BASE + CRU_SOFTRST_CON(10),
CRU_DMAC1_RST_RLS);
}
}
/* pll suspend */
struct deepsleep_data_s slp_data;
static void pll_suspend_prepare(uint32_t pll_id)
{
int i;
if (pll_id == PPLL_ID)
for (i = 0; i < PLL_CON_COUNT; i++)
slp_data.plls_con[pll_id][i] =
mmio_read_32(PMUCRU_BASE + PMUCRU_PPLL_CON(i));
else
for (i = 0; i < PLL_CON_COUNT; i++)
slp_data.plls_con[pll_id][i] =
mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, i));
}
static void set_pll_slow_mode(uint32_t pll_id)
{
if (pll_id == PPLL_ID)
mmio_write_32(PMUCRU_BASE + PMUCRU_PPLL_CON(3), PLL_SLOW_MODE);
else
mmio_write_32((CRU_BASE +
CRU_PLL_CON(pll_id, 3)), PLL_SLOW_MODE);
}
static void set_pll_normal_mode(uint32_t pll_id)
{
if (pll_id == PPLL_ID)
mmio_write_32(PMUCRU_BASE + PMUCRU_PPLL_CON(3), PLL_NOMAL_MODE);
else
mmio_write_32(CRU_BASE +
CRU_PLL_CON(pll_id, 3), PLL_NOMAL_MODE);
}
static void set_pll_bypass(uint32_t pll_id)
{
if (pll_id == PPLL_ID)
mmio_write_32(PMUCRU_BASE +
PMUCRU_PPLL_CON(3), PLL_BYPASS_MODE);
else
mmio_write_32(CRU_BASE +
CRU_PLL_CON(pll_id, 3), PLL_BYPASS_MODE);
}
static void _pll_suspend(uint32_t pll_id)
{
set_pll_slow_mode(pll_id);
set_pll_bypass(pll_id);
}
void plls_suspend(void)
{
uint32_t i, pll_id;
for (pll_id = ALPLL_ID; pll_id < END_PLL_ID; pll_id++)
pll_suspend_prepare(pll_id);
for (i = 0; i < CRU_CLKSEL_COUNT; i++)
slp_data.cru_clksel_con[i] =
mmio_read_32(CRU_BASE +
CRU_CLKSEL_OFFSET + i * REG_SIZE);
for (i = 0; i < PMUCRU_CLKSEL_CONUT; i++)
slp_data.pmucru_clksel_con[i] =
mmio_read_32(PMUCRU_BASE +
PMUCRU_CLKSEL_OFFSET + i * REG_SIZE);
_pll_suspend(CPLL_ID);
_pll_suspend(NPLL_ID);
_pll_suspend(VPLL_ID);
_pll_suspend(PPLL_ID);
_pll_suspend(GPLL_ID);
_pll_suspend(ABPLL_ID);
_pll_suspend(ALPLL_ID);
}
static void set_plls_nobypass(uint32_t pll_id)
{
if (pll_id == PPLL_ID)
mmio_write_32(PMUCRU_BASE + PMUCRU_PPLL_CON(3),
PLL_NO_BYPASS_MODE);
else
mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
PLL_NO_BYPASS_MODE);
}
static void plls_resume_prepare(void)
{
int i;
for (i = 0; i < CRU_CLKSEL_COUNT; i++)
mmio_write_32((CRU_BASE + CRU_CLKSEL_OFFSET + i * REG_SIZE),
REG_SOC_WMSK | slp_data.cru_clksel_con[i]);
for (i = 0; i < PMUCRU_CLKSEL_CONUT; i++)
mmio_write_32((PMUCRU_BASE +
PMUCRU_CLKSEL_OFFSET + i * REG_SIZE),
REG_SOC_WMSK | slp_data.pmucru_clksel_con[i]);
}
void plls_resume(void)
{
int pll_id;
plls_resume_prepare();
for (pll_id = ALPLL_ID; pll_id < END_PLL_ID; pll_id++) {
set_plls_nobypass(pll_id);
set_pll_normal_mode(pll_id);
}
}
void soc_global_soft_reset_init(void)
{
mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1),
CRU_PMU_SGRF_RST_RLS);
}
void __dead2 soc_global_soft_reset(void)
{
uint32_t temp_val;
set_pll_slow_mode(VPLL_ID);
set_pll_slow_mode(NPLL_ID);
set_pll_slow_mode(GPLL_ID);
set_pll_slow_mode(CPLL_ID);
set_pll_slow_mode(PPLL_ID);
set_pll_slow_mode(ABPLL_ID);
set_pll_slow_mode(ALPLL_ID);
temp_val = mmio_read_32(CRU_BASE + CRU_GLB_RST_CON) |
PMU_RST_BY_FIRST_SFT;
mmio_write_32(CRU_BASE + CRU_GLB_RST_CON, temp_val);
mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, GLB_SRST_FST_CFG_VAL);
/*
* The HW may need some time to reset the system, so we do not
* want the core to execute any further code in the meantime.
*/
while (1)
;
}
void plat_rockchip_soc_init(void)
{
secure_timer_init();
dma_secure_cfg(0);
sgrf_init();
soc_global_soft_reset_init();
}


@ -0,0 +1,242 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SOC_H__
#define __SOC_H__
#define GLB_SRST_FST_CFG_VAL 0xfdb9
#define GLB_SRST_SND_CFG_VAL 0xeca8
#define PMUCRU_PPLL_CON_OFFSET 0x000
#define PMUCRU_PPLL_CON_BASE_ADDR (PMUCRU_BASE + PMUCRU_PPLL_CON_OFFSET)
#define PMUCRU_PPLL_CON_CONUT 0x06
#define PMUCRU_PPLL_CON(num) (PMUCRU_PPLL_CON_BASE_ADDR + num * 4)
#define CRU_PLL_CON(pll_id, num) (CRU_BASE + pll_id * 0x20 + num * 4)
#define PLL_MODE_MSK 0x03
#define PLL_MODE_SHIFT 0x08
#define PLL_BYPASS_MSK 0x01
#define PLL_BYPASS_SHIFT 0x01
#define PLL_PWRDN_MSK 0x01
#define PLL_PWRDN_SHIFT 0x0
#define PLL_BYPASS BIT(1)
#define PLL_PWRDN BIT(0)
#define NO_PLL_BYPASS (0x00)
#define NO_PLL_PWRDN (0x00)
#define PLL_SLOW_MODE BITS_WITH_WMASK(PLL_MODE_MSK,\
SLOW_MODE, PLL_MODE_SHIFT)
#define PLL_BYPASS_MODE BITS_WITH_WMASK(PLL_BYPASS_MSK,\
PLL_BYPASS, PLL_BYPASS_SHIFT)
#define PLL_NO_BYPASS_MODE BITS_WITH_WMASK(PLL_BYPASS_MSK,\
NO_PLL_BYPASS, PLL_BYPASS_SHIFT)
#define PLL_NOMAL_MODE BITS_WITH_WMASK(PLL_MODE_MSK,\
NORMAL_MODE, PLL_MODE_SHIFT)
#define PLL_CON_COUNT 0x06
#define CRU_CLKSEL_COUNT 0x108
#define CRU_CLKSEL_OFFSET 0x300
#define PMUCRU_CLKSEL_CONUT 0x06
#define PMUCRU_CLKSEL_OFFSET 0x080
#define REG_SIZE 0x04
#define REG_SOC_WMSK 0xffff0000
enum plls_id {
ALPLL_ID = 0,
ABPLL_ID,
DPLL_ID,
CPLL_ID,
GPLL_ID,
NPLL_ID,
VPLL_ID,
PPLL_ID,
END_PLL_ID,
};
enum pll_work_mode {
SLOW_MODE = 0x00,
NORMAL_MODE = 0x01,
DEEP_SLOW_MODE = 0x02,
};
enum glb_sft_reset {
PMU_RST_BY_FIRST_SFT,
PMU_RST_BY_SECOND_SFT = BIT(2),
PMU_RST_NOT_BY_SFT = BIT(3),
};
struct deepsleep_data_s {
uint32_t plls_con[END_PLL_ID][PLL_CON_COUNT];
uint32_t pmucru_clksel_con[PMUCRU_CLKSEL_CONUT];
uint32_t cru_clksel_con[CRU_CLKSEL_COUNT];
};
/**************************************************
* secure timer
**************************************************/
/* channels 0~5 */
#define STIMER0_CHN_BASE(n) (STIME_BASE + 0x20 * (n))
/* channels 6~11 */
#define STIMER1_CHN_BASE(n) (STIME_BASE + 0x8000 + 0x20 * (n))
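/*
 * Example: STIMER1_CHN_BASE(5), used by secure_timer_init() in soc.c, is
 * channel 11 of the secure timer block at STIME_BASE + 0x8000 + 5 * 0x20.
 */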
/* low 32 bits */
#define TIMER_END_COUNT0 0x00
/* high 32 bits */
#define TIMER_END_COUNT1 0x04
#define TIMER_CURRENT_VALUE0 0x08
#define TIMER_CURRENT_VALUE1 0x0C
/* low 32 bits */
#define TIMER_INIT_COUNT0 0x10
/* high 32 bits */
#define TIMER_INIT_COUNT1 0x14
#define TIMER_INTSTATUS 0x18
#define TIMER_CONTROL_REG 0x1c
#define TIMER_EN 0x1
#define TIMER_FMODE (0x0 << 1)
#define TIMER_RMODE (0x1 << 1)
/**************************************************
* cru reg, offset
**************************************************/
#define CRU_SOFTRST_CON(n) (0x400 + (n) * 4)
#define CRU_DMAC0_RST BIT_WITH_WMSK(3)
/* reset release*/
#define CRU_DMAC0_RST_RLS WMSK_BIT(3)
#define CRU_DMAC1_RST BIT_WITH_WMSK(4)
/* reset release*/
#define CRU_DMAC1_RST_RLS WMSK_BIT(4)
#define CRU_GLB_RST_CON 0x0510
#define CRU_GLB_SRST_FST 0x0500
#define CRU_GLB_SRST_SND 0x0504
/**************************************************
* pmu cru reg, offset
**************************************************/
#define CRU_PMU_RSTHOLD_CON(n) (0x120 + n * 4)
/* reset hold*/
#define CRU_PMU_SGRF_RST_HOLD BIT_WITH_WMSK(6)
/* reset hold release*/
#define CRU_PMU_SGRF_RST_RLS WMSK_BIT(6)
/**************************************************
* sgrf reg, offset
**************************************************/
#define SGRF_SOC_CON0_1(n) (0xc000 + (n) * 4)
#define SGRF_SOC_CON3_7(n) (0xe00c + ((n) - 3) * 4)
#define SGRF_SOC_CON8_15(n) (0x8020 + ((n) - 8) * 4)
#define SGRF_PMU_SLV_CON0_1(n) (0xc240 + ((n) - 0) * 4)
#define SGRF_SLV_SECURE_CON0_4(n) (0xe3c0 + ((n) - 0) * 4)
#define SGRF_DDRRGN_CON0_16(n) ((n) * 4)
#define SGRF_DDRRGN_CON20_34(n) (0x50 + ((n) - 20) * 4)
/* security config for master */
#define SGRF_SOC_CON_WMSK 0xffff0000
/* all masters are non-secure */
#define SGRF_SOC_ALLMST_NS 0xffff
/* security config for slave */
#define SGRF_SLV_S_WMSK 0xffff0000
#define SGRF_SLV_S_ALL_NS 0x0
/* security config for the pmu slave IPs */
/* all slaves are non-secure */
#define SGRF_PMU_SLV_S_NS BIT_WITH_WMSK(0)
/* the slave secure attributes are configured */
#define SGRF_PMU_SLV_S_CFGED WMSK_BIT(0)
#define SGRF_PMU_SLV_CRYPTO1_NS WMSK_BIT(1)
#define SGRF_PMUSRAM_S BIT(8)
#define SGRF_PMU_SLV_CON1_CFG (SGRF_SLV_S_WMSK | \
SGRF_PMUSRAM_S)
/* ddr region */
#define SGRF_DDR_RGN_BYPS BIT_WITH_WMSK(9) /* All of ddr rgn is ns */
/* the master accesses ddr region n with the secure attribute */
#define SGRF_L_MST_S_DDR_RGN(n) BIT_WITH_WMSK((n))
/* bits[16:8]*/
#define SGRF_H_MST_S_DDR_RGN(n) BIT_WITH_WMSK((n) + 8)
/* DMAC access to peripherals: secure or non-secure */
#define SGRF_DMAC_CFG_S 0xffff0000
#define DMAC1_RGN_NS 0xff000000
#define DMAC0_RGN_NS 0x00ff0000
#define DMAC0_BOOT_CFG_NS 0xfffffff8
#define DMAC0_BOOT_PERIPH_NS 0xffff0fff
#define DMAC0_BOOT_ADDR_NS 0xffff0000
#define DMAC1_BOOT_CFG_NS 0xffff0008
#define DMAC1_BOOT_PERIPH_L_NS 0xffff0fff
#define DMAC1_BOOT_ADDR_NS 0xffff0000
#define DMAC1_BOOT_PERIPH_H_NS 0xffffffff
#define DMAC1_BOOT_IRQ_NS 0xffffffff
#define CPU_BOOT_ADDR_WMASK 0xffff0000
#define CPU_BOOT_ADDR_ALIGN 16
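CPU_BOOT_ADDR_ALIGN being 16 suggests the boot-address registers take the warm-boot entry point shifted right by 16 bits, combined with the usual upper-half write mask. A hedged sketch of how these two macros might be combined; 'boot_addr_reg' is a placeholder, not a documented register offset from this patch:

#include <mmio.h>
#include <stdint.h>

/* Sketch only: programs a (suitably aligned) entry point into an SGRF
 * boot-address register using the write-mask convention. */
static inline void set_cpu_boot_addr(uintptr_t boot_addr_reg, uint64_t entry)
{
	mmio_write_32(boot_addr_reg,
		      CPU_BOOT_ADDR_WMASK |
		      (uint32_t)(entry >> CPU_BOOT_ADDR_ALIGN));
}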
/*
 * When the system is reset while running, the CPUs should reboot from
 * the maskrom (a full system reboot), so the pmusgrf reset-hold bits
 * must be released.
 * When the system wakes from deep suspend, some SoCs are reset on
 * wake-up; the boot CPU must then reboot from pmusram, so the pmusgrf
 * reset-hold bits must be held.
 */
static inline void pmu_sgrf_rst_hld_release(void)
{
mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1),
CRU_PMU_SGRF_RST_RLS);
}
static inline void pmu_sgrf_rst_hld(void)
{
mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1),
CRU_PMU_SGRF_RST_HOLD);
}
/* functions */
void __dead2 soc_global_soft_reset(void);
void plls_resume(void);
void plls_suspend(void);
#endif /* __SOC_H__ */
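The prototypes above are the SoC-level hooks the power-management code is expected to call. A hedged sketch of the intended pairing around a system suspend; sys_suspend_prepare()/sys_resume_finish() are hypothetical callers, not functions added by this patch, and pmu_sgrf_rst_hld_release() belongs instead on the soc_global_soft_reset() path so the cores restart from the maskrom:

/* Hypothetical suspend/resume path built from the helpers declared above. */
static void sys_suspend_prepare(void)
{
	pmu_sgrf_rst_hld();	/* wake from deep suspend must re-enter via pmusram */
	plls_suspend();		/* save PLL state before it is lost */
}

static void sys_resume_finish(void)
{
	plls_resume();		/* restore the saved PLL configuration */
}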


@ -0,0 +1,151 @@
/*
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PLATFORM_DEF_H__
#define __PLATFORM_DEF_H__
#include <arch.h>
#include <common_def.h>
#include <rk3399_def.h>
#define DEBUG_XLAT_TABLE 0
/*******************************************************************************
* Platform binary types for linking
******************************************************************************/
#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
#define PLATFORM_LINKER_ARCH aarch64
/*******************************************************************************
* Generic platform constants
******************************************************************************/
/* Size of cacheable stacks */
#if DEBUG_XLAT_TABLE
#define PLATFORM_STACK_SIZE 0x800
#elif IMAGE_BL1
#define PLATFORM_STACK_SIZE 0x440
#elif IMAGE_BL2
#define PLATFORM_STACK_SIZE 0x400
#elif IMAGE_BL31
#define PLATFORM_STACK_SIZE 0x800
#elif IMAGE_BL32
#define PLATFORM_STACK_SIZE 0x440
#endif
#define FIRMWARE_WELCOME_STR "Booting Trusted Firmware\n"
#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL2
#define PLATFORM_SYSTEM_COUNT 1
#define PLATFORM_CLUSTER_COUNT 2
#define PLATFORM_CLUSTER0_CORE_COUNT 4
#define PLATFORM_CLUSTER1_CORE_COUNT 2
#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER1_CORE_COUNT + \
PLATFORM_CLUSTER0_CORE_COUNT)
#define PLATFORM_MAX_CPUS_PER_CLUSTER 4
#define PLATFORM_NUM_AFFS (PLATFORM_SYSTEM_COUNT + \
PLATFORM_CLUSTER_COUNT + \
PLATFORM_CORE_COUNT)
#define PLAT_MAX_PWR_LVL MPIDR_AFFLVL2
/*
* This macro defines the deepest retention state possible. A higher state
* id will represent an invalid or a power down state.
*/
#define PLAT_MAX_RET_STATE 1
/*
 * This macro defines the deepest power-down state possible. Any state ID
* higher than this is invalid.
*/
#define PLAT_MAX_OFF_STATE 2
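With PLAT_MAX_RET_STATE = 1 and PLAT_MAX_OFF_STATE = 2, a local state ID of 1 denotes retention, 2 denotes power-down, and anything higher is invalid. A small illustrative check of the kind a validate-power-state hook would perform; the function name here is not the hook this port registers with PSCI:

/* Illustrative only: classify a per-level local power state ID. */
static int classify_pwr_state(unsigned int state_id)
{
	if (state_id > PLAT_MAX_OFF_STATE)
		return -1;	/* invalid request */
	if (state_id > PLAT_MAX_RET_STATE)
		return 1;	/* power-down state */
	return 0;		/* run or retention state */
}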
/*******************************************************************************
* Platform memory map related constants
******************************************************************************/
/* TF text, RO and RW data; size: 512KB */
#define TZRAM_BASE (0x0)
#define TZRAM_SIZE (0x80000)
/*******************************************************************************
* BL31 specific defines.
******************************************************************************/
/*
* Put BL3-1 at the top of the Trusted RAM
*/
#define BL31_BASE (TZRAM_BASE + 0x8000)
#define BL31_LIMIT (TZRAM_BASE + TZRAM_SIZE)
/*******************************************************************************
* Platform specific page table and MMU setup constants
******************************************************************************/
#define ADDR_SPACE_SIZE (1ull << 32)
#define MAX_XLAT_TABLES 20
#define MAX_MMAP_REGIONS 16
/*******************************************************************************
* Declarations and constants to access the mailboxes safely. Each mailbox is
* aligned on the biggest cache line size in the platform. This is known only
* to the platform as it might have a combination of integrated and external
 * caches. Such alignment ensures that two mailboxes do not sit on the same cache
* line at any cache level. They could belong to different cpus/clusters &
* get written while being protected by different locks causing corruption of
* a valid mailbox address.
******************************************************************************/
#define CACHE_WRITEBACK_SHIFT 6
#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
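Following the comment above, each mailbox only needs to be padded and aligned to CACHE_WRITEBACK_GRANULE so that no two mailboxes can ever share a cache line. A minimal sketch using plain GCC attributes; the type and variable names are illustrative, not taken from this patch:

#include <stdint.h>

/* One mailbox per CPU, each occupying a full writeback granule. */
typedef struct {
	uint64_t value;
	uint8_t pad[CACHE_WRITEBACK_GRANULE - sizeof(uint64_t)];
} __attribute__((aligned(CACHE_WRITEBACK_GRANULE))) cpu_mailbox_t;

static cpu_mailbox_t mailboxes[PLATFORM_CORE_COUNT];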
/*
* Define GICD and GICC and GICR base
*/
#define PLAT_RK_GICD_BASE BASE_GICD_BASE
#define PLAT_RK_GICR_BASE BASE_GICR_BASE
#define PLAT_RK_GICC_BASE 0
/*
* Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
* terminology. On a GICv2 system or mode, the lists will be merged and treated
* as Group 0 interrupts.
*/
#define PLAT_RK_G1S_IRQS RK3399_G1S_IRQS
#define PLAT_RK_G0_IRQS RK3399_G0_IRQS
#define PLAT_RK_UART_BASE RK3399_UART2_BASE
#define PLAT_RK_UART_CLOCK RK3399_UART_CLOCK
#define PLAT_RK_UART_BAUDRATE RK3399_BAUDRATE
#define PLAT_RK_CCI_BASE CCI500_BASE
#define PLAT_RK_PRIMARY_CPU 0x0
#define RK_PLAT_AARCH_CFG RK_PLAT_CFG1
#endif /* __PLATFORM_DEF_H__ */


@ -0,0 +1,73 @@
#
# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
RK_PLAT := plat/rockchip
RK_PLAT_SOC := ${RK_PLAT}/${PLAT}
RK_PLAT_COMMON := ${RK_PLAT}/common
PLAT_INCLUDES := -I${RK_PLAT_COMMON}/ \
-I${RK_PLAT_COMMON}/include/ \
-I${RK_PLAT_COMMON}/pmusram \
-I${RK_PLAT_COMMON}/drivers/pmu/ \
-I${RK_PLAT_SOC}/ \
-I${RK_PLAT_SOC}/drivers/pmu/ \
-I${RK_PLAT_SOC}/drivers/soc/ \
				-I${RK_PLAT_SOC}/include/
RK_GIC_SOURCES := drivers/arm/gic/common/gic_common.c \
drivers/arm/gic/v3/gicv3_main.c \
drivers/arm/gic/v3/gicv3_helpers.c \
plat/common/plat_gicv3.c \
${RK_PLAT}/common/rockchip_gicv3.c
PLAT_BL_COMMON_SOURCES := lib/aarch64/xlat_tables.c \
plat/common/aarch64/plat_common.c \
plat/common/aarch64/plat_psci_common.c
BL31_SOURCES += ${RK_GIC_SOURCES} \
drivers/arm/cci/cci.c \
drivers/console/console.S \
drivers/ti/uart/16550_console.S \
drivers/delay_timer/delay_timer.c \
lib/cpus/aarch64/cortex_a53.S \
lib/cpus/aarch64/cortex_a72.S \
plat/common/aarch64/platform_mp_stack.S \
${RK_PLAT_COMMON}/aarch64/plat_helpers.S \
${RK_PLAT_COMMON}/bl31_plat_setup.c \
${RK_PLAT_COMMON}/pmusram/pmu_sram_cpus_on.S \
${RK_PLAT_COMMON}/pmusram/pmu_sram.c \
${RK_PLAT_COMMON}/plat_delay_timer.c \
${RK_PLAT_COMMON}/plat_pm.c \
${RK_PLAT_COMMON}/plat_topology.c \
${RK_PLAT_COMMON}/aarch64/platform_common.c \
${RK_PLAT_SOC}/drivers/pmu/pmu.c \
${RK_PLAT_SOC}/drivers/soc/soc.c
ENABLE_PLAT_COMPAT := 0
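With this makefile in place, BL31 for this port is typically built from the top-level Trusted Firmware tree with something like: make CROSS_COMPILE=aarch64-linux-gnu- PLAT=rk3399 bl31, where the CROSS_COMPILE prefix depends on the locally installed AArch64 toolchain.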


@ -0,0 +1,128 @@
/*
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PLAT_DEF_H__
#define __PLAT_DEF_H__
#define RK3399_PRIMARY_CPU 0x0
/* Special value used to verify platform parameters from BL2 to BL3-1 */
#define RK_BL31_PLAT_PARAM_VAL 0x0f1e2d3c4b5a6978ULL
#define SIZE_K(n) ((n) * 1024)
#define SIZE_M(n) ((n) * 1024 * 1024)
#define CCI500_BASE 0xffb00000
#define CCI500_SIZE SIZE_M(1)
#define GIC500_BASE 0xfee00000
#define GIC500_SIZE SIZE_M(2)
#define STIME_BASE 0xff860000
#define STIME_SIZE SIZE_K(64)
#define CRUS_BASE 0xff750000
#define CRUS_SIZE SIZE_K(128)
#define SGRF_BASE 0xff330000
#define SGRF_SIZE SIZE_K(64)
#define PMU_BASE 0xff310000
#define PMU_SIZE SIZE_K(64)
#define PMUSRAM_BASE 0xff3b0000
#define PMUSRAM_SIZE SIZE_K(64)
#define PMUSRAM_RSIZE SIZE_K(8)
/*
 * includes i2c pmu/audio, pwm0-3, rkpwm0-3, uart_dbg, mailbox, scr
 * 0xff650000 - 0xff6c0000
*/
#define PD_BUS0_BASE 0xff650000
#define PD_BUS0_SIZE 0x70000
#define PMUCRU_BASE 0xff750000
#define CRU_BASE 0xff760000
#define COLD_BOOT_BASE 0xffff0000
/**************************************************************************
* UART related constants
**************************************************************************/
#define RK3399_UART2_BASE (0xff1a0000)
#define RK3399_UART2_SIZE SIZE_K(64)
#define RK3399_BAUDRATE (1500000)
#define RK3399_UART_CLOCK (24000000)
/******************************************************************************
* System counter frequency related constants
******************************************************************************/
#define SYS_COUNTER_FREQ_IN_TICKS 24000000
#define SYS_COUNTER_FREQ_IN_MHZ 24
/* Base rockchip_platform compatible GIC memory map */
#define BASE_GICD_BASE (GIC500_BASE)
#define BASE_GICR_BASE (GIC500_BASE + SIZE_M(1))
/*****************************************************************************
 * CCI-500 related constants
******************************************************************************/
#define PLAT_RK_CCI_CLUSTER0_SL_IFACE_IX 0
#define PLAT_RK_CCI_CLUSTER1_SL_IFACE_IX 1
/******************************************************************************
* cpu up status
******************************************************************************/
#define PMU_CPU_HOTPLUG 0xdeadbeaf
#define PMU_CPU_AUTO_PWRDN 0xabcdef12
/******************************************************************************
* sgi, ppi
******************************************************************************/
#define ARM_IRQ_SEC_PHY_TIMER 29
#define ARM_IRQ_SEC_SGI_0 8
#define ARM_IRQ_SEC_SGI_1 9
#define ARM_IRQ_SEC_SGI_2 10
#define ARM_IRQ_SEC_SGI_3 11
#define ARM_IRQ_SEC_SGI_4 12
#define ARM_IRQ_SEC_SGI_5 13
#define ARM_IRQ_SEC_SGI_6 14
#define ARM_IRQ_SEC_SGI_7 15
/*
* Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
* terminology. On a GICv2 system or mode, the lists will be merged and treated
* as Group 0 interrupts.
*/
#define RK3399_G1S_IRQS ARM_IRQ_SEC_PHY_TIMER
#define RK3399_G0_IRQS ARM_IRQ_SEC_SGI_6
#endif /* __PLAT_DEF_H__ */