Merge branch 'integration' into tf_issue_461

Scott Branden 2017-04-29 08:36:12 -07:00 committed by GitHub
commit 0f22bef31d
133 changed files with 4305 additions and 2056 deletions

View File

@ -50,10 +50,14 @@ include ${MAKE_HELPERS_DIRECTORY}build_env.mk
# Default values for build configurations, and their dependencies
################################################################################
ifdef ASM_ASSERTION
$(warning ASM_ASSERTION is removed, use ENABLE_ASSERTIONS instead.)
endif
include ${MAKE_HELPERS_DIRECTORY}defaults.mk
# ASM_ASSERTION enabled for DEBUG builds only
ASM_ASSERTION := ${DEBUG}
# Assertions enabled for DEBUG builds by default
ENABLE_ASSERTIONS := ${DEBUG}
ENABLE_PMF := ${ENABLE_RUNTIME_INSTRUMENTATION}
PLAT := ${DEFAULT_PLAT}
@ -347,6 +351,11 @@ ifdef BL2_SOURCES
endif
endif
# If SCP_BL2 is given, we always want FIP to include it.
ifdef SCP_BL2
NEED_SCP_BL2 := yes
endif
# Process TBB related flags
ifneq (${GENERATE_COT},0)
# Common cert_create options
@ -434,13 +443,13 @@ endif
# Build options checks
################################################################################
$(eval $(call assert_boolean,ASM_ASSERTION))
$(eval $(call assert_boolean,COLD_BOOT_SINGLE_CPU))
$(eval $(call assert_boolean,CREATE_KEYS))
$(eval $(call assert_boolean,CTX_INCLUDE_AARCH32_REGS))
$(eval $(call assert_boolean,CTX_INCLUDE_FPREGS))
$(eval $(call assert_boolean,DEBUG))
$(eval $(call assert_boolean,DISABLE_PEDANTIC))
$(eval $(call assert_boolean,ENABLE_ASSERTIONS))
$(eval $(call assert_boolean,ENABLE_PLAT_COMPAT))
$(eval $(call assert_boolean,ENABLE_PMF))
$(eval $(call assert_boolean,ENABLE_PSCI_STAT))
@ -459,6 +468,7 @@ $(eval $(call assert_boolean,SEPARATE_CODE_AND_RODATA))
$(eval $(call assert_boolean,SPIN_ON_BL1_EXIT))
$(eval $(call assert_boolean,TRUSTED_BOARD_BOOT))
$(eval $(call assert_boolean,USE_COHERENT_MEM))
$(eval $(call assert_boolean,WARMBOOT_ENABLE_DCACHE_EARLY))
$(eval $(call assert_numeric,ARM_ARCH_MAJOR))
$(eval $(call assert_numeric,ARM_ARCH_MINOR))
@ -473,10 +483,10 @@ $(eval $(call add_define,ARM_CCI_PRODUCT_ID))
$(eval $(call add_define,ARM_ARCH_MAJOR))
$(eval $(call add_define,ARM_ARCH_MINOR))
$(eval $(call add_define,ARM_GIC_ARCH))
$(eval $(call add_define,ASM_ASSERTION))
$(eval $(call add_define,COLD_BOOT_SINGLE_CPU))
$(eval $(call add_define,CTX_INCLUDE_AARCH32_REGS))
$(eval $(call add_define,CTX_INCLUDE_FPREGS))
$(eval $(call add_define,ENABLE_ASSERTIONS))
$(eval $(call add_define,ENABLE_PLAT_COMPAT))
$(eval $(call add_define,ENABLE_PMF))
$(eval $(call add_define,ENABLE_PSCI_STAT))
@ -496,6 +506,7 @@ $(eval $(call add_define,SPD_${SPD}))
$(eval $(call add_define,SPIN_ON_BL1_EXIT))
$(eval $(call add_define,TRUSTED_BOARD_BOOT))
$(eval $(call add_define,USE_COHERENT_MEM))
$(eval $(call add_define,WARMBOOT_ENABLE_DCACHE_EARLY))
# Define the EL3_PAYLOAD_BASE flag only if it is provided.
ifdef EL3_PAYLOAD_BASE
@ -541,6 +552,10 @@ $(if ${BL2}, $(eval $(call MAKE_TOOL_ARGS,2,${BL2},tb-fw)),\
$(eval $(call MAKE_BL,2,tb-fw)))
endif
ifeq (${NEED_SCP_BL2},yes)
$(eval $(call FIP_ADD_IMG,SCP_BL2,--scp-fw))
endif
ifeq (${NEED_BL31},yes)
BL31_SOURCES += ${SPD_SOURCES}
$(if ${BL31}, $(eval $(call MAKE_TOOL_ARGS,31,${BL31},soc-fw)),\

View File

@ -109,7 +109,7 @@ register_t bl1_fwu_smc_handler(unsigned int smc_fid,
break;
}
SMC_RET0(handle);
SMC_RET1(handle, SMC_UNK);
}
/*******************************************************************************

View File

@ -34,6 +34,7 @@
#include <auth_mod.h>
#include <bl1.h>
#include <bl_common.h>
#include <console.h>
#include <debug.h>
#include <errata_report.h>
#include <platform.h>
@ -113,7 +114,7 @@ void bl1_main(void)
print_errata_status();
#if DEBUG
#if ENABLE_ASSERTIONS
u_register_t val;
/*
* Ensure that MMU/Caches and coherency are turned on
@ -140,7 +141,7 @@ void bl1_main(void)
assert(CACHE_WRITEBACK_GRANULE == SIZE_FROM_LOG2_WORDS(val));
else
assert(CACHE_WRITEBACK_GRANULE <= MAX_CACHE_LINE_SIZE);
#endif
#endif /* ENABLE_ASSERTIONS */
/* Perform remaining generic architectural setup from EL3 */
bl1_arch_setup();
@ -166,6 +167,8 @@ void bl1_main(void)
NOTICE("BL1-FWU: *******FWU Process Started*******\n");
bl1_prepare_next_image(image_id);
console_flush();
}
/*******************************************************************************

View File

@ -109,6 +109,10 @@ entry_point_info_t *bl2_load_images(void)
assert(bl2_to_next_bl_params->head);
assert(bl2_to_next_bl_params->h.type == PARAM_BL_PARAMS);
assert(bl2_to_next_bl_params->h.version >= VERSION_2);
assert(bl2_to_next_bl_params->head->ep_info);
/* Populate arg0 for the next BL image */
bl2_to_next_bl_params->head->ep_info->args.arg0 = (u_register_t)bl2_to_next_bl_params;
/* Flush the parameters to be passed to next image */
plat_flush_next_bl_params();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -32,6 +32,7 @@
#include <auth_mod.h>
#include <bl1.h>
#include <bl_common.h>
#include <console.h>
#include <debug.h>
#include <platform.h>
#include "bl2_private.h"
@ -69,6 +70,8 @@ void bl2_main(void)
disable_mmu_icache_secure();
#endif /* AARCH32 */
console_flush();
/*
* Run next BL image via an SMC to BL1. Information on how to pass
* control to the BL32 (if present) and BL33 software images will

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -34,6 +34,7 @@
#include <auth_mod.h>
#include <bl_common.h>
#include <bl1.h>
#include <console.h>
#include <debug.h>
#include <platform.h>
#include <platform_def.h>
@ -63,6 +64,8 @@ void bl2u_main(void)
/* Perform platform setup in BL2U after loading SCP_BL2U */
bl2u_platform_setup();
console_flush();
/*
* Indicate that BL2U is done and resume back to
* normal world via an SMC to BL1.

View File

@ -185,26 +185,27 @@ func bl31_warm_entrypoint
*
* The PSCI implementation invokes platform routines that enable CPUs to
* participate in coherency. On a system where CPUs are not
* cache-coherent out of reset, having caches enabled until such time
* might lead to coherency issues (resulting from stale data getting
* speculatively fetched, among others). Therefore we keep data caches
* disabled while enabling the MMU, thereby forcing data accesses to
* have non-cacheable, nGnRnE attributes (these will always be coherent
* with main memory).
* cache-coherent without appropriate platform-specific programming,
* having caches enabled until such time might lead to coherency issues
* (resulting from stale data getting speculatively fetched, among
* others). Therefore we keep data caches disabled even after enabling
* the MMU for such platforms.
*
* On systems with hardware-assisted coherency, where CPUs are expected
* to be cache-coherent out of reset without needing explicit software
* intervention, PSCI need not invoke platform routines to enter
* coherency (as CPUs already are); and there's no reason to have caches
* disabled either.
* On systems with hardware-assisted coherency, or on single-cluster
* platforms, such platform-specific programming is not required to
* enter coherency (as CPUs already are); and there's no reason to have
* caches disabled either.
*/
#if HW_ASSISTED_COHERENCY
mov x0, #0
#else
mov x0, #DISABLE_DCACHE
#endif
bl bl31_plat_enable_mmu
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
mrs x0, sctlr_el3
orr x0, x0, #SCTLR_C_BIT
msr sctlr_el3, x0
isb
#endif
bl psci_warmboot_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -349,6 +349,8 @@ func do_crash_reporting
/* Print some platform registers */
plat_crash_print_regs
bl plat_crash_console_flush
/* Done reporting */
no_ret plat_panic_handler
endfunc do_crash_reporting

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -33,6 +33,7 @@
#include <assert.h>
#include <bl_common.h>
#include <bl31.h>
#include <console.h>
#include <context_mgmt.h>
#include <debug.h>
#include <platform.h>
@ -129,6 +130,8 @@ void bl31_main(void)
*/
bl31_prepare_next_image_entry();
console_flush();
/*
* Perform any platform specific runtime setup prior to cold boot exit
* from BL31

View File

@ -236,24 +236,27 @@ func sp_min_warm_entrypoint
*
* The PSCI implementation invokes platform routines that enable CPUs to
* participate in coherency. On a system where CPUs are not
* cache-coherent out of reset, having caches enabled until such time
* might lead to coherency issues (resulting from stale data getting
* speculatively fetched, among others). Therefore we keep data caches
* disabled while enabling the MMU, thereby forcing data accesses to
* have non-cacheable, nGnRnE attributes (these will always be coherent
* with main memory).
* cache-coherent without appropriate platform-specific programming,
* having caches enabled until such time might lead to coherency issues
* (resulting from stale data getting speculatively fetched, among
* others). Therefore we keep data caches disabled even after enabling
* the MMU for such platforms.
*
* On systems where CPUs are cache-coherent out of reset, however, PSCI
* need not invoke platform routines to enter coherency (as CPUs already
* are), and there's no reason to have caches disabled either.
* On systems with hardware-assisted coherency, or on single-cluster
* platforms, such platform-specific programming is not required to
* enter coherency (as CPUs already are); and there's no reason to have
* caches disabled either.
*/
#if HW_ASSISTED_COHERENCY
mov r0, #0
#else
mov r0, #DISABLE_DCACHE
#endif
bl bl32_plat_enable_mmu
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
ldcopr r0, SCTLR
orr r0, r0, #SCTLR_C_BIT
stcopr r0, SCTLR
isb
#endif
bl sp_min_warm_boot
/* Program the registers in cpu_context and exit monitor mode */

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -70,9 +70,12 @@ func do_panic
/* Print new line */
ldr r4, =panic_end
bl asm_print_str
bl plat_crash_console_flush
1:
mov lr, r6
b plat_panic_handler
no_ret plat_panic_handler
endfunc do_panic
/***********************************************************
@ -87,7 +90,7 @@ func report_exception
no_ret plat_panic_handler
endfunc report_exception
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
.section .rodata.assert_str, "aS"
assert_msg1:
.asciz "ASSERT: File "
@ -104,6 +107,11 @@ assert_msg2:
* ---------------------------------------------------------------------------
*/
func asm_assert
#if LOG_LEVEL >= LOG_LEVEL_INFO
/*
* Only print the output if LOG_LEVEL is higher than or equal to
* LOG_LEVEL_INFO, which is the default value for builds with DEBUG=1.
*/
/* Stash the parameters already in r0 and r1 */
mov r5, r0
mov r6, r1
@ -140,10 +148,14 @@ dec_print_loop:
udiv r5, r5, r6 /* Reduce divisor */
cmp r5, #0
bne dec_print_loop
bl plat_crash_console_flush
1:
#endif /* LOG_LEVEL >= LOG_LEVEL_INFO */
no_ret plat_panic_handler
endfunc asm_assert
#endif
#endif /* ENABLE_ASSERTIONS */
/*
* This function prints a string from address in r4

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -41,7 +41,7 @@
/* The offset to add to get ascii for numerals '0 - 9' */
#define ASCII_OFFSET_NUM 0x30
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
.section .rodata.assert_str, "aS"
assert_msg1:
.asciz "ASSERT: File "
@ -78,6 +78,11 @@ dec_print_loop:
* ---------------------------------------------------------------------------
*/
func asm_assert
#if LOG_LEVEL >= LOG_LEVEL_INFO
/*
* Only print the output if LOG_LEVEL is higher than or equal to
* LOG_LEVEL_INFO, which is the default value for builds with DEBUG=1.
*/
mov x5, x0
mov x6, x1
/* Ensure the console is initialized */
@ -96,10 +101,12 @@ func asm_assert
b.ne _assert_loop
mov x4, x6
asm_print_line_dec
bl plat_crash_console_flush
_assert_loop:
b _assert_loop
#endif /* LOG_LEVEL >= LOG_LEVEL_INFO */
no_ret plat_panic_handler
endfunc asm_assert
#endif
#endif /* ENABLE_ASSERTIONS */
/*
* This function prints a string from address in x4.
@ -187,6 +194,8 @@ el3_panic:
sub x4, x4, #4
bl asm_print_hex
bl plat_crash_console_flush
_panic_handler:
/* Pass to plat_panic_handler the address from where el3_panic was
* called, not the address of the call from el3_panic. */

View File

@ -47,8 +47,11 @@ static bl_params_t next_bl_params;
******************************************************************************/
void flush_bl_params_desc(void)
{
flush_dcache_range((unsigned long)bl_mem_params_desc_ptr,
flush_dcache_range((uintptr_t)bl_mem_params_desc_ptr,
sizeof(*bl_mem_params_desc_ptr) * bl_mem_params_desc_num);
flush_dcache_range((uintptr_t)&next_bl_params,
sizeof(next_bl_params));
}
/*******************************************************************************
@ -209,12 +212,5 @@ bl_params_t *get_next_bl_params_from_mem_params_desc(void)
/* Invalid image is expected to terminate the loop */
assert(img_id == INVALID_IMAGE_ID);
/* Populate arg0 for the next BL image */
next_bl_params.head->ep_info->args.arg0 = (unsigned long)&next_bl_params;
/* Flush the parameters to be passed to the next BL image */
flush_dcache_range((unsigned long)&next_bl_params,
sizeof(next_bl_params));
return &next_bl_params;
}

View File

@ -2242,6 +2242,17 @@ designated crash console. It must only use general purpose registers x1 and
x2 to do its work. The parameter and the return value are in general purpose
register x0.
### Function : plat_crash_console_flush
Argument : void
Return : int
This API is used by the crash reporting mechanism to force a write of all buffered
data on the designated crash console. It should only use general purpose
registers x0 and x1 to do its work. The return value is 0 on successful
completion; otherwise the return value is -1.
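As an illustration only (not part of this commit), a platform could back this
API with the `console_core_flush` routine that this commit adds to the console
drivers. `PLAT_CRASH_UART_BASE` below is a hypothetical platform-defined
constant, and real implementations are typically written in assembly so the
x0/x1 register constraint can be honoured:

    int plat_crash_console_flush(void)
    {
        /* console_core_flush() returns 0 on success and -1 on error. */
        return console_core_flush(PLAT_CRASH_UART_BASE);
    }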
4. Build flags
---------------

View File

@ -203,11 +203,6 @@ performed.
in MPIDR is set and access the bit-fields in MPIDR accordingly. Default
value of this flag is 0.
* `ASM_ASSERTION`: This flag determines whether the assertion checks within
assembly source files are enabled or not. This option defaults to the
value of `DEBUG` - that is, by default this is only enabled for a debug
build of the firmware.
* `BL2`: This is an optional build option which specifies the path to BL2
image for the `fip` target. In this case, the BL2 in the ARM Trusted
Firmware will not be built.
@ -286,6 +281,14 @@ performed.
payload. Please refer to the "Booting an EL3 payload" section for more
details.
* `ENABLE_ASSERTIONS`: This option controls whether or not calls to `assert()`
are compiled out. For debug builds, this option defaults to 1, and calls to
`assert()` are left in place. For release builds, this option defaults to 0
and calls to the `assert()` function are compiled out. This option can be set
independently of `DEBUG`. It can also be used to hide any auxiliary code
that is only required for the assertion and does not fit in the assertion
itself.
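As a sketch of that last case, mirroring the TZC DMC-500 driver change later in
this diff, the setup code for an assertion can be fenced together with the
assertion itself so that both are compiled out when `ENABLE_ASSERTIONS` is 0:

    #if ENABLE_ASSERTIONS
        /* This read exists only to feed the assertion below; both are
           removed from builds with ENABLE_ASSERTIONS=0. */
        unsigned int dmc_id = _tzc_read_peripheral_id(dmc_base);
        assert(dmc_id == DMC500_PERIPHERAL_ID);
    #endif /* ENABLE_ASSERTIONS */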
* `ENABLE_PMF`: Boolean option to enable support for optional Performance
Measurement Framework(PMF). Default is 0.
@ -349,7 +352,8 @@ performed.
initiate the operations, and the rest is managed in hardware, minimizing
active software management. In such systems, this boolean option enables ARM
Trusted Firmware to carry out build and run-time optimizations during boot
and power management operations. This option defaults to 0.
and power management operations. This option defaults to 0. If it is
enabled, it implies that `WARMBOOT_ENABLE_DCACHE_EARLY` is also enabled.
* `LOAD_IMAGE_V2`: Boolean option to enable support for new version (v2) of
image loading, which provides more flexibility and scalability around what
@ -508,6 +512,12 @@ performed.
to a string formed by concatenating the version number, build type and build
string.
* `WARMBOOT_ENABLE_DCACHE_EARLY` : Boolean option to enable the D-cache early on
the CPU after warm boot. This is applicable to platforms which do not
require interconnect programming to enable cache coherency (e.g. single-cluster
platforms). If this option is enabled, the warm boot path enables
D-caches immediately after enabling the MMU. This option defaults to 0.
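In C terms, the warm-boot change in this commit (shown in assembly in the BL31
and SP_MIN entry points earlier in this diff) amounts to roughly the following
sketch, assuming the usual TF system-register accessor helpers; the same `#if`
also realizes the `HW_ASSISTED_COHERENCY` implication described above:

    #if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
        /* Turn the data cache on as soon as the MMU has been enabled. */
        write_sctlr_el3(read_sctlr_el3() | SCTLR_C_BIT);
        isb();
    #endif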
#### ARM development platform specific build options
* `ARM_BL31_IN_DRAM`: Boolean option to select loading of BL31 in TZC secured
@ -568,6 +578,10 @@ performed.
- `tdram` : Trusted DRAM (if available)
- `dram` : Secure region in DRAM (configured by the TrustZone controller)
* `ARM_XLAT_TABLES_LIB_V1`: Boolean option to compile the Trusted Firmware
with version 1 of the translation tables library instead of version 2. It is
set to 0 by default, which selects version 2.
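The option is consumed by the small wrapper header `arm_xlat_tables.h` added by
this commit (shown later in this diff), which selects the library version at
preprocessing time:

    #if ARM_XLAT_TABLES_LIB_V1
    #include <xlat_tables.h>
    #else
    #include <xlat_tables_v2.h>
    #endif /* ARM_XLAT_TABLES_LIB_V1 */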
For a better understanding of these options, the ARM development platform memory
map is explained in the [Firmware Design].

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -39,7 +39,7 @@ static uintptr_t g_cci_base;
static unsigned int g_max_master_id;
static const int *g_cci_slave_if_map;
#if DEBUG
#if ENABLE_ASSERTIONS
static int validate_cci_map(const int *map)
{
unsigned int valid_cci_map = 0;
@ -54,26 +54,25 @@ static int validate_cci_map(const int *map)
continue;
if (slave_if_id >= CCI_SLAVE_INTERFACE_COUNT) {
tf_printf("Slave interface ID is invalid\n");
ERROR("Slave interface ID is invalid\n");
return 0;
}
if (valid_cci_map & (1 << slave_if_id)) {
tf_printf("Multiple masters are assigned same"
" slave interface ID\n");
ERROR("Multiple masters are assigned same slave interface ID\n");
return 0;
}
valid_cci_map |= 1 << slave_if_id;
}
if (!valid_cci_map) {
tf_printf("No master is assigned a valid slave interface\n");
ERROR("No master is assigned a valid slave interface\n");
return 0;
}
return 1;
}
#endif /* DEBUG */
#endif /* ENABLE_ASSERTIONS */
void cci_init(uintptr_t cci_base,
const int *map,

View File

@ -81,7 +81,7 @@ static inline void ccn_reg_write(uintptr_t periphbase,
mmio_write_64(region_base + register_offset, value);
}
#if DEBUG
#if ENABLE_ASSERTIONS
typedef struct rn_info {
unsigned char node_desc[MAX_RN_NODES];
@ -224,7 +224,7 @@ static void ccn_validate_plat_params(const ccn_desc_t *plat_desc)
info.node_desc[node_id]--;
}
}
#endif /* DEBUG */
#endif /* ENABLE_ASSERTIONS */
/*******************************************************************************
* This function validates parameters passed by the platform (in a debug build)
@ -234,7 +234,7 @@ static void ccn_validate_plat_params(const ccn_desc_t *plat_desc)
******************************************************************************/
void ccn_init(const ccn_desc_t *plat_desc)
{
#if DEBUG
#if ENABLE_ASSERTIONS
ccn_validate_plat_params(plat_desc);
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -40,6 +40,7 @@
.globl console_core_init
.globl console_core_putc
.globl console_core_getc
.globl console_core_flush
/* -----------------------------------------------
@ -158,3 +159,29 @@ getc_error:
mov r0, #-1
bx lr
endfunc console_core_getc
/* ---------------------------------------------
* int console_core_flush(uintptr_t base_addr)
* Function to force a write of all buffered
* data that hasn't been output.
* In : r0 - console base address
* Out : return -1 on error else return 0.
* Clobber list : r0, r1
* ---------------------------------------------
*/
func console_core_flush
cmp r0, #0
beq flush_error
1:
/* Loop while the transmit FIFO is busy */
ldr r1, [r0, #UARTFR]
tst r1, #PL011_UARTFR_BUSY
bne 1b
mov r0, #0
bx lr
flush_error:
mov r0, #-1
bx lr
endfunc console_core_flush

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -41,6 +41,7 @@
.globl console_core_init
.globl console_core_putc
.globl console_core_getc
.globl console_core_flush
/* -----------------------------------------------
@ -151,3 +152,27 @@ getc_error:
mov w0, #-1
ret
endfunc console_core_getc
/* ---------------------------------------------
* int console_core_flush(uintptr_t base_addr)
* Function to force a write of all buffered
* data that hasn't been output.
* In : x0 - console base address
* Out : return -1 on error else return 0.
* Clobber list : x0, x1
* ---------------------------------------------
*/
func console_core_flush
cbz x0, flush_error
1:
/* Loop until the transmit FIFO is empty */
ldr w1, [x0, #UARTFR]
tbnz w1, #PL011_UARTFR_BUSY_BIT, 1b
mov w0, #0
ret
flush_error:
mov w0, #-1
ret
endfunc console_core_flush

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -33,7 +33,7 @@
#include <mmio.h>
#include <stddef.h>
#include <tzc400.h>
#include "tzc_common_private.c"
#include "tzc_common_private.h"
/*
* Macros which will be used by common core functions.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -28,6 +28,9 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __TZC_COMMON_PRIVATE_H__
#define __TZC_COMMON_PRIVATE_H__
#include <arch.h>
#include <arch_helpers.h>
#include <mmio.h>
@ -190,8 +193,9 @@
nsaid_permissions); \
}
#if DEBUG
static unsigned int _tzc_read_peripheral_id(uintptr_t base)
#if ENABLE_ASSERTIONS
static inline unsigned int _tzc_read_peripheral_id(uintptr_t base)
{
unsigned int id;
@ -203,7 +207,7 @@ static unsigned int _tzc_read_peripheral_id(uintptr_t base)
}
#ifdef AARCH32
static unsigned long long _tzc_get_max_top_addr(int addr_width)
static inline unsigned long long _tzc_get_max_top_addr(int addr_width)
{
/*
* Assume at least 32 bit wide address and initialize the max.
@ -232,4 +236,6 @@ static unsigned long long _tzc_get_max_top_addr(int addr_width)
(UINT64_MAX >> (64 - (addr_width)))
#endif /* AARCH32 */
#endif
#endif /* ENABLE_ASSERTIONS */
#endif /* __TZC_COMMON_PRIVATE_H__ */

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -33,7 +33,7 @@
#include <mmio.h>
#include <tzc_dmc500.h>
#include "tzc_common.h"
#include "tzc_common_private.c"
#include "tzc_common_private.h"
/*
* Macros which will be used by common core functions.
@ -257,7 +257,7 @@ void tzc_dmc500_set_action(tzc_action_t action)
static void validate_plat_driver_data(
const tzc_dmc500_driver_data_t *plat_driver_data)
{
#if DEBUG
#if ENABLE_ASSERTIONS
int i;
unsigned int dmc_id;
uintptr_t dmc_base;
@ -273,7 +273,7 @@ static void validate_plat_driver_data(
dmc_id = _tzc_read_peripheral_id(dmc_base);
assert(dmc_id == DMC500_PERIPHERAL_ID);
}
#endif /* DEBUG */
#endif /* ENABLE_ASSERTIONS */
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -31,9 +31,10 @@
#include <asm_macros.S>
#include <cadence/cdns_uart.h>
.globl console_core_init
.globl console_core_putc
.globl console_core_getc
.globl console_core_init
.globl console_core_putc
.globl console_core_getc
.globl console_core_flush
/* -----------------------------------------------
* int console_core_init(unsigned long base_addr,
@ -125,3 +126,18 @@ getc_error:
mov w0, #-1
ret
endfunc console_core_getc
/* ---------------------------------------------
* int console_core_flush(uintptr_t base_addr)
* Function to force a write of all buffered
* data that hasn't been output.
* In : x0 - console base address
* Out : return -1 on error else return 0.
* Clobber list : x0, x1
* ---------------------------------------------
*/
func console_core_flush
/* Placeholder */
mov w0, #0
ret
endfunc console_core_flush

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -33,6 +33,7 @@
.globl console_uninit
.globl console_putc
.globl console_getc
.globl console_flush
/*
* The console base is in the data section and not in .bss
@ -112,3 +113,18 @@ func console_getc
ldr r0, [r1]
b console_core_getc
endfunc console_getc
/* ---------------------------------------------
* int console_flush(void)
* Function to force a write of all buffered
* data that hasn't been output. It returns 0
* upon successful completion; otherwise it
* returns -1.
* Clobber list : r0, r1
* ---------------------------------------------
*/
func console_flush
ldr r1, =console_base
ldr r0, [r1]
b console_core_flush
endfunc console_flush

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -38,6 +38,7 @@
.globl console_core_init
.globl console_core_putc
.globl console_core_getc
.globl console_core_flush
/* -----------------------------------------------
* int console_core_init(uintptr_t base_addr,
@ -109,3 +110,23 @@ getc_error:
mov r0, #-1
bx lr
endfunc console_core_getc
/* ---------------------------------------------
* int console_core_flush(uintptr_t base_addr)
* Function to force a write of all buffered
* data that hasn't been output.
* In : r0 - console base address
* Out : return -1 on error else return 0.
* Clobber list : r0, r1
* ---------------------------------------------
*/
func console_core_flush
cmp r0, #0
beq flush_error
/* Insert implementation here */
mov r0, #0
bx lr
flush_error:
mov r0, #-1
bx lr
endfunc console_core_flush

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -33,6 +33,7 @@
.globl console_uninit
.globl console_putc
.globl console_getc
.globl console_flush
/*
* The console base is in the data section and not in .bss
@ -111,3 +112,18 @@ func console_getc
ldr x0, [x1, :lo12:console_base]
b console_core_getc
endfunc console_getc
/* ---------------------------------------------
* int console_flush(void)
* Function to force a write of all buffered
* data that hasn't been output. It returns 0
* upon successful completion; otherwise it
* returns -1.
* Clobber list : x0, x1
* ---------------------------------------------
*/
func console_flush
adrp x1, console_base
ldr x0, [x1, :lo12:console_base]
b console_core_flush
endfunc console_flush

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -38,6 +38,7 @@
.globl console_core_init
.globl console_core_putc
.globl console_core_getc
.globl console_core_flush
/* -----------------------------------------------
* int console_core_init(uintptr_t base_addr,
@ -104,3 +105,22 @@ getc_error:
mov w0, #-1
ret
endfunc console_core_getc
/* ---------------------------------------------
* int console_core_flush(uintptr_t base_addr)
* Function to force a write of all buffered
* data that hasn't been output.
* In : x0 - console base address
* Out : return -1 on error else return 0.
* Clobber list : x0, x1
* ---------------------------------------------
*/
func console_core_flush
cbz x0, flush_error
/* Insert implementation here */
mov w0, #0
ret
flush_error:
mov w0, #-1
ret
endfunc console_core_flush

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -51,8 +51,8 @@ static const io_dev_info_t *devices[MAX_IO_DEVICES];
/* Number of currently registered devices */
static unsigned int dev_count;
#if DEBUG /* Extra validation functions only used in debug builds */
/* Extra validation functions only used when asserts are enabled */
#if ENABLE_ASSERTIONS
/* Return a boolean value indicating whether a device connector is valid */
static int is_valid_dev_connector(const io_dev_connector_t *dev_con)
@ -89,7 +89,8 @@ static int is_valid_seek_mode(io_seek_mode_t mode)
return ((mode != IO_SEEK_INVALID) && (mode < IO_SEEK_MAX));
}
#endif /* End of debug-only validation functions */
#endif /* ENABLE_ASSERTIONS */
/* End of extra validation functions only used when asserts are enabled */
/* Open a connection to a specific device */

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -35,6 +35,7 @@
.globl console_core_init
.globl console_core_putc
.globl console_core_getc
.globl console_core_flush
/* -----------------------------------------------
* int console_core_init(unsigned long base_addr,
@ -114,9 +115,6 @@ func console_core_putc
b.ne 1b
mov w2, #0xD /* '\r' */
str w2, [x1, #UARTTX]
ldr w2, [x1, #UARTFCR]
orr w2, w2, #UARTFCR_TXCLR
str w2, [x1, #UARTFCR]
/* Check if the transmit FIFO is full */
2: ldr w2, [x1, #UARTLSR]
@ -124,9 +122,6 @@ func console_core_putc
cmp w2, #(UARTLSR_TEMT | UARTLSR_THRE)
b.ne 2b
str w0, [x1, #UARTTX]
ldr w2, [x1, #UARTFCR]
orr w2, w2, #UARTFCR_TXCLR
str w2, [x1, #UARTFCR]
ret
putc_error:
mov w0, #-1
@ -153,3 +148,18 @@ getc_error:
mov w0, #-1
ret
endfunc console_core_getc
/* ---------------------------------------------
* int console_core_flush(uintptr_t base_addr)
* Function to force a write of all buffered
* data that hasn't been output.
* In : x0 - console base address
* Out : return -1 on error else return 0.
* Clobber list : x0, x1
* ---------------------------------------------
*/
func console_core_flush
/* Placeholder */
mov w0, #0
ret
endfunc console_core_flush

View File

@ -134,4 +134,37 @@
.space SPINLOCK_ASM_SIZE
.endm
/*
* Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
* and the top 32 bits of `_val` into `_reg_h`. If either the bottom
* or top word of `_val` is zero, the corresponding OR operation
* is skipped.
*/
.macro orr64_imm _reg_l, _reg_h, _val
.if (\_val >> 32)
orr \_reg_h, \_reg_h, #(\_val >> 32)
.endif
.if (\_val & 0xffffffff)
orr \_reg_l, \_reg_l, #(\_val & 0xffffffff)
.endif
.endm
/*
* Helper macro to bitwise-clear bits in `_reg_l` and
* `_reg_h` given a 64 bit immediate `_val`. The set bits
* in the bottom word of `_val` dictate which bits from
* `_reg_l` should be cleared. Similarly, the set bits in
* the top word of `_val` dictate which bits from `_reg_h`
* should be cleared. If either the bottom or top word of
* `_val` is zero, the corresponding BIC operation is skipped.
*/
.macro bic64_imm _reg_l, _reg_h, _val
.if (\_val >> 32)
bic \_reg_h, \_reg_h, #(\_val >> 32)
.endif
.if (\_val & 0xffffffff)
bic \_reg_l, \_reg_l, #(\_val & 0xffffffff)
.endif
.endm
#endif /* __ASM_MACROS_S__ */

View File

@ -148,7 +148,7 @@
_init_memory, _init_c_runtime, _exception_vectors
/* Make sure we are in Secure Mode */
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
ldcopr r0, SCR
tst r0, #SCR_NS_BIT
ASM_ASSERT(eq)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -38,6 +38,7 @@ int console_init(uintptr_t base_addr,
void console_uninit(void);
int console_putc(int c);
int console_getc(void);
int console_flush(void);
#endif /* __CONSOLE_H__ */

View File

@ -394,12 +394,14 @@
#define HCR p15, 4, c1, c1, 0
#define HCPTR p15, 4, c1, c1, 2
#define CNTHCTL p15, 4, c14, c1, 0
#define CNTKCTL p15, 0, c14, c1, 0
#define VPIDR p15, 4, c0, c0, 0
#define VMPIDR p15, 4, c0, c0, 5
#define ISR p15, 0, c12, c1, 0
#define CLIDR p15, 1, c0, c0, 1
#define CSSELR p15, 2, c0, c0, 0
#define CCSIDR p15, 1, c0, c0, 0
#define DBGOSDLR p14, 0, c1, c3, 4
/* Debug register defines. The format is: coproc, opt1, CRn, CRm, opt2 */
#define HDCR p15, 4, c1, c1, 1

View File

@ -209,6 +209,8 @@ DEFINE_SYSOP_FUNC(wfe)
DEFINE_SYSOP_FUNC(sev)
DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
DEFINE_SYSOP_TYPE_FUNC(dmb, st)
DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
DEFINE_SYSOP_TYPE_FUNC(dmb, ish)

View File

@ -261,6 +261,16 @@
#define DISABLE_ALL_EXCEPTIONS \
(DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
/*
* RMR_EL3 definitions
*/
#define RMR_EL3_RR_BIT (1 << 1)
#define RMR_EL3_AA64_BIT (1 << 0)
/*
* HI-VECTOR address for AArch32 state
*/
#define HI_VECTOR_BASE (0xFFFF0000)
/*
* TCR definitions
@ -419,6 +429,10 @@
#define EC_BITS(x) (x >> ESR_EC_SHIFT) & ESR_EC_MASK
/* Reset bit inside the Reset management register for EL3 (RMR_EL3) */
#define RMR_RESET_REQUEST_SHIFT 0x1u
#define RMR_WARM_RESET_CPU (1u << RMR_RESET_REQUEST_SHIFT)
/*******************************************************************************
* Definitions of register offsets, fields and macros for CPU system
* instructions.

View File

@ -0,0 +1,92 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __CORTEX_A53_H__
#define __CORTEX_A53_H__
/* Cortex-A53 midr for revision 0 */
#define CORTEX_A53_MIDR 0x410FD030
/* Retention timer tick definitions */
#define RETENTION_ENTRY_TICKS_2 0x1
#define RETENTION_ENTRY_TICKS_8 0x2
#define RETENTION_ENTRY_TICKS_32 0x3
#define RETENTION_ENTRY_TICKS_64 0x4
#define RETENTION_ENTRY_TICKS_128 0x5
#define RETENTION_ENTRY_TICKS_256 0x6
#define RETENTION_ENTRY_TICKS_512 0x7
/*******************************************************************************
* CPU Extended Control register specific definitions.
******************************************************************************/
#define CPUECTLR p15, 1, c15 /* Instruction def. */
#define CPUECTLR_SMP_BIT (1 << 6)
#define CPUECTLR_CPU_RET_CTRL_SHIFT 0
#define CPUECTLR_CPU_RET_CTRL_MASK (0x7 << CPUECTLR_CPU_RET_CTRL_SHIFT)
#define CPUECTLR_FPU_RET_CTRL_SHIFT 3
#define CPUECTLR_FPU_RET_CTRL_MASK (0x7 << CPUECTLR_FPU_RET_CTRL_SHIFT)
/*******************************************************************************
* CPU Memory Error Syndrome register specific definitions.
******************************************************************************/
#define CPUMERRSR p15, 2, c15 /* Instruction def. */
/*******************************************************************************
* CPU Auxiliary Control register specific definitions.
******************************************************************************/
#define CPUACTLR p15, 0, c15 /* Instruction def. */
#define CPUACTLR_DTAH (1 << 24)
/*******************************************************************************
* L2 Auxiliary Control register specific definitions.
******************************************************************************/
#define L2ACTLR p15, 1, c15, c0, 0 /* Instruction def. */
#define L2ACTLR_ENABLE_UNIQUECLEAN (1 << 14)
#define L2ACTLR_DISABLE_CLEAN_PUSH (1 << 3)
/*******************************************************************************
* L2 Extended Control register specific definitions.
******************************************************************************/
#define L2ECTLR p15, 1, c9, c0, 3 /* Instruction def. */
#define L2ECTLR_RET_CTRL_SHIFT 0
#define L2ECTLR_RET_CTRL_MASK (0x7 << L2ECTLR_RET_CTRL_SHIFT)
/*******************************************************************************
* L2 Memory Error Syndrome register specific definitions.
******************************************************************************/
#define L2MERRSR p15, 3, c15 /* Instruction def. */
#endif /* __CORTEX_A53_H__ */

View File

@ -0,0 +1,103 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __CORTEX_A57_H__
#define __CORTEX_A57_H__
/* Cortex-A57 midr for revision 0 */
#define CORTEX_A57_MIDR 0x410FD070
/* Retention timer tick definitions */
#define RETENTION_ENTRY_TICKS_2 0x1
#define RETENTION_ENTRY_TICKS_8 0x2
#define RETENTION_ENTRY_TICKS_32 0x3
#define RETENTION_ENTRY_TICKS_64 0x4
#define RETENTION_ENTRY_TICKS_128 0x5
#define RETENTION_ENTRY_TICKS_256 0x6
#define RETENTION_ENTRY_TICKS_512 0x7
/*******************************************************************************
* CPU Extended Control register specific definitions.
******************************************************************************/
#define CPUECTLR p15, 1, c15 /* Instruction def. */
#define CPUECTLR_SMP_BIT (1 << 6)
#define CPUECTLR_DIS_TWD_ACC_PFTCH_BIT (1 << 38)
#define CPUECTLR_L2_IPFTCH_DIST_MASK (0x3 << 35)
#define CPUECTLR_L2_DPFTCH_DIST_MASK (0x3 << 32)
#define CPUECTLR_CPU_RET_CTRL_SHIFT 0
#define CPUECTLR_CPU_RET_CTRL_MASK (0x7 << CPUECTLR_CPU_RET_CTRL_SHIFT)
/*******************************************************************************
* CPU Memory Error Syndrome register specific definitions.
******************************************************************************/
#define CPUMERRSR p15, 2, c15 /* Instruction def. */
/*******************************************************************************
* CPU Auxiliary Control register specific definitions.
******************************************************************************/
#define CPUACTLR p15, 0, c15 /* Instruction def. */
#define CPUACTLR_DIS_LOAD_PASS_DMB (1 << 59)
#define CPUACTLR_GRE_NGRE_AS_NGNRE (1 << 54)
#define CPUACTLR_DIS_OVERREAD (1 << 52)
#define CPUACTLR_NO_ALLOC_WBWA (1 << 49)
#define CPUACTLR_DCC_AS_DCCI (1 << 44)
#define CPUACTLR_FORCE_FPSCR_FLUSH (1 << 38)
#define CPUACTLR_DIS_STREAMING (3 << 27)
#define CPUACTLR_DIS_L1_STREAMING (3 << 25)
#define CPUACTLR_DIS_INDIRECT_PREDICTOR (1 << 4)
/*******************************************************************************
* L2 Control register specific definitions.
******************************************************************************/
#define L2CTLR p15, 1, c9, c0, 3 /* Instruction def. */
#define L2CTLR_DATA_RAM_LATENCY_SHIFT 0
#define L2CTLR_TAG_RAM_LATENCY_SHIFT 6
#define L2_DATA_RAM_LATENCY_3_CYCLES 0x2
#define L2_TAG_RAM_LATENCY_3_CYCLES 0x2
/*******************************************************************************
* L2 Extended Control register specific definitions.
******************************************************************************/
#define L2ECTLR p15, 1, c9, c0, 3 /* Instruction def. */
#define L2ECTLR_RET_CTRL_SHIFT 0
#define L2ECTLR_RET_CTRL_MASK (0x7 << L2ECTLR_RET_CTRL_SHIFT)
/*******************************************************************************
* L2 Memory Error Syndrome register specific definitions.
******************************************************************************/
#define L2MERRSR p15, 3, c15 /* Instruction def. */
#endif /* __CORTEX_A57_H__ */

View File

@ -0,0 +1,78 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __CORTEX_A72_H__
#define __CORTEX_A72_H__
/* Cortex-A72 midr for revision 0 */
#define CORTEX_A72_MIDR 0x410FD080
/*******************************************************************************
* CPU Extended Control register specific definitions.
******************************************************************************/
#define CPUECTLR p15, 1, c15 /* Instruction def. */
#define CPUECTLR_SMP_BIT (1 << 6)
#define CPUECTLR_DIS_TWD_ACC_PFTCH_BIT (1 << 38)
#define CPUECTLR_L2_IPFTCH_DIST_MASK (0x3 << 35)
#define CPUECTLR_L2_DPFTCH_DIST_MASK (0x3 << 32)
/*******************************************************************************
* CPU Memory Error Syndrome register specific definitions.
******************************************************************************/
#define CPUMERRSR p15, 2, c15 /* Instruction def. */
/*******************************************************************************
* CPU Auxiliary Control register specific definitions.
******************************************************************************/
#define CPUACTLR p15, 0, c15 /* Instruction def. */
#define CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH (1 << 56)
#define CPUACTLR_NO_ALLOC_WBWA (1 << 49)
#define CPUACTLR_DCC_AS_DCCI (1 << 44)
/*******************************************************************************
* L2 Control register specific definitions.
******************************************************************************/
#define L2CTLR p15, 1, c9, c0, 3 /* Instruction def. */
#define L2CTLR_DATA_RAM_LATENCY_SHIFT 0
#define L2CTLR_TAG_RAM_LATENCY_SHIFT 6
#define L2_DATA_RAM_LATENCY_3_CYCLES 0x2
#define L2_TAG_RAM_LATENCY_2_CYCLES 0x1
#define L2_TAG_RAM_LATENCY_3_CYCLES 0x2
/*******************************************************************************
* L2 Memory Error Syndrome register specific definitions.
******************************************************************************/
#define L2MERRSR p15, 3, c15 /* Instruction def. */
#endif /* __CORTEX_A72_H__ */

View File

@ -87,7 +87,7 @@ void cm_set_context_by_mpidr(uint64_t mpidr,
******************************************************************************/
static inline void cm_set_next_context(void *context)
{
#if DEBUG
#if ENABLE_ASSERTIONS
uint64_t sp_mode;
/*
@ -98,7 +98,7 @@ static inline void cm_set_next_context(void *context)
: "=r" (sp_mode));
assert(sp_mode == MODE_SP_EL0);
#endif
#endif /* ENABLE_ASSERTIONS */
__asm__ volatile("msr spsel, #1\n"
"mov sp, %0\n"

View File

@ -58,6 +58,7 @@
#define SMC_64 1
#define SMC_32 0
#define SMC_OK 0
#define SMC_UNK 0xffffffff
#define SMC_TYPE_FAST ULL(1)
#define SMC_TYPE_STD 0

View File

@ -34,30 +34,27 @@
* @(#)assert.h 8.2 (Berkeley) 1/21/94
* $FreeBSD$
*/
#include <sys/cdefs.h>
/*
* Unlike other ANSI header files, <assert.h> may usefully be included
* multiple times, with and without NDEBUG defined.
* Portions copyright (c) 2017, ARM Limited and Contributors.
* All rights reserved.
*/
#undef assert
#undef _assert
#ifdef NDEBUG
#define assert(e) ((void)0)
#define _assert(e) ((void)0)
#else
#define _assert(e) assert(e)
#define assert(e) ((e) ? (void)0 : __assert(__func__, __FILE__, \
__LINE__, #e))
#endif /* NDEBUG */
#ifndef _ASSERT_H_
#define _ASSERT_H_
#include <sys/cdefs.h>
#if ENABLE_ASSERTIONS
#define _assert(e) assert(e)
#define assert(e) ((e) ? (void)0 : __assert(__func__, __FILE__, \
__LINE__, #e))
#else
#define assert(e) ((void)0)
#define _assert(e) ((void)0)
#endif /* ENABLE_ASSERTIONS */
__BEGIN_DECLS
void __assert(const char *, const char *, int, const char *) __dead2;
__END_DECLS
#endif /* !_ASSERT_H_ */

View File

@ -0,0 +1,43 @@
/*
* Copyright (c) 2000 Jeroen Ruigrok van der Werven <asmodai@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __bool_true_false_are_defined
#define __bool_true_false_are_defined 1
#ifndef __cplusplus
#define false 0
#define true 1
#define bool _Bool
#if __STDC_VERSION__ < 199901L && __GNUC__ < 3 && !defined(__INTEL_COMPILER)
typedef int _Bool;
#endif
#endif /* !__cplusplus */
#endif /* __bool_true_false_are_defined */

View File

@ -108,7 +108,7 @@ typedef struct mmap_region {
/* Generic translation table APIs */
void init_xlat_tables(void);
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
size_t size, unsigned int attr);
size_t size, mmap_attr_t attr);
void mmap_add(const mmap_region_t *mm);
#endif /*__ASSEMBLY__*/

View File

@ -114,7 +114,7 @@ void init_xlat_tables(void);
* be added before initializing the MMU and cannot be removed later.
*/
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
size_t size, unsigned int attr);
size_t size, mmap_attr_t attr);
/*
* Add a region with defined base PA and base VA. This type of region can be
@ -128,7 +128,7 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
* EPERM: It overlaps another region in an invalid way.
*/
int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
size_t size, unsigned int attr);
size_t size, mmap_attr_t attr);
/*
* Add an array of static regions with defined base PA and base VA. This type

View File

@ -30,7 +30,7 @@
#ifndef __V2M_DEF_H__
#define __V2M_DEF_H__
#include <xlat_tables_v2.h>
#include <arm_xlat_tables.h>
/* V2M motherboard system registers & offsets */

View File

@ -0,0 +1,35 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#if ARM_XLAT_TABLES_LIB_V1
#include <xlat_tables.h>
#else
#include <xlat_tables_v2.h>
#endif /* ARM_XLAT_TABLES_LIB_V1 */

View File

@ -30,6 +30,7 @@
#ifndef __PLAT_ARM_H__
#define __PLAT_ARM_H__
#include <arm_xlat_tables.h>
#include <bakery_lock.h>
#include <cassert.h>
#include <cpu_data.h>
@ -80,7 +81,7 @@ void arm_setup_page_tables(uintptr_t total_base,
#else
/*
* Empty macros for all other BL stages other than BL31
* Empty macros for all BL stages other than BL31 and BL32
*/
#define ARM_INSTANTIATE_LOCK
#define arm_lock_init()
@ -156,6 +157,7 @@ void arm_bl2_platform_setup(void);
void arm_bl2_plat_arch_setup(void);
uint32_t arm_get_spsr_for_bl32_entry(void);
uint32_t arm_get_spsr_for_bl33_entry(void);
int arm_bl2_handle_post_image_load(unsigned int image_id);
/* BL2U utility functions */
void arm_bl2u_early_platform_setup(struct meminfo *mem_layout,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -35,11 +35,15 @@
#include <psci.h>
#include <types.h>
/* System power domain at level 2, as currently implemented by CSS platforms */
#define CSS_SYSTEM_PWR_DMN_LVL ARM_PWR_LVL2
/* Macros to read the CSS power domain state */
#define CSS_CORE_PWR_STATE(state) (state)->pwr_domain_state[ARM_PWR_LVL0]
#define CSS_CLUSTER_PWR_STATE(state) (state)->pwr_domain_state[ARM_PWR_LVL1]
#define CSS_SYSTEM_PWR_STATE(state) ((PLAT_MAX_PWR_LVL > ARM_PWR_LVL1) ?\
(state)->pwr_domain_state[ARM_PWR_LVL2] : 0)
#define CSS_SYSTEM_PWR_STATE(state) \
((PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL) ?\
(state)->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] : 0)
int css_pwr_domain_on(u_register_t mpidr);
void css_pwr_domain_on_finish(const psci_power_state_t *target_state);

View File

@ -96,9 +96,16 @@
/*
* Required platform porting definitions common to all ARM CSS SoCs
*/
#if JUNO_AARCH32_EL3_RUNTIME
/*
* The following change is required to initialize TZC
* for enabling access to the HI_VECTOR (0xFFFF0000)
* location needed for JUNO AARCH32 support.
*/
#define PLAT_ARM_SCP_TZC_DRAM1_SIZE ULL(0x8000)
#else
/* 2MB used for SCP DDR retraining */
#define PLAT_ARM_SCP_TZC_DRAM1_SIZE ULL(0x00200000)
#endif
#endif /* __SOC_CSS_DEF_H__ */

View File

@@ -100,6 +100,7 @@ uintptr_t plat_get_my_stack(void);
void plat_report_exception(unsigned int exception_type);
int plat_crash_console_init(void);
int plat_crash_console_putc(int c);
int plat_crash_console_flush(void);
void plat_error_handler(int err) __dead2;
void plat_panic_handler(void) __dead2;
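A minimal sketch of the new flush hook for porters, assuming a PL011-style UART where flag-register bit 3 (BUSY) is set while transmission is in progress; the base address and register names below are assumptions, not part of this patch:

#include <mmio.h>

#define EXAMPLE_UART_BASE	0x1c090000	/* hypothetical UART base */
#define EXAMPLE_UARTFR		0x018		/* PL011-style flag register */
#define EXAMPLE_FR_BUSY		(1 << 3)

int plat_crash_console_flush(void)
{
	/* Spin until all buffered characters have left the UART. */
	while (mmio_read_32(EXAMPLE_UART_BASE + EXAMPLE_UARTFR) &
			EXAMPLE_FR_BUSY)
		;
	return 0;
}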

View File

@@ -162,7 +162,7 @@ endfunc zeromem
* --------------------------------------------------------------------------
*/
func memcpy4
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
orr r3, r0, r1
tst r3, #0x3
ASM_ASSERT(eq)

View File

@@ -215,7 +215,7 @@ func zeromem_dczva
tmp1 .req x4
tmp2 .req x5
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
/*
* Check for M bit (MMU enabled) of the current SCTLR_EL(1|3)
* register value and panic if the MMU is disabled.
@@ -228,7 +228,7 @@ func zeromem_dczva
tst tmp1, #SCTLR_M_BIT
ASM_ASSERT(ne)
#endif /* ASM_ASSERTION */
#endif /* ENABLE_ASSERTIONS */
/* stop_address is the address past the last to zero */
add stop_address, cursor, length
@@ -247,7 +247,7 @@ func zeromem_dczva
mov tmp2, #(1 << 2)
lsl block_size, tmp2, block_size
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
/*
* Assumes block size is at least 16 bytes to avoid manual realignment
* of the cursor at the end of the DCZVA loop.
@@ -444,7 +444,7 @@ endfunc zeromem_dczva
* --------------------------------------------------------------------------
*/
func memcpy16
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
orr x3, x0, x1
tst x3, #0xf
ASM_ASSERT(eq)
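In C terms, the OR-and-test idiom above checks both pointers for 16-byte alignment in a single assertion (an illustrative rendering, not code from this patch):

#include <assert.h>
#include <stdint.h>

static void check_align16(const void *dst, const void *src)
{
	/* Any misaligned bit in either address survives the OR. */
	assert((((uintptr_t)dst | (uintptr_t)src) & 0xf) == 0);
}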

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -35,7 +35,7 @@
func aem_generic_core_pwr_dwn
/* Assert if cache is enabled */
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
@@ -51,7 +51,7 @@ endfunc aem_generic_core_pwr_dwn
func aem_generic_cluster_pwr_dwn
/* Assert if cache is enabled */
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -76,7 +76,7 @@ func cortex_a32_core_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
@@ -107,7 +107,7 @@ func cortex_a32_cluster_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)

View File

@@ -0,0 +1,141 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cortex_a53.h>
#include <cpu_macros.S>
#include <debug.h>
/* ---------------------------------------------
* Disable intra-cluster coherency
* ---------------------------------------------
*/
func cortex_a53_disable_smp
ldcopr16 r0, r1, CPUECTLR
bic64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
isb
dsb sy
bx lr
endfunc cortex_a53_disable_smp
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A53.
* -------------------------------------------------
*/
func cortex_a53_reset_func
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
*/
ldcopr16 r0, r1, CPUECTLR
orr64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
isb
bx lr
endfunc cortex_a53_reset_func
/* ----------------------------------------------------
* The CPU Ops core power down function for Cortex-A53.
* ----------------------------------------------------
*/
func cortex_a53_core_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
/* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level1
/* ---------------------------------------------
* Come out of intra cluster coherency
* ---------------------------------------------
*/
pop {r12, lr}
b cortex_a53_disable_smp
endfunc cortex_a53_core_pwr_dwn
/* -------------------------------------------------------
* The CPU Ops cluster power down function for Cortex-A53.
* Clobbers: r0-r3
* -------------------------------------------------------
*/
func cortex_a53_cluster_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
/* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level1
/* ---------------------------------------------
* Disable the optional ACP.
* ---------------------------------------------
*/
bl plat_disable_acp
/* ---------------------------------------------
* Flush L2 caches.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level2
/* ---------------------------------------------
* Come out of intra cluster coherency
* ---------------------------------------------
*/
pop {r12, lr}
b cortex_a53_disable_smp
endfunc cortex_a53_cluster_pwr_dwn
declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
cortex_a53_reset_func, \
cortex_a53_core_pwr_dwn, \
cortex_a53_cluster_pwr_dwn

View File

@@ -0,0 +1,192 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cortex_a57.h>
#include <cpu_macros.S>
#include <debug.h>
/* ---------------------------------------------
* Disable intra-cluster coherency
* Clobbers: r0-r1
* ---------------------------------------------
*/
func cortex_a57_disable_smp
ldcopr16 r0, r1, CPUECTLR
bic64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
bx lr
endfunc cortex_a57_disable_smp
/* ---------------------------------------------
* Disable all types of L2 prefetches.
* Clobbers: r0-r2
* ---------------------------------------------
*/
func cortex_a57_disable_l2_prefetch
ldcopr16 r0, r1, CPUECTLR
orr64_imm r0, r1, CPUECTLR_DIS_TWD_ACC_PFTCH_BIT
bic64_imm r0, r1, (CPUECTLR_L2_IPFTCH_DIST_MASK | \
CPUECTLR_L2_DPFTCH_DIST_MASK)
stcopr16 r0, r1, CPUECTLR
isb
dsb ish
bx lr
endfunc cortex_a57_disable_l2_prefetch
/* ---------------------------------------------
* Disable debug interfaces
* ---------------------------------------------
*/
func cortex_a57_disable_ext_debug
mov r0, #1
stcopr r0, DBGOSDLR
isb
dsb sy
bx lr
endfunc cortex_a57_disable_ext_debug
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A57.
* -------------------------------------------------
*/
func cortex_a57_reset_func
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
*/
ldcopr16 r0, r1, CPUECTLR
orr64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
isb
bx lr
endfunc cortex_a57_reset_func
/* ----------------------------------------------------
* The CPU Ops core power down function for Cortex-A57.
* ----------------------------------------------------
*/
func cortex_a57_core_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
/* ---------------------------------------------
* Disable the L2 prefetches.
* ---------------------------------------------
*/
bl cortex_a57_disable_l2_prefetch
/* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level1
/* ---------------------------------------------
* Come out of intra cluster coherency
* ---------------------------------------------
*/
bl cortex_a57_disable_smp
/* ---------------------------------------------
* Force the debug interfaces to be quiescent
* ---------------------------------------------
*/
pop {r12, lr}
b cortex_a57_disable_ext_debug
endfunc cortex_a57_core_pwr_dwn
/* -------------------------------------------------------
* The CPU Ops cluster power down function for Cortex-A57.
* Clobbers: r0-r3
* -------------------------------------------------------
*/
func cortex_a57_cluster_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
/* ---------------------------------------------
* Disable the L2 prefetches.
* ---------------------------------------------
*/
bl cortex_a57_disable_l2_prefetch
/* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level1
/* ---------------------------------------------
* Disable the optional ACP.
* ---------------------------------------------
*/
bl plat_disable_acp
/* ---------------------------------------------
* Flush L2 caches.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level2
/* ---------------------------------------------
* Come out of intra cluster coherency
* ---------------------------------------------
*/
bl cortex_a57_disable_smp
/* ---------------------------------------------
* Force the debug interfaces to be quiescent
* ---------------------------------------------
*/
pop {r12, lr}
b cortex_a57_disable_ext_debug
endfunc cortex_a57_cluster_pwr_dwn
declare_cpu_ops cortex_a57, CORTEX_A57_MIDR, \
cortex_a57_reset_func, \
cortex_a57_core_pwr_dwn, \
cortex_a57_cluster_pwr_dwn

View File

@@ -0,0 +1,216 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cortex_a72.h>
#include <cpu_macros.S>
#include <debug.h>
/* ---------------------------------------------
* Disable all types of L2 prefetches.
* ---------------------------------------------
*/
func cortex_a72_disable_l2_prefetch
ldcopr16 r0, r1, CPUECTLR
orr64_imm r0, r1, CPUECTLR_DIS_TWD_ACC_PFTCH_BIT
bic64_imm r0, r1, (CPUECTLR_L2_IPFTCH_DIST_MASK | \
CPUECTLR_L2_DPFTCH_DIST_MASK)
stcopr16 r0, r1, CPUECTLR
isb
bx lr
endfunc cortex_a72_disable_l2_prefetch
/* ---------------------------------------------
* Disable the load-store hardware prefetcher.
* ---------------------------------------------
*/
func cortex_a72_disable_hw_prefetcher
ldcopr16 r0, r1, CPUACTLR
orr64_imm r0, r1, CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH
stcopr16 r0, r1, CPUACTLR
isb
dsb ish
bx lr
endfunc cortex_a72_disable_hw_prefetcher
/* ---------------------------------------------
* Disable intra-cluster coherency
* Clobbers: r0-r1
* ---------------------------------------------
*/
func cortex_a72_disable_smp
ldcopr16 r0, r1, CPUECTLR
bic64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
bx lr
endfunc cortex_a72_disable_smp
/* ---------------------------------------------
* Disable debug interfaces
* ---------------------------------------------
*/
func cortex_a72_disable_ext_debug
mov r0, #1
stcopr r0, DBGOSDLR
isb
dsb sy
bx lr
endfunc cortex_a72_disable_ext_debug
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A72.
* -------------------------------------------------
*/
func cortex_a72_reset_func
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
*/
ldcopr16 r0, r1, CPUECTLR
orr64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
isb
bx lr
endfunc cortex_a72_reset_func
/* ----------------------------------------------------
* The CPU Ops core power down function for Cortex-A72.
* ----------------------------------------------------
*/
func cortex_a72_core_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
/* ---------------------------------------------
* Disable the L2 prefetches.
* ---------------------------------------------
*/
bl cortex_a72_disable_l2_prefetch
/* ---------------------------------------------
* Disable the load-store hardware prefetcher.
* ---------------------------------------------
*/
bl cortex_a72_disable_hw_prefetcher
/* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level1
/* ---------------------------------------------
* Come out of intra cluster coherency
* ---------------------------------------------
*/
bl cortex_a72_disable_smp
/* ---------------------------------------------
* Force the debug interfaces to be quiescent
* ---------------------------------------------
*/
pop {r12, lr}
b cortex_a72_disable_ext_debug
endfunc cortex_a72_core_pwr_dwn
/* -------------------------------------------------------
* The CPU Ops cluster power down function for Cortex-A72.
* -------------------------------------------------------
*/
func cortex_a72_cluster_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
/* ---------------------------------------------
* Disable the L2 prefetches.
* ---------------------------------------------
*/
bl cortex_a72_disable_l2_prefetch
/* ---------------------------------------------
* Disable the load-store hardware prefetcher.
* ---------------------------------------------
*/
bl cortex_a72_disable_hw_prefetcher
#if !SKIP_A72_L1_FLUSH_PWR_DWN
/* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level1
#endif
/* ---------------------------------------------
* Disable the optional ACP.
* ---------------------------------------------
*/
bl plat_disable_acp
/* -------------------------------------------------
* Flush the L2 caches.
* -------------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level2
/* ---------------------------------------------
* Come out of intra cluster coherency
* ---------------------------------------------
*/
bl cortex_a72_disable_smp
/* ---------------------------------------------
* Force the debug interfaces to be quiescent
* ---------------------------------------------
*/
pop {r12, lr}
b cortex_a72_disable_ext_debug
endfunc cortex_a72_cluster_pwr_dwn
declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \
cortex_a72_reset_func, \
cortex_a72_core_pwr_dwn, \
cortex_a72_cluster_pwr_dwn

View File

@@ -53,7 +53,7 @@ func reset_handler
/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
bl get_cpu_ops_ptr
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
cmp r0, #0
ASM_ASSERT(ne)
#endif
@@ -92,7 +92,7 @@ func prepare_cpu_pwr_dwn
pop {r2, lr}
ldr r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
cmp r0, #0
ASM_ASSERT(ne)
#endif
@@ -118,7 +118,7 @@ func init_cpu_ops
cmp r1, #0
bne 1f
bl get_cpu_ops_ptr
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
cmp r0, #0
ASM_ASSERT(ne)
#endif

View File

@@ -55,7 +55,7 @@ func reset_handler
/* Get the matching cpu_ops pointer */
bl get_cpu_ops_ptr
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
cmp x0, #0
ASM_ASSERT(ne)
#endif
@@ -94,7 +94,7 @@ func prepare_cpu_pwr_dwn
mrs x1, tpidr_el3
ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
cmp x0, #0
ASM_ASSERT(ne)
#endif
@@ -120,7 +120,7 @@ func init_cpu_ops
cbnz x0, 1f
mov x10, x30
bl get_cpu_ops_ptr
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
cmp x0, #0
ASM_ASSERT(ne)
#endif

View File

@@ -165,7 +165,7 @@ void psci_cpu_on_finish(unsigned int cpu_idx,
*/
psci_plat_pm_ops->pwr_domain_on_finish(state_info);
#if !HW_ASSISTED_COHERENCY
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
/*
* Arch. management: Enable data cache and manage stack memory
*/

View File

@@ -302,7 +302,7 @@ void psci_cpu_suspend_finish(unsigned int cpu_idx,
*/
psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
#if !HW_ASSISTED_COHERENCY
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
/* Arch. management: Enable the data cache, stack memory maintenance. */
psci_do_pwrup_cache_maintenance();
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -31,6 +31,7 @@
#include <stddef.h>
#include <arch_helpers.h>
#include <assert.h>
#include <console.h>
#include <debug.h>
#include <platform.h>
#include "psci_private.h"
@@ -46,6 +47,8 @@ void psci_system_off(void)
psci_spd_pm->svc_system_off();
}
console_flush();
/* Call the platform specific hook */
psci_plat_pm_ops->system_off();
@@ -63,6 +66,8 @@ void psci_system_reset(void)
psci_spd_pm->svc_system_reset();
}
console_flush();
/* Call the platform specific hook */
psci_plat_pm_ops->system_reset();

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -28,14 +28,22 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <console.h>
#include <debug.h>
#include <platform.h>
/*
* This is a basic implementation. This could be improved.
*/
void __assert (const char *function, const char *file, unsigned int line,
void __assert(const char *function, const char *file, unsigned int line,
const char *assertion)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
/*
* Only print the output if LOG_LEVEL is higher than or equal to
* LOG_LEVEL_INFO, which is the default value for builds with DEBUG=1.
*/
tf_printf("ASSERT: %s <%d> : %s\n", function, line, assertion);
while(1);
console_flush();
#endif
plat_panic_handler();
}
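For context, a sketch of how an assert() macro typically routes into __assert() when assertions are compiled in; the exact macro lives in the C library headers and this wiring is an assumption, not part of this hunk:

#if ENABLE_ASSERTIONS
#define assert(e)	((e) ? (void)0 : \
			 __assert(__func__, __FILE__, __LINE__, #e))
#else
#define assert(e)	((void)0)
#endif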

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -85,13 +85,13 @@
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
#if DEBUG
#if ENABLE_ASSERTIONS
static unsigned long long get_max_supported_pa(void)
{
/* Physical address space size for long descriptor format. */
return (1ULL << 40) - 1ULL;
}
#endif
#endif /* ENABLE_ASSERTIONS */
void init_xlat_tables(void)
{

View File

@@ -127,7 +127,7 @@ static unsigned long long calc_physical_addr_size_bits(
return TCR_PS_BITS_4GB;
}
#if DEBUG
#if ENABLE_ASSERTIONS
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
@@ -144,7 +144,7 @@ static unsigned long long get_max_supported_pa(void)
return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}
#endif
#endif /* ENABLE_ASSERTIONS */
void init_xlat_tables(void)
{
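A worked example of the lookup above, using the architectural PARange encodings (values 0 through 5 map to 32/36/40/42/44/48 bits); the helper names below are hypothetical:

#include <stdint.h>

static const unsigned int example_pa_bits[] = {32, 36, 40, 42, 44, 48};

static unsigned long long example_max_pa(unsigned int pa_range)
{
	return (1ULL << example_pa_bits[pa_range]) - 1ULL;
}

/* example_max_pa(2) == 0xFFFFFFFFFFULL: a 40-bit (1TB) physical address
 * space, the same fixed limit used by the AArch32 variant above. */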

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -87,7 +87,7 @@ void print_mmap(void)
}
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
size_t size, unsigned int attr)
size_t size, mmap_attr_t attr)
{
mmap_region_t *mm = mmap;
mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1;
@@ -109,7 +109,7 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
assert((base_pa + (unsigned long long)size - 1ULL) <=
(PLAT_PHY_ADDR_SPACE_SIZE - 1));
#if DEBUG
#if ENABLE_ASSERTIONS
/* Check for PAs and VAs overlaps with all other regions */
for (mm = mmap; mm->size; ++mm) {
@@ -154,7 +154,7 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
mm = mmap; /* Restore pointer to the start of the array */
#endif /* DEBUG */
#endif /* ENABLE_ASSERTIONS */
/* Find correct place in mmap to insert new region */
while (mm->base_va < base_va && mm->size)
@@ -199,7 +199,7 @@ void mmap_add(const mmap_region_t *mm)
}
}
static uint64_t mmap_desc(unsigned attr, unsigned long long addr_pa,
static uint64_t mmap_desc(mmap_attr_t attr, unsigned long long addr_pa,
int level)
{
uint64_t desc;
@@ -277,11 +277,11 @@ static uint64_t mmap_desc(unsigned attr, unsigned long long addr_pa,
* attributes of the innermost region that contains it. If there are partial
* overlaps, it returns -1, as a smaller size is needed.
*/
static int mmap_region_attr(mmap_region_t *mm, uintptr_t base_va,
static mmap_attr_t mmap_region_attr(mmap_region_t *mm, uintptr_t base_va,
size_t size)
{
/* Don't assume that the area is contained in the first region */
int attr = -1;
mmap_attr_t attr = -1;
/*
* Get attributes from last (innermost) region that contains the
@@ -360,7 +360,8 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
* there are partially overlapping regions. On success,
* it will return the innermost region's attributes.
*/
int attr = mmap_region_attr(mm, base_va, level_size);
mmap_attr_t attr = mmap_region_attr(mm, base_va,
level_size);
if (attr >= 0) {
desc = mmap_desc(attr,
base_va - mm->base_va + mm->base_pa,

View File

@@ -37,13 +37,13 @@
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
#if DEBUG
#if ENABLE_ASSERTIONS
static unsigned long long xlat_arch_get_max_supported_pa(void)
{
/* Physical address space size for long descriptor format. */
return (1ull << 40) - 1ull;
}
#endif /* DEBUG */
#endif /* ENABLE_ASSERTIONS */
int is_mmu_enabled(void)
{
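The hunk elides the body; for reference, the AArch32 check reduces to reading the SCTLR M bit (a sketch assuming the usual read_sctlr() helper and SCTLR_M_BIT definition):

int is_mmu_enabled(void)
{
	return (read_sctlr() & SCTLR_M_BIT) != 0;
}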

View File

@@ -77,7 +77,7 @@ static unsigned long long calc_physical_addr_size_bits(
return TCR_PS_BITS_4GB;
}
#if DEBUG
#if ENABLE_ASSERTIONS
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
@@ -94,7 +94,7 @@ unsigned long long xlat_arch_get_max_supported_pa(void)
return (1ull << pa_range_bits_arr[pa_range]) - 1ull;
}
#endif /* DEBUG */
#endif /* ENABLE_ASSERTIONS */
int is_mmu_enabled(void)
{

View File

@@ -92,7 +92,7 @@ xlat_ctx_t tf_xlat_ctx = {
};
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
size_t size, unsigned int attr)
size_t size, mmap_attr_t attr)
{
mmap_region_t mm = {
.base_va = base_va,
@@ -114,7 +114,7 @@ void mmap_add(const mmap_region_t *mm)
#if PLAT_XLAT_TABLES_DYNAMIC
int mmap_add_dynamic_region(unsigned long long base_pa,
uintptr_t base_va, size_t size, unsigned int attr)
uintptr_t base_va, size_t size, mmap_attr_t attr)
{
mmap_region_t mm = {
.base_va = base_va,

View File

@@ -115,7 +115,7 @@ static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
/* Returns a block/page table descriptor for the given level and attributes. */
static uint64_t xlat_desc(unsigned int attr, unsigned long long addr_pa,
static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa,
int level)
{
uint64_t desc;
@@ -609,7 +609,7 @@ void print_mmap(mmap_region_t *const mmap)
*/
static int mmap_add_region_check(xlat_ctx_t *ctx, unsigned long long base_pa,
uintptr_t base_va, size_t size,
unsigned int attr)
mmap_attr_t attr)
{
mmap_region_t *mm = ctx->mmap;
unsigned long long end_pa = base_pa + size - 1;

View File

@@ -154,3 +154,9 @@ USE_COHERENT_MEM := 1
# Build verbosity
V := 0
# Whether to enable D-Cache early during warm boot. This is usually
# applicable for platforms wherein interconnect programming is not
# required to enable cache coherency after warm reset (e.g. single cluster
# platforms).
WARMBOOT_ENABLE_DCACHE_EARLY := 0

View File

@@ -79,6 +79,9 @@ const mmap_region_t plat_arm_mmap[] = {
#endif
#ifdef IMAGE_BL32
const mmap_region_t plat_arm_mmap[] = {
#ifdef AARCH32
ARM_MAP_SHARED_RAM,
#endif
V2M_MAP_IOFPGA,
CSS_MAP_DEVICE,
SOC_CSS_MAP_DEVICE,

View File

@@ -0,0 +1,216 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <cortex_a53.h>
#include <cortex_a57.h>
#include <cortex_a72.h>
#include <v2m_def.h>
#include "../juno_def.h"
.globl plat_reset_handler
.globl plat_arm_calc_core_pos
#define JUNO_REVISION(rev) REV_JUNO_R##rev
#define JUNO_HANDLER(rev) plat_reset_handler_juno_r##rev
#define JUMP_TO_HANDLER_IF_JUNO_R(revision) \
jump_to_handler JUNO_REVISION(revision), JUNO_HANDLER(revision)
/* --------------------------------------------------------------------
* Helper macro to jump to the given handler if the board revision
* matches.
* Expects the Juno board revision in r0.
* --------------------------------------------------------------------
*/
.macro jump_to_handler _revision, _handler
cmp r0, #\_revision
beq \_handler
.endm
/* --------------------------------------------------------------------
* Helper macro that reads the part number of the current CPU and jumps
* to the given label if it matches the CPU MIDR provided.
*
* Clobbers r0 and r1.
* --------------------------------------------------------------------
*/
.macro jump_if_cpu_midr _cpu_midr, _label
ldcopr r0, MIDR
ubfx r0, r0, #MIDR_PN_SHIFT, #12
ldr r1, =((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
cmp r0, r1
beq \_label
.endm
/* --------------------------------------------------------------------
* Platform reset handler for Juno R0.
*
* Juno R0 has the following topology:
* - Quad core Cortex-A53 processor cluster;
* - Dual core Cortex-A57 processor cluster.
*
* This handler does the following:
* - Implement workaround for defect id 831273 by enabling an event
* stream every 65536 cycles.
* - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A57
* - Set the L2 Tag RAM latency to 2 (i.e. 3 cycles) for Cortex-A57
* --------------------------------------------------------------------
*/
func JUNO_HANDLER(0)
/* --------------------------------------------------------------------
* Enable the event stream every 65536 cycles
* --------------------------------------------------------------------
*/
mov r0, #(0xf << EVNTI_SHIFT)
orr r0, r0, #EVNTEN_BIT
stcopr r0, CNTKCTL
/* --------------------------------------------------------------------
* Nothing else to do on Cortex-A53.
* --------------------------------------------------------------------
*/
jump_if_cpu_midr CORTEX_A53_MIDR, 1f
/* --------------------------------------------------------------------
* Cortex-A57 specific settings
* --------------------------------------------------------------------
*/
mov r0, #((L2_DATA_RAM_LATENCY_3_CYCLES << L2CTLR_DATA_RAM_LATENCY_SHIFT) | \
(L2_TAG_RAM_LATENCY_3_CYCLES << L2CTLR_TAG_RAM_LATENCY_SHIFT))
stcopr r0, L2CTLR
1:
isb
bx lr
endfunc JUNO_HANDLER(0)
/* --------------------------------------------------------------------
* Platform reset handler for Juno R1.
*
* Juno R1 has the following topology:
* - Quad core Cortex-A53 processor cluster;
* - Dual core Cortex-A57 processor cluster.
*
* This handler does the following:
* - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A57
*
* Note that:
* - The default value for the L2 Tag RAM latency for Cortex-A57 is
* suitable.
* - Defect #831273 doesn't affect Juno R1.
* --------------------------------------------------------------------
*/
func JUNO_HANDLER(1)
/* --------------------------------------------------------------------
* Nothing to do on Cortex-A53.
* --------------------------------------------------------------------
*/
jump_if_cpu_midr CORTEX_A57_MIDR, A57
bx lr
A57:
/* --------------------------------------------------------------------
* Cortex-A57 specific settings
* --------------------------------------------------------------------
*/
mov r0, #(L2_DATA_RAM_LATENCY_3_CYCLES << L2CTLR_DATA_RAM_LATENCY_SHIFT)
stcopr r0, L2CTLR
isb
bx lr
endfunc JUNO_HANDLER(1)
/* --------------------------------------------------------------------
* Platform reset handler for Juno R2.
*
* Juno R2 has the following topology:
* - Quad core Cortex-A53 processor cluster;
* - Dual core Cortex-A72 processor cluster.
*
* This handler does the following:
* - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A72
* - Set the L2 Tag RAM latency to 1 (i.e. 2 cycles) for Cortex-A72
*
* Note that:
* - Defect #831273 doesn't affect Juno R2.
* --------------------------------------------------------------------
*/
func JUNO_HANDLER(2)
/* --------------------------------------------------------------------
* Nothing to do on Cortex-A53.
* --------------------------------------------------------------------
*/
jump_if_cpu_midr CORTEX_A72_MIDR, A72
bx lr
A72:
/* --------------------------------------------------------------------
* Cortex-A72 specific settings
* --------------------------------------------------------------------
*/
mov r0, #((L2_DATA_RAM_LATENCY_3_CYCLES << L2CTLR_DATA_RAM_LATENCY_SHIFT) | \
(L2_TAG_RAM_LATENCY_2_CYCLES << L2CTLR_TAG_RAM_LATENCY_SHIFT))
stcopr r0, L2CTLR
isb
bx lr
endfunc JUNO_HANDLER(2)
/* --------------------------------------------------------------------
* void plat_reset_handler(void);
*
* Determine the Juno board revision and call the appropriate reset
* handler.
* --------------------------------------------------------------------
*/
func plat_reset_handler
/* Read the V2M SYS_ID register */
ldr r0, =(V2M_SYSREGS_BASE + V2M_SYS_ID)
ldr r1, [r0]
/* Extract board revision from the SYS_ID */
ubfx r0, r1, #V2M_SYS_ID_REV_SHIFT, #4
JUMP_TO_HANDLER_IF_JUNO_R(0)
JUMP_TO_HANDLER_IF_JUNO_R(1)
JUMP_TO_HANDLER_IF_JUNO_R(2)
/* Board revision is not supported */
no_ret plat_panic_handler
endfunc plat_reset_handler
/* -----------------------------------------------------
* unsigned int plat_arm_calc_core_pos(u_register_t mpidr)
* Helper function to calculate the core position.
* -----------------------------------------------------
*/
func plat_arm_calc_core_pos
b css_calc_core_pos_swap_cluster
endfunc plat_arm_calc_core_pos

View File

@@ -34,12 +34,18 @@
#include <cortex_a53.h>
#include <cortex_a57.h>
#include <cortex_a72.h>
#include <cpu_macros.S>
#include <css_def.h>
#include <v2m_def.h>
#include "../juno_def.h"
.globl plat_reset_handler
.globl plat_arm_calc_core_pos
#if JUNO_AARCH32_EL3_RUNTIME
.globl plat_get_my_entrypoint
.globl juno_reset_to_aarch32_state
#endif
#define JUNO_REVISION(rev) REV_JUNO_R##rev
#define JUNO_HANDLER(rev) plat_reset_handler_juno_r##rev
@@ -205,6 +211,20 @@ func plat_reset_handler
endfunc plat_reset_handler
/* -----------------------------------------------------
* void juno_do_reset_to_aarch32_state(void);
*
* Request warm reset to AArch32 mode.
* -----------------------------------------------------
*/
func juno_do_reset_to_aarch32_state
mov x0, #RMR_EL3_RR_BIT
dsb sy
msr rmr_el3, x0
isb
wfi
endfunc juno_do_reset_to_aarch32_state
/* -----------------------------------------------------
* unsigned int plat_arm_calc_core_pos(u_register_t mpidr)
* Helper function to calculate the core position.
@@ -213,3 +233,77 @@ endfunc plat_reset_handler
func plat_arm_calc_core_pos
b css_calc_core_pos_swap_cluster
endfunc plat_arm_calc_core_pos
#if JUNO_AARCH32_EL3_RUNTIME
/* ---------------------------------------------------------------------
* uintptr_t plat_get_my_entrypoint (void);
*
* Main job of this routine is to distinguish between a cold and a warm
* boot. On the Juno platform, this distinction is based on the contents of
* the Trusted Mailbox. It is initialised to zero by the SCP before the
* AP cores are released from reset. Therefore, a zero mailbox means
* it's a cold reset. If it is a warm boot then a request to reset to
* AArch32 state is issued. This is the only way to reset to AArch32
* in EL3 on Juno. A trampoline located at the high vector address
* has already been prepared by BL1.
*
* This function returns the contents of the mailbox, i.e.:
* - 0 for a cold boot;
* - for a warm boot it does not return, but requests a warm reset
*   into AArch32 state instead;
* ---------------------------------------------------------------------
*/
func plat_get_my_entrypoint
mov_imm x0, PLAT_ARM_TRUSTED_MAILBOX_BASE
ldr x0, [x0]
cbz x0, return
b juno_do_reset_to_aarch32_state
1:
b 1b
return:
ret
endfunc plat_get_my_entrypoint
/*
* Emit a "movw r0, #imm16" which moves the lower
* 16 bits of `_val` into r0.
*/
.macro emit_movw _reg_d, _val
mov_imm \_reg_d, (0xe3000000 | \
((\_val & 0xfff) | \
((\_val & 0xf000) << 4)))
.endm
/*
* Emit a "movt r0, #imm16" which moves the upper
* 16 bits of `_val` into r0.
*/
.macro emit_movt _reg_d, _val
mov_imm \_reg_d, (0xe3400000 | \
(((\_val & 0x0fff0000) >> 16) | \
((\_val & 0xf0000000) >> 12)))
.endm
/*
* This function writes the trampoline code at HI-VEC (0xFFFF0000)
* address which loads r0 with the entrypoint address for
* BL32 (a.k.a. SP_MIN) when EL3 is in AArch32 mode. A warm reset
* to AArch32 mode is then requested by writing into RMR_EL3.
*/
func juno_reset_to_aarch32_state
emit_movw w0, BL32_BASE
emit_movt w1, BL32_BASE
/* opcode "bx r0" to branch using r0 in AArch32 mode */
mov_imm w2, 0xe12fff10
/* Write the above opcodes at HI-VECTOR location */
mov_imm x3, HI_VECTOR_BASE
str w0, [x3], #4
str w1, [x3], #4
str w2, [x3]
bl juno_do_reset_to_aarch32_state
1:
b 1b
endfunc juno_reset_to_aarch32_state
#endif /* JUNO_AARCH32_EL3_RUNTIME */
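To make the emit macros concrete, here is a hedged C rendering of the three words the routine writes at HI_VECTOR, following the A32 MOVW/MOVT/BX encodings; the function and parameter names are stand-ins, not code from this patch:

#include <stdint.h>

/* Build the A32 trampoline: movw r0,#lo16; movt r0,#hi16; bx r0. */
static void example_build_trampoline(uint32_t *hi_vector, uint32_t bl32_base)
{
	uint32_t lo = bl32_base & 0xffff;
	uint32_t hi = bl32_base >> 16;

	hi_vector[0] = 0xe3000000 | (lo & 0xfff) | ((lo & 0xf000) << 4);
	hi_vector[1] = 0xe3400000 | (hi & 0xfff) | ((hi & 0xf000) << 4);
	hi_vector[2] = 0xe12fff10;	/* bx r0 */
}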

View File

@@ -103,8 +103,8 @@
#endif
#ifdef IMAGE_BL32
# define PLAT_ARM_MMAP_ENTRIES 4
# define MAX_XLAT_TABLES 3
# define PLAT_ARM_MMAP_ENTRIES 5
# define MAX_XLAT_TABLES 4
#endif
/*

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,11 +32,15 @@
#include <errno.h>
#include <platform.h>
#include <plat_arm.h>
#include <sp805.h>
#include <tbbr_img_def.h>
#include <v2m_def.h>
#define RESET_REASON_WDOG_RESET (0x2)
void juno_reset_to_aarch32_state(void);
/*******************************************************************************
* The following function checks if Firmware update is needed,
* by checking if TOC in FIP image is valid or watchdog reset happened.
@@ -85,3 +89,15 @@ __dead2 void bl1_plat_fwu_done(void *client_cookie, void *reserved)
while (1)
wfi();
}
#if JUNO_AARCH32_EL3_RUNTIME
void bl1_plat_prepare_exit(entry_point_info_t *ep_info)
{
#if !ARM_DISABLE_TRUSTED_WDOG
/* Disable watchdog before leaving BL1 */
sp805_stop(ARM_SP805_TWDG_BASE);
#endif
juno_reset_to_aarch32_state();
}
#endif /* JUNO_AARCH32_EL3_RUNTIME */

View File

@@ -0,0 +1,56 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <bl_common.h>
#include <desc_image_load.h>
#include <plat_arm.h>
#if JUNO_AARCH32_EL3_RUNTIME
/*******************************************************************************
* This function changes the SPSR for the BL32 image to bypass
* the check in the BL1 AArch64 exception handler. This is needed in the
* AArch32 boot flow because the core comes up in AArch64, and entering the
* BL32 image requires a warm reset into AArch32 state.
******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
int err = arm_bl2_handle_post_image_load(image_id);
if (!err && (image_id == BL32_IMAGE_ID)) {
bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
assert(bl_mem_params);
bl_mem_params->ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
DISABLE_ALL_EXCEPTIONS);
}
return err;
}
#endif /* JUNO_AARCH32_EL3_RUNTIME */

View File

@@ -1,93 +0,0 @@
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <css_pm.h>
#include <plat_arm.h>
/*
* Custom `validate_power_state` handler for Juno. According to PSCI
* Specification, interrupts targeted to cores in PSCI CPU SUSPEND should
* be able to resume them. On Juno, when the system power domain is suspended,
* the GIC is also powered down. The SCP resumes the final core to be suspended
* when an external wake-up event is received. But the other cores cannot be
* woken up by a targeted interrupt, because the GIC doesn't forward these
* interrupts to the SCP. Due to this hardware limitation, we down-grade PSCI
* CPU SUSPEND requests targeted to the system power domain level
* to cluster power domain level.
*
* System power domain suspend on Juno is supported only via the
* PSCI SYSTEM SUSPEND API.
*/
static int juno_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state)
{
int rc;
rc = arm_validate_power_state(power_state, req_state);
/*
* Ensure that the system power domain level is never suspended
* via PSCI CPU SUSPEND API. Currently system suspend is only
* supported via PSCI SYSTEM SUSPEND API.
*/
req_state->pwr_domain_state[ARM_PWR_LVL2] = ARM_LOCAL_STATE_RUN;
return rc;
}
/*
* Custom `translate_power_state_by_mpidr` handler for Juno. Unlike in the
* `juno_validate_power_state`, we do not down-grade the system power
* domain level request in `power_state` as it will be used to query the
* PSCI_STAT_COUNT/RESIDENCY at the system power domain level.
*/
static int juno_translate_power_state_by_mpidr(u_register_t mpidr,
unsigned int power_state,
psci_power_state_t *output_state)
{
return arm_validate_power_state(power_state, output_state);
}
/*******************************************************************************
* Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
* platform will take care of registering the handlers with PSCI.
******************************************************************************/
plat_psci_ops_t plat_arm_psci_pm_ops = {
.pwr_domain_on = css_pwr_domain_on,
.pwr_domain_on_finish = css_pwr_domain_on_finish,
.pwr_domain_off = css_pwr_domain_off,
.cpu_standby = css_cpu_standby,
.pwr_domain_suspend = css_pwr_domain_suspend,
.pwr_domain_suspend_finish = css_pwr_domain_suspend_finish,
.system_off = css_system_off,
.system_reset = css_system_reset,
.validate_power_state = juno_validate_power_state,
.validate_ns_entrypoint = arm_validate_ns_entrypoint,
.get_sys_suspend_power_state = css_get_sys_suspend_power_state,
.translate_power_state_by_mpidr = juno_translate_power_state_by_mpidr,
.get_node_hw_state = css_node_hw_state
};

View File

@@ -48,8 +48,14 @@ endif
PLAT_INCLUDES := -Iplat/arm/board/juno/include
PLAT_BL_COMMON_SOURCES := plat/arm/board/juno/aarch64/juno_helpers.S
PLAT_BL_COMMON_SOURCES := plat/arm/board/juno/${ARCH}/juno_helpers.S
# Flag to enable support for AArch32 state on JUNO
JUNO_AARCH32_EL3_RUNTIME := 0
$(eval $(call assert_boolean,JUNO_AARCH32_EL3_RUNTIME))
$(eval $(call add_define,JUNO_AARCH32_EL3_RUNTIME))
ifeq (${ARCH},aarch64)
BL1_SOURCES += lib/cpus/aarch64/cortex_a53.S \
lib/cpus/aarch64/cortex_a57.S \
lib/cpus/aarch64/cortex_a72.S \
@@ -59,6 +65,7 @@ BL1_SOURCES += lib/cpus/aarch64/cortex_a53.S \
${JUNO_SECURITY_SOURCES}
BL2_SOURCES += plat/arm/board/juno/juno_err.c \
plat/arm/board/juno/juno_bl2_setup.c \
${JUNO_SECURITY_SOURCES}
BL2U_SOURCES += ${JUNO_SECURITY_SOURCES}
@@ -66,11 +73,11 @@ BL2U_SOURCES += ${JUNO_SECURITY_SOURCES}
BL31_SOURCES += lib/cpus/aarch64/cortex_a53.S \
lib/cpus/aarch64/cortex_a57.S \
lib/cpus/aarch64/cortex_a72.S \
plat/arm/board/juno/juno_pm.c \
plat/arm/board/juno/juno_topology.c \
${JUNO_GIC_SOURCES} \
${JUNO_INTERCONNECT_SOURCES} \
${JUNO_SECURITY_SOURCES}
endif
# Enable workarounds for selected Cortex-A53 and A57 errata.
ERRATA_A53_855873 := 1

View File

@@ -0,0 +1,47 @@
#
# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SP_MIN source files specific to JUNO platform
BL32_SOURCES += lib/cpus/aarch32/cortex_a53.S \
lib/cpus/aarch32/cortex_a57.S \
lib/cpus/aarch32/cortex_a72.S \
plat/arm/board/juno/juno_pm.c \
plat/arm/board/juno/juno_topology.c \
plat/arm/css/common/css_pm.c \
plat/arm/css/common/css_topology.c \
plat/arm/soc/common/soc_css_security.c \
plat/arm/css/drivers/scp/css_pm_scpi.c \
plat/arm/css/drivers/scpi/css_mhu.c \
plat/arm/css/drivers/scpi/css_scpi.c \
${JUNO_GIC_SOURCES} \
${JUNO_INTERCONNECT_SOURCES} \
${JUNO_SECURITY_SOURCES}
include plat/arm/common/sp_min/arm_sp_min.mk

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -31,9 +31,10 @@
#include <platform_def.h>
.weak plat_arm_calc_core_pos
.weak plat_crash_console_init
.weak plat_crash_console_putc
.weak plat_my_core_pos
.globl plat_crash_console_init
.globl plat_crash_console_putc
.globl plat_crash_console_flush
/* -----------------------------------------------------
* unsigned int plat_my_core_pos(void)
@@ -85,3 +86,16 @@ func plat_crash_console_putc
ldr r1, =PLAT_ARM_CRASH_UART_BASE
b console_core_putc
endfunc plat_crash_console_putc
/* ---------------------------------------------
* int plat_crash_console_flush()
* Function to force a write of all buffered
* data that hasn't been output.
* Out : return -1 on error else return 0.
* Clobber list : r0 - r1
* ---------------------------------------------
*/
func plat_crash_console_flush
ldr r1, =PLAT_ARM_CRASH_UART_BASE
b console_core_flush
endfunc plat_crash_console_flush

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -34,6 +34,7 @@
.weak plat_my_core_pos
.globl plat_crash_console_init
.globl plat_crash_console_putc
.globl plat_crash_console_flush
.globl platform_mem_init
@@ -88,6 +89,19 @@ func plat_crash_console_putc
b console_core_putc
endfunc plat_crash_console_putc
/* ---------------------------------------------
* int plat_crash_console_flush()
* Function to force a write of all buffered
* data that hasn't been output.
* Out : return -1 on error else return 0.
* Clobber list : r0 - r1
* ---------------------------------------------
*/
func plat_crash_console_flush
mov_imm x1, PLAT_ARM_CRASH_UART_BASE
b console_core_flush
endfunc plat_crash_console_flush
/* ---------------------------------------------------------------------
* We don't need to carry out any memory initialization on ARM
* platforms. The Secure RAM is accessible straight away.

View File

@@ -30,13 +30,13 @@
#include <arch.h>
#include <arm_def.h>
#include <arm_xlat_tables.h>
#include <bl_common.h>
#include <console.h>
#include <platform_def.h>
#include <plat_arm.h>
#include <sp805.h>
#include <utils.h>
#include <xlat_tables_v2.h>
#include "../../../bl1/bl1_private.h"
/* Weak definitions may be overridden in specific ARM standard platform */
@@ -44,6 +44,7 @@
#pragma weak bl1_plat_arch_setup
#pragma weak bl1_platform_setup
#pragma weak bl1_plat_sec_mem_layout
#pragma weak bl1_plat_prepare_exit
/* Data structure which holds the extents of the trusted SRAM for BL1*/

View File

@@ -249,11 +249,7 @@ void bl2_plat_arch_setup(void)
}
#if LOAD_IMAGE_V2
/*******************************************************************************
* This function can be used by the platforms to update/use image
* information for given `image_id`.
******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
int arm_bl2_handle_post_image_load(unsigned int image_id)
{
int err = 0;
bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
@@ -286,6 +282,15 @@ int bl2_plat_handle_post_image_load(unsigned int image_id)
return err;
}
/*******************************************************************************
* This function can be used by the platforms to update/use image
* information for given `image_id`.
******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
return arm_bl2_handle_post_image_load(image_id);
}
#else /* LOAD_IMAGE_V2 */
/*******************************************************************************

View File

@@ -29,12 +29,12 @@
*/
#include <arch.h>
#include <arch_helpers.h>
#include <arm_xlat_tables.h>
#include <assert.h>
#include <debug.h>
#include <mmio.h>
#include <plat_arm.h>
#include <platform_def.h>
#include <xlat_tables_v2.h>
extern const mmap_region_t plat_arm_mmap[];

View File

@@ -95,6 +95,11 @@ ARM_PLAT_MT := 0
$(eval $(call assert_boolean,ARM_PLAT_MT))
$(eval $(call add_define,ARM_PLAT_MT))
# Use translation tables library v2 by default
ARM_XLAT_TABLES_LIB_V1 := 0
$(eval $(call assert_boolean,ARM_XLAT_TABLES_LIB_V1))
$(eval $(call add_define,ARM_XLAT_TABLES_LIB_V1))
# Enable PSCI_STAT_COUNT/RESIDENCY APIs on ARM platforms
ENABLE_PSCI_STAT := 1
ENABLE_PMF := 1
@@ -113,11 +118,17 @@ ifeq (${ARCH}, aarch64)
PLAT_INCLUDES += -Iinclude/plat/arm/common/aarch64
endif
PLAT_BL_COMMON_SOURCES += plat/arm/common/${ARCH}/arm_helpers.S \
plat/arm/common/arm_common.c
ifeq (${ARM_XLAT_TABLES_LIB_V1}, 1)
PLAT_BL_COMMON_SOURCES += lib/xlat_tables/xlat_tables_common.c \
lib/xlat_tables/${ARCH}/xlat_tables.c
else
include lib/xlat_tables_v2/xlat_tables.mk
PLAT_BL_COMMON_SOURCES += ${XLAT_TABLES_LIB_SRCS} \
plat/arm/common/${ARCH}/arm_helpers.S \
plat/arm/common/arm_common.c
PLAT_BL_COMMON_SOURCES += ${XLAT_TABLES_LIB_SRCS}
endif
BL1_SOURCES += drivers/arm/sp805/sp805.c \
drivers/io/io_fip.c \
@@ -137,8 +148,14 @@ BL2_SOURCES += drivers/io/io_fip.c \
plat/arm/common/arm_bl2_setup.c \
plat/arm/common/arm_io_storage.c
ifeq (${LOAD_IMAGE_V2},1)
BL2_SOURCES += plat/arm/common/${ARCH}/arm_bl2_mem_params_desc.c\
plat/arm/common/arm_image_load.c \
# Because BL1/BL2 execute in AArch64 mode but BL32 executes in AArch32, we need to use
# the AArch32 descriptors.
ifeq (${JUNO_AARCH32_EL3_RUNTIME},1)
BL2_SOURCES += plat/arm/common/aarch32/arm_bl2_mem_params_desc.c
else
BL2_SOURCES += plat/arm/common/${ARCH}/arm_bl2_mem_params_desc.c
endif
BL2_SOURCES += plat/arm/common/arm_image_load.c \
common/desc_image_load.c
endif

View File

@ -0,0 +1,106 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <cpu_macros.S>
#include <css_def.h>
.weak plat_secondary_cold_boot_setup
.weak plat_get_my_entrypoint
.globl css_calc_core_pos_swap_cluster
.weak plat_is_my_cpu_primary
/* ---------------------------------------------------------------------
* void plat_secondary_cold_boot_setup(void);
* In the normal boot flow, cold-booting secondary
* CPUs is not yet implemented and they panic.
* ---------------------------------------------------------------------
*/
func plat_secondary_cold_boot_setup
/* TODO: Implement secondary CPU cold boot setup on CSS platforms */
cb_panic:
b cb_panic
endfunc plat_secondary_cold_boot_setup
/* ---------------------------------------------------------------------
* uintptr_t plat_get_my_entrypoint (void);
*
* Main job of this routine is to distinguish between a cold and a warm
* boot. On CSS platforms, this distinction is based on the contents of
* the Trusted Mailbox. It is initialised to zero by the SCP before the
* AP cores are released from reset. Therefore, a zero mailbox means
* it's a cold reset.
*
* This function returns the contents of the mailbox, i.e.:
* - 0 for a cold boot;
* - the warm boot entrypoint for a warm boot.
* ---------------------------------------------------------------------
*/
func plat_get_my_entrypoint
ldr r0, =PLAT_ARM_TRUSTED_MAILBOX_BASE
ldr r0, [r0]
bx lr
endfunc plat_get_my_entrypoint
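/*
 * A hedged C rendering of the check above, for readability; it assumes the
 * same PLAT_ARM_TRUSTED_MAILBOX_BASE symbol and is illustrative only:
 */
uintptr_t plat_get_my_entrypoint(void)
{
	/* zero means cold boot; any other value is the warm boot entrypoint */
	return *(volatile uintptr_t *)PLAT_ARM_TRUSTED_MAILBOX_BASE;
}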
/* -----------------------------------------------------------
* unsigned int css_calc_core_pos_swap_cluster(u_register_t mpidr)
* Utility function to calculate the core position by
* swapping the cluster order. This is necessary in order to
* match the format of the boot information passed by the SCP
* and read in plat_is_my_cpu_primary below.
* -----------------------------------------------------------
*/
func css_calc_core_pos_swap_cluster
and r1, r0, #MPIDR_CPU_MASK
and r0, r0, #MPIDR_CLUSTER_MASK
eor r0, r0, #(1 << MPIDR_AFFINITY_BITS) // swap cluster order
add r0, r1, r0, LSR #6
bx lr
endfunc css_calc_core_pos_swap_cluster
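/*
 * A worked C equivalent of the swap above, assuming two clusters of four
 * CPUs each (cluster field in MPIDR Aff1, one significant bit); under those
 * assumptions the XOR flips the cluster and the shift scales it by four:
 */
unsigned int calc_core_pos_swap_cluster(unsigned long mpidr)
{
	unsigned int cpu = mpidr & 0xffU;		/* MPIDR_CPU_MASK */
	unsigned int cluster = (mpidr >> 8) & 0x1U;	/* Aff1, bit 0 */

	return ((cluster ^ 1U) << 2) + cpu;	/* CPU n of cluster 0 -> 4 + n */
}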
/* -----------------------------------------------------
* unsigned int plat_is_my_cpu_primary (void);
*
* Find out whether the current cpu is the primary
* cpu (applicable only after a cold boot)
* -----------------------------------------------------
*/
func plat_is_my_cpu_primary
mov r10, lr
bl plat_my_core_pos
ldr r1, =SCP_BOOT_CFG_ADDR
ldr r1, [r1]
ubfx r1, r1, #PLAT_CSS_PRIMARY_CPU_SHIFT, \
#PLAT_CSS_PRIMARY_CPU_BIT_WIDTH
cmp r0, r1
moveq r0, #1
movne r0, #0
bx r10
endfunc plat_is_my_cpu_primary
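/*
 * The ubfx extraction above, as a hedged C sketch; SCP_BOOT_CFG_ADDR,
 * PLAT_CSS_PRIMARY_CPU_SHIFT and PLAT_CSS_PRIMARY_CPU_BIT_WIDTH are the
 * same symbols used by the assembly (illustrative only):
 */
unsigned int is_my_cpu_primary(unsigned int my_core_pos)
{
	uint32_t cfg = *(volatile uint32_t *)SCP_BOOT_CFG_ADDR;
	uint32_t primary = (cfg >> PLAT_CSS_PRIMARY_CPU_SHIFT) &
			   ((1U << PLAT_CSS_PRIMARY_CPU_BIT_WIDTH) - 1U);

	return my_core_pos == primary;
}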

View File

@ -36,7 +36,7 @@ PLAT_INCLUDES += -Iinclude/plat/arm/css/common \
-Iinclude/plat/arm/css/common/aarch64
PLAT_BL_COMMON_SOURCES += plat/arm/css/common/aarch64/css_helpers.S
PLAT_BL_COMMON_SOURCES += plat/arm/css/common/${ARCH}/css_helpers.S
BL1_SOURCES += plat/arm/css/common/css_bl1_setup.c
@ -64,7 +64,7 @@ $(eval $(call assert_boolean,CSS_LOAD_SCP_IMAGES))
$(eval $(call add_define,CSS_LOAD_SCP_IMAGES))
ifeq (${CSS_LOAD_SCP_IMAGES},1)
$(eval $(call FIP_ADD_IMG,SCP_BL2,--scp-fw))
NEED_SCP_BL2 := yes
ifneq (${TRUSTED_BOARD_BOOT},0)
$(eval $(call FWU_FIP_ADD_IMG,SCP_BL2U,--scp-fwu-cfg))
endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -75,6 +75,13 @@ const unsigned int arm_pm_idle_states[] = {
CASSERT(PLAT_MAX_PWR_LVL >= ARM_PWR_LVL1,
assert_max_pwr_lvl_supported_mismatch);
/*
* Ensure that the PLAT_MAX_PWR_LVL is not greater than CSS_SYSTEM_PWR_DMN_LVL
* assumed by the CSS layer.
*/
CASSERT(PLAT_MAX_PWR_LVL <= CSS_SYSTEM_PWR_DMN_LVL,
assert_max_pwr_lvl_higher_than_css_sys_lvl);
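/*
 * CASSERT fails the build when its condition is false at compile time. One
 * common way to implement such an assert (a sketch of the idiom, not
 * necessarily TF's exact macro):
 */
#define COMPILE_TIME_ASSERT(cond, name)	typedef char name[(cond) ? 1 : -1]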
/*******************************************************************************
* Handler called when a power domain is about to be turned on. The
* level and mpidr determine the affinity instance.
@ -243,7 +250,7 @@ void css_get_sys_suspend_power_state(psci_power_state_t *req_state)
* System Suspend is supported only if the system power domain node
* is implemented.
*/
assert(PLAT_MAX_PWR_LVL >= ARM_PWR_LVL2);
assert(PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL);
for (i = ARM_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++)
req_state->pwr_domain_state[i] = ARM_LOCAL_STATE_OFF;
@ -257,6 +264,39 @@ int css_node_hw_state(u_register_t mpidr, unsigned int power_level)
return css_scp_get_power_state(mpidr, power_level);
}
/*
* System power domain suspend is supported only via the PSCI
* SYSTEM_SUSPEND API. A PSCI CPU_SUSPEND request targeting the system
* power domain is downgraded to the next lower level.
*/
static int css_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state)
{
int rc;
rc = arm_validate_power_state(power_state, req_state);
/*
* Ensure that the system power domain level is never suspended
* via the PSCI CPU_SUSPEND API. Currently, system suspend is
* supported only via the PSCI SYSTEM_SUSPEND API.
*/
req_state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] = ARM_LOCAL_STATE_RUN;
return rc;
}
/*
* Custom `translate_power_state_by_mpidr` handler for CSS. Unlike
* `css_validate_power_state`, we do not downgrade the system power
* domain level request in `power_state` as it will be used to query the
* PSCI_STAT_COUNT/RESIDENCY at the system power domain level.
*/
static int css_translate_power_state_by_mpidr(u_register_t mpidr,
unsigned int power_state,
psci_power_state_t *output_state)
{
return arm_validate_power_state(power_state, output_state);
}
/*******************************************************************************
* Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
* platform will take care of registering the handlers with PSCI.
@ -270,7 +310,9 @@ plat_psci_ops_t plat_arm_psci_pm_ops = {
.pwr_domain_suspend_finish = css_pwr_domain_suspend_finish,
.system_off = css_system_off,
.system_reset = css_system_reset,
.validate_power_state = arm_validate_power_state,
.validate_power_state = css_validate_power_state,
.validate_ns_entrypoint = arm_validate_ns_entrypoint,
.get_node_hw_state = css_node_hw_state
.translate_power_state_by_mpidr = css_translate_power_state_by_mpidr,
.get_node_hw_state = css_node_hw_state,
.get_sys_suspend_power_state = css_get_sys_suspend_power_state
};

View File

@ -32,6 +32,7 @@
#include <assert.h>
#include <css_pm.h>
#include <debug.h>
#include <plat_arm.h>
#include "../scpi/css_scpi.h"
#include "css_scp.h"
@ -134,6 +135,12 @@ void __dead2 css_scp_sys_shutdown(void)
{
uint32_t response;
/*
* Disable the GIC CPU interface to prevent pending interrupts
* from waking up the AP from WFI.
*/
plat_arm_gic_cpuif_disable();
/* Send the power down request to the SCP */
response = scpi_sys_power_state(scpi_system_shutdown);
@ -153,6 +160,12 @@ void __dead2 css_scp_sys_reboot(void)
{
uint32_t response;
/*
* Disable the GIC CPU interface to prevent pending interrupts
* from waking up the AP from WFI.
*/
plat_arm_gic_cpuif_disable();
/* Send the system reset request to the SCP */
response = scpi_sys_power_state(scpi_system_reboot);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -31,11 +31,43 @@
#include <arch.h>
#include <asm_macros.S>
.weak plat_crash_console_init
.weak plat_crash_console_putc
.weak plat_crash_console_flush
.weak plat_reset_handler
.weak plat_disable_acp
.weak platform_mem_init
.weak plat_panic_handler
/* -----------------------------------------------------
* Placeholder function which should be redefined by
* each platform.
* -----------------------------------------------------
*/
func plat_crash_console_init
mov r0, #0
bx lr
endfunc plat_crash_console_init
/* -----------------------------------------------------
* Placeholder function which should be redefined by
* each platform.
* -----------------------------------------------------
*/
func plat_crash_console_putc
bx lr
endfunc plat_crash_console_putc
/* -----------------------------------------------------
* Placeholder function which should be redefined by
* each platform.
* -----------------------------------------------------
*/
func plat_crash_console_flush
mov r0, #0
bx lr
endfunc plat_crash_console_flush
/* -----------------------------------------------------
* Placeholder function which should be redefined by
* each platform.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -35,6 +35,7 @@
.weak plat_report_exception
.weak plat_crash_console_init
.weak plat_crash_console_putc
.weak plat_crash_console_flush
.weak plat_reset_handler
.weak plat_disable_acp
.weak bl1_plat_prepare_exit
@ -96,6 +97,15 @@ func plat_crash_console_putc
ret
endfunc plat_crash_console_putc
/* -----------------------------------------------------
* Placeholder function which should be redefined by
* each platform.
* -----------------------------------------------------
*/
func plat_crash_console_flush
ret
endfunc plat_crash_console_flush
/* -----------------------------------------------------
* Placeholder function which should be redefined by
* each platform. This function should preserve x19 - x29.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -131,7 +131,7 @@ endfunc platform_set_stack
* -------------------------------------------------------
*/
func_deprecated platform_get_stack
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
mrs x1, mpidr_el1
cmp x0, x1
ASM_ASSERT(eq)
@ -150,7 +150,7 @@ endfunc_deprecated platform_get_stack
* -----------------------------------------------------
*/
func_deprecated platform_set_stack
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
mrs x1, mpidr_el1
cmp x0, x1
ASM_ASSERT(eq)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -33,6 +33,7 @@
.globl console_core_init
.globl console_core_putc
.globl console_core_getc
.globl console_core_flush
/* -----------------------------------------------
* int console_core_init(unsigned long base_addr,
@ -170,3 +171,18 @@ getc_error:
mov w0, #-1
ret
endfunc console_core_getc
/* ---------------------------------------------
* int console_core_flush(uintptr_t base_addr)
* Function to force a write of all buffered
* data that hasn't been output.
* In : x0 - console base address
* Out : return -1 on error else return 0.
* Clobber list : x0, x1
* ---------------------------------------------
*/
func console_core_flush
/* Placeholder */
mov w0, #0
ret
endfunc console_core_flush
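/*
 * C-level contract of the new hook, per the comment above: 0 on success,
 * -1 on error. A platform crash-console wrapper simply forwards its UART
 * base, e.g. (sketch, mirroring the ARM wrapper at the top of this diff):
 */
int console_core_flush(uintptr_t base_addr);

int plat_crash_console_flush(void)
{
	return console_core_flush(PLAT_ARM_CRASH_UART_BASE);
}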

View File

@ -243,11 +243,12 @@ endfunc platform_mem_init
* ---------------------------------------------
*/
func plat_crash_console_init
adr x0, tegra_console_base
ldr x0, [x0]
mov_imm x1, TEGRA_BOOT_UART_CLK_IN_HZ
mov_imm x2, TEGRA_CONSOLE_BAUDRATE
b console_core_init
mov x0, #0
adr x1, tegra_console_base
ldr x1, [x1]
cbz x1, 1f
mov w0, #1
1: ret
endfunc plat_crash_console_init
/* ---------------------------------------------

View File

@ -37,7 +37,7 @@
#include <string.h>
#include <tegra_def.h>
#include <utils.h>
#include <xlat_tables.h>
#include <xlat_tables_v2.h>
#define TEGRA_GPU_RESET_REG_OFFSET 0x28c
#define GPU_RESET_BIT (1 << 24)
@ -135,17 +135,18 @@ static void tegra_clear_videomem(uintptr_t non_overlap_area_start,
unsigned long long non_overlap_area_size)
{
/*
* Perform cache maintenance to ensure that the non-overlapping area is
* zeroed out. The first invalidation of this range ensures that
* possible evictions of dirty cache lines do not interfere with the
* 'zeromem' operation. Other CPUs could speculatively prefetch the
* main memory contents of this area between the first invalidation and
* the 'zeromem' operation. The second invalidation ensures that any
* such cache lines are removed as well.
* Map the NS memory first, clean it and then unmap it.
*/
inv_dcache_range(non_overlap_area_start, non_overlap_area_size);
mmap_add_dynamic_region(non_overlap_area_start, /* PA */
non_overlap_area_start, /* VA */
non_overlap_area_size, /* size */
MT_NS | MT_RW | MT_EXECUTE_NEVER); /* attrs */
zeromem((void *)non_overlap_area_start, non_overlap_area_size);
inv_dcache_range(non_overlap_area_start, non_overlap_area_size);
flush_dcache_range(non_overlap_area_start, non_overlap_area_size);
mmap_remove_dynamic_region(non_overlap_area_start,
non_overlap_area_size);
}
/*
@ -194,7 +195,6 @@ void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes)
*/
INFO("Cleaning previous Video Memory Carveout\n");
disable_mmu_el3();
if (phys_base > vmem_end_old || video_mem_base > vmem_end_new) {
tegra_clear_videomem(video_mem_base, video_mem_size << 20);
} else {
@ -207,7 +207,6 @@ void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes)
tegra_clear_videomem(vmem_end_new, non_overlap_area_size);
}
}
enable_mmu_el3(0);
done:
tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI, (uint32_t)(phys_base >> 32));
@ -218,3 +217,29 @@ done:
video_mem_base = phys_base;
video_mem_size = size_in_bytes >> 20;
}
/*
* During boot, USB3 and flash media (SDMMC/SATA) devices need access to
* IRAM. Because these clients connect to the MC and do not have a direct
* path to the IRAM, the MC implements AHB redirection during boot to allow
* a path to IRAM. In this mode, accesses to a programmed memory address aperture
* are directed to the AHB bus, allowing access to the IRAM. The AHB aperture
* is defined by the IRAM_BASE_LO and IRAM_BASE_HI registers, which are
* initialized to disable this aperture.
*
* Once bootup is complete, we must program IRAM base to 0xffffffff and
* IRAM top to 0x00000000, thus disabling access to IRAM. DRAM is then
* potentially accessible in this address range. These aperture registers
* also have an access_control/lock bit. After disabling the aperture, the
* access_control register should be programmed to lock the registers.
*/
void tegra_memctrl_disable_ahb_redirection(void)
{
/* program the aperture registers */
tegra_mc_write_32(MC_IRAM_BASE_LO, 0xFFFFFFFF);
tegra_mc_write_32(MC_IRAM_TOP_LO, 0);
tegra_mc_write_32(MC_IRAM_BASE_TOP_HI, 0);
/* lock the aperture registers */
tegra_mc_write_32(MC_IRAM_REG_CTRL, MC_DISABLE_IRAM_CFG_WRITES);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -30,6 +30,7 @@
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <debug.h>
#include <mce.h>
#include <memctrl.h>
@ -48,193 +49,6 @@
static uint64_t video_mem_base;
static uint64_t video_mem_size_mb;
/* array to hold stream_id override config register offsets */
const static uint32_t streamid_overrides[] = {
MC_STREAMID_OVERRIDE_CFG_PTCR,
MC_STREAMID_OVERRIDE_CFG_AFIR,
MC_STREAMID_OVERRIDE_CFG_HDAR,
MC_STREAMID_OVERRIDE_CFG_HOST1XDMAR,
MC_STREAMID_OVERRIDE_CFG_NVENCSRD,
MC_STREAMID_OVERRIDE_CFG_SATAR,
MC_STREAMID_OVERRIDE_CFG_MPCORER,
MC_STREAMID_OVERRIDE_CFG_NVENCSWR,
MC_STREAMID_OVERRIDE_CFG_AFIW,
MC_STREAMID_OVERRIDE_CFG_SATAW,
MC_STREAMID_OVERRIDE_CFG_MPCOREW,
MC_STREAMID_OVERRIDE_CFG_SATAW,
MC_STREAMID_OVERRIDE_CFG_HDAW,
MC_STREAMID_OVERRIDE_CFG_ISPRA,
MC_STREAMID_OVERRIDE_CFG_ISPWA,
MC_STREAMID_OVERRIDE_CFG_ISPWB,
MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTR,
MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTW,
MC_STREAMID_OVERRIDE_CFG_XUSB_DEVR,
MC_STREAMID_OVERRIDE_CFG_XUSB_DEVW,
MC_STREAMID_OVERRIDE_CFG_TSECSRD,
MC_STREAMID_OVERRIDE_CFG_TSECSWR,
MC_STREAMID_OVERRIDE_CFG_GPUSRD,
MC_STREAMID_OVERRIDE_CFG_GPUSWR,
MC_STREAMID_OVERRIDE_CFG_SDMMCRA,
MC_STREAMID_OVERRIDE_CFG_SDMMCRAA,
MC_STREAMID_OVERRIDE_CFG_SDMMCR,
MC_STREAMID_OVERRIDE_CFG_SDMMCRAB,
MC_STREAMID_OVERRIDE_CFG_SDMMCWA,
MC_STREAMID_OVERRIDE_CFG_SDMMCWAA,
MC_STREAMID_OVERRIDE_CFG_SDMMCW,
MC_STREAMID_OVERRIDE_CFG_SDMMCWAB,
MC_STREAMID_OVERRIDE_CFG_VICSRD,
MC_STREAMID_OVERRIDE_CFG_VICSWR,
MC_STREAMID_OVERRIDE_CFG_VIW,
MC_STREAMID_OVERRIDE_CFG_NVDECSRD,
MC_STREAMID_OVERRIDE_CFG_NVDECSWR,
MC_STREAMID_OVERRIDE_CFG_APER,
MC_STREAMID_OVERRIDE_CFG_APEW,
MC_STREAMID_OVERRIDE_CFG_NVJPGSRD,
MC_STREAMID_OVERRIDE_CFG_NVJPGSWR,
MC_STREAMID_OVERRIDE_CFG_SESRD,
MC_STREAMID_OVERRIDE_CFG_SESWR,
MC_STREAMID_OVERRIDE_CFG_ETRR,
MC_STREAMID_OVERRIDE_CFG_ETRW,
MC_STREAMID_OVERRIDE_CFG_TSECSRDB,
MC_STREAMID_OVERRIDE_CFG_TSECSWRB,
MC_STREAMID_OVERRIDE_CFG_GPUSRD2,
MC_STREAMID_OVERRIDE_CFG_GPUSWR2,
MC_STREAMID_OVERRIDE_CFG_AXISR,
MC_STREAMID_OVERRIDE_CFG_AXISW,
MC_STREAMID_OVERRIDE_CFG_EQOSR,
MC_STREAMID_OVERRIDE_CFG_EQOSW,
MC_STREAMID_OVERRIDE_CFG_UFSHCR,
MC_STREAMID_OVERRIDE_CFG_UFSHCW,
MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR,
MC_STREAMID_OVERRIDE_CFG_BPMPR,
MC_STREAMID_OVERRIDE_CFG_BPMPW,
MC_STREAMID_OVERRIDE_CFG_BPMPDMAR,
MC_STREAMID_OVERRIDE_CFG_BPMPDMAW,
MC_STREAMID_OVERRIDE_CFG_AONR,
MC_STREAMID_OVERRIDE_CFG_AONW,
MC_STREAMID_OVERRIDE_CFG_AONDMAR,
MC_STREAMID_OVERRIDE_CFG_AONDMAW,
MC_STREAMID_OVERRIDE_CFG_SCER,
MC_STREAMID_OVERRIDE_CFG_SCEW,
MC_STREAMID_OVERRIDE_CFG_SCEDMAR,
MC_STREAMID_OVERRIDE_CFG_SCEDMAW,
MC_STREAMID_OVERRIDE_CFG_APEDMAR,
MC_STREAMID_OVERRIDE_CFG_APEDMAW,
MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR1,
MC_STREAMID_OVERRIDE_CFG_VICSRD1,
MC_STREAMID_OVERRIDE_CFG_NVDECSRD1
};
/* array to hold the security configs for stream IDs */
const static mc_streamid_security_cfg_t sec_cfgs[] = {
mc_make_sec_cfg(SCEW, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(AFIR, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(NVDISPLAYR1, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(XUSB_DEVR, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(VICSRD1, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(NVENCSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(TSECSRDB, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(AXISW, SECURE, NO_OVERRIDE, DISABLE),
mc_make_sec_cfg(SDMMCWAB, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(AONDMAW, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(GPUSWR2, SECURE, NO_OVERRIDE, DISABLE),
mc_make_sec_cfg(SATAW, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(UFSHCW, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(AFIW, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(SDMMCR, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(SCEDMAW, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(UFSHCR, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(SDMMCWAA, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(SESWR, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(MPCORER, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(PTCR, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(BPMPW, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(ETRW, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(GPUSRD, SECURE, NO_OVERRIDE, DISABLE),
mc_make_sec_cfg(VICSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(SCEDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(HDAW, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(ISPWA, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(EQOSW, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(XUSB_HOSTW, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(TSECSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(SDMMCRAA, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(VIW, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(AXISR, SECURE, NO_OVERRIDE, DISABLE),
mc_make_sec_cfg(SDMMCW, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(BPMPDMAW, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(ISPRA, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(NVDECSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(XUSB_DEVW, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(NVDECSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(MPCOREW, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(NVDISPLAYR, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(BPMPDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(NVJPGSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(NVDECSRD1, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(TSECSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(NVJPGSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(SDMMCWA, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(SCER, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(XUSB_HOSTR, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(VICSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(AONDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(AONW, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(SDMMCRA, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(HOST1XDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(EQOSR, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(SATAR, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(BPMPR, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(HDAR, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(SDMMCRAB, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(ETRR, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(AONR, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(SESRD, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(NVENCSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(GPUSWR, SECURE, NO_OVERRIDE, DISABLE),
mc_make_sec_cfg(TSECSWRB, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(ISPWB, NON_SECURE, OVERRIDE, ENABLE),
mc_make_sec_cfg(GPUSRD2, SECURE, NO_OVERRIDE, DISABLE),
mc_make_sec_cfg(APEDMAW, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(APER, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(APEW, NON_SECURE, NO_OVERRIDE, ENABLE),
mc_make_sec_cfg(APEDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
};
const static mc_txn_override_cfg_t mc_override_cfgs[] = {
mc_make_txn_override_cfg(BPMPW, CGID_TAG_ADR),
mc_make_txn_override_cfg(EQOSW, CGID_TAG_ADR),
mc_make_txn_override_cfg(NVJPGSWR, CGID_TAG_ADR),
mc_make_txn_override_cfg(SDMMCWAA, CGID_TAG_ADR),
mc_make_txn_override_cfg(MPCOREW, CGID_TAG_ADR),
mc_make_txn_override_cfg(SCEDMAW, CGID_TAG_ADR),
mc_make_txn_override_cfg(SDMMCW, CGID_TAG_ADR),
mc_make_txn_override_cfg(AXISW, CGID_TAG_ADR),
mc_make_txn_override_cfg(TSECSWR, CGID_TAG_ADR),
mc_make_txn_override_cfg(GPUSWR, CGID_TAG_ADR),
mc_make_txn_override_cfg(XUSB_HOSTW, CGID_TAG_ADR),
mc_make_txn_override_cfg(TSECSWRB, CGID_TAG_ADR),
mc_make_txn_override_cfg(GPUSWR2, CGID_TAG_ADR),
mc_make_txn_override_cfg(AONDMAW, CGID_TAG_ADR),
mc_make_txn_override_cfg(AONW, CGID_TAG_ADR),
mc_make_txn_override_cfg(SESWR, CGID_TAG_ADR),
mc_make_txn_override_cfg(BPMPDMAW, CGID_TAG_ADR),
mc_make_txn_override_cfg(SDMMCWA, CGID_TAG_ADR),
mc_make_txn_override_cfg(HDAW, CGID_TAG_ADR),
mc_make_txn_override_cfg(NVDECSWR, CGID_TAG_ADR),
mc_make_txn_override_cfg(UFSHCW, CGID_TAG_ADR),
mc_make_txn_override_cfg(SATAW, CGID_TAG_ADR),
mc_make_txn_override_cfg(ETRW, CGID_TAG_ADR),
mc_make_txn_override_cfg(VICSWR, CGID_TAG_ADR),
mc_make_txn_override_cfg(NVENCSWR, CGID_TAG_ADR),
mc_make_txn_override_cfg(SDMMCWAB, CGID_TAG_ADR),
mc_make_txn_override_cfg(ISPWB, CGID_TAG_ADR),
mc_make_txn_override_cfg(APEW, CGID_TAG_ADR),
mc_make_txn_override_cfg(XUSB_DEVW, CGID_TAG_ADR),
mc_make_txn_override_cfg(AFIW, CGID_TAG_ADR),
mc_make_txn_override_cfg(SCEW, CGID_TAG_ADR),
};
static void tegra_memctrl_reconfig_mss_clients(void)
{
#if ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS
@ -248,8 +62,10 @@ static void tegra_memctrl_reconfig_mss_clients(void)
val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0);
assert(val == MC_CLIENT_HOTRESET_CTRL0_RESET_VAL);
wdata_0 = MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB |
wdata_0 = MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB |
#if ENABLE_AFI_DEVICE
MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB |
#endif
MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB;
@ -296,7 +112,9 @@ static void tegra_memctrl_reconfig_mss_clients(void)
* of control on overriding the memory type. So, remove TSA's
* memtype override.
*/
#if ENABLE_AFI_DEVICE
mc_set_tsa_passthrough(AFIW);
#endif
mc_set_tsa_passthrough(HDAW);
mc_set_tsa_passthrough(SATAW);
mc_set_tsa_passthrough(XUSB_HOSTW);
@ -321,15 +139,19 @@ static void tegra_memctrl_reconfig_mss_clients(void)
* whose AXI IDs we know and trust.
*/
#if ENABLE_AFI_DEVICE
/* Match AFIW */
mc_set_forced_coherent_so_dev_cfg(AFIR);
#endif
/*
* See bug 200131110 comment #35 - there are no normal requests
* and AWID for SO/DEV requests is hardcoded in RTL for a
* particular PCIE controller
*/
#if ENABLE_AFI_DEVICE
mc_set_forced_coherent_so_dev_cfg(AFIW);
#endif
mc_set_forced_coherent_cfg(HDAR);
mc_set_forced_coherent_cfg(HDAW);
mc_set_forced_coherent_cfg(SATAR);
@ -374,7 +196,9 @@ static void tegra_memctrl_reconfig_mss_clients(void)
* boot and strongly ordered MSS clients
*/
val = MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL &
#if ENABLE_AFI_DEVICE
mc_set_pcfifo_unordered_boot_so_mss(1, AFIW) &
#endif
mc_set_pcfifo_unordered_boot_so_mss(1, HDAW) &
mc_set_pcfifo_unordered_boot_so_mss(1, SATAW);
tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG1, val);
@ -411,7 +235,9 @@ static void tegra_memctrl_reconfig_mss_clients(void)
* for boot and strongly ordered MSS clients
*/
val = MC_SMMU_CLIENT_CONFIG1_RESET_VAL &
#if ENABLE_AFI_DEVICE
mc_set_smmu_unordered_boot_so_mss(1, AFIW) &
#endif
mc_set_smmu_unordered_boot_so_mss(1, HDAW) &
mc_set_smmu_unordered_boot_so_mss(1, SATAW);
tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG1, val);
@ -480,66 +306,29 @@ static void tegra_memctrl_reconfig_mss_clients(void)
#endif
}
/*
* Init Memory controller during boot.
*/
void tegra_memctrl_setup(void)
static void tegra_memctrl_set_overrides(void)
{
uint32_t val;
uint32_t num_overrides = sizeof(streamid_overrides) / sizeof(uint32_t);
uint32_t num_sec_cfgs = sizeof(sec_cfgs) / sizeof(mc_streamid_security_cfg_t);
uint32_t num_txn_overrides = sizeof(mc_override_cfgs) / sizeof(mc_txn_override_cfg_t);
int i;
tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings();
const mc_txn_override_cfg_t *mc_txn_override_cfgs;
uint32_t num_txn_override_cfgs;
uint32_t i, val;
INFO("Tegra Memory Controller (v2)\n");
/* Program the SMMU pagesize */
tegra_smmu_init();
/* Program all the Stream ID overrides */
for (i = 0; i < num_overrides; i++)
tegra_mc_streamid_write_32(streamid_overrides[i],
MC_STREAM_ID_MAX);
/* Program the security config settings for all Stream IDs */
for (i = 0; i < num_sec_cfgs; i++) {
val = sec_cfgs[i].override_enable << 16 |
sec_cfgs[i].override_client_inputs << 8 |
sec_cfgs[i].override_client_ns_flag << 0;
tegra_mc_streamid_write_32(sec_cfgs[i].offset, val);
}
/*
* All requests at boot time, and certain requests during
* normal run time, are physically addressed and must bypass
* the SMMU. The client hub logic implements a hardware bypass
* path around the Translation Buffer Units (TBU). During
* boot-time, the SMMU_BYPASS_CTRL register (which defaults to
* TBU_BYPASS mode) will be used to steer all requests around
* the uninitialized TBUs. During normal operation, this register
* is locked into TBU_BYPASS_SID config, which routes requests
* with special StreamID 0x7f on the bypass path and all others
* through the selected TBU. This is done to disable SMMU Bypass
* mode, as it could be used to circumvent SMMU security checks.
*/
tegra_mc_write_32(MC_SMMU_BYPASS_CONFIG,
MC_SMMU_BYPASS_CONFIG_SETTINGS);
/*
* Re-configure MSS to allow ROC to deal with ordering of the
* Memory Controller traffic. This is needed as the Memory Controller
* boots with MSS having all control, but ROC provides a performance
* boost as compared to MSS.
*/
tegra_memctrl_reconfig_mss_clients();
/* Get the settings from the platform */
assert(plat_mc_settings);
mc_txn_override_cfgs = plat_mc_settings->txn_override_cfg;
num_txn_override_cfgs = plat_mc_settings->num_txn_override_cfgs;
/*
* Set the MC_TXN_OVERRIDE registers for write clients.
*/
if (!tegra_platform_is_silicon() ||
(tegra_platform_is_silicon() && tegra_get_chipid_minor() == 1)) {
if ((tegra_chipid_is_t186()) &&
(!tegra_platform_is_silicon() ||
(tegra_platform_is_silicon() && (tegra_get_chipid_minor() == 1)))) {
/* GPU and NVENC settings for rev. A01 */
/*
* GPU and NVENC settings for Tegra186 simulation and
* Silicon rev. A01
*/
val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR);
val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR,
@ -557,17 +346,85 @@ void tegra_memctrl_setup(void)
} else {
/* settings for rev. A02 */
for (i = 0; i < num_txn_overrides; i++) {
val = tegra_mc_read_32(mc_override_cfgs[i].offset);
/*
* Settings for Tegra186 silicon rev. A02 and onwards.
*/
for (i = 0; i < num_txn_override_cfgs; i++) {
val = tegra_mc_read_32(mc_txn_override_cfgs[i].offset);
val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
tegra_mc_write_32(mc_override_cfgs[i].offset,
val | mc_override_cfgs[i].cgid_tag);
tegra_mc_write_32(mc_txn_override_cfgs[i].offset,
val | mc_txn_override_cfgs[i].cgid_tag);
}
}
}
/*
* Init Memory controller during boot.
*/
void tegra_memctrl_setup(void)
{
uint32_t val;
const uint32_t *mc_streamid_override_regs;
uint32_t num_streamid_override_regs;
const mc_streamid_security_cfg_t *mc_streamid_sec_cfgs;
uint32_t num_streamid_sec_cfgs;
tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings();
uint32_t i;
INFO("Tegra Memory Controller (v2)\n");
#if ENABLE_SMMU_DEVICE
/* Program the SMMU pagesize */
tegra_smmu_init();
#endif
/* Get the settings from the platform */
assert(plat_mc_settings);
mc_streamid_override_regs = plat_mc_settings->streamid_override_cfg;
num_streamid_override_regs = plat_mc_settings->num_streamid_override_cfgs;
mc_streamid_sec_cfgs = plat_mc_settings->streamid_security_cfg;
num_streamid_sec_cfgs = plat_mc_settings->num_streamid_security_cfgs;
/* Program all the Stream ID overrides */
for (i = 0; i < num_streamid_override_regs; i++)
tegra_mc_streamid_write_32(mc_streamid_override_regs[i],
MC_STREAM_ID_MAX);
/* Program the security config settings for all Stream IDs */
for (i = 0; i < num_streamid_sec_cfgs; i++) {
val = mc_streamid_sec_cfgs[i].override_enable << 16 |
mc_streamid_sec_cfgs[i].override_client_inputs << 8 |
mc_streamid_sec_cfgs[i].override_client_ns_flag << 0;
tegra_mc_streamid_write_32(mc_streamid_sec_cfgs[i].offset, val);
}
/*
* All requests at boot time, and certain requests during
* normal run time, are physically addressed and must bypass
* the SMMU. The client hub logic implements a hardware bypass
* path around the Translation Buffer Units (TBU). During
* boot-time, the SMMU_BYPASS_CTRL register (which defaults to
* TBU_BYPASS mode) will be used to steer all requests around
* the uninitialized TBUs. During normal operation, this register
* is locked into TBU_BYPASS_SID config, which routes requests
* with special StreamID 0x7f on the bypass path and all others
* through the selected TBU. This is done to disable SMMU Bypass
* mode, as it could be used to circumvent SMMU security checks.
*/
tegra_mc_write_32(MC_SMMU_BYPASS_CONFIG,
MC_SMMU_BYPASS_CONFIG_SETTINGS);
/*
* Re-configure MSS to allow ROC to deal with ordering of the
* Memory Controller traffic. This is needed as the Memory Controller
* boots with MSS having all control, but ROC provides a performance
* boost as compared to MSS.
*/
tegra_memctrl_reconfig_mss_clients();
/* Program overrides for MC transactions */
tegra_memctrl_set_overrides();
}
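/*
 * Worked example of the Stream ID security packing programmed above in
 * tegra_memctrl_setup(), assuming each field is a single-bit flag:
 */
static inline uint32_t mc_pack_streamid_security(uint32_t enable,
						 uint32_t inputs,
						 uint32_t ns_flag)
{
	return (enable << 16) | (inputs << 8) | (ns_flag << 0);
}
/* mc_pack_streamid_security(1, 1, 1) == 0x00010101 */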
/*
* Restore Memory Controller settings after "System Suspend"
*/
@ -581,6 +438,9 @@ void tegra_memctrl_restore_settings(void)
*/
tegra_memctrl_reconfig_mss_clients();
/* Program overrides for MC transactions */
tegra_memctrl_set_overrides();
/* video memory carveout region */
if (video_mem_base) {
tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO,
@ -658,13 +518,6 @@ void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes)
index += 4)
tegra_mc_write_32(index, 0);
/*
* Allow CPU read/write access to the aperture
*/
tegra_mc_write_32(MC_TZRAM_CARVEOUT_CLIENT_ACCESS_CFG1,
TZRAM_CARVEOUT_CPU_WRITE_ACCESS_BIT |
TZRAM_CARVEOUT_CPU_READ_ACCESS_BIT);
/*
* Set the TZRAM base. TZRAM base must be 4k aligned, at least.
*/
@ -743,3 +596,11 @@ void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes)
*/
mce_update_gsc_videomem();
}
/*
* This feature exists only for v1 of the Tegra Memory Controller.
*/
void tegra_memctrl_disable_ahb_redirection(void)
{
; /* do nothing */
}

View File

@ -0,0 +1,184 @@
/*
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <bl_common.h>
#include <debug.h>
#include <platform_def.h>
#include <smmu.h>
#include <string.h>
#include <tegra_private.h>
/* SMMU IDs currently supported by the driver */
enum {
TEGRA_SMMU0,
TEGRA_SMMU1,
TEGRA_SMMU2
};
static uint32_t tegra_smmu_read_32(uint32_t smmu_id, uint32_t off)
{
#if defined(TEGRA_SMMU0_BASE)
if (smmu_id == TEGRA_SMMU0)
return mmio_read_32(TEGRA_SMMU0_BASE + off);
#endif
#if defined(TEGRA_SMMU1_BASE)
if (smmu_id == TEGRA_SMMU1)
return mmio_read_32(TEGRA_SMMU1_BASE + off);
#endif
#if defined(TEGRA_SMMU2_BASE)
if (smmu_id == TEGRA_SMMU2)
return mmio_read_32(TEGRA_SMMU2_BASE + off);
#endif
return 0;
}
static void tegra_smmu_write_32(uint32_t smmu_id,
uint32_t off, uint32_t val)
{
#if defined(TEGRA_SMMU0_BASE)
if (smmu_id == TEGRA_SMMU0)
mmio_write_32(TEGRA_SMMU0_BASE + off, val);
#endif
#if defined(TEGRA_SMMU1_BASE)
if (smmu_id == TEGRA_SMMU1)
mmio_write_32(TEGRA_SMMU1_BASE + off, val);
#endif
#if defined(TEGRA_SMMU2_BASE)
if (smmu_id == TEGRA_SMMU2)
mmio_write_32(TEGRA_SMMU2_BASE + off, val);
#endif
}
/*
* Save SMMU settings before "System Suspend" to TZDRAM
*/
void tegra_smmu_save_context(uint64_t smmu_ctx_addr)
{
uint32_t i, num_entries = 0;
smmu_regs_t *smmu_ctx_regs;
plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
uint64_t tzdram_base = params_from_bl2->tzdram_base;
uint64_t tzdram_end = tzdram_base + params_from_bl2->tzdram_size;
uint32_t reg_id1, pgshift, cb_size;
/* sanity check SMMU settings c*/
reg_id1 = mmio_read_32((TEGRA_SMMU0_BASE + SMMU_GNSR0_IDR1));
pgshift = (reg_id1 & ID1_PAGESIZE) ? 16 : 12;
cb_size = (2 << pgshift) * \
(1 << (((reg_id1 >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1));
assert(!((pgshift != PGSHIFT) || (cb_size != CB_SIZE)));
assert((smmu_ctx_addr >= tzdram_base) && (smmu_ctx_addr <= tzdram_end));
/* get SMMU context table */
smmu_ctx_regs = plat_get_smmu_ctx();
assert(smmu_ctx_regs);
/*
* smmu_ctx_regs[0].val contains the size of the context table minus
* the last entry. Sanity check the table size before starting
* the context save operation.
*/
while (smmu_ctx_regs[num_entries].val != 0xFFFFFFFFU) {
num_entries++;
}
/* panic if the sizes do not match */
if (num_entries != smmu_ctx_regs[0].val)
panic();
/* save SMMU register values */
for (i = 1; i < num_entries; i++)
smmu_ctx_regs[i].val = mmio_read_32(smmu_ctx_regs[i].reg);
/* increment by 1 to take care of the last entry */
num_entries++;
/* Save SMMU config settings */
memcpy16((void *)(uintptr_t)smmu_ctx_addr, (void *)smmu_ctx_regs,
(sizeof(smmu_regs_t) * num_entries));
/* save the SMMU table address */
mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV11_LO,
(uint32_t)smmu_ctx_addr);
mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV11_HI,
(uint32_t)(smmu_ctx_addr >> 32));
}
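/*
 * The table convention the save loop above relies on, made concrete with a
 * hypothetical table (register names are placeholders): entry 0 carries the
 * entry count in .val, and the table ends with a 0xFFFFFFFF terminator.
 */
static smmu_regs_t sample_smmu_ctx[] = {
	{ .reg = 0,                    .val = 3U          },	/* count */
	{ .reg = SMMU_GSR0_SECURE_ACR, .val = 0U          },	/* saved at runtime */
	{ .reg = SMMU_GNSR_ACR,        .val = 0U          },	/* saved at runtime */
	{ .reg = 0,                    .val = 0xFFFFFFFFU },	/* terminator */
};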
#define SMMU_NUM_CONTEXTS 64
#define SMMU_CONTEXT_BANK_MAX_IDX 64
/*
* Init SMMU during boot or "System Suspend" exit
*/
void tegra_smmu_init(void)
{
uint32_t val, cb_idx, smmu_id, ctx_base;
for (smmu_id = 0; smmu_id < NUM_SMMU_DEVICES; smmu_id++) {
/* Program the SMMU pagesize and reset CACHE_LOCK bit */
val = tegra_smmu_read_32(smmu_id, SMMU_GSR0_SECURE_ACR);
val |= SMMU_GSR0_PGSIZE_64K;
val &= ~SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
tegra_smmu_write_32(smmu_id, SMMU_GSR0_SECURE_ACR, val);
/* reset CACHE LOCK bit for NS Aux. Config. Register */
val = tegra_smmu_read_32(smmu_id, SMMU_GNSR_ACR);
val &= ~SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
tegra_smmu_write_32(smmu_id, SMMU_GNSR_ACR, val);
/* disable TCU prefetch for all contexts */
ctx_base = (SMMU_GSR0_PGSIZE_64K * SMMU_NUM_CONTEXTS)
+ SMMU_CBn_ACTLR;
for (cb_idx = 0; cb_idx < SMMU_CONTEXT_BANK_MAX_IDX; cb_idx++) {
val = tegra_smmu_read_32(smmu_id,
ctx_base + (SMMU_GSR0_PGSIZE_64K * cb_idx));
val &= ~SMMU_CBn_ACTLR_CPRE_BIT;
tegra_smmu_write_32(smmu_id, ctx_base +
(SMMU_GSR0_PGSIZE_64K * cb_idx), val);
}
/* set CACHE LOCK bit for NS Aux. Config. Register */
val = tegra_smmu_read_32(smmu_id, SMMU_GNSR_ACR);
val |= SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
tegra_smmu_write_32(smmu_id, SMMU_GNSR_ACR, val);
/* set CACHE LOCK bit for S Aux. Config. Register */
val = tegra_smmu_read_32(smmu_id, SMMU_GSR0_SECURE_ACR);
val |= SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
tegra_smmu_write_32(smmu_id, SMMU_GSR0_SECURE_ACR, val);
}
}
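/*
 * Context-bank addressing used in the prefetch loop above, under the
 * assumption that SMMU_GSR0_PGSIZE_64K doubles as the 64 KB page-size
 * value (0x10000); bank n's ACTLR then sits at a fixed stride past the
 * global register space:
 */
static inline uint32_t smmu_cbn_actlr_offset(uint32_t cb_idx)
{
	return (SMMU_GSR0_PGSIZE_64K * (SMMU_NUM_CONTEXTS + cb_idx)) +
		SMMU_CBn_ACTLR;
}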

View File

@ -202,9 +202,6 @@ void bl31_early_platform_setup(bl31_params_t *from_bl2,
*/
console_init(tegra_console_base, TEGRA_BOOT_UART_CLK_IN_HZ,
TEGRA_CONSOLE_BAUDRATE);
/* Initialise crash console */
plat_crash_console_init();
}
/*
@ -299,7 +296,16 @@ void bl31_platform_setup(void)
******************************************************************************/
void bl31_plat_runtime_setup(void)
{
; /* do nothing */
/*
* During boot, USB3 and flash media (SDMMC/SATA) devices need
* access to IRAM. Because these clients connect to the MC and
* do not have a direct path to the IRAM, the MC implements AHB
* redirection during boot to allow a path to IRAM. In this mode,
* accesses to a programmed memory address aperture are directed
* to the AHB bus, allowing access to the IRAM. This mode must be
* disabled before we jump to the non-secure world.
*/
tegra_memctrl_disable_ahb_redirection();
}
/*******************************************************************************

View File

@ -28,22 +28,12 @@
# POSSIBILITY OF SUCH DAMAGE.
#
CRASH_REPORTING := 1
$(eval $(call add_define,CRASH_REPORTING))
ASM_ASSERTION := 1
$(eval $(call add_define,ASM_ASSERTION))
USE_COHERENT_MEM := 0
SEPARATE_CODE_AND_RODATA := 1
PLAT_INCLUDES := -Iplat/nvidia/tegra/include/drivers \
-Iplat/nvidia/tegra/include \
-Iplat/nvidia/tegra/include/${TARGET_SOC}
PLAT_BL_COMMON_SOURCES := lib/xlat_tables/xlat_tables_common.c \
lib/xlat_tables/aarch64/xlat_tables.c
include lib/xlat_tables_v2/xlat_tables.mk
PLAT_BL_COMMON_SOURCES += ${XLAT_TABLES_LIB_SRCS}
COMMON_DIR := plat/nvidia/tegra/common

Some files were not shown because too many files have changed in this diff.