diff --git a/Makefile b/Makefile index 9f9061c4a..d0568d917 100644 --- a/Makefile +++ b/Makefile @@ -50,10 +50,14 @@ include ${MAKE_HELPERS_DIRECTORY}build_env.mk # Default values for build configurations, and their dependencies ################################################################################ +ifdef ASM_ASSERTION + $(warning ASM_ASSERTION is removed, use ENABLE_ASSERTIONS instead.) +endif + include ${MAKE_HELPERS_DIRECTORY}defaults.mk -# ASM_ASSERTION enabled for DEBUG builds only -ASM_ASSERTION := ${DEBUG} +# Assertions enabled for DEBUG builds by default +ENABLE_ASSERTIONS := ${DEBUG} ENABLE_PMF := ${ENABLE_RUNTIME_INSTRUMENTATION} PLAT := ${DEFAULT_PLAT} @@ -347,6 +351,11 @@ ifdef BL2_SOURCES endif endif +# If SCP_BL2 is given, we always want FIP to include it. +ifdef SCP_BL2 + NEED_SCP_BL2 := yes +endif + # Process TBB related flags ifneq (${GENERATE_COT},0) # Common cert_create options @@ -434,13 +443,13 @@ endif # Build options checks ################################################################################ -$(eval $(call assert_boolean,ASM_ASSERTION)) $(eval $(call assert_boolean,COLD_BOOT_SINGLE_CPU)) $(eval $(call assert_boolean,CREATE_KEYS)) $(eval $(call assert_boolean,CTX_INCLUDE_AARCH32_REGS)) $(eval $(call assert_boolean,CTX_INCLUDE_FPREGS)) $(eval $(call assert_boolean,DEBUG)) $(eval $(call assert_boolean,DISABLE_PEDANTIC)) +$(eval $(call assert_boolean,ENABLE_ASSERTIONS)) $(eval $(call assert_boolean,ENABLE_PLAT_COMPAT)) $(eval $(call assert_boolean,ENABLE_PMF)) $(eval $(call assert_boolean,ENABLE_PSCI_STAT)) @@ -459,6 +468,7 @@ $(eval $(call assert_boolean,SEPARATE_CODE_AND_RODATA)) $(eval $(call assert_boolean,SPIN_ON_BL1_EXIT)) $(eval $(call assert_boolean,TRUSTED_BOARD_BOOT)) $(eval $(call assert_boolean,USE_COHERENT_MEM)) +$(eval $(call assert_boolean,WARMBOOT_ENABLE_DCACHE_EARLY)) $(eval $(call assert_numeric,ARM_ARCH_MAJOR)) $(eval $(call assert_numeric,ARM_ARCH_MINOR)) @@ -473,10 +483,10 @@ $(eval $(call add_define,ARM_CCI_PRODUCT_ID)) $(eval $(call add_define,ARM_ARCH_MAJOR)) $(eval $(call add_define,ARM_ARCH_MINOR)) $(eval $(call add_define,ARM_GIC_ARCH)) -$(eval $(call add_define,ASM_ASSERTION)) $(eval $(call add_define,COLD_BOOT_SINGLE_CPU)) $(eval $(call add_define,CTX_INCLUDE_AARCH32_REGS)) $(eval $(call add_define,CTX_INCLUDE_FPREGS)) +$(eval $(call add_define,ENABLE_ASSERTIONS)) $(eval $(call add_define,ENABLE_PLAT_COMPAT)) $(eval $(call add_define,ENABLE_PMF)) $(eval $(call add_define,ENABLE_PSCI_STAT)) @@ -496,6 +506,7 @@ $(eval $(call add_define,SPD_${SPD})) $(eval $(call add_define,SPIN_ON_BL1_EXIT)) $(eval $(call add_define,TRUSTED_BOARD_BOOT)) $(eval $(call add_define,USE_COHERENT_MEM)) +$(eval $(call add_define,WARMBOOT_ENABLE_DCACHE_EARLY)) # Define the EL3_PAYLOAD_BASE flag only if it is provided. 
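The bl1_fwu.c hunk above replaces a bare SMC_RET0(handle) on the unrecognised-SMC path with SMC_RET1(handle, SMC_UNK), so the caller receives an explicit "unknown SMC" error code instead of an undefined return value. A minimal sketch of that pattern, assuming the surrounding file's existing includes (smcc_helpers.h for SMC_RET1, smcc.h for SMC_UNK, debug.h for WARN); the handler name and its reduced argument list are hypothetical and not the real bl1_fwu_smc_handler signature:

    /* Hypothetical, simplified handler; SMC_RET1 and SMC_UNK are the real macros. */
    static register_t example_smc_handler(unsigned int smc_fid, void *handle)
    {
        switch (smc_fid) {
        /* ... recognised function IDs would be handled here ... */
        default:
            WARN("Unimplemented SMC Call: 0x%x\n", smc_fid);
            /* Report the SMC as unknown rather than returning no value. */
            SMC_RET1(handle, SMC_UNK);
        }
    }

The success path of such handlers can use the SMC_OK value that this patch adds to include/lib/smcc.h alongside SMC_UNK.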
ifdef EL3_PAYLOAD_BASE @@ -541,6 +552,10 @@ $(if ${BL2}, $(eval $(call MAKE_TOOL_ARGS,2,${BL2},tb-fw)),\ $(eval $(call MAKE_BL,2,tb-fw))) endif +ifeq (${NEED_SCP_BL2},yes) +$(eval $(call FIP_ADD_IMG,SCP_BL2,--scp-fw)) +endif + ifeq (${NEED_BL31},yes) BL31_SOURCES += ${SPD_SOURCES} $(if ${BL31}, $(eval $(call MAKE_TOOL_ARGS,31,${BL31},soc-fw)),\ diff --git a/bl1/bl1_fwu.c b/bl1/bl1_fwu.c index f7fae6823..e2ede6817 100644 --- a/bl1/bl1_fwu.c +++ b/bl1/bl1_fwu.c @@ -109,7 +109,7 @@ register_t bl1_fwu_smc_handler(unsigned int smc_fid, break; } - SMC_RET0(handle); + SMC_RET1(handle, SMC_UNK); } /******************************************************************************* diff --git a/bl1/bl1_main.c b/bl1/bl1_main.c index 90c06afef..ebeb39eac 100644 --- a/bl1/bl1_main.c +++ b/bl1/bl1_main.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -113,7 +114,7 @@ void bl1_main(void) print_errata_status(); -#if DEBUG +#if ENABLE_ASSERTIONS u_register_t val; /* * Ensure that MMU/Caches and coherency are turned on @@ -140,7 +141,7 @@ void bl1_main(void) assert(CACHE_WRITEBACK_GRANULE == SIZE_FROM_LOG2_WORDS(val)); else assert(CACHE_WRITEBACK_GRANULE <= MAX_CACHE_LINE_SIZE); -#endif +#endif /* ENABLE_ASSERTIONS */ /* Perform remaining generic architectural setup from EL3 */ bl1_arch_setup(); @@ -166,6 +167,8 @@ void bl1_main(void) NOTICE("BL1-FWU: *******FWU Process Started*******\n"); bl1_prepare_next_image(image_id); + + console_flush(); } /******************************************************************************* diff --git a/bl2/bl2_image_load_v2.c b/bl2/bl2_image_load_v2.c index 4fab65563..05c0fcd27 100644 --- a/bl2/bl2_image_load_v2.c +++ b/bl2/bl2_image_load_v2.c @@ -109,6 +109,10 @@ entry_point_info_t *bl2_load_images(void) assert(bl2_to_next_bl_params->head); assert(bl2_to_next_bl_params->h.type == PARAM_BL_PARAMS); assert(bl2_to_next_bl_params->h.version >= VERSION_2); + assert(bl2_to_next_bl_params->head->ep_info); + + /* Populate arg0 for the next BL image */ + bl2_to_next_bl_params->head->ep_info->args.arg0 = (u_register_t)bl2_to_next_bl_params; /* Flush the parameters to be passed to next image */ plat_flush_next_bl_params(); diff --git a/bl2/bl2_main.c b/bl2/bl2_main.c index 514c00533..d187f2e34 100644 --- a/bl2/bl2_main.c +++ b/bl2/bl2_main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include "bl2_private.h" @@ -69,6 +70,8 @@ void bl2_main(void) disable_mmu_icache_secure(); #endif /* AARCH32 */ + console_flush(); + /* * Run next BL image via an SMC to BL1. Information on how to pass * control to the BL32 (if present) and BL33 software images will diff --git a/bl2u/bl2u_main.c b/bl2u/bl2u_main.c index 515ddfb73..3ed5be73d 100644 --- a/bl2u/bl2u_main.c +++ b/bl2u/bl2u_main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -63,6 +64,8 @@ void bl2u_main(void) /* Perform platform setup in BL2U after loading SCP_BL2U */ bl2u_platform_setup(); + console_flush(); + /* * Indicate that BL2U is done and resume back to * normal world via an SMC to BL1. diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S index 623832900..a847ae32d 100644 --- a/bl31/aarch64/bl31_entrypoint.S +++ b/bl31/aarch64/bl31_entrypoint.S @@ -185,26 +185,27 @@ func bl31_warm_entrypoint * * The PSCI implementation invokes platform routines that enable CPUs to * participate in coherency. On a system where CPUs are not - * cache-coherent out of reset, having caches enabled until such time - * might lead to coherency issues (resulting from stale data getting - * speculatively fetched, among others). Therefore we keep data caches - * disabled while enabling the MMU, thereby forcing data accesses to - * have non-cacheable, nGnRnE attributes (these will always be coherent - * with main memory). + * cache-coherent without appropriate platform specific programming, + * having caches enabled until such time might lead to coherency issues + * (resulting from stale data getting speculatively fetched, among + * others). Therefore we keep data caches disabled even after enabling + * the MMU for such platforms. * - * On systems with hardware-assisted coherency, where CPUs are expected - * to be cache-coherent out of reset without needing explicit software - * intervention, PSCI need not invoke platform routines to enter - * coherency (as CPUs already are); and there's no reason to have caches - * disabled either. + * On systems with hardware-assisted coherency, or on single cluster + * platforms, such platform specific programming is not required to + * enter coherency (as CPUs already are); and there's no reason to have + * caches disabled either. */ -#if HW_ASSISTED_COHERENCY - mov x0, #0 -#else mov x0, #DISABLE_DCACHE -#endif bl bl31_plat_enable_mmu +#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY + mrs x0, sctlr_el3 + orr x0, x0, #SCTLR_C_BIT + msr sctlr_el3, x0 + isb +#endif + bl psci_warmboot_entrypoint #if ENABLE_RUNTIME_INSTRUMENTATION diff --git a/bl31/aarch64/crash_reporting.S b/bl31/aarch64/crash_reporting.S index 8e603862f..c6d5c6c38 100644 --- a/bl31/aarch64/crash_reporting.S +++ b/bl31/aarch64/crash_reporting.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -349,6 +349,8 @@ func do_crash_reporting /* Print some platform registers */ plat_crash_print_regs + bl plat_crash_console_flush + /* Done reporting */ no_ret plat_panic_handler endfunc do_crash_reporting diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c index 85b3ea1e5..c74b72b71 100644 --- a/bl31/bl31_main.c +++ b/bl31/bl31_main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -129,6 +130,8 @@ void bl31_main(void) */ bl31_prepare_next_image_entry(); + console_flush(); + /* * Perform any platform specific runtime setup prior to cold boot exit * from BL31 diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S index c7f60b589..477b55b4e 100644 --- a/bl32/sp_min/aarch32/entrypoint.S +++ b/bl32/sp_min/aarch32/entrypoint.S @@ -236,24 +236,27 @@ func sp_min_warm_entrypoint * * The PSCI implementation invokes platform routines that enable CPUs to * participate in coherency. On a system where CPUs are not - * cache-coherent out of reset, having caches enabled until such time - * might lead to coherency issues (resulting from stale data getting - * speculatively fetched, among others). Therefore we keep data caches - * disabled while enabling the MMU, thereby forcing data accesses to - * have non-cacheable, nGnRnE attributes (these will always be coherent - * with main memory). + * cache-coherent without appropriate platform specific programming, + * having caches enabled until such time might lead to coherency issues + * (resulting from stale data getting speculatively fetched, among + * others). Therefore we keep data caches disabled even after enabling + * the MMU for such platforms. * - * On systems where CPUs are cache-coherent out of reset, however, PSCI - * need not invoke platform routines to enter coherency (as CPUs already - * are), and there's no reason to have caches disabled either. + * On systems with hardware-assisted coherency, or on single cluster + * platforms, such platform specific programming is not required to + * enter coherency (as CPUs already are); and there's no reason to have + * caches disabled either. */ -#if HW_ASSISTED_COHERENCY - mov r0, #0 -#else mov r0, #DISABLE_DCACHE -#endif bl bl32_plat_enable_mmu +#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY + ldcopr r0, SCTLR + orr r0, r0, #SCTLR_C_BIT + stcopr r0, SCTLR + isb +#endif + bl sp_min_warm_boot /* Program the registers in cpu_context and exit monitor mode */ diff --git a/common/aarch32/debug.S b/common/aarch32/debug.S index ecf9faffa..adb6dc3ee 100644 --- a/common/aarch32/debug.S +++ b/common/aarch32/debug.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -70,9 +70,12 @@ func do_panic /* Print new line */ ldr r4, =panic_end bl asm_print_str + + bl plat_crash_console_flush + 1: mov lr, r6 - b plat_panic_handler + no_ret plat_panic_handler endfunc do_panic /*********************************************************** @@ -87,7 +90,7 @@ func report_exception no_ret plat_panic_handler endfunc report_exception -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS .section .rodata.assert_str, "aS" assert_msg1: .asciz "ASSERT: File " @@ -104,6 +107,11 @@ assert_msg2: * --------------------------------------------------------------------------- */ func asm_assert +#if LOG_LEVEL >= LOG_LEVEL_INFO + /* + * Only print the output if LOG_LEVEL is higher or equal to + * LOG_LEVEL_INFO, which is the default value for builds with DEBUG=1. 
+ */ /* Stash the parameters already in r0 and r1 */ mov r5, r0 mov r6, r1 @@ -140,10 +148,14 @@ dec_print_loop: udiv r5, r5, r6 /* Reduce divisor */ cmp r5, #0 bne dec_print_loop + + bl plat_crash_console_flush + 1: +#endif /* LOG_LEVEL >= LOG_LEVEL_INFO */ no_ret plat_panic_handler endfunc asm_assert -#endif +#endif /* ENABLE_ASSERTIONS */ /* * This function prints a string from address in r4 diff --git a/common/aarch64/debug.S b/common/aarch64/debug.S index 9dd53ca96..cdb4ec6cd 100644 --- a/common/aarch64/debug.S +++ b/common/aarch64/debug.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -41,7 +41,7 @@ /* The offset to add to get ascii for numerals '0 - 9' */ #define ASCII_OFFSET_NUM 0x30 -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS .section .rodata.assert_str, "aS" assert_msg1: .asciz "ASSERT: File " @@ -78,6 +78,11 @@ dec_print_loop: * --------------------------------------------------------------------------- */ func asm_assert +#if LOG_LEVEL >= LOG_LEVEL_INFO + /* + * Only print the output if LOG_LEVEL is higher or equal to + * LOG_LEVEL_INFO, which is the default value for builds with DEBUG=1. + */ mov x5, x0 mov x6, x1 /* Ensure the console is initialized */ @@ -96,10 +101,12 @@ func asm_assert b.ne _assert_loop mov x4, x6 asm_print_line_dec + bl plat_crash_console_flush _assert_loop: - b _assert_loop +#endif /* LOG_LEVEL >= LOG_LEVEL_INFO */ + no_ret plat_panic_handler endfunc asm_assert -#endif +#endif /* ENABLE_ASSERTIONS */ /* * This function prints a string from address in x4. @@ -187,6 +194,8 @@ el3_panic: sub x4, x4, #4 bl asm_print_hex + bl plat_crash_console_flush + _panic_handler: /* Pass to plat_panic_handler the address from where el3_panic was * called, not the address of the call from el3_panic. */ diff --git a/common/desc_image_load.c b/common/desc_image_load.c index a9762b717..52ef362b3 100644 --- a/common/desc_image_load.c +++ b/common/desc_image_load.c @@ -47,8 +47,11 @@ static bl_params_t next_bl_params; ******************************************************************************/ void flush_bl_params_desc(void) { - flush_dcache_range((unsigned long)bl_mem_params_desc_ptr, + flush_dcache_range((uintptr_t)bl_mem_params_desc_ptr, sizeof(*bl_mem_params_desc_ptr) * bl_mem_params_desc_num); + + flush_dcache_range((uintptr_t)&next_bl_params, + sizeof(next_bl_params)); } /******************************************************************************* @@ -209,12 +212,5 @@ bl_params_t *get_next_bl_params_from_mem_params_desc(void) /* Invalid image is expected to terminate the loop */ assert(img_id == INVALID_IMAGE_ID); - /* Populate arg0 for the next BL image */ - next_bl_params.head->ep_info->args.arg0 = (unsigned long)&next_bl_params; - - /* Flush the parameters to be passed to the next BL image */ - flush_dcache_range((unsigned long)&next_bl_params, - sizeof(next_bl_params)); - return &next_bl_params; } diff --git a/docs/porting-guide.md b/docs/porting-guide.md index 690f307aa..0189ec407 100644 --- a/docs/porting-guide.md +++ b/docs/porting-guide.md @@ -2242,6 +2242,17 @@ designated crash console. It must only use general purpose registers x1 and x2 to do its work. The parameter and the return value are in general purpose register x0. 
+### Function : plat_crash_console_flush + + Argument : void + Return : int + +This API is used by the crash reporting mechanism to force write of all buffered +data on the designated crash console. It should only use general purpose +registers x0 and x1 to do its work. The return value is 0 on successful +completion; otherwise the return value is -1. + + 4. Build flags --------------- diff --git a/docs/user-guide.md b/docs/user-guide.md index a1df96524..3061fb960 100644 --- a/docs/user-guide.md +++ b/docs/user-guide.md @@ -203,11 +203,6 @@ performed. in MPIDR is set and access the bit-fields in MPIDR accordingly. Default value of this flag is 0. -* `ASM_ASSERTION`: This flag determines whether the assertion checks within - assembly source files are enabled or not. This option defaults to the - value of `DEBUG` - that is, by default this is only enabled for a debug - build of the firmware. - * `BL2`: This is an optional build option which specifies the path to BL2 image for the `fip` target. In this case, the BL2 in the ARM Trusted Firmware will not be built. @@ -286,6 +281,14 @@ performed. payload. Please refer to the "Booting an EL3 payload" section for more details. +* `ENABLE_ASSERTIONS`: This option controls whether or not calls to `assert()` + are compiled out. For debug builds, this option defaults to 1, and calls to + `assert()` are left in place. For release builds, this option defaults to 0 + and calls to `assert()` function are compiled out. This option can be set + independently of `DEBUG`. It can also be used to hide any auxiliary code + that is only required for the assertion and does not fit in the assertion + itself. + * `ENABLE_PMF`: Boolean option to enable support for optional Performance Measurement Framework(PMF). Default is 0. @@ -349,7 +352,8 @@ performed. initiate the operations, and the rest is managed in hardware, minimizing active software management. In such systems, this boolean option enables ARM Trusted Firmware to carry out build and run-time optimizations during boot - and power management operations. This option defaults to 0. + and power management operations. This option defaults to 0 and if it is + enabled, then it implies `WARMBOOT_ENABLE_DCACHE_EARLY` is also enabled. * `LOAD_IMAGE_V2`: Boolean option to enable support for new version (v2) of image loading, which provides more flexibility and scalability around what @@ -508,6 +512,12 @@ performed. to a string formed by concatenating the version number, build type and build string. +* `WARMBOOT_ENABLE_DCACHE_EARLY` : Boolean option to enable D-cache early on + the CPU after warm boot. This is applicable for platforms which do not + require interconnect programming to enable cache coherency (eg: single + cluster platforms). If this option is enabled, then warm boot path + enables D-caches immediately after enabling MMU. This option defaults to 0. + #### ARM development platform specific build options * `ARM_BL31_IN_DRAM`: Boolean option to select loading of BL31 in TZC secured @@ -568,6 +578,10 @@ performed. - `tdram` : Trusted DRAM (if available) - `dram` : Secure region in DRAM (configured by the TrustZone controller) +* `ARM_XLAT_TABLES_LIB_V1`: boolean option to compile the Trusted Firmware + with version 1 of the translation tables library instead of version 2. It is + set to 0 by default, which selects version 2. + For a better understanding of these options, the ARM development platform memory map is explained in the [Firmware Design]. 
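The ENABLE_ASSERTIONS option documented above is also what gates the driver-side validation helpers further down in this patch (for example validate_cci_map() in cci.c and the DMC-500 checks), replacing the previous #if DEBUG guards. A minimal sketch of that pattern, assuming the build system supplies ENABLE_ASSERTIONS as a 0/1 define the way this Makefile does; the foo names are illustrative only and not part of the patch:

    #include <assert.h>

    #if ENABLE_ASSERTIONS
    /*
     * Auxiliary code that exists only to back an assertion. With
     * ENABLE_ASSERTIONS=0 the assert() below compiles to nothing, so this
     * helper would be dead code and is compiled out together with it.
     */
    static int validate_foo_map(const int *map, unsigned int count)
    {
        unsigned int i;

        for (i = 0U; i < count; i++) {
            if (map[i] < 0)
                return 0;
        }
        return 1;
    }
    #endif /* ENABLE_ASSERTIONS */

    void foo_init(const int *map, unsigned int count)
    {
        assert(validate_foo_map(map, count));

        /* ... normal initialisation continues here ... */
    }

With the assert.h change later in this patch, assert() expands to a real check whenever ENABLE_ASSERTIONS=1, even in release (DEBUG=0) builds, which is why the guard is no longer #if DEBUG.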
diff --git a/drivers/arm/cci/cci.c b/drivers/arm/cci/cci.c index 2e773a98b..0fcec850c 100644 --- a/drivers/arm/cci/cci.c +++ b/drivers/arm/cci/cci.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -39,7 +39,7 @@ static uintptr_t g_cci_base; static unsigned int g_max_master_id; static const int *g_cci_slave_if_map; -#if DEBUG +#if ENABLE_ASSERTIONS static int validate_cci_map(const int *map) { unsigned int valid_cci_map = 0; @@ -54,26 +54,25 @@ static int validate_cci_map(const int *map) continue; if (slave_if_id >= CCI_SLAVE_INTERFACE_COUNT) { - tf_printf("Slave interface ID is invalid\n"); + ERROR("Slave interface ID is invalid\n"); return 0; } if (valid_cci_map & (1 << slave_if_id)) { - tf_printf("Multiple masters are assigned same" - " slave interface ID\n"); + ERROR("Multiple masters are assigned same slave interface ID\n"); return 0; } valid_cci_map |= 1 << slave_if_id; } if (!valid_cci_map) { - tf_printf("No master is assigned a valid slave interface\n"); + ERROR("No master is assigned a valid slave interface\n"); return 0; } return 1; } -#endif /* DEBUG */ +#endif /* ENABLE_ASSERTIONS */ void cci_init(uintptr_t cci_base, const int *map, diff --git a/drivers/arm/ccn/ccn.c b/drivers/arm/ccn/ccn.c index ca0618274..16c8f607a 100644 --- a/drivers/arm/ccn/ccn.c +++ b/drivers/arm/ccn/ccn.c @@ -81,7 +81,7 @@ static inline void ccn_reg_write(uintptr_t periphbase, mmio_write_64(region_base + register_offset, value); } -#if DEBUG +#if ENABLE_ASSERTIONS typedef struct rn_info { unsigned char node_desc[MAX_RN_NODES]; @@ -224,7 +224,7 @@ static void ccn_validate_plat_params(const ccn_desc_t *plat_desc) info.node_desc[node_id]--; } } -#endif /* DEBUG */ +#endif /* ENABLE_ASSERTIONS */ /******************************************************************************* * This function validates parameters passed by the platform (in a debug build) @@ -234,7 +234,7 @@ static void ccn_validate_plat_params(const ccn_desc_t *plat_desc) ******************************************************************************/ void ccn_init(const ccn_desc_t *plat_desc) { -#if DEBUG +#if ENABLE_ASSERTIONS ccn_validate_plat_params(plat_desc); #endif diff --git a/drivers/arm/pl011/aarch32/pl011_console.S b/drivers/arm/pl011/aarch32/pl011_console.S index 5b7352830..6c4046a67 100644 --- a/drivers/arm/pl011/aarch32/pl011_console.S +++ b/drivers/arm/pl011/aarch32/pl011_console.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -40,6 +40,7 @@ .globl console_core_init .globl console_core_putc .globl console_core_getc + .globl console_core_flush /* ----------------------------------------------- @@ -158,3 +159,29 @@ getc_error: mov r0, #-1 bx lr endfunc console_core_getc + + /* --------------------------------------------- + * int console_core_flush(uintptr_t base_addr) + * Function to force a write of all buffered + * data that hasn't been output. + * In : r0 - console base address + * Out : return -1 on error else return 0. 
+ * Clobber list : r0, r1 + * --------------------------------------------- + */ +func console_core_flush + cmp r0, #0 + beq flush_error + +1: + /* Loop while the transmit FIFO is busy */ + ldr r1, [r0, #UARTFR] + tst r1, #PL011_UARTFR_BUSY + bne 1b + + mov r0, #0 + bx lr +flush_error: + mov r0, #-1 + bx lr +endfunc console_core_flush diff --git a/drivers/arm/pl011/aarch64/pl011_console.S b/drivers/arm/pl011/aarch64/pl011_console.S index 11e3df777..110300891 100644 --- a/drivers/arm/pl011/aarch64/pl011_console.S +++ b/drivers/arm/pl011/aarch64/pl011_console.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -41,6 +41,7 @@ .globl console_core_init .globl console_core_putc .globl console_core_getc + .globl console_core_flush /* ----------------------------------------------- @@ -151,3 +152,27 @@ getc_error: mov w0, #-1 ret endfunc console_core_getc + + /* --------------------------------------------- + * int console_core_flush(uintptr_t base_addr) + * Function to force a write of all buffered + * data that hasn't been output. + * In : x0 - console base address + * Out : return -1 on error else return 0. + * Clobber list : x0, x1 + * --------------------------------------------- + */ +func console_core_flush + cbz x0, flush_error + +1: + /* Loop until the transmit FIFO is empty */ + ldr w1, [x0, #UARTFR] + tbnz w1, #PL011_UARTFR_BUSY_BIT, 1b + + mov w0, #0 + ret +flush_error: + mov w0, #-1 + ret +endfunc console_core_flush diff --git a/drivers/arm/tzc/tzc400.c b/drivers/arm/tzc/tzc400.c index ca088c32d..8c6f8ba6b 100644 --- a/drivers/arm/tzc/tzc400.c +++ b/drivers/arm/tzc/tzc400.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -33,7 +33,7 @@ #include #include #include -#include "tzc_common_private.c" +#include "tzc_common_private.h" /* * Macros which will be used by common core functions. diff --git a/drivers/arm/tzc/tzc_common_private.c b/drivers/arm/tzc/tzc_common_private.h similarity index 95% rename from drivers/arm/tzc/tzc_common_private.c rename to drivers/arm/tzc/tzc_common_private.h index 8b1ddf498..ee278ec92 100644 --- a/drivers/arm/tzc/tzc_common_private.c +++ b/drivers/arm/tzc/tzc_common_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -28,6 +28,9 @@ * POSSIBILITY OF SUCH DAMAGE. 
*/ +#ifndef __TZC_COMMON_PRIVATE_H__ +#define __TZC_COMMON_PRIVATE_H__ + #include #include #include @@ -190,8 +193,9 @@ nsaid_permissions); \ } -#if DEBUG -static unsigned int _tzc_read_peripheral_id(uintptr_t base) +#if ENABLE_ASSERTIONS + +static inline unsigned int _tzc_read_peripheral_id(uintptr_t base) { unsigned int id; @@ -203,7 +207,7 @@ static unsigned int _tzc_read_peripheral_id(uintptr_t base) } #ifdef AARCH32 -static unsigned long long _tzc_get_max_top_addr(int addr_width) +static inline unsigned long long _tzc_get_max_top_addr(int addr_width) { /* * Assume at least 32 bit wide address and initialize the max. @@ -232,4 +236,6 @@ static unsigned long long _tzc_get_max_top_addr(int addr_width) (UINT64_MAX >> (64 - (addr_width))) #endif /* AARCH32 */ -#endif +#endif /* ENABLE_ASSERTIONS */ + +#endif /* __TZC_COMMON_PRIVATE_H__ */ diff --git a/drivers/arm/tzc/tzc_dmc500.c b/drivers/arm/tzc/tzc_dmc500.c index 24e587c16..d696dfd9b 100644 --- a/drivers/arm/tzc/tzc_dmc500.c +++ b/drivers/arm/tzc/tzc_dmc500.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -33,7 +33,7 @@ #include #include #include "tzc_common.h" -#include "tzc_common_private.c" +#include "tzc_common_private.h" /* * Macros which will be used by common core functions. @@ -257,7 +257,7 @@ void tzc_dmc500_set_action(tzc_action_t action) static void validate_plat_driver_data( const tzc_dmc500_driver_data_t *plat_driver_data) { -#if DEBUG +#if ENABLE_ASSERTIONS int i; unsigned int dmc_id; uintptr_t dmc_base; @@ -273,7 +273,7 @@ static void validate_plat_driver_data( dmc_id = _tzc_read_peripheral_id(dmc_base); assert(dmc_id == DMC500_PERIPHERAL_ID); } -#endif /* DEBUG */ +#endif /* ENABLE_ASSERTIONS */ } diff --git a/drivers/cadence/uart/aarch64/cdns_console.S b/drivers/cadence/uart/aarch64/cdns_console.S index 2c7960d87..e16646e81 100644 --- a/drivers/cadence/uart/aarch64/cdns_console.S +++ b/drivers/cadence/uart/aarch64/cdns_console.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -31,9 +31,10 @@ #include #include - .globl console_core_init - .globl console_core_putc - .globl console_core_getc + .globl console_core_init + .globl console_core_putc + .globl console_core_getc + .globl console_core_flush /* ----------------------------------------------- * int console_core_init(unsigned long base_addr, @@ -125,3 +126,18 @@ getc_error: mov w0, #-1 ret endfunc console_core_getc + + /* --------------------------------------------- + * int console_core_flush(uintptr_t base_addr) + * Function to force a write of all buffered + * data that hasn't been output. + * In : x0 - console base address + * Out : return -1 on error else return 0. 
+ * Clobber list : x0, x1 + * --------------------------------------------- + */ +func console_core_flush + /* Placeholder */ + mov w0, #0 + ret +endfunc console_core_flush diff --git a/drivers/console/aarch32/console.S b/drivers/console/aarch32/console.S index 29933452b..6f85a21f0 100644 --- a/drivers/console/aarch32/console.S +++ b/drivers/console/aarch32/console.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -33,6 +33,7 @@ .globl console_uninit .globl console_putc .globl console_getc + .globl console_flush /* * The console base is in the data section and not in .bss @@ -112,3 +113,18 @@ func console_getc ldr r0, [r1] b console_core_getc endfunc console_getc + + /* --------------------------------------------- + * int console_flush(void) + * Function to force a write of all buffered + * data that hasn't been output. It returns 0 + * upon successful completion, otherwise it + * returns -1. + * Clobber list : r0, r1 + * --------------------------------------------- + */ +func console_flush + ldr r1, =console_base + ldr r0, [r1] + b console_core_flush +endfunc console_flush diff --git a/drivers/console/aarch32/skeleton_console.S b/drivers/console/aarch32/skeleton_console.S index 383874e6c..0b60bc750 100644 --- a/drivers/console/aarch32/skeleton_console.S +++ b/drivers/console/aarch32/skeleton_console.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -38,6 +38,7 @@ .globl console_core_init .globl console_core_putc .globl console_core_getc + .globl console_core_flush /* ----------------------------------------------- * int console_core_init(uintptr_t base_addr, @@ -109,3 +110,23 @@ getc_error: mov r0, #-1 bx lr endfunc console_core_getc + + /* --------------------------------------------- + * int console_core_flush(uintptr_t base_addr) + * Function to force a write of all buffered + * data that hasn't been output. + * In : r0 - console base address + * Out : return -1 on error else return 0. + * Clobber list : r0, r1 + * --------------------------------------------- + */ +func console_core_flush + cmp r0, #0 + beq flush_error + /* Insert implementation here */ + mov r0, #0 + bx lr +flush_error: + mov r0, #-1 + bx lr +endfunc console_core_flush diff --git a/drivers/console/aarch64/console.S b/drivers/console/aarch64/console.S index bdd5f4c37..cd6579cfe 100644 --- a/drivers/console/aarch64/console.S +++ b/drivers/console/aarch64/console.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -33,6 +33,7 @@ .globl console_uninit .globl console_putc .globl console_getc + .globl console_flush /* * The console base is in the data section and not in .bss @@ -111,3 +112,18 @@ func console_getc ldr x0, [x1, :lo12:console_base] b console_core_getc endfunc console_getc + + /* --------------------------------------------- + * int console_flush(void) + * Function to force a write of all buffered + * data that hasn't been output. It returns 0 + * upon successful completion, otherwise it + * returns -1. + * Clobber list : x0, x1 + * --------------------------------------------- + */ +func console_flush + adrp x1, console_base + ldr x0, [x1, :lo12:console_base] + b console_core_flush +endfunc console_flush diff --git a/drivers/console/aarch64/skeleton_console.S b/drivers/console/aarch64/skeleton_console.S index 1583ee7d1..01a426726 100644 --- a/drivers/console/aarch64/skeleton_console.S +++ b/drivers/console/aarch64/skeleton_console.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -38,6 +38,7 @@ .globl console_core_init .globl console_core_putc .globl console_core_getc + .globl console_core_flush /* ----------------------------------------------- * int console_core_init(uintptr_t base_addr, @@ -104,3 +105,22 @@ getc_error: mov w0, #-1 ret endfunc console_core_getc + + /* --------------------------------------------- + * int console_core_flush(uintptr_t base_addr) + * Function to force a write of all buffered + * data that hasn't been output. + * In : x0 - console base address + * Out : return -1 on error else return 0. + * Clobber list : x0, x1 + * --------------------------------------------- + */ +func console_core_flush + cbz x0, flush_error + /* Insert implementation here */ + mov w0, #0 + ret +flush_error: + mov w0, #-1 + ret +endfunc console_core_flush diff --git a/drivers/io/io_storage.c b/drivers/io/io_storage.c index 7cb1a6aa2..de8c3bf63 100644 --- a/drivers/io/io_storage.c +++ b/drivers/io/io_storage.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -51,8 +51,8 @@ static const io_dev_info_t *devices[MAX_IO_DEVICES]; /* Number of currently registered devices */ static unsigned int dev_count; - -#if DEBUG /* Extra validation functions only used in debug builds */ +/* Extra validation functions only used when asserts are enabled */ +#if ENABLE_ASSERTIONS /* Return a boolean value indicating whether a device connector is valid */ static int is_valid_dev_connector(const io_dev_connector_t *dev_con) @@ -89,7 +89,8 @@ static int is_valid_seek_mode(io_seek_mode_t mode) return ((mode != IO_SEEK_INVALID) && (mode < IO_SEEK_MAX)); } -#endif /* End of debug-only validation functions */ +#endif /* ENABLE_ASSERTIONS */ +/* End of extra validation functions only used when asserts are enabled */ /* Open a connection to a specific device */ diff --git a/drivers/ti/uart/aarch64/16550_console.S b/drivers/ti/uart/aarch64/16550_console.S index 846648290..1b9cab8ea 100644 --- a/drivers/ti/uart/aarch64/16550_console.S +++ b/drivers/ti/uart/aarch64/16550_console.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -35,6 +35,7 @@ .globl console_core_init .globl console_core_putc .globl console_core_getc + .globl console_core_flush /* ----------------------------------------------- * int console_core_init(unsigned long base_addr, @@ -114,9 +115,6 @@ func console_core_putc b.ne 1b mov w2, #0xD /* '\r' */ str w2, [x1, #UARTTX] - ldr w2, [x1, #UARTFCR] - orr w2, w2, #UARTFCR_TXCLR - str w2, [x1, #UARTFCR] /* Check if the transmit FIFO is full */ 2: ldr w2, [x1, #UARTLSR] @@ -124,9 +122,6 @@ func console_core_putc cmp w2, #(UARTLSR_TEMT | UARTLSR_THRE) b.ne 2b str w0, [x1, #UARTTX] - ldr w2, [x1, #UARTFCR] - orr w2, w2, #UARTFCR_TXCLR - str w2, [x1, #UARTFCR] ret putc_error: mov w0, #-1 @@ -153,3 +148,18 @@ getc_error: mov w0, #-1 ret endfunc console_core_getc + + /* --------------------------------------------- + * int console_core_flush(uintptr_t base_addr) + * Function to force a write of all buffered + * data that hasn't been output. + * In : x0 - console base address + * Out : return -1 on error else return 0. + * Clobber list : x0, x1 + * --------------------------------------------- + */ +func console_core_flush + /* Placeholder */ + mov w0, #0 + ret +endfunc console_core_flush diff --git a/include/common/aarch32/asm_macros.S b/include/common/aarch32/asm_macros.S index 45023a0bb..7b141da6c 100644 --- a/include/common/aarch32/asm_macros.S +++ b/include/common/aarch32/asm_macros.S @@ -134,4 +134,37 @@ .space SPINLOCK_ASM_SIZE .endm + /* + * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l` + * and the top 32 bits of `_val` into `_reg_h`. If either the bottom + * or top word of `_val` is zero, the corresponding OR operation + * is skipped. + */ + .macro orr64_imm _reg_l, _reg_h, _val + .if (\_val >> 32) + orr \_reg_h, \_reg_h, #(\_val >> 32) + .endif + .if (\_val & 0xffffffff) + orr \_reg_l, \_reg_l, #(\_val & 0xffffffff) + .endif + .endm + + /* + * Helper macro to bitwise-clear bits in `_reg_l` and + * `_reg_h` given a 64 bit immediate `_val`. 
The set bits + * in the bottom word of `_val` dictate which bits from + * `_reg_l` should be cleared. Similarly, the set bits in + * the top word of `_val` dictate which bits from `_reg_h` + * should be cleared. If either the bottom or top word of + * `_val` is zero, the corresponding BIC operation is skipped. + */ + .macro bic64_imm _reg_l, _reg_h, _val + .if (\_val >> 32) + bic \_reg_h, \_reg_h, #(\_val >> 32) + .endif + .if (\_val & 0xffffffff) + bic \_reg_l, \_reg_l, #(\_val & 0xffffffff) + .endif + .endm + #endif /* __ASM_MACROS_S__ */ diff --git a/include/common/aarch32/el3_common_macros.S b/include/common/aarch32/el3_common_macros.S index d7e0b3f5d..915820350 100644 --- a/include/common/aarch32/el3_common_macros.S +++ b/include/common/aarch32/el3_common_macros.S @@ -148,7 +148,7 @@ _init_memory, _init_c_runtime, _exception_vectors /* Make sure we are in Secure Mode */ -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS ldcopr r0, SCR tst r0, #SCR_NS_BIT ASM_ASSERT(eq) diff --git a/include/drivers/console.h b/include/drivers/console.h index 69ad0bd74..e6e3a1cb1 100644 --- a/include/drivers/console.h +++ b/include/drivers/console.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -38,6 +38,7 @@ int console_init(uintptr_t base_addr, void console_uninit(void); int console_putc(int c); int console_getc(void); +int console_flush(void); #endif /* __CONSOLE_H__ */ diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h index 234ceeba3..3c69f9823 100644 --- a/include/lib/aarch32/arch.h +++ b/include/lib/aarch32/arch.h @@ -394,12 +394,14 @@ #define HCR p15, 4, c1, c1, 0 #define HCPTR p15, 4, c1, c1, 2 #define CNTHCTL p15, 4, c14, c1, 0 +#define CNTKCTL p15, 0, c14, c1, 0 #define VPIDR p15, 4, c0, c0, 0 #define VMPIDR p15, 4, c0, c0, 5 #define ISR p15, 0, c12, c1, 0 #define CLIDR p15, 1, c0, c0, 1 #define CSSELR p15, 2, c0, c0, 0 #define CCSIDR p15, 1, c0, c0, 0 +#define DBGOSDLR p14, 0, c1, c3, 4 /* Debug register defines. 
The format is: coproc, opt1, CRn, CRm, opt2 */ #define HDCR p15, 4, c1, c1, 1 diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h index a7d33d86a..472a8859a 100644 --- a/include/lib/aarch32/arch_helpers.h +++ b/include/lib/aarch32/arch_helpers.h @@ -209,6 +209,8 @@ DEFINE_SYSOP_FUNC(wfe) DEFINE_SYSOP_FUNC(sev) DEFINE_SYSOP_TYPE_FUNC(dsb, sy) DEFINE_SYSOP_TYPE_FUNC(dmb, sy) +DEFINE_SYSOP_TYPE_FUNC(dmb, st) +DEFINE_SYSOP_TYPE_FUNC(dmb, ld) DEFINE_SYSOP_TYPE_FUNC(dsb, ish) DEFINE_SYSOP_TYPE_FUNC(dsb, ishst) DEFINE_SYSOP_TYPE_FUNC(dmb, ish) diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h index 399a64385..ef7241d3b 100644 --- a/include/lib/aarch64/arch.h +++ b/include/lib/aarch64/arch.h @@ -261,6 +261,16 @@ #define DISABLE_ALL_EXCEPTIONS \ (DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT) +/* + * RMR_EL3 definitions + */ +#define RMR_EL3_RR_BIT (1 << 1) +#define RMR_EL3_AA64_BIT (1 << 0) + +/* + * HI-VECTOR address for AArch32 state + */ +#define HI_VECTOR_BASE (0xFFFF0000) /* * TCR defintions @@ -419,6 +429,10 @@ #define EC_BITS(x) (x >> ESR_EC_SHIFT) & ESR_EC_MASK +/* Reset bit inside the Reset management register for EL3 (RMR_EL3) */ +#define RMR_RESET_REQUEST_SHIFT 0x1u +#define RMR_WARM_RESET_CPU (1u << RMR_RESET_REQUEST_SHIFT) + /******************************************************************************* * Definitions of register offsets, fields and macros for CPU system * instructions. diff --git a/include/lib/cpus/aarch32/cortex_a53.h b/include/lib/cpus/aarch32/cortex_a53.h new file mode 100644 index 000000000..5173d88e1 --- /dev/null +++ b/include/lib/cpus/aarch32/cortex_a53.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __CORTEX_A53_H__ +#define __CORTEX_A53_H__ + +/* Cortex-A53 midr for revision 0 */ +#define CORTEX_A53_MIDR 0x410FD030 + +/* Retention timer tick definitions */ +#define RETENTION_ENTRY_TICKS_2 0x1 +#define RETENTION_ENTRY_TICKS_8 0x2 +#define RETENTION_ENTRY_TICKS_32 0x3 +#define RETENTION_ENTRY_TICKS_64 0x4 +#define RETENTION_ENTRY_TICKS_128 0x5 +#define RETENTION_ENTRY_TICKS_256 0x6 +#define RETENTION_ENTRY_TICKS_512 0x7 + +/******************************************************************************* + * CPU Extended Control register specific definitions. + ******************************************************************************/ +#define CPUECTLR p15, 1, c15 /* Instruction def. */ + +#define CPUECTLR_SMP_BIT (1 << 6) + +#define CPUECTLR_CPU_RET_CTRL_SHIFT 0 +#define CPUECTLR_CPU_RET_CTRL_MASK (0x7 << CPUECTLR_CPU_RET_CTRL_SHIFT) + +#define CPUECTLR_FPU_RET_CTRL_SHIFT 3 +#define CPUECTLR_FPU_RET_CTRL_MASK (0x7 << CPUECTLR_FPU_RET_CTRL_SHIFT) + +/******************************************************************************* + * CPU Memory Error Syndrome register specific definitions. + ******************************************************************************/ +#define CPUMERRSR p15, 2, c15 /* Instruction def. */ + +/******************************************************************************* + * CPU Auxiliary Control register specific definitions. + ******************************************************************************/ +#define CPUACTLR p15, 0, c15 /* Instruction def. */ + +#define CPUACTLR_DTAH (1 << 24) + +/******************************************************************************* + * L2 Auxiliary Control register specific definitions. + ******************************************************************************/ +#define L2ACTLR p15, 1, c15, c0, 0 /* Instruction def. */ + +#define L2ACTLR_ENABLE_UNIQUECLEAN (1 << 14) +#define L2ACTLR_DISABLE_CLEAN_PUSH (1 << 3) + +/******************************************************************************* + * L2 Extended Control register specific definitions. + ******************************************************************************/ +#define L2ECTLR p15, 1, c9, c0, 3 /* Instruction def. */ + +#define L2ECTLR_RET_CTRL_SHIFT 0 +#define L2ECTLR_RET_CTRL_MASK (0x7 << L2ECTLR_RET_CTRL_SHIFT) + +/******************************************************************************* + * L2 Memory Error Syndrome register specific definitions. + ******************************************************************************/ +#define L2MERRSR p15, 3, c15 /* Instruction def. */ + +#endif /* __CORTEX_A53_H__ */ diff --git a/include/lib/cpus/aarch32/cortex_a57.h b/include/lib/cpus/aarch32/cortex_a57.h new file mode 100644 index 000000000..a09ae9b71 --- /dev/null +++ b/include/lib/cpus/aarch32/cortex_a57.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CORTEX_A57_H__ +#define __CORTEX_A57_H__ + +/* Cortex-A57 midr for revision 0 */ +#define CORTEX_A57_MIDR 0x410FD070 + +/* Retention timer tick definitions */ +#define RETENTION_ENTRY_TICKS_2 0x1 +#define RETENTION_ENTRY_TICKS_8 0x2 +#define RETENTION_ENTRY_TICKS_32 0x3 +#define RETENTION_ENTRY_TICKS_64 0x4 +#define RETENTION_ENTRY_TICKS_128 0x5 +#define RETENTION_ENTRY_TICKS_256 0x6 +#define RETENTION_ENTRY_TICKS_512 0x7 + +/******************************************************************************* + * CPU Extended Control register specific definitions. + ******************************************************************************/ +#define CPUECTLR p15, 1, c15 /* Instruction def. */ + +#define CPUECTLR_SMP_BIT (1 << 6) +#define CPUECTLR_DIS_TWD_ACC_PFTCH_BIT (1 << 38) +#define CPUECTLR_L2_IPFTCH_DIST_MASK (0x3 << 35) +#define CPUECTLR_L2_DPFTCH_DIST_MASK (0x3 << 32) + +#define CPUECTLR_CPU_RET_CTRL_SHIFT 0 +#define CPUECTLR_CPU_RET_CTRL_MASK (0x7 << CPUECTLR_CPU_RET_CTRL_SHIFT) + +/******************************************************************************* + * CPU Memory Error Syndrome register specific definitions. + ******************************************************************************/ +#define CPUMERRSR p15, 2, c15 /* Instruction def. */ + +/******************************************************************************* + * CPU Auxiliary Control register specific definitions. + ******************************************************************************/ +#define CPUACTLR p15, 0, c15 /* Instruction def. */ + +#define CPUACTLR_DIS_LOAD_PASS_DMB (1 << 59) +#define CPUACTLR_GRE_NGRE_AS_NGNRE (1 << 54) +#define CPUACTLR_DIS_OVERREAD (1 << 52) +#define CPUACTLR_NO_ALLOC_WBWA (1 << 49) +#define CPUACTLR_DCC_AS_DCCI (1 << 44) +#define CPUACTLR_FORCE_FPSCR_FLUSH (1 << 38) +#define CPUACTLR_DIS_STREAMING (3 << 27) +#define CPUACTLR_DIS_L1_STREAMING (3 << 25) +#define CPUACTLR_DIS_INDIRECT_PREDICTOR (1 << 4) + +/******************************************************************************* + * L2 Control register specific definitions. + ******************************************************************************/ +#define L2CTLR p15, 1, c9, c0, 3 /* Instruction def. */ + +#define L2CTLR_DATA_RAM_LATENCY_SHIFT 0 +#define L2CTLR_TAG_RAM_LATENCY_SHIFT 6 + +#define L2_DATA_RAM_LATENCY_3_CYCLES 0x2 +#define L2_TAG_RAM_LATENCY_3_CYCLES 0x2 + +/******************************************************************************* + * L2 Extended Control register specific definitions. 
+ ******************************************************************************/ +#define L2ECTLR p15, 1, c9, c0, 3 /* Instruction def. */ + +#define L2ECTLR_RET_CTRL_SHIFT 0 +#define L2ECTLR_RET_CTRL_MASK (0x7 << L2ECTLR_RET_CTRL_SHIFT) + +/******************************************************************************* + * L2 Memory Error Syndrome register specific definitions. + ******************************************************************************/ +#define L2MERRSR p15, 3, c15 /* Instruction def. */ + +#endif /* __CORTEX_A57_H__ */ diff --git a/include/lib/cpus/aarch32/cortex_a72.h b/include/lib/cpus/aarch32/cortex_a72.h new file mode 100644 index 000000000..c16a09bc7 --- /dev/null +++ b/include/lib/cpus/aarch32/cortex_a72.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CORTEX_A72_H__ +#define __CORTEX_A72_H__ + +/* Cortex-A72 midr for revision 0 */ +#define CORTEX_A72_MIDR 0x410FD080 + +/******************************************************************************* + * CPU Extended Control register specific definitions. + ******************************************************************************/ +#define CPUECTLR p15, 1, c15 /* Instruction def. */ + +#define CPUECTLR_SMP_BIT (1 << 6) +#define CPUECTLR_DIS_TWD_ACC_PFTCH_BIT (1 << 38) +#define CPUECTLR_L2_IPFTCH_DIST_MASK (0x3 << 35) +#define CPUECTLR_L2_DPFTCH_DIST_MASK (0x3 << 32) + +/******************************************************************************* + * CPU Memory Error Syndrome register specific definitions. + ******************************************************************************/ +#define CPUMERRSR p15, 2, c15 /* Instruction def. */ + +/******************************************************************************* + * CPU Auxiliary Control register specific definitions. + ******************************************************************************/ +#define CPUACTLR p15, 0, c15 /* Instruction def. 
*/ + +#define CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH (1 << 56) +#define CPUACTLR_NO_ALLOC_WBWA (1 << 49) +#define CPUACTLR_DCC_AS_DCCI (1 << 44) + +/******************************************************************************* + * L2 Control register specific definitions. + ******************************************************************************/ +#define L2CTLR p15, 1, c9, c0, 3 /* Instruction def. */ + +#define L2CTLR_DATA_RAM_LATENCY_SHIFT 0 +#define L2CTLR_TAG_RAM_LATENCY_SHIFT 6 + +#define L2_DATA_RAM_LATENCY_3_CYCLES 0x2 +#define L2_TAG_RAM_LATENCY_2_CYCLES 0x1 +#define L2_TAG_RAM_LATENCY_3_CYCLES 0x2 + +/******************************************************************************* + * L2 Memory Error Syndrome register specific definitions. + ******************************************************************************/ +#define L2MERRSR p15, 3, c15 /* Instruction def. */ + +#endif /* __CORTEX_A72_H__ */ diff --git a/include/lib/el3_runtime/context_mgmt.h b/include/lib/el3_runtime/context_mgmt.h index ca868ddb5..31bf68169 100644 --- a/include/lib/el3_runtime/context_mgmt.h +++ b/include/lib/el3_runtime/context_mgmt.h @@ -87,7 +87,7 @@ void cm_set_context_by_mpidr(uint64_t mpidr, ******************************************************************************/ static inline void cm_set_next_context(void *context) { -#if DEBUG +#if ENABLE_ASSERTIONS uint64_t sp_mode; /* @@ -98,7 +98,7 @@ static inline void cm_set_next_context(void *context) : "=r" (sp_mode)); assert(sp_mode == MODE_SP_EL0); -#endif +#endif /* ENABLE_ASSERTIONS */ __asm__ volatile("msr spsel, #1\n" "mov sp, %0\n" diff --git a/include/lib/smcc.h b/include/lib/smcc.h index e3ffb782d..d24d1905a 100644 --- a/include/lib/smcc.h +++ b/include/lib/smcc.h @@ -58,6 +58,7 @@ #define SMC_64 1 #define SMC_32 0 +#define SMC_OK 0 #define SMC_UNK 0xffffffff #define SMC_TYPE_FAST ULL(1) #define SMC_TYPE_STD 0 diff --git a/include/lib/stdlib/assert.h b/include/lib/stdlib/assert.h index 5621f8cac..1bcd1ead5 100644 --- a/include/lib/stdlib/assert.h +++ b/include/lib/stdlib/assert.h @@ -34,30 +34,27 @@ * @(#)assert.h 8.2 (Berkeley) 1/21/94 * $FreeBSD$ */ - -#include - /* - * Unlike other ANSI header files, may usefully be included - * multiple times, with and without NDEBUG defined. + * Portions copyright (c) 2017, ARM Limited and Contributors. + * All rights reserved. */ -#undef assert -#undef _assert - -#ifdef NDEBUG -#define assert(e) ((void)0) -#define _assert(e) ((void)0) -#else -#define _assert(e) assert(e) - -#define assert(e) ((e) ? (void)0 : __assert(__func__, __FILE__, \ - __LINE__, #e)) -#endif /* NDEBUG */ - #ifndef _ASSERT_H_ #define _ASSERT_H_ + +#include + +#if ENABLE_ASSERTIONS +#define _assert(e) assert(e) +#define assert(e) ((e) ? (void)0 : __assert(__func__, __FILE__, \ + __LINE__, #e)) +#else +#define assert(e) ((void)0) +#define _assert(e) ((void)0) +#endif /* ENABLE_ASSERTIONS */ + __BEGIN_DECLS void __assert(const char *, const char *, int, const char *) __dead2; __END_DECLS + #endif /* !_ASSERT_H_ */ diff --git a/include/lib/stdlib/stdbool.h b/include/lib/stdlib/stdbool.h new file mode 100644 index 000000000..48070c18d --- /dev/null +++ b/include/lib/stdlib/stdbool.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2000 Jeroen Ruigrok van der Werven + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef __bool_true_false_are_defined +#define __bool_true_false_are_defined 1 + +#ifndef __cplusplus + +#define false 0 +#define true 1 + +#define bool _Bool +#if __STDC_VERSION__ < 199901L && __GNUC__ < 3 && !defined(__INTEL_COMPILER) +typedef int _Bool; +#endif + +#endif /* !__cplusplus */ +#endif /* __bool_true_false_are_defined */ diff --git a/include/lib/xlat_tables/xlat_tables.h b/include/lib/xlat_tables/xlat_tables.h index 4e855032b..38150f524 100644 --- a/include/lib/xlat_tables/xlat_tables.h +++ b/include/lib/xlat_tables/xlat_tables.h @@ -108,7 +108,7 @@ typedef struct mmap_region { /* Generic translation table APIs */ void init_xlat_tables(void); void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, - size_t size, unsigned int attr); + size_t size, mmap_attr_t attr); void mmap_add(const mmap_region_t *mm); #endif /*__ASSEMBLY__*/ diff --git a/include/lib/xlat_tables/xlat_tables_v2.h b/include/lib/xlat_tables/xlat_tables_v2.h index 16b857cc6..d1704b7b4 100644 --- a/include/lib/xlat_tables/xlat_tables_v2.h +++ b/include/lib/xlat_tables/xlat_tables_v2.h @@ -114,7 +114,7 @@ void init_xlat_tables(void); * be added before initializing the MMU and cannot be removed later. */ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, - size_t size, unsigned int attr); + size_t size, mmap_attr_t attr); /* * Add a region with defined base PA and base VA. This type of region can be @@ -128,7 +128,7 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, * EPERM: It overlaps another region in an invalid way. */ int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va, - size_t size, unsigned int attr); + size_t size, mmap_attr_t attr); /* * Add an array of static regions with defined base PA and base VA. 
This type diff --git a/include/plat/arm/board/common/v2m_def.h b/include/plat/arm/board/common/v2m_def.h index aaa96f305..b843d49c2 100644 --- a/include/plat/arm/board/common/v2m_def.h +++ b/include/plat/arm/board/common/v2m_def.h @@ -30,7 +30,7 @@ #ifndef __V2M_DEF_H__ #define __V2M_DEF_H__ -#include +#include /* V2M motherboard system registers & offsets */ diff --git a/include/plat/arm/common/arm_xlat_tables.h b/include/plat/arm/common/arm_xlat_tables.h new file mode 100644 index 000000000..3f7e85f6e --- /dev/null +++ b/include/plat/arm/common/arm_xlat_tables.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#if ARM_XLAT_TABLES_LIB_V1 +#include +#else +#include +#endif /* ARM_XLAT_TABLES_LIB_V1 */ diff --git a/include/plat/arm/common/plat_arm.h b/include/plat/arm/common/plat_arm.h index e61925905..8ea32b9ad 100644 --- a/include/plat/arm/common/plat_arm.h +++ b/include/plat/arm/common/plat_arm.h @@ -30,6 +30,7 @@ #ifndef __PLAT_ARM_H__ #define __PLAT_ARM_H__ +#include #include #include #include @@ -80,7 +81,7 @@ void arm_setup_page_tables(uintptr_t total_base, #else /* - * Empty macros for all other BL stages other than BL31 + * Empty macros for all other BL stages other than BL31 and BL32 */ #define ARM_INSTANTIATE_LOCK #define arm_lock_init() @@ -156,6 +157,7 @@ void arm_bl2_platform_setup(void); void arm_bl2_plat_arch_setup(void); uint32_t arm_get_spsr_for_bl32_entry(void); uint32_t arm_get_spsr_for_bl33_entry(void); +int arm_bl2_handle_post_image_load(unsigned int image_id); /* BL2U utility functions */ void arm_bl2u_early_platform_setup(struct meminfo *mem_layout, diff --git a/include/plat/arm/css/common/css_pm.h b/include/plat/arm/css/common/css_pm.h index 489275e96..1e1bab797 100644 --- a/include/plat/arm/css/common/css_pm.h +++ b/include/plat/arm/css/common/css_pm.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -35,11 +35,15 @@ #include #include +/* System power domain at level 2, as currently implemented by CSS platforms */ +#define CSS_SYSTEM_PWR_DMN_LVL ARM_PWR_LVL2 + /* Macros to read the CSS power domain state */ #define CSS_CORE_PWR_STATE(state) (state)->pwr_domain_state[ARM_PWR_LVL0] #define CSS_CLUSTER_PWR_STATE(state) (state)->pwr_domain_state[ARM_PWR_LVL1] -#define CSS_SYSTEM_PWR_STATE(state) ((PLAT_MAX_PWR_LVL > ARM_PWR_LVL1) ?\ - (state)->pwr_domain_state[ARM_PWR_LVL2] : 0) +#define CSS_SYSTEM_PWR_STATE(state) \ + ((PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL) ?\ + (state)->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] : 0) int css_pwr_domain_on(u_register_t mpidr); void css_pwr_domain_on_finish(const psci_power_state_t *target_state); diff --git a/include/plat/arm/soc/common/soc_css_def.h b/include/plat/arm/soc/common/soc_css_def.h index 2c08296ca..ecced3d9b 100644 --- a/include/plat/arm/soc/common/soc_css_def.h +++ b/include/plat/arm/soc/common/soc_css_def.h @@ -96,9 +96,16 @@ /* * Required platform porting definitions common to all ARM CSS SoCs */ - +#if JUNO_AARCH32_EL3_RUNTIME +/* + * Following change is required to initialize TZC + * for enabling access to the HI_VECTOR (0xFFFF0000) + * location needed for JUNO AARCH32 support. + */ +#define PLAT_ARM_SCP_TZC_DRAM1_SIZE ULL(0x8000) +#else /* 2MB used for SCP DDR retraining */ #define PLAT_ARM_SCP_TZC_DRAM1_SIZE ULL(0x00200000) - +#endif #endif /* __SOC_CSS_DEF_H__ */ diff --git a/include/plat/common/platform.h b/include/plat/common/platform.h index f13b30d81..ddb1cab1a 100644 --- a/include/plat/common/platform.h +++ b/include/plat/common/platform.h @@ -100,6 +100,7 @@ uintptr_t plat_get_my_stack(void); void plat_report_exception(unsigned int exception_type); int plat_crash_console_init(void); int plat_crash_console_putc(int c); +int plat_crash_console_flush(void); void plat_error_handler(int err) __dead2; void plat_panic_handler(void) __dead2; diff --git a/lib/aarch32/misc_helpers.S b/lib/aarch32/misc_helpers.S index 5b17c21cf..03b47eae2 100644 --- a/lib/aarch32/misc_helpers.S +++ b/lib/aarch32/misc_helpers.S @@ -162,7 +162,7 @@ endfunc zeromem * -------------------------------------------------------------------------- */ func memcpy4 -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS orr r3, r0, r1 tst r3, #0x3 ASM_ASSERT(eq) diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S index 84265e0b2..74550aa24 100644 --- a/lib/aarch64/misc_helpers.S +++ b/lib/aarch64/misc_helpers.S @@ -215,7 +215,7 @@ func zeromem_dczva tmp1 .req x4 tmp2 .req x5 -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS /* * Check for M bit (MMU enabled) of the current SCTLR_EL(1|3) * register value and panic if the MMU is disabled. @@ -228,7 +228,7 @@ func zeromem_dczva tst tmp1, #SCTLR_M_BIT ASM_ASSERT(ne) -#endif /* ASM_ASSERTION */ +#endif /* ENABLE_ASSERTIONS */ /* stop_address is the address past the last to zero */ add stop_address, cursor, length @@ -247,7 +247,7 @@ func zeromem_dczva mov tmp2, #(1 << 2) lsl block_size, tmp2, block_size -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS /* * Assumes block size is at least 16 bytes to avoid manual realignment * of the cursor at the end of the DCZVA loop. 
@@ -444,7 +444,7 @@ endfunc zeromem_dczva * -------------------------------------------------------------------------- */ func memcpy16 -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS orr x3, x0, x1 tst x3, #0xf ASM_ASSERT(eq) diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S index 3d6064c93..7374e2508 100644 --- a/lib/cpus/aarch32/aem_generic.S +++ b/lib/cpus/aarch32/aem_generic.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -35,7 +35,7 @@ func aem_generic_core_pwr_dwn /* Assert if cache is enabled */ -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS ldcopr r0, SCTLR tst r0, #SCTLR_C_BIT ASM_ASSERT(eq) @@ -51,7 +51,7 @@ endfunc aem_generic_core_pwr_dwn func aem_generic_cluster_pwr_dwn /* Assert if cache is enabled */ -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS ldcopr r0, SCTLR tst r0, #SCTLR_C_BIT ASM_ASSERT(eq) diff --git a/lib/cpus/aarch32/cortex_a32.S b/lib/cpus/aarch32/cortex_a32.S index f631c4cf7..8cd79330f 100644 --- a/lib/cpus/aarch32/cortex_a32.S +++ b/lib/cpus/aarch32/cortex_a32.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -76,7 +76,7 @@ func cortex_a32_core_pwr_dwn push {r12, lr} /* Assert if cache is enabled */ -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS ldcopr r0, SCTLR tst r0, #SCTLR_C_BIT ASM_ASSERT(eq) @@ -107,7 +107,7 @@ func cortex_a32_cluster_pwr_dwn push {r12, lr} /* Assert if cache is enabled */ -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS ldcopr r0, SCTLR tst r0, #SCTLR_C_BIT ASM_ASSERT(eq) diff --git a/lib/cpus/aarch32/cortex_a53.S b/lib/cpus/aarch32/cortex_a53.S new file mode 100644 index 000000000..a16ead8bf --- /dev/null +++ b/lib/cpus/aarch32/cortex_a53.S @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include +#include +#include +#include + + /* --------------------------------------------- + * Disable intra-cluster coherency + * --------------------------------------------- + */ +func cortex_a53_disable_smp + ldcopr16 r0, r1, CPUECTLR + bic64_imm r0, r1, CPUECTLR_SMP_BIT + stcopr16 r0, r1, CPUECTLR + isb + dsb sy + bx lr +endfunc cortex_a53_disable_smp + + /* ------------------------------------------------- + * The CPU Ops reset function for Cortex-A53. + * ------------------------------------------------- + */ +func cortex_a53_reset_func + /* --------------------------------------------- + * Enable the SMP bit. + * --------------------------------------------- + */ + ldcopr16 r0, r1, CPUECTLR + orr64_imm r0, r1, CPUECTLR_SMP_BIT + stcopr16 r0, r1, CPUECTLR + isb + bx lr +endfunc cortex_a53_reset_func + + /* ---------------------------------------------------- + * The CPU Ops core power down function for Cortex-A53. + * ---------------------------------------------------- + */ +func cortex_a53_core_pwr_dwn + push {r12, lr} + + /* Assert if cache is enabled */ +#if ENABLE_ASSERTIONS + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + + /* --------------------------------------------- + * Flush L1 caches. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* --------------------------------------------- + * Come out of intra cluster coherency + * --------------------------------------------- + */ + pop {r12, lr} + b cortex_a53_disable_smp +endfunc cortex_a53_core_pwr_dwn + + /* ------------------------------------------------------- + * The CPU Ops cluster power down function for Cortex-A53. + * Clobbers: r0-r3 + * ------------------------------------------------------- + */ +func cortex_a53_cluster_pwr_dwn + push {r12, lr} + + /* Assert if cache is enabled */ +#if ENABLE_ASSERTIONS + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + + /* --------------------------------------------- + * Flush L1 caches. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* --------------------------------------------- + * Disable the optional ACP. + * --------------------------------------------- + */ + bl plat_disable_acp + + /* --------------------------------------------- + * Flush L2 caches.
+ * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level2 + + /* --------------------------------------------- + * Come out of intra cluster coherency + * --------------------------------------------- + */ + pop {r12, lr} + b cortex_a53_disable_smp +endfunc cortex_a53_cluster_pwr_dwn + +declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \ + cortex_a53_reset_func, \ + cortex_a53_core_pwr_dwn, \ + cortex_a53_cluster_pwr_dwn diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S new file mode 100644 index 000000000..3c5c4549c --- /dev/null +++ b/lib/cpus/aarch32/cortex_a57.S @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include +#include +#include +#include + + /* --------------------------------------------- + * Disable intra-cluster coherency + * Clobbers: r0-r1 + * --------------------------------------------- + */ +func cortex_a57_disable_smp + ldcopr16 r0, r1, CPUECTLR + bic64_imm r0, r1, CPUECTLR_SMP_BIT + stcopr16 r0, r1, CPUECTLR + bx lr +endfunc cortex_a57_disable_smp + + /* --------------------------------------------- + * Disable all types of L2 prefetches. + * Clobbers: r0-r2 + * --------------------------------------------- + */ +func cortex_a57_disable_l2_prefetch + ldcopr16 r0, r1, CPUECTLR + orr64_imm r0, r1, CPUECTLR_DIS_TWD_ACC_PFTCH_BIT + bic64_imm r0, r1, (CPUECTLR_L2_IPFTCH_DIST_MASK | \ + CPUECTLR_L2_DPFTCH_DIST_MASK) + stcopr16 r0, r1, CPUECTLR + isb + dsb ish + bx lr +endfunc cortex_a57_disable_l2_prefetch + + /* --------------------------------------------- + * Disable debug interfaces + * --------------------------------------------- + */ +func cortex_a57_disable_ext_debug + mov r0, #1 + stcopr r0, DBGOSDLR + isb + dsb sy + bx lr +endfunc cortex_a57_disable_ext_debug + + /* ------------------------------------------------- + * The CPU Ops reset function for Cortex-A57. 
+ * ------------------------------------------------- + */ +func cortex_a57_reset_func + /* --------------------------------------------- + * Enable the SMP bit. + * --------------------------------------------- + */ + ldcopr16 r0, r1, CPUECTLR + orr64_imm r0, r1, CPUECTLR_SMP_BIT + stcopr16 r0, r1, CPUECTLR + isb + bx lr +endfunc cortex_a57_reset_func + + /* ---------------------------------------------------- + * The CPU Ops core power down function for Cortex-A57. + * ---------------------------------------------------- + */ +func cortex_a57_core_pwr_dwn + push {r12, lr} + + /* Assert if cache is enabled */ +#if ENABLE_ASSERTIONS + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + + /* --------------------------------------------- + * Disable the L2 prefetches. + * --------------------------------------------- + */ + bl cortex_a57_disable_l2_prefetch + + /* --------------------------------------------- + * Flush L1 caches. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* --------------------------------------------- + * Come out of intra cluster coherency + * --------------------------------------------- + */ + bl cortex_a57_disable_smp + + /* --------------------------------------------- + * Force the debug interfaces to be quiescent + * --------------------------------------------- + */ + pop {r12, lr} + b cortex_a57_disable_ext_debug +endfunc cortex_a57_core_pwr_dwn + + /* ------------------------------------------------------- + * The CPU Ops cluster power down function for Cortex-A57. + * Clobbers: r0-r3 + * ------------------------------------------------------- + */ +func cortex_a57_cluster_pwr_dwn + push {r12, lr} + + /* Assert if cache is enabled */ +#if ENABLE_ASSERTIONS + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + + /* --------------------------------------------- + * Disable the L2 prefetches. + * --------------------------------------------- + */ + bl cortex_a57_disable_l2_prefetch + + /* --------------------------------------------- + * Flush L1 caches. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* --------------------------------------------- + * Disable the optional ACP. + * --------------------------------------------- + */ + bl plat_disable_acp + + /* --------------------------------------------- + * Flush L2 caches. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level2 + + /* --------------------------------------------- + * Come out of intra cluster coherency + * --------------------------------------------- + */ + bl cortex_a57_disable_smp + + /* --------------------------------------------- + * Force the debug interfaces to be quiescent + * --------------------------------------------- + */ + pop {r12, lr} + b cortex_a57_disable_ext_debug +endfunc cortex_a57_cluster_pwr_dwn + +declare_cpu_ops cortex_a57, CORTEX_A57_MIDR, \ + cortex_a57_reset_func, \ + cortex_a57_core_pwr_dwn, \ + cortex_a57_cluster_pwr_dwn diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S new file mode 100644 index 000000000..583c1b58a --- /dev/null +++ b/lib/cpus/aarch32/cortex_a72.S @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include +#include +#include +#include + + /* --------------------------------------------- + * Disable all types of L2 prefetches. + * --------------------------------------------- + */ +func cortex_a72_disable_l2_prefetch + ldcopr16 r0, r1, CPUECTLR + orr64_imm r0, r1, CPUECTLR_DIS_TWD_ACC_PFTCH_BIT + bic64_imm r0, r1, (CPUECTLR_L2_IPFTCH_DIST_MASK | \ + CPUECTLR_L2_DPFTCH_DIST_MASK) + stcopr16 r0, r1, CPUECTLR + isb + bx lr +endfunc cortex_a72_disable_l2_prefetch + + /* --------------------------------------------- + * Disable the load-store hardware prefetcher. + * --------------------------------------------- + */ +func cortex_a72_disable_hw_prefetcher + ldcopr16 r0, r1, CPUACTLR + orr64_imm r0, r1, CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH + stcopr16 r0, r1, CPUACTLR + isb + dsb ish + bx lr +endfunc cortex_a72_disable_hw_prefetcher + + /* --------------------------------------------- + * Disable intra-cluster coherency + * Clobbers: r0-r1 + * --------------------------------------------- + */ +func cortex_a72_disable_smp + ldcopr16 r0, r1, CPUECTLR + bic64_imm r0, r1, CPUECTLR_SMP_BIT + stcopr16 r0, r1, CPUECTLR + bx lr +endfunc cortex_a72_disable_smp + + /* --------------------------------------------- + * Disable debug interfaces + * --------------------------------------------- + */ +func cortex_a72_disable_ext_debug + mov r0, #1 + stcopr r0, DBGOSDLR + isb + dsb sy + bx lr +endfunc cortex_a72_disable_ext_debug + + /* ------------------------------------------------- + * The CPU Ops reset function for Cortex-A72. + * ------------------------------------------------- + */ +func cortex_a72_reset_func + /* --------------------------------------------- + * Enable the SMP bit. + * --------------------------------------------- + */ + ldcopr16 r0, r1, CPUECTLR + orr64_imm r0, r1, CPUECTLR_SMP_BIT + stcopr16 r0, r1, CPUECTLR + isb + bx lr +endfunc cortex_a72_reset_func + + /* ---------------------------------------------------- + * The CPU Ops core power down function for Cortex-A72. 
+ * ---------------------------------------------------- + */ +func cortex_a72_core_pwr_dwn + push {r12, lr} + + /* Assert if cache is enabled */ +#if ENABLE_ASSERTIONS + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + + /* --------------------------------------------- + * Disable the L2 prefetches. + * --------------------------------------------- + */ + bl cortex_a72_disable_l2_prefetch + + /* --------------------------------------------- + * Disable the load-store hardware prefetcher. + * --------------------------------------------- + */ + bl cortex_a72_disable_hw_prefetcher + + /* --------------------------------------------- + * Flush L1 caches. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* --------------------------------------------- + * Come out of intra cluster coherency + * --------------------------------------------- + */ + bl cortex_a72_disable_smp + + /* --------------------------------------------- + * Force the debug interfaces to be quiescent + * --------------------------------------------- + */ + pop {r12, lr} + b cortex_a72_disable_ext_debug +endfunc cortex_a72_core_pwr_dwn + + /* ------------------------------------------------------- + * The CPU Ops cluster power down function for Cortex-A72. + * ------------------------------------------------------- + */ +func cortex_a72_cluster_pwr_dwn + push {r12, lr} + + /* Assert if cache is enabled */ +#if ENABLE_ASSERTIONS + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + + /* --------------------------------------------- + * Disable the L2 prefetches. + * --------------------------------------------- + */ + bl cortex_a72_disable_l2_prefetch + + /* --------------------------------------------- + * Disable the load-store hardware prefetcher. + * --------------------------------------------- + */ + bl cortex_a72_disable_hw_prefetcher + +#if !SKIP_A72_L1_FLUSH_PWR_DWN + /* --------------------------------------------- + * Flush L1 caches. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 +#endif + + /* --------------------------------------------- + * Disable the optional ACP. + * --------------------------------------------- + */ + bl plat_disable_acp + + /* ------------------------------------------------- + * Flush the L2 caches.
+ * ------------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level2 + + /* --------------------------------------------- + * Come out of intra cluster coherency + * --------------------------------------------- + */ + bl cortex_a72_disable_smp + + /* --------------------------------------------- + * Force the debug interfaces to be quiescent + * --------------------------------------------- + */ + pop {r12, lr} + b cortex_a72_disable_ext_debug +endfunc cortex_a72_cluster_pwr_dwn + +declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \ + cortex_a72_reset_func, \ + cortex_a72_core_pwr_dwn, \ + cortex_a72_cluster_pwr_dwn diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S index dc1b6e619..7606b8e28 100644 --- a/lib/cpus/aarch32/cpu_helpers.S +++ b/lib/cpus/aarch32/cpu_helpers.S @@ -53,7 +53,7 @@ func reset_handler /* Get the matching cpu_ops pointer (clobbers: r0 - r5) */ bl get_cpu_ops_ptr -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS cmp r0, #0 ASM_ASSERT(ne) #endif @@ -92,7 +92,7 @@ func prepare_cpu_pwr_dwn pop {r2, lr} ldr r0, [r0, #CPU_DATA_CPU_OPS_PTR] -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS cmp r0, #0 ASM_ASSERT(ne) #endif @@ -118,7 +118,7 @@ func init_cpu_ops cmp r1, #0 bne 1f bl get_cpu_ops_ptr -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS cmp r0, #0 ASM_ASSERT(ne) #endif diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S index 47cb6a2de..6a3991679 100644 --- a/lib/cpus/aarch64/cpu_helpers.S +++ b/lib/cpus/aarch64/cpu_helpers.S @@ -55,7 +55,7 @@ func reset_handler /* Get the matching cpu_ops pointer */ bl get_cpu_ops_ptr -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS cmp x0, #0 ASM_ASSERT(ne) #endif @@ -94,7 +94,7 @@ func prepare_cpu_pwr_dwn mrs x1, tpidr_el3 ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR] -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS cmp x0, #0 ASM_ASSERT(ne) #endif @@ -120,7 +120,7 @@ func init_cpu_ops cbnz x0, 1f mov x10, x30 bl get_cpu_ops_ptr -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS cmp x0, #0 ASM_ASSERT(ne) #endif diff --git a/lib/psci/psci_on.c b/lib/psci/psci_on.c index 675ed6681..76e67a362 100644 --- a/lib/psci/psci_on.c +++ b/lib/psci/psci_on.c @@ -165,7 +165,7 @@ void psci_cpu_on_finish(unsigned int cpu_idx, */ psci_plat_pm_ops->pwr_domain_on_finish(state_info); -#if !HW_ASSISTED_COHERENCY +#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) /* * Arch. management: Enable data cache and manage stack memory */ diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c index 08c8fd6a6..bf95df247 100644 --- a/lib/psci/psci_suspend.c +++ b/lib/psci/psci_suspend.c @@ -302,7 +302,7 @@ void psci_cpu_suspend_finish(unsigned int cpu_idx, */ psci_plat_pm_ops->pwr_domain_suspend_finish(state_info); -#if !HW_ASSISTED_COHERENCY +#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) /* Arch. management: Enable the data cache, stack memory maintenance. */ psci_do_pwrup_cache_maintenance(); #endif diff --git a/lib/psci/psci_system_off.c b/lib/psci/psci_system_off.c index de9ec6436..eb3e7fbc0 100644 --- a/lib/psci/psci_system_off.c +++ b/lib/psci/psci_system_off.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include "psci_private.h" @@ -46,6 +47,8 @@ void psci_system_off(void) psci_spd_pm->svc_system_off(); } + console_flush(); + /* Call the platform specific hook */ psci_plat_pm_ops->system_off(); @@ -63,6 +66,8 @@ void psci_system_reset(void) psci_spd_pm->svc_system_reset(); } + console_flush(); + /* Call the platform specific hook */ psci_plat_pm_ops->system_reset(); diff --git a/lib/stdlib/assert.c b/lib/stdlib/assert.c index 90a1afe50..3c0bd166f 100644 --- a/lib/stdlib/assert.c +++ b/lib/stdlib/assert.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -28,14 +28,22 @@ * POSSIBILITY OF SUCH DAMAGE. */ +#include #include +#include -/* - * This is a basic implementation. This could be improved. - */ -void __assert (const char *function, const char *file, unsigned int line, +void __assert(const char *function, const char *file, unsigned int line, const char *assertion) { +#if LOG_LEVEL >= LOG_LEVEL_INFO + /* + * Only print the output if LOG_LEVEL is higher or equal to + * LOG_LEVEL_INFO, which is the default value for builds with DEBUG=1. + */ tf_printf("ASSERT: %s <%d> : %s\n", function, line, assertion); - while(1); + + console_flush(); +#endif + + plat_panic_handler(); } diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c index 316a60e70..4fe5bf912 100644 --- a/lib/xlat_tables/aarch32/xlat_tables.c +++ b/lib/xlat_tables/aarch32/xlat_tables.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -85,13 +85,13 @@ static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES] __aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t)); -#if DEBUG +#if ENABLE_ASSERTIONS static unsigned long long get_max_supported_pa(void) { /* Physical address space size for long descriptor format. 
*/ return (1ULL << 40) - 1ULL; } -#endif +#endif /* ENABLE_ASSERTIONS */ void init_xlat_tables(void) { diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c index ecb120220..4f2379363 100644 --- a/lib/xlat_tables/aarch64/xlat_tables.c +++ b/lib/xlat_tables/aarch64/xlat_tables.c @@ -127,7 +127,7 @@ static unsigned long long calc_physical_addr_size_bits( return TCR_PS_BITS_4GB; } -#if DEBUG +#if ENABLE_ASSERTIONS /* Physical Address ranges supported in the AArch64 Memory Model */ static const unsigned int pa_range_bits_arr[] = { PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100, @@ -144,7 +144,7 @@ static unsigned long long get_max_supported_pa(void) return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL; } -#endif +#endif /* ENABLE_ASSERTIONS */ void init_xlat_tables(void) { diff --git a/lib/xlat_tables/xlat_tables_common.c b/lib/xlat_tables/xlat_tables_common.c index 81c4dc683..4426ccefb 100644 --- a/lib/xlat_tables/xlat_tables_common.c +++ b/lib/xlat_tables/xlat_tables_common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -87,7 +87,7 @@ void print_mmap(void) } void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, - size_t size, unsigned int attr) + size_t size, mmap_attr_t attr) { mmap_region_t *mm = mmap; mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1; @@ -109,7 +109,7 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, assert((base_pa + (unsigned long long)size - 1ULL) <= (PLAT_PHY_ADDR_SPACE_SIZE - 1)); -#if DEBUG +#if ENABLE_ASSERTIONS /* Check for PAs and VAs overlaps with all other regions */ for (mm = mmap; mm->size; ++mm) { @@ -154,7 +154,7 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, mm = mmap; /* Restore pointer to the start of the array */ -#endif /* DEBUG */ +#endif /* ENABLE_ASSERTIONS */ /* Find correct place in mmap to insert new region */ while (mm->base_va < base_va && mm->size) @@ -199,7 +199,7 @@ void mmap_add(const mmap_region_t *mm) } } -static uint64_t mmap_desc(unsigned attr, unsigned long long addr_pa, +static uint64_t mmap_desc(mmap_attr_t attr, unsigned long long addr_pa, int level) { uint64_t desc; @@ -277,11 +277,11 @@ static uint64_t mmap_desc(unsigned attr, unsigned long long addr_pa, * attributes of the innermost region that contains it. If there are partial * overlaps, it returns -1, as a smaller size is needed. */ -static int mmap_region_attr(mmap_region_t *mm, uintptr_t base_va, +static mmap_attr_t mmap_region_attr(mmap_region_t *mm, uintptr_t base_va, size_t size) { /* Don't assume that the area is contained in the first region */ - int attr = -1; + mmap_attr_t attr = -1; /* * Get attributes from last (innermost) region that contains the @@ -360,7 +360,8 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm, * there are partially overlapping regions. On success, * it will return the innermost region's attributes. 
*/ - int attr = mmap_region_attr(mm, base_va, level_size); + mmap_attr_t attr = mmap_region_attr(mm, base_va, + level_size); if (attr >= 0) { desc = mmap_desc(attr, base_va - mm->base_va + mm->base_pa, diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c index ba0e53d6f..cd7aad8fa 100644 --- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c +++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c @@ -37,13 +37,13 @@ #include #include "../xlat_tables_private.h" -#if DEBUG +#if ENABLE_ASSERTIONS static unsigned long long xlat_arch_get_max_supported_pa(void) { /* Physical address space size for long descriptor format. */ return (1ull << 40) - 1ull; } -#endif /* DEBUG*/ +#endif /* ENABLE_ASSERTIONS*/ int is_mmu_enabled(void) { diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c index 575ac71ce..24266b2d1 100644 --- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c +++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c @@ -77,7 +77,7 @@ static unsigned long long calc_physical_addr_size_bits( return TCR_PS_BITS_4GB; } -#if DEBUG +#if ENABLE_ASSERTIONS /* Physical Address ranges supported in the AArch64 Memory Model */ static const unsigned int pa_range_bits_arr[] = { PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100, @@ -94,7 +94,7 @@ unsigned long long xlat_arch_get_max_supported_pa(void) return (1ull << pa_range_bits_arr[pa_range]) - 1ull; } -#endif /* DEBUG*/ +#endif /* ENABLE_ASSERTIONS*/ int is_mmu_enabled(void) { diff --git a/lib/xlat_tables_v2/xlat_tables_common.c b/lib/xlat_tables_v2/xlat_tables_common.c index b4691a2b4..7ca81b9c8 100644 --- a/lib/xlat_tables_v2/xlat_tables_common.c +++ b/lib/xlat_tables_v2/xlat_tables_common.c @@ -92,7 +92,7 @@ xlat_ctx_t tf_xlat_ctx = { }; void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, - size_t size, unsigned int attr) + size_t size, mmap_attr_t attr) { mmap_region_t mm = { .base_va = base_va, @@ -114,7 +114,7 @@ void mmap_add(const mmap_region_t *mm) #if PLAT_XLAT_TABLES_DYNAMIC int mmap_add_dynamic_region(unsigned long long base_pa, - uintptr_t base_va, size_t size, unsigned int attr) + uintptr_t base_va, size_t size, mmap_attr_t attr) { mmap_region_t mm = { .base_va = base_va, diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c index 2f03306e9..581f77032 100644 --- a/lib/xlat_tables_v2/xlat_tables_internal.c +++ b/lib/xlat_tables_v2/xlat_tables_internal.c @@ -115,7 +115,7 @@ static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx) #endif /* PLAT_XLAT_TABLES_DYNAMIC */ /* Returns a block/page table descriptor for the given level and attributes. */ -static uint64_t xlat_desc(unsigned int attr, unsigned long long addr_pa, +static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa, int level) { uint64_t desc; @@ -609,7 +609,7 @@ void print_mmap(mmap_region_t *const mmap) */ static int mmap_add_region_check(xlat_ctx_t *ctx, unsigned long long base_pa, uintptr_t base_va, size_t size, - unsigned int attr) + mmap_attr_t attr) { mmap_region_t *mm = ctx->mmap; unsigned long long end_pa = base_pa + size - 1; diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk index e66f5112a..903363a46 100644 --- a/make_helpers/defaults.mk +++ b/make_helpers/defaults.mk @@ -154,3 +154,9 @@ USE_COHERENT_MEM := 1 # Build verbosity V := 0 + +# Whether to enable D-Cache early during warm boot. 
This is usually +# applicable for platforms wherein interconnect programming is not +# required to enable cache coherency after warm reset (eg: single cluster +# platforms). +WARMBOOT_ENABLE_DCACHE_EARLY := 0 diff --git a/plat/arm/board/common/board_css_common.c b/plat/arm/board/common/board_css_common.c index 3fcc6ee02..6593d2a0c 100644 --- a/plat/arm/board/common/board_css_common.c +++ b/plat/arm/board/common/board_css_common.c @@ -79,6 +79,9 @@ const mmap_region_t plat_arm_mmap[] = { #endif #ifdef IMAGE_BL32 const mmap_region_t plat_arm_mmap[] = { +#ifdef AARCH32 + ARM_MAP_SHARED_RAM, +#endif V2M_MAP_IOFPGA, CSS_MAP_DEVICE, SOC_CSS_MAP_DEVICE, diff --git a/plat/arm/board/juno/aarch32/juno_helpers.S b/plat/arm/board/juno/aarch32/juno_helpers.S new file mode 100644 index 000000000..86eeb2c43 --- /dev/null +++ b/plat/arm/board/juno/aarch32/juno_helpers.S @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "../juno_def.h" + + + .globl plat_reset_handler + .globl plat_arm_calc_core_pos + +#define JUNO_REVISION(rev) REV_JUNO_R##rev +#define JUNO_HANDLER(rev) plat_reset_handler_juno_r##rev +#define JUMP_TO_HANDLER_IF_JUNO_R(revision) \ + jump_to_handler JUNO_REVISION(revision), JUNO_HANDLER(revision) + + /* -------------------------------------------------------------------- + * Helper macro to jump to the given handler if the board revision + * matches. + * Expects the Juno board revision in x0. + * -------------------------------------------------------------------- + */ + .macro jump_to_handler _revision, _handler + cmp r0, #\_revision + beq \_handler + .endm + + /* -------------------------------------------------------------------- + * Helper macro that reads the part number of the current CPU and jumps + * to the given label if it matches the CPU MIDR provided. + * + * Clobbers r0. 
+ * -------------------------------------------------------------------- + */ + .macro jump_if_cpu_midr _cpu_midr, _label + ldcopr r0, MIDR + ubfx r0, r0, #MIDR_PN_SHIFT, #12 + ldr r1, =((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK) + cmp r0, r1 + beq \_label + .endm + + /* -------------------------------------------------------------------- + * Platform reset handler for Juno R0. + * + * Juno R0 has the following topology: + * - Quad core Cortex-A53 processor cluster; + * - Dual core Cortex-A57 processor cluster. + * + * This handler does the following: + * - Implement workaround for defect id 831273 by enabling an event + * stream every 65536 cycles. + * - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A57 + * - Set the L2 Tag RAM latency to 2 (i.e. 3 cycles) for Cortex-A57 + * -------------------------------------------------------------------- + */ +func JUNO_HANDLER(0) + /* -------------------------------------------------------------------- + * Enable the event stream every 65536 cycles + * -------------------------------------------------------------------- + */ + mov r0, #(0xf << EVNTI_SHIFT) + orr r0, r0, #EVNTEN_BIT + stcopr r0, CNTKCTL + + /* -------------------------------------------------------------------- + * Nothing else to do on Cortex-A53. + * -------------------------------------------------------------------- + */ + jump_if_cpu_midr CORTEX_A53_MIDR, 1f + + /* -------------------------------------------------------------------- + * Cortex-A57 specific settings + * -------------------------------------------------------------------- + */ + mov r0, #((L2_DATA_RAM_LATENCY_3_CYCLES << L2CTLR_DATA_RAM_LATENCY_SHIFT) | \ + (L2_TAG_RAM_LATENCY_3_CYCLES << L2CTLR_TAG_RAM_LATENCY_SHIFT)) + stcopr r0, L2CTLR +1: + isb + bx lr +endfunc JUNO_HANDLER(0) + + /* -------------------------------------------------------------------- + * Platform reset handler for Juno R1. + * + * Juno R1 has the following topology: + * - Quad core Cortex-A53 processor cluster; + * - Dual core Cortex-A57 processor cluster. + * + * This handler does the following: + * - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A57 + * + * Note that: + * - The default value for the L2 Tag RAM latency for Cortex-A57 is + * suitable. + * - Defect #831273 doesn't affect Juno R1. + * -------------------------------------------------------------------- + */ +func JUNO_HANDLER(1) + /* -------------------------------------------------------------------- + * Nothing to do on Cortex-A53. + * -------------------------------------------------------------------- + */ + jump_if_cpu_midr CORTEX_A57_MIDR, A57 + bx lr + +A57: + /* -------------------------------------------------------------------- + * Cortex-A57 specific settings + * -------------------------------------------------------------------- + */ + mov r0, #(L2_DATA_RAM_LATENCY_3_CYCLES << L2CTLR_DATA_RAM_LATENCY_SHIFT) + stcopr r0, L2CTLR + isb + bx lr +endfunc JUNO_HANDLER(1) + + /* -------------------------------------------------------------------- + * Platform reset handler for Juno R2. + * + * Juno R2 has the following topology: + * - Quad core Cortex-A53 processor cluster; + * - Dual core Cortex-A72 processor cluster. + * + * This handler does the following: + * - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A72 + * - Set the L2 Tag RAM latency to 1 (i.e. 2 cycles) for Cortex-A72 + * + * Note that: + * - Defect #831273 doesn't affect Juno R2. 
+ * -------------------------------------------------------------------- + */ +func JUNO_HANDLER(2) + /* -------------------------------------------------------------------- + * Nothing to do on Cortex-A53. + * -------------------------------------------------------------------- + */ + jump_if_cpu_midr CORTEX_A72_MIDR, A72 + bx lr + +A72: + /* -------------------------------------------------------------------- + * Cortex-A72 specific settings + * -------------------------------------------------------------------- + */ + mov r0, #((L2_DATA_RAM_LATENCY_3_CYCLES << L2CTLR_DATA_RAM_LATENCY_SHIFT) | \ + (L2_TAG_RAM_LATENCY_2_CYCLES << L2CTLR_TAG_RAM_LATENCY_SHIFT)) + stcopr r0, L2CTLR + isb + bx lr +endfunc JUNO_HANDLER(2) + + /* -------------------------------------------------------------------- + * void plat_reset_handler(void); + * + * Determine the Juno board revision and call the appropriate reset + * handler. + * -------------------------------------------------------------------- + */ +func plat_reset_handler + /* Read the V2M SYS_ID register */ + ldr r0, =(V2M_SYSREGS_BASE + V2M_SYS_ID) + ldr r1, [r0] + /* Extract board revision from the SYS_ID */ + ubfx r0, r1, #V2M_SYS_ID_REV_SHIFT, #4 + + JUMP_TO_HANDLER_IF_JUNO_R(0) + JUMP_TO_HANDLER_IF_JUNO_R(1) + JUMP_TO_HANDLER_IF_JUNO_R(2) + + /* Board revision is not supported */ + no_ret plat_panic_handler + +endfunc plat_reset_handler + + /* ----------------------------------------------------- + * unsigned int plat_arm_calc_core_pos(u_register_t mpidr) + * Helper function to calculate the core position. + * ----------------------------------------------------- + */ +func plat_arm_calc_core_pos + b css_calc_core_pos_swap_cluster +endfunc plat_arm_calc_core_pos diff --git a/plat/arm/board/juno/aarch64/juno_helpers.S b/plat/arm/board/juno/aarch64/juno_helpers.S index ac54ac9b3..49fef16ff 100644 --- a/plat/arm/board/juno/aarch64/juno_helpers.S +++ b/plat/arm/board/juno/aarch64/juno_helpers.S @@ -34,12 +34,18 @@ #include #include #include +#include +#include #include #include "../juno_def.h" .globl plat_reset_handler .globl plat_arm_calc_core_pos +#if JUNO_AARCH32_EL3_RUNTIME + .globl plat_get_my_entrypoint + .globl juno_reset_to_aarch32_state +#endif #define JUNO_REVISION(rev) REV_JUNO_R##rev #define JUNO_HANDLER(rev) plat_reset_handler_juno_r##rev @@ -205,6 +211,20 @@ func plat_reset_handler endfunc plat_reset_handler + /* ----------------------------------------------------- + * void juno_do_reset_to_aarch32_state(void); + * + * Request warm reset to AArch32 mode. + * ----------------------------------------------------- + */ +func juno_do_reset_to_aarch32_state + mov x0, #RMR_EL3_RR_BIT + dsb sy + msr rmr_el3, x0 + isb + wfi +endfunc juno_do_reset_to_aarch32_state + /* ----------------------------------------------------- * unsigned int plat_arm_calc_core_pos(u_register_t mpidr) * Helper function to calculate the core position. @@ -213,3 +233,77 @@ endfunc plat_reset_handler func plat_arm_calc_core_pos b css_calc_core_pos_swap_cluster endfunc plat_arm_calc_core_pos + +#if JUNO_AARCH32_EL3_RUNTIME + /* --------------------------------------------------------------------- + * uintptr_t plat_get_my_entrypoint (void); + * + * Main job of this routine is to distinguish between a cold and a warm + * boot. On JUNO platform, this distinction is based on the contents of + * the Trusted Mailbox. It is initialised to zero by the SCP before the + * AP cores are released from reset. Therefore, a zero mailbox means + * it's a cold reset. 
If it is a warm boot then a request to reset to + * AArch32 state is issued. This is the only way to reset to AArch32 + * in EL3 on Juno. A trampoline located at the high vector address + * has already been prepared by BL1. + * + * This function returns the contents of the mailbox, i.e.: + * - 0 for a cold boot; + * - request warm reset in AArch32 state for warm boot case; + * --------------------------------------------------------------------- + */ +func plat_get_my_entrypoint + mov_imm x0, PLAT_ARM_TRUSTED_MAILBOX_BASE + ldr x0, [x0] + cbz x0, return + b juno_do_reset_to_aarch32_state +1: + b 1b +return: + ret +endfunc plat_get_my_entrypoint + +/* + * Emit a "movw r0, #imm16" which moves the lower + * 16 bits of `_val` into r0. + */ +.macro emit_movw _reg_d, _val + mov_imm \_reg_d, (0xe3000000 | \ + ((\_val & 0xfff) | \ + ((\_val & 0xf000) << 4))) +.endm + +/* + * Emit a "movt r0, #imm16" which moves the upper + * 16 bits of `_val` into r0. + */ +.macro emit_movt _reg_d, _val + mov_imm \_reg_d, (0xe3400000 | \ + (((\_val & 0x0fff0000) >> 16) | \ + ((\_val & 0xf0000000) >> 12))) +.endm + +/* + * This function writes the trampoline code at HI-VEC (0xFFFF0000) + * address which loads r0 with the entrypoint address for + * BL32 (a.k.a SP_MIN) when EL3 is in AArch32 mode. A warm reset + * to AArch32 mode is then requested by writing into RMR_EL3. + */ +func juno_reset_to_aarch32_state + emit_movw w0, BL32_BASE + emit_movt w1, BL32_BASE + /* opcode "bx r0" to branch using r0 in AArch32 mode */ + mov_imm w2, 0xe12fff10 + + /* Write the above opcodes at HI-VECTOR location */ + mov_imm x3, HI_VECTOR_BASE + str w0, [x3], #4 + str w1, [x3], #4 + str w2, [x3] + + bl juno_do_reset_to_aarch32_state +1: + b 1b +endfunc juno_reset_to_aarch32_state + +#endif /* JUNO_AARCH32_EL3_RUNTIME */ diff --git a/plat/arm/board/juno/include/platform_def.h b/plat/arm/board/juno/include/platform_def.h index f89f7b463..4da8ab06d 100644 --- a/plat/arm/board/juno/include/platform_def.h +++ b/plat/arm/board/juno/include/platform_def.h @@ -103,8 +103,8 @@ #endif #ifdef IMAGE_BL32 -# define PLAT_ARM_MMAP_ENTRIES 4 -# define MAX_XLAT_TABLES 3 +# define PLAT_ARM_MMAP_ENTRIES 5 +# define MAX_XLAT_TABLES 4 #endif /* diff --git a/plat/arm/board/juno/juno_bl1_setup.c b/plat/arm/board/juno/juno_bl1_setup.c index e805c9a38..93ca1c319 100644 --- a/plat/arm/board/juno/juno_bl1_setup.c +++ b/plat/arm/board/juno/juno_bl1_setup.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -32,11 +32,15 @@ #include #include #include +#include #include #include #define RESET_REASON_WDOG_RESET (0x2) +void juno_reset_to_aarch32_state(void); + + /******************************************************************************* * The following function checks if Firmware update is needed, * by checking if TOC in FIP image is valid or watchdog reset happened.
@@ -85,3 +89,15 @@ __dead2 void bl1_plat_fwu_done(void *client_cookie, void *reserved) while (1) wfi(); } + +#if JUNO_AARCH32_EL3_RUNTIME +void bl1_plat_prepare_exit(entry_point_info_t *ep_info) +{ +#if !ARM_DISABLE_TRUSTED_WDOG + /* Disable watchdog before leaving BL1 */ + sp805_stop(ARM_SP805_TWDG_BASE); +#endif + + juno_reset_to_aarch32_state(); +} +#endif /* JUNO_AARCH32_EL3_RUNTIME */ diff --git a/plat/arm/board/juno/juno_bl2_setup.c b/plat/arm/board/juno/juno_bl2_setup.c new file mode 100644 index 000000000..ffb6387e0 --- /dev/null +++ b/plat/arm/board/juno/juno_bl2_setup.c @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include + +#if JUNO_AARCH32_EL3_RUNTIME +/******************************************************************************* + * This function changes the spsr for BL32 image to bypass + * the check in BL1 AArch64 exception handler. This is needed in the aarch32 + * boot flow as the core comes up in aarch64 and to enter the BL32 image a warm + * reset in aarch32 state is required. + ******************************************************************************/ +int bl2_plat_handle_post_image_load(unsigned int image_id) +{ + int err = arm_bl2_handle_post_image_load(image_id); + + if (!err && (image_id == BL32_IMAGE_ID)) { + bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id); + assert(bl_mem_params); + bl_mem_params->ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS); + } + + return err; +} +#endif /* JUNO_AARCH32_EL3_RUNTIME */ diff --git a/plat/arm/board/juno/juno_pm.c b/plat/arm/board/juno/juno_pm.c deleted file mode 100644 index c0fa628ef..000000000 --- a/plat/arm/board/juno/juno_pm.c +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ -#include -#include - -/* - * Custom `validate_power_state` handler for Juno. According to PSCI - * Specification, interrupts targeted to cores in PSCI CPU SUSPEND should - * be able to resume it. On Juno, when the system power domain is suspended, - * the GIC is also powered down. The SCP resumes the final core to be suspend - * when an external wake-up event is received. But the other cores cannot be - * woken up by a targeted interrupt, because GIC doesn't forward these - * interrupts to the SCP. Due to this hardware limitation, we down-grade PSCI - * CPU SUSPEND requests targeted to the system power domain level - * to cluster power domain level. - * - * The system power domain suspend on Juno is only supported only via - * PSCI SYSTEM SUSPEND API. - */ -static int juno_validate_power_state(unsigned int power_state, - psci_power_state_t *req_state) -{ - int rc; - rc = arm_validate_power_state(power_state, req_state); - - /* - * Ensure that the system power domain level is never suspended - * via PSCI CPU SUSPEND API. Currently system suspend is only - * supported via PSCI SYSTEM SUSPEND API. - */ - req_state->pwr_domain_state[ARM_PWR_LVL2] = ARM_LOCAL_STATE_RUN; - return rc; -} - -/* - * Custom `translate_power_state_by_mpidr` handler for Juno. Unlike in the - * `juno_validate_power_state`, we do not down-grade the system power - * domain level request in `power_state` as it will be used to query the - * PSCI_STAT_COUNT/RESIDENCY at the system power domain level. - */ -static int juno_translate_power_state_by_mpidr(u_register_t mpidr, - unsigned int power_state, - psci_power_state_t *output_state) -{ - return arm_validate_power_state(power_state, output_state); -} - -/******************************************************************************* - * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard - * platform will take care of registering the handlers with PSCI. 
- ******************************************************************************/ -plat_psci_ops_t plat_arm_psci_pm_ops = { - .pwr_domain_on = css_pwr_domain_on, - .pwr_domain_on_finish = css_pwr_domain_on_finish, - .pwr_domain_off = css_pwr_domain_off, - .cpu_standby = css_cpu_standby, - .pwr_domain_suspend = css_pwr_domain_suspend, - .pwr_domain_suspend_finish = css_pwr_domain_suspend_finish, - .system_off = css_system_off, - .system_reset = css_system_reset, - .validate_power_state = juno_validate_power_state, - .validate_ns_entrypoint = arm_validate_ns_entrypoint, - .get_sys_suspend_power_state = css_get_sys_suspend_power_state, - .translate_power_state_by_mpidr = juno_translate_power_state_by_mpidr, - .get_node_hw_state = css_node_hw_state -}; diff --git a/plat/arm/board/juno/platform.mk b/plat/arm/board/juno/platform.mk index 39977240b..e29f8c869 100644 --- a/plat/arm/board/juno/platform.mk +++ b/plat/arm/board/juno/platform.mk @@ -48,8 +48,14 @@ endif PLAT_INCLUDES := -Iplat/arm/board/juno/include -PLAT_BL_COMMON_SOURCES := plat/arm/board/juno/aarch64/juno_helpers.S +PLAT_BL_COMMON_SOURCES := plat/arm/board/juno/${ARCH}/juno_helpers.S +# Flag to enable support for AArch32 state on JUNO +JUNO_AARCH32_EL3_RUNTIME := 0 +$(eval $(call assert_boolean,JUNO_AARCH32_EL3_RUNTIME)) +$(eval $(call add_define,JUNO_AARCH32_EL3_RUNTIME)) + +ifeq (${ARCH},aarch64) BL1_SOURCES += lib/cpus/aarch64/cortex_a53.S \ lib/cpus/aarch64/cortex_a57.S \ lib/cpus/aarch64/cortex_a72.S \ @@ -59,6 +65,7 @@ BL1_SOURCES += lib/cpus/aarch64/cortex_a53.S \ ${JUNO_SECURITY_SOURCES} BL2_SOURCES += plat/arm/board/juno/juno_err.c \ + plat/arm/board/juno/juno_bl2_setup.c \ ${JUNO_SECURITY_SOURCES} BL2U_SOURCES += ${JUNO_SECURITY_SOURCES} @@ -66,11 +73,11 @@ BL2U_SOURCES += ${JUNO_SECURITY_SOURCES} BL31_SOURCES += lib/cpus/aarch64/cortex_a53.S \ lib/cpus/aarch64/cortex_a57.S \ lib/cpus/aarch64/cortex_a72.S \ - plat/arm/board/juno/juno_pm.c \ plat/arm/board/juno/juno_topology.c \ ${JUNO_GIC_SOURCES} \ ${JUNO_INTERCONNECT_SOURCES} \ ${JUNO_SECURITY_SOURCES} +endif # Enable workarounds for selected Cortex-A53 and A57 errata. ERRATA_A53_855873 := 1 diff --git a/plat/arm/board/juno/sp_min/sp_min-juno.mk b/plat/arm/board/juno/sp_min/sp_min-juno.mk new file mode 100644 index 000000000..fb3c55e7e --- /dev/null +++ b/plat/arm/board/juno/sp_min/sp_min-juno.mk @@ -0,0 +1,47 @@ +# +# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# Neither the name of ARM nor the names of its contributors may be used +# to endorse or promote products derived from this software without specific +# prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# + +# SP_MIN source files specific to JUNO platform +BL32_SOURCES += lib/cpus/aarch32/cortex_a53.S \ + lib/cpus/aarch32/cortex_a57.S \ + lib/cpus/aarch32/cortex_a72.S \ + plat/arm/board/juno/juno_pm.c \ + plat/arm/board/juno/juno_topology.c \ + plat/arm/css/common/css_pm.c \ + plat/arm/css/common/css_topology.c \ + plat/arm/soc/common/soc_css_security.c \ + plat/arm/css/drivers/scp/css_pm_scpi.c \ + plat/arm/css/drivers/scpi/css_mhu.c \ + plat/arm/css/drivers/scpi/css_scpi.c \ + ${JUNO_GIC_SOURCES} \ + ${JUNO_INTERCONNECT_SOURCES} \ + ${JUNO_SECURITY_SOURCES} + +include plat/arm/common/sp_min/arm_sp_min.mk diff --git a/plat/arm/common/aarch32/arm_helpers.S b/plat/arm/common/aarch32/arm_helpers.S index 5d238ecb5..51e5ee9a0 100644 --- a/plat/arm/common/aarch32/arm_helpers.S +++ b/plat/arm/common/aarch32/arm_helpers.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -31,9 +31,10 @@ #include .weak plat_arm_calc_core_pos - .weak plat_crash_console_init - .weak plat_crash_console_putc .weak plat_my_core_pos + .globl plat_crash_console_init + .globl plat_crash_console_putc + .globl plat_crash_console_flush /* ----------------------------------------------------- * unsigned int plat_my_core_pos(void) @@ -85,3 +86,16 @@ func plat_crash_console_putc ldr r1, =PLAT_ARM_CRASH_UART_BASE b console_core_putc endfunc plat_crash_console_putc + + /* --------------------------------------------- + * int plat_crash_console_flush() + * Function to force a write of all buffered + * data that hasn't been output. + * Out : return -1 on error else return 0. + * Clobber list : r0 - r1 + * --------------------------------------------- + */ +func plat_crash_console_flush + ldr r1, =PLAT_ARM_CRASH_UART_BASE + b console_core_flush +endfunc plat_crash_console_flush diff --git a/plat/arm/common/aarch64/arm_helpers.S b/plat/arm/common/aarch64/arm_helpers.S index d782020ae..60ff834a1 100644 --- a/plat/arm/common/aarch64/arm_helpers.S +++ b/plat/arm/common/aarch64/arm_helpers.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -34,6 +34,7 @@ .weak plat_my_core_pos .globl plat_crash_console_init .globl plat_crash_console_putc + .globl plat_crash_console_flush .globl platform_mem_init @@ -88,6 +89,19 @@ func plat_crash_console_putc b console_core_putc endfunc plat_crash_console_putc + /* --------------------------------------------- + * int plat_crash_console_flush() + * Function to force a write of all buffered + * data that hasn't been output. 
+ * Out : return -1 on error else return 0. + * Clobber list : r0 - r1 + * --------------------------------------------- + */ +func plat_crash_console_flush + mov_imm x1, PLAT_ARM_CRASH_UART_BASE + b console_core_flush +endfunc plat_crash_console_flush + /* --------------------------------------------------------------------- * We don't need to carry out any memory initialization on ARM * platforms. The Secure RAM is accessible straight away. diff --git a/plat/arm/common/arm_bl1_setup.c b/plat/arm/common/arm_bl1_setup.c index 8c1fde43f..c588f965c 100644 --- a/plat/arm/common/arm_bl1_setup.c +++ b/plat/arm/common/arm_bl1_setup.c @@ -30,13 +30,13 @@ #include #include +#include #include #include #include #include #include #include -#include #include "../../../bl1/bl1_private.h" /* Weak definitions may be overridden in specific ARM standard platform */ @@ -44,6 +44,7 @@ #pragma weak bl1_plat_arch_setup #pragma weak bl1_platform_setup #pragma weak bl1_plat_sec_mem_layout +#pragma weak bl1_plat_prepare_exit /* Data structure which holds the extents of the trusted SRAM for BL1*/ diff --git a/plat/arm/common/arm_bl2_setup.c b/plat/arm/common/arm_bl2_setup.c index 007108d12..66e350aad 100644 --- a/plat/arm/common/arm_bl2_setup.c +++ b/plat/arm/common/arm_bl2_setup.c @@ -249,11 +249,7 @@ void bl2_plat_arch_setup(void) } #if LOAD_IMAGE_V2 -/******************************************************************************* - * This function can be used by the platforms to update/use image - * information for given `image_id`. - ******************************************************************************/ -int bl2_plat_handle_post_image_load(unsigned int image_id) +int arm_bl2_handle_post_image_load(unsigned int image_id) { int err = 0; bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id); @@ -286,6 +282,15 @@ int bl2_plat_handle_post_image_load(unsigned int image_id) return err; } +/******************************************************************************* + * This function can be used by the platforms to update/use image + * information for given `image_id`. 
+ ******************************************************************************/ +int bl2_plat_handle_post_image_load(unsigned int image_id) +{ + return arm_bl2_handle_post_image_load(image_id); +} + #else /* LOAD_IMAGE_V2 */ /******************************************************************************* diff --git a/plat/arm/common/arm_common.c b/plat/arm/common/arm_common.c index aade22129..3d67ef767 100644 --- a/plat/arm/common/arm_common.c +++ b/plat/arm/common/arm_common.c @@ -29,12 +29,12 @@ */ #include #include +#include #include #include #include #include #include -#include extern const mmap_region_t plat_arm_mmap[]; diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk index 891e2fbd4..9cf2b7e05 100644 --- a/plat/arm/common/arm_common.mk +++ b/plat/arm/common/arm_common.mk @@ -95,6 +95,11 @@ ARM_PLAT_MT := 0 $(eval $(call assert_boolean,ARM_PLAT_MT)) $(eval $(call add_define,ARM_PLAT_MT)) +# Use translation tables library v2 by default +ARM_XLAT_TABLES_LIB_V1 := 0 +$(eval $(call assert_boolean,ARM_XLAT_TABLES_LIB_V1)) +$(eval $(call add_define,ARM_XLAT_TABLES_LIB_V1)) + # Enable PSCI_STAT_COUNT/RESIDENCY APIs on ARM platforms ENABLE_PSCI_STAT := 1 ENABLE_PMF := 1 @@ -113,11 +118,17 @@ ifeq (${ARCH}, aarch64) PLAT_INCLUDES += -Iinclude/plat/arm/common/aarch64 endif +PLAT_BL_COMMON_SOURCES += plat/arm/common/${ARCH}/arm_helpers.S \ + plat/arm/common/arm_common.c + +ifeq (${ARM_XLAT_TABLES_LIB_V1}, 1) +PLAT_BL_COMMON_SOURCES += lib/xlat_tables/xlat_tables_common.c \ + lib/xlat_tables/${ARCH}/xlat_tables.c +else include lib/xlat_tables_v2/xlat_tables.mk -PLAT_BL_COMMON_SOURCES += ${XLAT_TABLES_LIB_SRCS} \ - plat/arm/common/${ARCH}/arm_helpers.S \ - plat/arm/common/arm_common.c +PLAT_BL_COMMON_SOURCES += ${XLAT_TABLES_LIB_SRCS} +endif BL1_SOURCES += drivers/arm/sp805/sp805.c \ drivers/io/io_fip.c \ @@ -137,8 +148,14 @@ BL2_SOURCES += drivers/io/io_fip.c \ plat/arm/common/arm_bl2_setup.c \ plat/arm/common/arm_io_storage.c ifeq (${LOAD_IMAGE_V2},1) -BL2_SOURCES += plat/arm/common/${ARCH}/arm_bl2_mem_params_desc.c\ - plat/arm/common/arm_image_load.c \ +# Because BL1/BL2 execute in AArch64 mode but BL32 in AArch32 we need to use +# the AArch32 descriptors. +ifeq (${JUNO_AARCH32_EL3_RUNTIME},1) +BL2_SOURCES += plat/arm/common/aarch32/arm_bl2_mem_params_desc.c +else +BL2_SOURCES += plat/arm/common/${ARCH}/arm_bl2_mem_params_desc.c +endif +BL2_SOURCES += plat/arm/common/arm_image_load.c \ common/desc_image_load.c endif diff --git a/plat/arm/css/common/aarch32/css_helpers.S b/plat/arm/css/common/aarch32/css_helpers.S new file mode 100644 index 000000000..b7075bd7c --- /dev/null +++ b/plat/arm/css/common/aarch32/css_helpers.S @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include +#include + + .weak plat_secondary_cold_boot_setup + .weak plat_get_my_entrypoint + .globl css_calc_core_pos_swap_cluster + .weak plat_is_my_cpu_primary + + /* --------------------------------------------------------------------- + * void plat_secondary_cold_boot_setup(void); + * In the normal boot flow, cold-booting secondary + * CPUs is not yet implemented and they panic. + * --------------------------------------------------------------------- + */ +func plat_secondary_cold_boot_setup + /* TODO: Implement secondary CPU cold boot setup on CSS platforms */ +cb_panic: + b cb_panic +endfunc plat_secondary_cold_boot_setup + + /* --------------------------------------------------------------------- + * uintptr_t plat_get_my_entrypoint (void); + * + * Main job of this routine is to distinguish between a cold and a warm + * boot. On CSS platforms, this distinction is based on the contents of + * the Trusted Mailbox. It is initialised to zero by the SCP before the + * AP cores are released from reset. Therefore, a zero mailbox means + * it's a cold reset. + * + * This functions returns the contents of the mailbox, i.e.: + * - 0 for a cold boot; + * - the warm boot entrypoint for a warm boot. + * --------------------------------------------------------------------- + */ +func plat_get_my_entrypoint + ldr r0, =PLAT_ARM_TRUSTED_MAILBOX_BASE + ldr r0, [r0] + bx lr +endfunc plat_get_my_entrypoint + + /* ----------------------------------------------------------- + * unsigned int css_calc_core_pos_swap_cluster(u_register_t mpidr) + * Utility function to calculate the core position by + * swapping the cluster order. This is necessary in order to + * match the format of the boot information passed by the SCP + * and read in plat_is_my_cpu_primary below. 
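+ * As a worked example (an illustrative sketch, assuming the usual two-cluster MPIDR layout with MPIDR_AFFINITY_BITS = 8): the result is Aff0 + ((Aff1 ^ 1) * 4), so MPIDR 0x100 (cluster 1, CPU 0) yields position 0 while MPIDR 0x001 (cluster 0, CPU 1) yields position 5.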
+ * ----------------------------------------------------------- + */ +func css_calc_core_pos_swap_cluster + and r1, r0, #MPIDR_CPU_MASK + and r0, r0, #MPIDR_CLUSTER_MASK + eor r0, r0, #(1 << MPIDR_AFFINITY_BITS) // swap cluster order + add r0, r1, r0, LSR #6 + bx lr +endfunc css_calc_core_pos_swap_cluster + + /* ----------------------------------------------------- + * unsigned int plat_is_my_cpu_primary (void); + * + * Find out whether the current cpu is the primary + * cpu (applicable only after a cold boot) + * ----------------------------------------------------- + */ +func plat_is_my_cpu_primary + mov r10, lr + bl plat_my_core_pos + ldr r1, =SCP_BOOT_CFG_ADDR + ldr r1, [r1] + ubfx r1, r1, #PLAT_CSS_PRIMARY_CPU_SHIFT, \ + #PLAT_CSS_PRIMARY_CPU_BIT_WIDTH + cmp r0, r1 + moveq r0, #1 + movne r0, #0 + bx r10 +endfunc plat_is_my_cpu_primary diff --git a/plat/arm/css/common/css_common.mk b/plat/arm/css/common/css_common.mk index 86ba6df5f..24215a5d1 100644 --- a/plat/arm/css/common/css_common.mk +++ b/plat/arm/css/common/css_common.mk @@ -36,7 +36,7 @@ PLAT_INCLUDES += -Iinclude/plat/arm/css/common \ -Iinclude/plat/arm/css/common/aarch64 -PLAT_BL_COMMON_SOURCES += plat/arm/css/common/aarch64/css_helpers.S +PLAT_BL_COMMON_SOURCES += plat/arm/css/common/${ARCH}/css_helpers.S BL1_SOURCES += plat/arm/css/common/css_bl1_setup.c @@ -64,7 +64,7 @@ $(eval $(call assert_boolean,CSS_LOAD_SCP_IMAGES)) $(eval $(call add_define,CSS_LOAD_SCP_IMAGES)) ifeq (${CSS_LOAD_SCP_IMAGES},1) - $(eval $(call FIP_ADD_IMG,SCP_BL2,--scp-fw)) + NEED_SCP_BL2 := yes ifneq (${TRUSTED_BOARD_BOOT},0) $(eval $(call FWU_FIP_ADD_IMG,SCP_BL2U,--scp-fwu-cfg)) endif diff --git a/plat/arm/css/common/css_pm.c b/plat/arm/css/common/css_pm.c index d4dd0af7d..21ce86551 100644 --- a/plat/arm/css/common/css_pm.c +++ b/plat/arm/css/common/css_pm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -75,6 +75,13 @@ const unsigned int arm_pm_idle_states[] = { CASSERT(PLAT_MAX_PWR_LVL >= ARM_PWR_LVL1, assert_max_pwr_lvl_supported_mismatch); +/* + * Ensure that the PLAT_MAX_PWR_LVL is not greater than CSS_SYSTEM_PWR_DMN_LVL + * assumed by the CSS layer. + */ +CASSERT(PLAT_MAX_PWR_LVL <= CSS_SYSTEM_PWR_DMN_LVL, + assert_max_pwr_lvl_higher_than_css_sys_lvl); + /******************************************************************************* * Handler called when a power domain is about to be turned on. The * level and mpidr determine the affinity instance. @@ -243,7 +250,7 @@ void css_get_sys_suspend_power_state(psci_power_state_t *req_state) * System Suspend is supported only if the system power domain node * is implemented. */ - assert(PLAT_MAX_PWR_LVL >= ARM_PWR_LVL2); + assert(PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL); for (i = ARM_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++) req_state->pwr_domain_state[i] = ARM_LOCAL_STATE_OFF; @@ -257,6 +264,39 @@ int css_node_hw_state(u_register_t mpidr, unsigned int power_level) return css_scp_get_power_state(mpidr, power_level); } +/* + * The system power domain suspend is supported only via + * the PSCI SYSTEM_SUSPEND API. A PSCI CPU_SUSPEND request to the system power domain + * will be downgraded to the lower level. 
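+ * For example (an illustrative note, assuming arm_validate_power_state() accepts the request): a CPU_SUSPEND targeting CSS_SYSTEM_PWR_DMN_LVL has its system-level entry forced back to ARM_LOCAL_STATE_RUN below, so at most the cluster level is actually powered down.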
+ */ +static int css_validate_power_state(unsigned int power_state, + psci_power_state_t *req_state) +{ + int rc; + rc = arm_validate_power_state(power_state, req_state); + + /* + * Ensure that the system power domain level is never suspended + * via PSCI CPU SUSPEND API. Currently system suspend is only + * supported via PSCI SYSTEM SUSPEND API. + */ + req_state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] = ARM_LOCAL_STATE_RUN; + return rc; +} + +/* + * Custom `translate_power_state_by_mpidr` handler for CSS. Unlike in the + * `css_validate_power_state`, we do not downgrade the system power + * domain level request in `power_state` as it will be used to query the + * PSCI_STAT_COUNT/RESIDENCY at the system power domain level. + */ +static int css_translate_power_state_by_mpidr(u_register_t mpidr, + unsigned int power_state, + psci_power_state_t *output_state) +{ + return arm_validate_power_state(power_state, output_state); +} + /******************************************************************************* * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard * platform will take care of registering the handlers with PSCI. @@ -270,7 +310,9 @@ plat_psci_ops_t plat_arm_psci_pm_ops = { .pwr_domain_suspend_finish = css_pwr_domain_suspend_finish, .system_off = css_system_off, .system_reset = css_system_reset, - .validate_power_state = arm_validate_power_state, + .validate_power_state = css_validate_power_state, .validate_ns_entrypoint = arm_validate_ns_entrypoint, - .get_node_hw_state = css_node_hw_state + .translate_power_state_by_mpidr = css_translate_power_state_by_mpidr, + .get_node_hw_state = css_node_hw_state, + .get_sys_suspend_power_state = css_get_sys_suspend_power_state }; diff --git a/plat/arm/css/drivers/scp/css_pm_scpi.c b/plat/arm/css/drivers/scp/css_pm_scpi.c index e22504d1d..3b643e669 100644 --- a/plat/arm/css/drivers/scp/css_pm_scpi.c +++ b/plat/arm/css/drivers/scp/css_pm_scpi.c @@ -32,6 +32,7 @@ #include #include #include +#include #include "../scpi/css_scpi.h" #include "css_scp.h" @@ -134,6 +135,12 @@ void __dead2 css_scp_sys_shutdown(void) { uint32_t response; + /* + * Disable GIC CPU interface to prevent pending interrupt + * from waking up the AP from WFI. + */ + plat_arm_gic_cpuif_disable(); + /* Send the power down request to the SCP */ response = scpi_sys_power_state(scpi_system_shutdown); @@ -153,6 +160,12 @@ void __dead2 css_scp_sys_reboot(void) { uint32_t response; + /* + * Disable GIC CPU interface to prevent pending interrupt + * from waking up the AP from WFI. + */ + plat_arm_gic_cpuif_disable(); + /* Send the system reset request to the SCP */ response = scpi_sys_power_state(scpi_system_reboot); diff --git a/plat/common/aarch32/platform_helpers.S b/plat/common/aarch32/platform_helpers.S index 802e1fe6b..357719bfe 100644 --- a/plat/common/aarch32/platform_helpers.S +++ b/plat/common/aarch32/platform_helpers.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -31,11 +31,43 @@ #include #include + .weak plat_crash_console_init + .weak plat_crash_console_putc + .weak plat_crash_console_flush .weak plat_reset_handler .weak plat_disable_acp .weak platform_mem_init .weak plat_panic_handler + /* ----------------------------------------------------- + * Placeholder function which should be redefined by + * each platform. + * ----------------------------------------------------- + */ +func plat_crash_console_init + mov r0, #0 + bx lr +endfunc plat_crash_console_init + + /* ----------------------------------------------------- + * Placeholder function which should be redefined by + * each platform. + * ----------------------------------------------------- + */ +func plat_crash_console_putc + bx lr +endfunc plat_crash_console_putc + + /* ----------------------------------------------------- + * Placeholder function which should be redefined by + * each platform. + * ----------------------------------------------------- + */ +func plat_crash_console_flush + mov r0, #0 + bx lr +endfunc plat_crash_console_flush + /* ----------------------------------------------------- * Placeholder function which should be redefined by * each platform. diff --git a/plat/common/aarch64/platform_helpers.S b/plat/common/aarch64/platform_helpers.S index 68bda2233..ce4773845 100644 --- a/plat/common/aarch64/platform_helpers.S +++ b/plat/common/aarch64/platform_helpers.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -35,6 +35,7 @@ .weak plat_report_exception .weak plat_crash_console_init .weak plat_crash_console_putc + .weak plat_crash_console_flush .weak plat_reset_handler .weak plat_disable_acp .weak bl1_plat_prepare_exit @@ -96,6 +97,15 @@ func plat_crash_console_putc ret endfunc plat_crash_console_putc + /* ----------------------------------------------------- + * Placeholder function which should be redefined by + * each platform. + * ----------------------------------------------------- + */ +func plat_crash_console_flush + ret +endfunc plat_crash_console_flush + /* ----------------------------------------------------- * Placeholder function which should be redefined by * each platform. This function should preserve x19 - x29. diff --git a/plat/common/aarch64/platform_mp_stack.S b/plat/common/aarch64/platform_mp_stack.S index e3063d14b..322e3bb74 100644 --- a/plat/common/aarch64/platform_mp_stack.S +++ b/plat/common/aarch64/platform_mp_stack.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -131,7 +131,7 @@ endfunc platform_set_stack * ------------------------------------------------------- */ func_deprecated platform_get_stack -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS mrs x1, mpidr_el1 cmp x0, x1 ASM_ASSERT(eq) @@ -150,7 +150,7 @@ endfunc_deprecated platform_get_stack * ----------------------------------------------------- */ func_deprecated platform_set_stack -#if ASM_ASSERTION +#if ENABLE_ASSERTIONS mrs x1, mpidr_el1 cmp x0, x1 ASM_ASSERT(eq) diff --git a/plat/mediatek/common/drivers/uart/8250_console.S b/plat/mediatek/common/drivers/uart/8250_console.S index 5b0ae6df3..8da248c13 100644 --- a/plat/mediatek/common/drivers/uart/8250_console.S +++ b/plat/mediatek/common/drivers/uart/8250_console.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -33,6 +33,7 @@ .globl console_core_init .globl console_core_putc .globl console_core_getc + .globl console_core_flush /* ----------------------------------------------- * int console_core_init(unsigned long base_addr, @@ -170,3 +171,18 @@ getc_error: mov w0, #-1 ret endfunc console_core_getc + + /* --------------------------------------------- + * int console_core_flush(uintptr_t base_addr) + * Function to force a write of all buffered + * data that hasn't been output. + * In : x0 - console base address + * Out : return -1 on error else return 0. + * Clobber list : x0, x1 + * --------------------------------------------- + */ +func console_core_flush + /* Placeholder */ + mov w0, #0 + ret +endfunc console_core_flush diff --git a/plat/nvidia/tegra/common/aarch64/tegra_helpers.S b/plat/nvidia/tegra/common/aarch64/tegra_helpers.S index 308753ecb..2b3901a24 100644 --- a/plat/nvidia/tegra/common/aarch64/tegra_helpers.S +++ b/plat/nvidia/tegra/common/aarch64/tegra_helpers.S @@ -243,11 +243,12 @@ endfunc platform_mem_init * --------------------------------------------- */ func plat_crash_console_init - adr x0, tegra_console_base - ldr x0, [x0] - mov_imm x1, TEGRA_BOOT_UART_CLK_IN_HZ - mov_imm x2, TEGRA_CONSOLE_BAUDRATE - b console_core_init + mov x0, #0 + adr x1, tegra_console_base + ldr x1, [x1] + cbz x1, 1f + mov w0, #1 +1: ret endfunc plat_crash_console_init /* --------------------------------------------- diff --git a/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v1.c b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v1.c index 859ecd53e..5609867af 100644 --- a/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v1.c +++ b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v1.c @@ -37,7 +37,7 @@ #include #include #include -#include +#include #define TEGRA_GPU_RESET_REG_OFFSET 0x28c #define GPU_RESET_BIT (1 << 24) @@ -135,17 +135,18 @@ static void tegra_clear_videomem(uintptr_t non_overlap_area_start, unsigned long long non_overlap_area_size) { /* - * Perform cache maintenance to ensure that the non-overlapping area is - * zeroed out. The first invalidation of this range ensures that - * possible evictions of dirty cache lines do not interfere with the - * 'zeromem' operation. 
Other CPUs could speculatively prefetch the - * main memory contents of this area between the first invalidation and - * the 'zeromem' operation. The second invalidation ensures that any - * such cache lines are removed as well. + * Map the NS memory first, clean it and then unmap it. */ - inv_dcache_range(non_overlap_area_start, non_overlap_area_size); + mmap_add_dynamic_region(non_overlap_area_start, /* PA */ + non_overlap_area_start, /* VA */ + non_overlap_area_size, /* size */ + MT_NS | MT_RW | MT_EXECUTE_NEVER); /* attrs */ + zeromem((void *)non_overlap_area_start, non_overlap_area_size); - inv_dcache_range(non_overlap_area_start, non_overlap_area_size); + flush_dcache_range(non_overlap_area_start, non_overlap_area_size); + + mmap_remove_dynamic_region(non_overlap_area_start, + non_overlap_area_size); } /* @@ -194,7 +195,6 @@ void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes) */ INFO("Cleaning previous Video Memory Carveout\n"); - disable_mmu_el3(); if (phys_base > vmem_end_old || video_mem_base > vmem_end_new) { tegra_clear_videomem(video_mem_base, video_mem_size << 20); } else { @@ -207,7 +207,6 @@ void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes) tegra_clear_videomem(vmem_end_new, non_overlap_area_size); } } - enable_mmu_el3(0); done: tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI, (uint32_t)(phys_base >> 32)); @@ -218,3 +217,29 @@ done: video_mem_base = phys_base; video_mem_size = size_in_bytes >> 20; } + +/* + * During boot, USB3 and flash media (SDMMC/SATA) devices need access to + * IRAM. Because these clients connect to the MC and do not have a direct + * path to the IRAM, the MC implements AHB redirection during boot to allow + * path to IRAM. In this mode, accesses to a programmed memory address aperture + * are directed to the AHB bus, allowing access to the IRAM. The AHB aperture + * is defined by the IRAM_BASE_LO and IRAM_BASE_HI registers, which are + * initialized to disable this aperture. + * + * Once bootup is complete, we must program IRAM base to 0xffffffff and + * IRAM top to 0x00000000, thus disabling access to IRAM. DRAM is then + * potentially accessible in this address range. These aperture registers + * also have an access_control/lock bit. After disabling the aperture, the + * access_control register should be programmed to lock the registers. + */ +void tegra_memctrl_disable_ahb_redirection(void) +{ + /* program the aperture registers */ + tegra_mc_write_32(MC_IRAM_BASE_LO, 0xFFFFFFFF); + tegra_mc_write_32(MC_IRAM_TOP_LO, 0); + tegra_mc_write_32(MC_IRAM_BASE_TOP_HI, 0); + + /* lock the aperture registers */ + tegra_mc_write_32(MC_IRAM_REG_CTRL, MC_DISABLE_IRAM_CFG_WRITES); +} diff --git a/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c index fb57b2b74..41a4ede80 100644 --- a/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c +++ b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -30,6 +30,7 @@ #include #include +#include #include #include #include @@ -48,193 +49,6 @@ static uint64_t video_mem_base; static uint64_t video_mem_size_mb; -/* array to hold stream_id override config register offsets */ -const static uint32_t streamid_overrides[] = { - MC_STREAMID_OVERRIDE_CFG_PTCR, - MC_STREAMID_OVERRIDE_CFG_AFIR, - MC_STREAMID_OVERRIDE_CFG_HDAR, - MC_STREAMID_OVERRIDE_CFG_HOST1XDMAR, - MC_STREAMID_OVERRIDE_CFG_NVENCSRD, - MC_STREAMID_OVERRIDE_CFG_SATAR, - MC_STREAMID_OVERRIDE_CFG_MPCORER, - MC_STREAMID_OVERRIDE_CFG_NVENCSWR, - MC_STREAMID_OVERRIDE_CFG_AFIW, - MC_STREAMID_OVERRIDE_CFG_SATAW, - MC_STREAMID_OVERRIDE_CFG_MPCOREW, - MC_STREAMID_OVERRIDE_CFG_SATAW, - MC_STREAMID_OVERRIDE_CFG_HDAW, - MC_STREAMID_OVERRIDE_CFG_ISPRA, - MC_STREAMID_OVERRIDE_CFG_ISPWA, - MC_STREAMID_OVERRIDE_CFG_ISPWB, - MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTR, - MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTW, - MC_STREAMID_OVERRIDE_CFG_XUSB_DEVR, - MC_STREAMID_OVERRIDE_CFG_XUSB_DEVW, - MC_STREAMID_OVERRIDE_CFG_TSECSRD, - MC_STREAMID_OVERRIDE_CFG_TSECSWR, - MC_STREAMID_OVERRIDE_CFG_GPUSRD, - MC_STREAMID_OVERRIDE_CFG_GPUSWR, - MC_STREAMID_OVERRIDE_CFG_SDMMCRA, - MC_STREAMID_OVERRIDE_CFG_SDMMCRAA, - MC_STREAMID_OVERRIDE_CFG_SDMMCR, - MC_STREAMID_OVERRIDE_CFG_SDMMCRAB, - MC_STREAMID_OVERRIDE_CFG_SDMMCWA, - MC_STREAMID_OVERRIDE_CFG_SDMMCWAA, - MC_STREAMID_OVERRIDE_CFG_SDMMCW, - MC_STREAMID_OVERRIDE_CFG_SDMMCWAB, - MC_STREAMID_OVERRIDE_CFG_VICSRD, - MC_STREAMID_OVERRIDE_CFG_VICSWR, - MC_STREAMID_OVERRIDE_CFG_VIW, - MC_STREAMID_OVERRIDE_CFG_NVDECSRD, - MC_STREAMID_OVERRIDE_CFG_NVDECSWR, - MC_STREAMID_OVERRIDE_CFG_APER, - MC_STREAMID_OVERRIDE_CFG_APEW, - MC_STREAMID_OVERRIDE_CFG_NVJPGSRD, - MC_STREAMID_OVERRIDE_CFG_NVJPGSWR, - MC_STREAMID_OVERRIDE_CFG_SESRD, - MC_STREAMID_OVERRIDE_CFG_SESWR, - MC_STREAMID_OVERRIDE_CFG_ETRR, - MC_STREAMID_OVERRIDE_CFG_ETRW, - MC_STREAMID_OVERRIDE_CFG_TSECSRDB, - MC_STREAMID_OVERRIDE_CFG_TSECSWRB, - MC_STREAMID_OVERRIDE_CFG_GPUSRD2, - MC_STREAMID_OVERRIDE_CFG_GPUSWR2, - MC_STREAMID_OVERRIDE_CFG_AXISR, - MC_STREAMID_OVERRIDE_CFG_AXISW, - MC_STREAMID_OVERRIDE_CFG_EQOSR, - MC_STREAMID_OVERRIDE_CFG_EQOSW, - MC_STREAMID_OVERRIDE_CFG_UFSHCR, - MC_STREAMID_OVERRIDE_CFG_UFSHCW, - MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR, - MC_STREAMID_OVERRIDE_CFG_BPMPR, - MC_STREAMID_OVERRIDE_CFG_BPMPW, - MC_STREAMID_OVERRIDE_CFG_BPMPDMAR, - MC_STREAMID_OVERRIDE_CFG_BPMPDMAW, - MC_STREAMID_OVERRIDE_CFG_AONR, - MC_STREAMID_OVERRIDE_CFG_AONW, - MC_STREAMID_OVERRIDE_CFG_AONDMAR, - MC_STREAMID_OVERRIDE_CFG_AONDMAW, - MC_STREAMID_OVERRIDE_CFG_SCER, - MC_STREAMID_OVERRIDE_CFG_SCEW, - MC_STREAMID_OVERRIDE_CFG_SCEDMAR, - MC_STREAMID_OVERRIDE_CFG_SCEDMAW, - MC_STREAMID_OVERRIDE_CFG_APEDMAR, - MC_STREAMID_OVERRIDE_CFG_APEDMAW, - MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR1, - MC_STREAMID_OVERRIDE_CFG_VICSRD1, - MC_STREAMID_OVERRIDE_CFG_NVDECSRD1 -}; - -/* array to hold the security configs for stream IDs */ -const static mc_streamid_security_cfg_t sec_cfgs[] = { - mc_make_sec_cfg(SCEW, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(AFIR, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(NVDISPLAYR1, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(XUSB_DEVR, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(VICSRD1, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(NVENCSWR, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(TSECSRDB, NON_SECURE, NO_OVERRIDE, ENABLE), - 
mc_make_sec_cfg(AXISW, SECURE, NO_OVERRIDE, DISABLE), - mc_make_sec_cfg(SDMMCWAB, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(AONDMAW, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(GPUSWR2, SECURE, NO_OVERRIDE, DISABLE), - mc_make_sec_cfg(SATAW, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(UFSHCW, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(AFIW, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(SDMMCR, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(SCEDMAW, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(UFSHCR, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(SDMMCWAA, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(SESWR, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(MPCORER, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(PTCR, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(BPMPW, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(ETRW, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(GPUSRD, SECURE, NO_OVERRIDE, DISABLE), - mc_make_sec_cfg(VICSWR, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(SCEDMAR, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(HDAW, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(ISPWA, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(EQOSW, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(XUSB_HOSTW, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(TSECSWR, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(SDMMCRAA, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(VIW, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(AXISR, SECURE, NO_OVERRIDE, DISABLE), - mc_make_sec_cfg(SDMMCW, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(BPMPDMAW, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(ISPRA, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(NVDECSWR, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(XUSB_DEVW, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(NVDECSRD, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(MPCOREW, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(NVDISPLAYR, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(BPMPDMAR, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(NVJPGSWR, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(NVDECSRD1, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(TSECSRD, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(NVJPGSRD, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(SDMMCWA, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(SCER, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(XUSB_HOSTR, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(VICSRD, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(AONDMAR, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(AONW, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(SDMMCRA, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(HOST1XDMAR, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(EQOSR, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(SATAR, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(BPMPR, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(HDAR, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(SDMMCRAB, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(ETRR, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(AONR, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(SESRD, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(NVENCSRD, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(GPUSWR, SECURE, NO_OVERRIDE, DISABLE), - mc_make_sec_cfg(TSECSWRB, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(ISPWB, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(GPUSRD2, SECURE, 
NO_OVERRIDE, DISABLE), - mc_make_sec_cfg(APEDMAW, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(APER, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(APEW, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(APEDMAR, NON_SECURE, NO_OVERRIDE, ENABLE), -}; - -const static mc_txn_override_cfg_t mc_override_cfgs[] = { - mc_make_txn_override_cfg(BPMPW, CGID_TAG_ADR), - mc_make_txn_override_cfg(EQOSW, CGID_TAG_ADR), - mc_make_txn_override_cfg(NVJPGSWR, CGID_TAG_ADR), - mc_make_txn_override_cfg(SDMMCWAA, CGID_TAG_ADR), - mc_make_txn_override_cfg(MPCOREW, CGID_TAG_ADR), - mc_make_txn_override_cfg(SCEDMAW, CGID_TAG_ADR), - mc_make_txn_override_cfg(SDMMCW, CGID_TAG_ADR), - mc_make_txn_override_cfg(AXISW, CGID_TAG_ADR), - mc_make_txn_override_cfg(TSECSWR, CGID_TAG_ADR), - mc_make_txn_override_cfg(GPUSWR, CGID_TAG_ADR), - mc_make_txn_override_cfg(XUSB_HOSTW, CGID_TAG_ADR), - mc_make_txn_override_cfg(TSECSWRB, CGID_TAG_ADR), - mc_make_txn_override_cfg(GPUSWR2, CGID_TAG_ADR), - mc_make_txn_override_cfg(AONDMAW, CGID_TAG_ADR), - mc_make_txn_override_cfg(AONW, CGID_TAG_ADR), - mc_make_txn_override_cfg(SESWR, CGID_TAG_ADR), - mc_make_txn_override_cfg(BPMPDMAW, CGID_TAG_ADR), - mc_make_txn_override_cfg(SDMMCWA, CGID_TAG_ADR), - mc_make_txn_override_cfg(HDAW, CGID_TAG_ADR), - mc_make_txn_override_cfg(NVDECSWR, CGID_TAG_ADR), - mc_make_txn_override_cfg(UFSHCW, CGID_TAG_ADR), - mc_make_txn_override_cfg(SATAW, CGID_TAG_ADR), - mc_make_txn_override_cfg(ETRW, CGID_TAG_ADR), - mc_make_txn_override_cfg(VICSWR, CGID_TAG_ADR), - mc_make_txn_override_cfg(NVENCSWR, CGID_TAG_ADR), - mc_make_txn_override_cfg(SDMMCWAB, CGID_TAG_ADR), - mc_make_txn_override_cfg(ISPWB, CGID_TAG_ADR), - mc_make_txn_override_cfg(APEW, CGID_TAG_ADR), - mc_make_txn_override_cfg(XUSB_DEVW, CGID_TAG_ADR), - mc_make_txn_override_cfg(AFIW, CGID_TAG_ADR), - mc_make_txn_override_cfg(SCEW, CGID_TAG_ADR), -}; - static void tegra_memctrl_reconfig_mss_clients(void) { #if ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS @@ -248,8 +62,10 @@ static void tegra_memctrl_reconfig_mss_clients(void) val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0); assert(val == MC_CLIENT_HOTRESET_CTRL0_RESET_VAL); - wdata_0 = MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB | - MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB | + wdata_0 = MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB | +#if ENABLE_AFI_DEVICE + MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB | +#endif MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB | MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB | MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB; @@ -296,7 +112,9 @@ static void tegra_memctrl_reconfig_mss_clients(void) * of control on overriding the memory type. So, remove TSA's * memtype override. */ +#if ENABLE_AFI_DEVICE mc_set_tsa_passthrough(AFIW); +#endif mc_set_tsa_passthrough(HDAW); mc_set_tsa_passthrough(SATAW); mc_set_tsa_passthrough(XUSB_HOSTW); @@ -321,15 +139,19 @@ static void tegra_memctrl_reconfig_mss_clients(void) * whose AXI IDs we know and trust. 
*/ +#if ENABLE_AFI_DEVICE /* Match AFIW */ mc_set_forced_coherent_so_dev_cfg(AFIR); +#endif /* * See bug 200131110 comment #35 - there are no normal requests * and AWID for SO/DEV requests is hardcoded in RTL for a * particular PCIE controller */ +#if ENABLE_AFI_DEVICE mc_set_forced_coherent_so_dev_cfg(AFIW); +#endif mc_set_forced_coherent_cfg(HDAR); mc_set_forced_coherent_cfg(HDAW); mc_set_forced_coherent_cfg(SATAR); @@ -374,7 +196,9 @@ static void tegra_memctrl_reconfig_mss_clients(void) * boot and strongly ordered MSS clients */ val = MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL & +#if ENABLE_AFI_DEVICE mc_set_pcfifo_unordered_boot_so_mss(1, AFIW) & +#endif mc_set_pcfifo_unordered_boot_so_mss(1, HDAW) & mc_set_pcfifo_unordered_boot_so_mss(1, SATAW); tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG1, val); @@ -411,7 +235,9 @@ static void tegra_memctrl_reconfig_mss_clients(void) * for boot and strongly ordered MSS clients */ val = MC_SMMU_CLIENT_CONFIG1_RESET_VAL & +#if ENABLE_AFI_DEVICE mc_set_smmu_unordered_boot_so_mss(1, AFIW) & +#endif mc_set_smmu_unordered_boot_so_mss(1, HDAW) & mc_set_smmu_unordered_boot_so_mss(1, SATAW); tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG1, val); @@ -480,66 +306,29 @@ static void tegra_memctrl_reconfig_mss_clients(void) #endif } -/* - * Init Memory controller during boot. - */ -void tegra_memctrl_setup(void) +static void tegra_memctrl_set_overrides(void) { - uint32_t val; - uint32_t num_overrides = sizeof(streamid_overrides) / sizeof(uint32_t); - uint32_t num_sec_cfgs = sizeof(sec_cfgs) / sizeof(mc_streamid_security_cfg_t); - uint32_t num_txn_overrides = sizeof(mc_override_cfgs) / sizeof(mc_txn_override_cfg_t); - int i; + tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings(); + const mc_txn_override_cfg_t *mc_txn_override_cfgs; + uint32_t num_txn_override_cfgs; + uint32_t i, val; - INFO("Tegra Memory Controller (v2)\n"); - - /* Program the SMMU pagesize */ - tegra_smmu_init(); - - /* Program all the Stream ID overrides */ - for (i = 0; i < num_overrides; i++) - tegra_mc_streamid_write_32(streamid_overrides[i], - MC_STREAM_ID_MAX); - - /* Program the security config settings for all Stream IDs */ - for (i = 0; i < num_sec_cfgs; i++) { - val = sec_cfgs[i].override_enable << 16 | - sec_cfgs[i].override_client_inputs << 8 | - sec_cfgs[i].override_client_ns_flag << 0; - tegra_mc_streamid_write_32(sec_cfgs[i].offset, val); - } - - /* - * All requests at boot time, and certain requests during - * normal run time, are physically addressed and must bypass - * the SMMU. The client hub logic implements a hardware bypass - * path around the Translation Buffer Units (TBU). During - * boot-time, the SMMU_BYPASS_CTRL register (which defaults to - * TBU_BYPASS mode) will be used to steer all requests around - * the uninitialized TBUs. During normal operation, this register - * is locked into TBU_BYPASS_SID config, which routes requests - * with special StreamID 0x7f on the bypass path and all others - * through the selected TBU. This is done to disable SMMU Bypass - * mode, as it could be used to circumvent SMMU security checks. - */ - tegra_mc_write_32(MC_SMMU_BYPASS_CONFIG, - MC_SMMU_BYPASS_CONFIG_SETTINGS); - - /* - * Re-configure MSS to allow ROC to deal with ordering of the - * Memory Controller traffic. This is needed as the Memory Controller - * boots with MSS having all control, but ROC provides a performance - * boost as compared to MSS. 
- */ - tegra_memctrl_reconfig_mss_clients(); + /* Get the settings from the platform */ + assert(plat_mc_settings); + mc_txn_override_cfgs = plat_mc_settings->txn_override_cfg; + num_txn_override_cfgs = plat_mc_settings->num_txn_override_cfgs; /* * Set the MC_TXN_OVERRIDE registers for write clients. */ - if (!tegra_platform_is_silicon() || - (tegra_platform_is_silicon() && tegra_get_chipid_minor() == 1)) { + if ((tegra_chipid_is_t186()) && + (!tegra_platform_is_silicon() || + (tegra_platform_is_silicon() && (tegra_get_chipid_minor() == 1)))) { - /* GPU and NVENC settings for rev. A01 */ + /* + * GPU and NVENC settings for Tegra186 simulation and + * Silicon rev. A01 + */ val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR); val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK; tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR, @@ -557,17 +346,85 @@ void tegra_memctrl_setup(void) } else { - /* settings for rev. A02 */ - for (i = 0; i < num_txn_overrides; i++) { - val = tegra_mc_read_32(mc_override_cfgs[i].offset); + /* + * Settings for Tegra186 silicon rev. A02 and onwards. + */ + for (i = 0; i < num_txn_override_cfgs; i++) { + val = tegra_mc_read_32(mc_txn_override_cfgs[i].offset); val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK; - tegra_mc_write_32(mc_override_cfgs[i].offset, - val | mc_override_cfgs[i].cgid_tag); + tegra_mc_write_32(mc_txn_override_cfgs[i].offset, + val | mc_txn_override_cfgs[i].cgid_tag); } - } } +/* + * Init Memory controller during boot. + */ +void tegra_memctrl_setup(void) +{ + uint32_t val; + const uint32_t *mc_streamid_override_regs; + uint32_t num_streamid_override_regs; + const mc_streamid_security_cfg_t *mc_streamid_sec_cfgs; + uint32_t num_streamid_sec_cfgs; + tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings(); + uint32_t i; + + INFO("Tegra Memory Controller (v2)\n"); + +#if ENABLE_SMMU_DEVICE + /* Program the SMMU pagesize */ + tegra_smmu_init(); +#endif + /* Get the settings from the platform */ + assert(plat_mc_settings); + mc_streamid_override_regs = plat_mc_settings->streamid_override_cfg; + num_streamid_override_regs = plat_mc_settings->num_streamid_override_cfgs; + mc_streamid_sec_cfgs = plat_mc_settings->streamid_security_cfg; + num_streamid_sec_cfgs = plat_mc_settings->num_streamid_security_cfgs; + + /* Program all the Stream ID overrides */ + for (i = 0; i < num_streamid_override_regs; i++) + tegra_mc_streamid_write_32(mc_streamid_override_regs[i], + MC_STREAM_ID_MAX); + + /* Program the security config settings for all Stream IDs */ + for (i = 0; i < num_streamid_sec_cfgs; i++) { + val = mc_streamid_sec_cfgs[i].override_enable << 16 | + mc_streamid_sec_cfgs[i].override_client_inputs << 8 | + mc_streamid_sec_cfgs[i].override_client_ns_flag << 0; + tegra_mc_streamid_write_32(mc_streamid_sec_cfgs[i].offset, val); + } + + /* + * All requests at boot time, and certain requests during + * normal run time, are physically addressed and must bypass + * the SMMU. The client hub logic implements a hardware bypass + * path around the Translation Buffer Units (TBU). During + * boot-time, the SMMU_BYPASS_CTRL register (which defaults to + * TBU_BYPASS mode) will be used to steer all requests around + * the uninitialized TBUs. During normal operation, this register + * is locked into TBU_BYPASS_SID config, which routes requests + * with special StreamID 0x7f on the bypass path and all others + * through the selected TBU. This is done to disable SMMU Bypass + * mode, as it could be used to circumvent SMMU security checks. 
+ */ + tegra_mc_write_32(MC_SMMU_BYPASS_CONFIG, + MC_SMMU_BYPASS_CONFIG_SETTINGS); + + /* + * Re-configure MSS to allow ROC to deal with ordering of the + * Memory Controller traffic. This is needed as the Memory Controller + * boots with MSS having all control, but ROC provides a performance + * boost as compared to MSS. + */ + tegra_memctrl_reconfig_mss_clients(); + + /* Program overrides for MC transactions */ + tegra_memctrl_set_overrides(); +} + /* * Restore Memory Controller settings after "System Suspend" */ @@ -581,6 +438,9 @@ void tegra_memctrl_restore_settings(void) */ tegra_memctrl_reconfig_mss_clients(); + /* Program overrides for MC transactions */ + tegra_memctrl_set_overrides(); + /* video memory carveout region */ if (video_mem_base) { tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO, @@ -658,13 +518,6 @@ void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes) index += 4) tegra_mc_write_32(index, 0); - /* - * Allow CPU read/write access to the aperture - */ - tegra_mc_write_32(MC_TZRAM_CARVEOUT_CLIENT_ACCESS_CFG1, - TZRAM_CARVEOUT_CPU_WRITE_ACCESS_BIT | - TZRAM_CARVEOUT_CPU_READ_ACCESS_BIT); - /* * Set the TZRAM base. TZRAM base must be 4k aligned, at least. */ @@ -743,3 +596,11 @@ void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes) */ mce_update_gsc_videomem(); } + +/* + * This feature exists only for v1 of the Tegra Memory Controller. + */ +void tegra_memctrl_disable_ahb_redirection(void) +{ + ; /* do nothing */ +} diff --git a/plat/nvidia/tegra/common/drivers/smmu/smmu.c b/plat/nvidia/tegra/common/drivers/smmu/smmu.c new file mode 100644 index 000000000..60fd30067 --- /dev/null +++ b/plat/nvidia/tegra/common/drivers/smmu/smmu.c @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +/* SMMU IDs currently supported by the driver */ +enum { + TEGRA_SMMU0, + TEGRA_SMMU1, + TEGRA_SMMU2 +}; + +static uint32_t tegra_smmu_read_32(uint32_t smmu_id, uint32_t off) +{ +#if defined(TEGRA_SMMU0_BASE) + if (smmu_id == TEGRA_SMMU0) + return mmio_read_32(TEGRA_SMMU0_BASE + off); +#endif + +#if defined(TEGRA_SMMU1_BASE) + if (smmu_id == TEGRA_SMMU1) + return mmio_read_32(TEGRA_SMMU1_BASE + off); +#endif + +#if defined(TEGRA_SMMU2_BASE) + if (smmu_id == TEGRA_SMMU2) + return mmio_read_32(TEGRA_SMMU2_BASE + off); +#endif + + return 0; +} + +static void tegra_smmu_write_32(uint32_t smmu_id, + uint32_t off, uint32_t val) +{ +#if defined(TEGRA_SMMU0_BASE) + if (smmu_id == TEGRA_SMMU0) + mmio_write_32(TEGRA_SMMU0_BASE + off, val); +#endif + +#if defined(TEGRA_SMMU1_BASE) + if (smmu_id == TEGRA_SMMU1) + mmio_write_32(TEGRA_SMMU1_BASE + off, val); +#endif + +#if defined(TEGRA_SMMU2_BASE) + if (smmu_id == TEGRA_SMMU2) + mmio_write_32(TEGRA_SMMU2_BASE + off, val); +#endif +} + +/* + * Save SMMU settings before "System Suspend" to TZDRAM + */ +void tegra_smmu_save_context(uint64_t smmu_ctx_addr) +{ + uint32_t i, num_entries = 0; + smmu_regs_t *smmu_ctx_regs; + plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); + uint64_t tzdram_base = params_from_bl2->tzdram_base; + uint64_t tzdram_end = tzdram_base + params_from_bl2->tzdram_size; + uint32_t reg_id1, pgshift, cb_size; + + /* sanity check SMMU settings c*/ + reg_id1 = mmio_read_32((TEGRA_SMMU0_BASE + SMMU_GNSR0_IDR1)); + pgshift = (reg_id1 & ID1_PAGESIZE) ? 16 : 12; + cb_size = (2 << pgshift) * \ + (1 << (((reg_id1 >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1)); + + assert(!((pgshift != PGSHIFT) || (cb_size != CB_SIZE))); + assert((smmu_ctx_addr >= tzdram_base) && (smmu_ctx_addr <= tzdram_end)); + + /* get SMMU context table */ + smmu_ctx_regs = plat_get_smmu_ctx(); + assert(smmu_ctx_regs); + + /* + * smmu_ctx_regs[0].val contains the size of the context table minus + * the last entry. Sanity check the table size before we start with + * the context save operation. + */ + while (smmu_ctx_regs[num_entries].val != 0xFFFFFFFFU) { + num_entries++; + } + + /* panic if the sizes do not match */ + if (num_entries != smmu_ctx_regs[0].val) + panic(); + + /* save SMMU register values */ + for (i = 1; i < num_entries; i++) + smmu_ctx_regs[i].val = mmio_read_32(smmu_ctx_regs[i].reg); + + /* increment by 1 to take care of the last entry */ + num_entries++; + + /* Save SMMU config settings */ + memcpy16((void *)(uintptr_t)smmu_ctx_addr, (void *)smmu_ctx_regs, + (sizeof(smmu_regs_t) * num_entries)); + + /* save the SMMU table address */ + mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV11_LO, + (uint32_t)smmu_ctx_addr); + mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV11_HI, + (uint32_t)(smmu_ctx_addr >> 32)); +} + +#define SMMU_NUM_CONTEXTS 64 +#define SMMU_CONTEXT_BANK_MAX_IDX 64 + +/* + * Init SMMU during boot or "System Suspend" exit + */ +void tegra_smmu_init(void) +{ + uint32_t val, cb_idx, smmu_id, ctx_base; + + for (smmu_id = 0; smmu_id < NUM_SMMU_DEVICES; smmu_id++) { + /* Program the SMMU pagesize and reset CACHE_LOCK bit */ + val = tegra_smmu_read_32(smmu_id, SMMU_GSR0_SECURE_ACR); + val |= SMMU_GSR0_PGSIZE_64K; + val &= ~SMMU_ACR_CACHE_LOCK_ENABLE_BIT; + tegra_smmu_write_32(smmu_id, SMMU_GSR0_SECURE_ACR, val); + + /* reset CACHE LOCK bit for NS Aux. Config. 
Register */ + val = tegra_smmu_read_32(smmu_id, SMMU_GNSR_ACR); + val &= ~SMMU_ACR_CACHE_LOCK_ENABLE_BIT; + tegra_smmu_write_32(smmu_id, SMMU_GNSR_ACR, val); + + /* disable TCU prefetch for all contexts */ + ctx_base = (SMMU_GSR0_PGSIZE_64K * SMMU_NUM_CONTEXTS) + + SMMU_CBn_ACTLR; + for (cb_idx = 0; cb_idx < SMMU_CONTEXT_BANK_MAX_IDX; cb_idx++) { + val = tegra_smmu_read_32(smmu_id, + ctx_base + (SMMU_GSR0_PGSIZE_64K * cb_idx)); + val &= ~SMMU_CBn_ACTLR_CPRE_BIT; + tegra_smmu_write_32(smmu_id, ctx_base + + (SMMU_GSR0_PGSIZE_64K * cb_idx), val); + } + + /* set CACHE LOCK bit for NS Aux. Config. Register */ + val = tegra_smmu_read_32(smmu_id, SMMU_GNSR_ACR); + val |= SMMU_ACR_CACHE_LOCK_ENABLE_BIT; + tegra_smmu_write_32(smmu_id, SMMU_GNSR_ACR, val); + + /* set CACHE LOCK bit for S Aux. Config. Register */ + val = tegra_smmu_read_32(smmu_id, SMMU_GSR0_SECURE_ACR); + val |= SMMU_ACR_CACHE_LOCK_ENABLE_BIT; + tegra_smmu_write_32(smmu_id, SMMU_GSR0_SECURE_ACR, val); + } +} diff --git a/plat/nvidia/tegra/common/tegra_bl31_setup.c b/plat/nvidia/tegra/common/tegra_bl31_setup.c index 9e7e576c2..8307af706 100644 --- a/plat/nvidia/tegra/common/tegra_bl31_setup.c +++ b/plat/nvidia/tegra/common/tegra_bl31_setup.c @@ -202,9 +202,6 @@ void bl31_early_platform_setup(bl31_params_t *from_bl2, */ console_init(tegra_console_base, TEGRA_BOOT_UART_CLK_IN_HZ, TEGRA_CONSOLE_BAUDRATE); - - /* Initialise crash console */ - plat_crash_console_init(); } /* @@ -299,7 +296,16 @@ void bl31_platform_setup(void) ******************************************************************************/ void bl31_plat_runtime_setup(void) { - ; /* do nothing */ + /* + * During boot, USB3 and flash media (SDMMC/SATA) devices need + * access to IRAM. Because these clients connect to the MC and + * do not have a direct path to the IRAM, the MC implements AHB + * redirection during boot to allow path to IRAM. In this mode + * accesses to a programmed memory address aperture are directed + * to the AHB bus, allowing access to the IRAM. This mode must be + * disabled before we jump to the non-secure world. + */ + tegra_memctrl_disable_ahb_redirection(); } /******************************************************************************* diff --git a/plat/nvidia/tegra/common/tegra_common.mk b/plat/nvidia/tegra/common/tegra_common.mk index d6bd2eab2..cb4d18863 100644 --- a/plat/nvidia/tegra/common/tegra_common.mk +++ b/plat/nvidia/tegra/common/tegra_common.mk @@ -28,22 +28,12 @@ # POSSIBILITY OF SUCH DAMAGE. 
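[Editor's note] The bl31_plat_runtime_setup() change above calls tegra_memctrl_disable_ahb_redirection() so that the MC's boot-time AHB redirection of the IRAM aperture is switched off before control passes to the non-secure world. The working implementation belongs to the v1 memory controller driver and is not part of this hunk; the sketch below is only a plausible shape for it, assuming the MC_IRAM_* register offsets that this patch adds to memctrl_v1.h and the existing tegra_mc_read_32()/tegra_mc_write_32() accessors. The exact register values written here are assumptions, not taken from this patch.

	/* Sketch: possible v1 implementation, not the patch's actual code */
	void tegra_memctrl_disable_ahb_redirection(void)
	{
		/* collapse the IRAM aperture so no address range takes the AHB path */
		tegra_mc_write_32(MC_IRAM_BASE_LO, 0xFFFFFFFFU);
		tegra_mc_write_32(MC_IRAM_TOP_LO, 0U);
		tegra_mc_write_32(MC_IRAM_BASE_TOP_HI, 0U);

		/* lock the aperture registers against further configuration writes */
		tegra_mc_write_32(MC_IRAM_REG_CTRL, MC_DISABLE_IRAM_CFG_WRITES);

		/* read back to make sure the writes have reached the MC */
		(void)tegra_mc_read_32(MC_IRAM_REG_CTRL);
	}
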
# -CRASH_REPORTING := 1 -$(eval $(call add_define,CRASH_REPORTING)) - -ASM_ASSERTION := 1 -$(eval $(call add_define,ASM_ASSERTION)) - -USE_COHERENT_MEM := 0 - -SEPARATE_CODE_AND_RODATA := 1 - PLAT_INCLUDES := -Iplat/nvidia/tegra/include/drivers \ -Iplat/nvidia/tegra/include \ -Iplat/nvidia/tegra/include/${TARGET_SOC} -PLAT_BL_COMMON_SOURCES := lib/xlat_tables/xlat_tables_common.c \ - lib/xlat_tables/aarch64/xlat_tables.c +include lib/xlat_tables_v2/xlat_tables.mk +PLAT_BL_COMMON_SOURCES += ${XLAT_TABLES_LIB_SRCS} COMMON_DIR := plat/nvidia/tegra/common diff --git a/plat/nvidia/tegra/common/tegra_platform.c b/plat/nvidia/tegra/common/tegra_platform.c index 0724b18ee..18cc555cf 100644 --- a/plat/nvidia/tegra/common/tegra_platform.c +++ b/plat/nvidia/tegra/common/tegra_platform.c @@ -69,6 +69,7 @@ typedef enum tegra_platform { typedef enum tegra_chipid { TEGRA_CHIPID_TEGRA13 = 0x13, TEGRA_CHIPID_TEGRA21 = 0x21, + TEGRA_CHIPID_TEGRA18 = 0x18, } tegra_chipid_t; /* @@ -109,6 +110,13 @@ uint8_t tegra_chipid_is_t210(void) return (chip_id == TEGRA_CHIPID_TEGRA21); } +uint8_t tegra_chipid_is_t186(void) +{ + uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK; + + return (chip_id == TEGRA_CHIPID_TEGRA18); +} + /* * Read the chip ID value and derive the platform */ diff --git a/plat/nvidia/tegra/common/tegra_pm.c b/plat/nvidia/tegra/common/tegra_pm.c index 5376d5231..d63292689 100644 --- a/plat/nvidia/tegra/common/tegra_pm.c +++ b/plat/nvidia/tegra/common/tegra_pm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -48,6 +48,14 @@ extern uint64_t tegra_bl31_phys_base; extern uint64_t tegra_sec_entry_point; extern uint64_t tegra_console_base; +/* + * tegra_fake_system_suspend acts as a boolean var controlling whether + * we are going to take fake system suspend code or normal system suspend code + * path. This variable is set inside the sip call handlers,when the kernel + * requests a SIP call to set the suspend debug flags. + */ +uint8_t tegra_fake_system_suspend; + /* * The following platform setup functions are weakly defined. They * provide typical implementations that will be overridden by a SoC. @@ -182,14 +190,31 @@ void tegra_pwr_domain_suspend(const psci_power_state_t *target_state) __dead2 void tegra_pwr_domain_power_down_wfi(const psci_power_state_t *target_state) { + uint8_t pwr_state = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL]; + uint64_t rmr_el3 = 0; + /* call the chip's power down handler */ tegra_soc_pwr_domain_power_down_wfi(target_state); - /* enter power down state */ - wfi(); + /* + * If we are in fake system suspend mode, ensure we start doing + * procedures that help in looping back towards system suspend exit + * instead of calling WFI by requesting a warm reset. + * Else, just call WFI to enter low power state. 
+ */ + if ((tegra_fake_system_suspend != 0U) && + (pwr_state == (uint8_t)PSTATE_ID_SOC_POWERDN)) { + + /* warm reboot */ + rmr_el3 = read_rmr_el3(); + write_rmr_el3(rmr_el3 | RMR_WARM_RESET_CPU); + + } else { + /* enter power down state */ + wfi(); + } /* we can never reach here */ - ERROR("%s: operation not handled.\n", __func__); panic(); } diff --git a/plat/nvidia/tegra/common/tegra_sip_calls.c b/plat/nvidia/tegra/common/tegra_sip_calls.c index 4dd43532b..1e5423db7 100644 --- a/plat/nvidia/tegra/common/tegra_sip_calls.c +++ b/plat/nvidia/tegra/common/tegra_sip_calls.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -37,6 +37,7 @@ #include #include #include +#include /******************************************************************************* * Common Tegra SiP SMCs @@ -44,6 +45,13 @@ #define TEGRA_SIP_NEW_VIDEOMEM_REGION 0x82000003 #define TEGRA_SIP_FIQ_NS_ENTRYPOINT 0x82000005 #define TEGRA_SIP_FIQ_NS_GET_CONTEXT 0x82000006 +#define TEGRA_SIP_ENABLE_FAKE_SYSTEM_SUSPEND 0xC2000007 + +/******************************************************************************* + * Fake system suspend mode control var + ******************************************************************************/ +extern uint8_t tegra_fake_system_suspend; + /******************************************************************************* * SoC specific SiP handler @@ -78,7 +86,7 @@ uint64_t tegra_sip_handler(uint32_t smc_fid, /* Check if this is a SoC specific SiP */ err = plat_sip_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags); if (err == 0) - SMC_RET1(handle, err); + SMC_RET1(handle, (uint64_t)err); switch (smc_fid) { @@ -144,6 +152,26 @@ uint64_t tegra_sip_handler(uint32_t smc_fid, SMC_RET0(handle); break; + case TEGRA_SIP_ENABLE_FAKE_SYSTEM_SUSPEND: + /* + * System suspend fake mode is set if we are on VDK and we make + * a debug SIP call. This mode ensures that we excercise debug + * path instead of the regular code path to suit the pre-silicon + * platform needs. These include replacing the call to WFI by + * a warm reset request. + */ + if (tegra_platform_is_emulation() != 0U) { + + tegra_fake_system_suspend = 1; + SMC_RET1(handle, 0); + } + + /* + * We return to the external world as if this SIP is not + * implemented in case, we are not running on VDK. + */ + break; + default: ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid); break; diff --git a/plat/nvidia/tegra/common/tegra_topology.c b/plat/nvidia/tegra/common/tegra_topology.c index 0431d98a2..f4c56617b 100644 --- a/plat/nvidia/tegra/common/tegra_topology.c +++ b/plat/nvidia/tegra/common/tegra_topology.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -33,6 +33,7 @@ #include extern const unsigned char tegra_power_domain_tree_desc[]; +#pragma weak plat_core_pos_by_mpidr /******************************************************************************* * This function returns the Tegra default topology tree information. 
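[Editor's note] The TEGRA_SIP_ENABLE_FAKE_SYSTEM_SUSPEND SiP call added above is only honoured on simulation (VDK) platforms; on silicon the handler falls through to the default path and the caller sees SMC_UNK. For reference, a non-secure caller would issue a fast SiP SMC with function ID 0xC2000007 and no arguments. The helper below is a hypothetical illustration of such a caller in a Linux kernel context; only the function ID and the return convention come from this patch, everything else is an assumption.

	/* Hypothetical non-secure caller (Linux kernel context assumed) */
	#include <linux/arm-smccc.h>
	#include <linux/errno.h>

	#define TEGRA_SIP_ENABLE_FAKE_SYSTEM_SUSPEND	0xC2000007

	static int tegra_enable_fake_system_suspend(void)
	{
		struct arm_smccc_res res;

		/* x1-x4 are unused by the handler, so pass zeroes */
		arm_smccc_smc(TEGRA_SIP_ENABLE_FAKE_SYSTEM_SUSPEND,
			      0, 0, 0, 0, 0, 0, 0, &res);

		/* a0 is 0 on VDK platforms; SMC_UNK when the SiP is not implemented */
		return (res.a0 == 0) ? 0 : -ENODEV;
	}
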
@@ -52,23 +53,18 @@ int plat_core_pos_by_mpidr(u_register_t mpidr) { unsigned int cluster_id, cpu_id; - mpidr &= MPIDR_AFFINITY_MASK; - - if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)) - return -1; - cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK; cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK; if (cluster_id >= PLATFORM_CLUSTER_COUNT) - return -1; + return PSCI_E_NOT_PRESENT; /* * Validate cpu_id by checking whether it represents a CPU in * one of the two clusters present on the platform. */ if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER) - return -1; + return PSCI_E_NOT_PRESENT; return (cpu_id + (cluster_id * 4)); } diff --git a/plat/nvidia/tegra/include/drivers/flowctrl.h b/plat/nvidia/tegra/include/drivers/flowctrl.h index 23909e805..17145e8e6 100644 --- a/plat/nvidia/tegra/include/drivers/flowctrl.h +++ b/plat/nvidia/tegra/include/drivers/flowctrl.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -34,34 +34,34 @@ #include #include -#define FLOWCTRL_HALT_CPU0_EVENTS 0x0 -#define FLOWCTRL_WAITEVENT (2 << 29) -#define FLOWCTRL_WAIT_FOR_INTERRUPT (4 << 29) -#define FLOWCTRL_JTAG_RESUME (1 << 28) -#define FLOWCTRL_HALT_SCLK (1 << 27) -#define FLOWCTRL_HALT_LIC_IRQ (1 << 11) -#define FLOWCTRL_HALT_LIC_FIQ (1 << 10) -#define FLOWCTRL_HALT_GIC_IRQ (1 << 9) -#define FLOWCTRL_HALT_GIC_FIQ (1 << 8) -#define FLOWCTRL_HALT_BPMP_EVENTS 0x4 -#define FLOWCTRL_CPU0_CSR 0x8 -#define FLOW_CTRL_CSR_PWR_OFF_STS (1 << 16) -#define FLOWCTRL_CSR_INTR_FLAG (1 << 15) -#define FLOWCTRL_CSR_EVENT_FLAG (1 << 14) -#define FLOWCTRL_CSR_IMMEDIATE_WAKE (1 << 3) -#define FLOWCTRL_CSR_ENABLE (1 << 0) -#define FLOWCTRL_HALT_CPU1_EVENTS 0x14 -#define FLOWCTRL_CPU1_CSR 0x18 -#define FLOWCTRL_CC4_CORE0_CTRL 0x6c -#define FLOWCTRL_WAIT_WFI_BITMAP 0x100 -#define FLOWCTRL_L2_FLUSH_CONTROL 0x94 -#define FLOWCTRL_BPMP_CLUSTER_CONTROL 0x98 -#define FLOWCTRL_BPMP_CLUSTER_PWRON_LOCK (1 << 2) +#define FLOWCTRL_HALT_CPU0_EVENTS 0x0U +#define FLOWCTRL_WAITEVENT (2U << 29) +#define FLOWCTRL_WAIT_FOR_INTERRUPT (4U << 29) +#define FLOWCTRL_JTAG_RESUME (1U << 28) +#define FLOWCTRL_HALT_SCLK (1U << 27) +#define FLOWCTRL_HALT_LIC_IRQ (1U << 11) +#define FLOWCTRL_HALT_LIC_FIQ (1U << 10) +#define FLOWCTRL_HALT_GIC_IRQ (1U << 9) +#define FLOWCTRL_HALT_GIC_FIQ (1U << 8) +#define FLOWCTRL_HALT_BPMP_EVENTS 0x4U +#define FLOWCTRL_CPU0_CSR 0x8U +#define FLOW_CTRL_CSR_PWR_OFF_STS (1U << 16) +#define FLOWCTRL_CSR_INTR_FLAG (1U << 15) +#define FLOWCTRL_CSR_EVENT_FLAG (1U << 14) +#define FLOWCTRL_CSR_IMMEDIATE_WAKE (1U << 3) +#define FLOWCTRL_CSR_ENABLE (1U << 0) +#define FLOWCTRL_HALT_CPU1_EVENTS 0x14U +#define FLOWCTRL_CPU1_CSR 0x18U +#define FLOWCTRL_CC4_CORE0_CTRL 0x6cU +#define FLOWCTRL_WAIT_WFI_BITMAP 0x100U +#define FLOWCTRL_L2_FLUSH_CONTROL 0x94U +#define FLOWCTRL_BPMP_CLUSTER_CONTROL 0x98U +#define FLOWCTRL_BPMP_CLUSTER_PWRON_LOCK (1U << 2) -#define FLOWCTRL_ENABLE_EXT 12 -#define FLOWCTRL_ENABLE_EXT_MASK 3 -#define FLOWCTRL_PG_CPU_NONCPU 0x1 -#define FLOWCTRL_TURNOFF_CPURAIL 0x2 +#define FLOWCTRL_ENABLE_EXT 12U +#define FLOWCTRL_ENABLE_EXT_MASK 3U +#define FLOWCTRL_PG_CPU_NONCPU 0x1U +#define FLOWCTRL_TURNOFF_CPURAIL 0x2U static inline uint32_t tegra_fc_read_32(uint32_t off) { diff --git a/plat/nvidia/tegra/include/drivers/mce.h 
b/plat/nvidia/tegra/include/drivers/mce.h new file mode 100644 index 000000000..faeacf7fe --- /dev/null +++ b/plat/nvidia/tegra/include/drivers/mce.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __MCE_H__ +#define __MCE_H__ + +#include +#include + +/******************************************************************************* + * MCE commands + ******************************************************************************/ +typedef enum mce_cmd { + MCE_CMD_ENTER_CSTATE = 0U, + MCE_CMD_UPDATE_CSTATE_INFO = 1U, + MCE_CMD_UPDATE_CROSSOVER_TIME = 2U, + MCE_CMD_READ_CSTATE_STATS = 3U, + MCE_CMD_WRITE_CSTATE_STATS = 4U, + MCE_CMD_IS_SC7_ALLOWED = 5U, + MCE_CMD_ONLINE_CORE = 6U, + MCE_CMD_CC3_CTRL = 7U, + MCE_CMD_ECHO_DATA = 8U, + MCE_CMD_READ_VERSIONS = 9U, + MCE_CMD_ENUM_FEATURES = 10U, + MCE_CMD_ROC_FLUSH_CACHE_TRBITS = 11U, + MCE_CMD_ENUM_READ_MCA = 12U, + MCE_CMD_ENUM_WRITE_MCA = 13U, + MCE_CMD_ROC_FLUSH_CACHE = 14U, + MCE_CMD_ROC_CLEAN_CACHE = 15U, + MCE_CMD_ENABLE_LATIC = 16U, + MCE_CMD_UNCORE_PERFMON_REQ = 17U, + MCE_CMD_MISC_CCPLEX = 18U, + MCE_CMD_IS_CCX_ALLOWED = 0xFEU, + MCE_CMD_MAX = 0xFFU, +} mce_cmd_t; + +#define MCE_CMD_MASK 0xFFU + +/******************************************************************************* + * Timeout value used to powerdown a core + ******************************************************************************/ +#define MCE_CORE_SLEEP_TIME_INFINITE 0xFFFFFFFFU + +/******************************************************************************* + * Struct to prepare UPDATE_CSTATE_INFO request + ******************************************************************************/ +typedef struct mce_cstate_info { + /* cluster cstate value */ + uint32_t cluster; + /* ccplex cstate value */ + uint32_t ccplex; + /* system cstate value */ + uint32_t system; + /* force system state? */ + uint8_t system_state_force; + /* wake mask value */ + uint32_t wake_mask; + /* update the wake mask? 
*/ + uint8_t update_wake_mask; +} mce_cstate_info_t; + +/* public interfaces */ +int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1, + uint64_t arg2); +int mce_update_reset_vector(void); +int mce_update_gsc_videomem(void); +int mce_update_gsc_tzdram(void); +int mce_update_gsc_tzram(void); +__dead2 void mce_enter_ccplex_state(uint32_t state_idx); +void mce_update_cstate_info(mce_cstate_info_t *cstate); +void mce_verify_firmware_version(void); + +#endif /* __MCE_H__ */ diff --git a/plat/nvidia/tegra/include/drivers/memctrl.h b/plat/nvidia/tegra/include/drivers/memctrl.h index a3f08755a..1557bbf88 100644 --- a/plat/nvidia/tegra/include/drivers/memctrl.h +++ b/plat/nvidia/tegra/include/drivers/memctrl.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -36,5 +36,6 @@ void tegra_memctrl_restore_settings(void); void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes); void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes); void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes); +void tegra_memctrl_disable_ahb_redirection(void); #endif /* __MEMCTRL_H__ */ diff --git a/plat/nvidia/tegra/include/drivers/memctrl_v1.h b/plat/nvidia/tegra/include/drivers/memctrl_v1.h index b504594bf..ab2edac48 100644 --- a/plat/nvidia/tegra/include/drivers/memctrl_v1.h +++ b/plat/nvidia/tegra/include/drivers/memctrl_v1.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -35,31 +35,38 @@ #include /* SMMU registers */ -#define MC_SMMU_CONFIG_0 0x10 -#define MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE 0 -#define MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE 1 -#define MC_SMMU_TLB_CONFIG_0 0x14 -#define MC_SMMU_TLB_CONFIG_0_RESET_VAL 0x20000010 -#define MC_SMMU_PTC_CONFIG_0 0x18 -#define MC_SMMU_PTC_CONFIG_0_RESET_VAL 0x2000003f -#define MC_SMMU_TLB_FLUSH_0 0x30 -#define TLB_FLUSH_VA_MATCH_ALL 0 -#define TLB_FLUSH_ASID_MATCH_DISABLE 0 -#define TLB_FLUSH_ASID_MATCH_SHIFT 31 +#define MC_SMMU_CONFIG_0 0x10U +#define MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE 0U +#define MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE 1U +#define MC_SMMU_TLB_CONFIG_0 0x14U +#define MC_SMMU_TLB_CONFIG_0_RESET_VAL 0x20000010U +#define MC_SMMU_PTC_CONFIG_0 0x18U +#define MC_SMMU_PTC_CONFIG_0_RESET_VAL 0x2000003fU +#define MC_SMMU_TLB_FLUSH_0 0x30U +#define TLB_FLUSH_VA_MATCH_ALL 0U +#define TLB_FLUSH_ASID_MATCH_DISABLE 0U +#define TLB_FLUSH_ASID_MATCH_SHIFT 31U #define MC_SMMU_TLB_FLUSH_ALL \ (TLB_FLUSH_VA_MATCH_ALL | \ (TLB_FLUSH_ASID_MATCH_DISABLE << TLB_FLUSH_ASID_MATCH_SHIFT)) -#define MC_SMMU_PTC_FLUSH_0 0x34 -#define MC_SMMU_PTC_FLUSH_ALL 0 -#define MC_SMMU_ASID_SECURITY_0 0x38 -#define MC_SMMU_ASID_SECURITY 0 -#define MC_SMMU_TRANSLATION_ENABLE_0_0 0x228 -#define MC_SMMU_TRANSLATION_ENABLE_1_0 0x22c -#define MC_SMMU_TRANSLATION_ENABLE_2_0 0x230 -#define MC_SMMU_TRANSLATION_ENABLE_3_0 0x234 -#define MC_SMMU_TRANSLATION_ENABLE_4_0 0xb98 +#define MC_SMMU_PTC_FLUSH_0 0x34U +#define MC_SMMU_PTC_FLUSH_ALL 0U +#define MC_SMMU_ASID_SECURITY_0 0x38U +#define MC_SMMU_ASID_SECURITY 0U +#define MC_SMMU_TRANSLATION_ENABLE_0_0 0x228U +#define MC_SMMU_TRANSLATION_ENABLE_1_0 0x22cU +#define MC_SMMU_TRANSLATION_ENABLE_2_0 0x230U +#define MC_SMMU_TRANSLATION_ENABLE_3_0 0x234U +#define MC_SMMU_TRANSLATION_ENABLE_4_0 0xb98U #define MC_SMMU_TRANSLATION_ENABLE (~0) +/* MC IRAM aperture registers */ +#define MC_IRAM_BASE_LO 0x65CU +#define MC_IRAM_TOP_LO 0x660U +#define MC_IRAM_BASE_TOP_HI 0x980U +#define MC_IRAM_REG_CTRL 0x964U +#define MC_DISABLE_IRAM_CFG_WRITES 1U + static inline uint32_t tegra_mc_read_32(uint32_t off) { return mmio_read_32(TEGRA_MC_BASE + off); diff --git a/plat/nvidia/tegra/include/drivers/memctrl_v2.h b/plat/nvidia/tegra/include/drivers/memctrl_v2.h index 559ea2c59..201025dc7 100644 --- a/plat/nvidia/tegra/include/drivers/memctrl_v2.h +++ b/plat/nvidia/tegra/include/drivers/memctrl_v2.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -33,6 +33,10 @@ #include +#ifndef __ASSEMBLY__ + +#include + /******************************************************************************* * StreamID to indicate no SMMU translations (requests to be steered on the * SMMU bypass path) @@ -42,19 +46,18 @@ /******************************************************************************* * Stream ID Override Config registers ******************************************************************************/ -#define MC_STREAMID_OVERRIDE_CFG_PTCR 0x0 -#define MC_STREAMID_OVERRIDE_CFG_AFIR 0x70 -#define MC_STREAMID_OVERRIDE_CFG_HDAR 0xA8 -#define MC_STREAMID_OVERRIDE_CFG_HOST1XDMAR 0xB0 -#define MC_STREAMID_OVERRIDE_CFG_NVENCSRD 0xE0 -#define MC_STREAMID_OVERRIDE_CFG_SATAR 0xF8 +#define MC_STREAMID_OVERRIDE_CFG_PTCR 0x000 +#define MC_STREAMID_OVERRIDE_CFG_AFIR 0x070 +#define MC_STREAMID_OVERRIDE_CFG_HDAR 0x0A8 +#define MC_STREAMID_OVERRIDE_CFG_HOST1XDMAR 0x0B0 +#define MC_STREAMID_OVERRIDE_CFG_NVENCSRD 0x0E0 +#define MC_STREAMID_OVERRIDE_CFG_SATAR 0x0F8 #define MC_STREAMID_OVERRIDE_CFG_MPCORER 0x138 #define MC_STREAMID_OVERRIDE_CFG_NVENCSWR 0x158 #define MC_STREAMID_OVERRIDE_CFG_AFIW 0x188 -#define MC_STREAMID_OVERRIDE_CFG_SATAW 0x1E8 +#define MC_STREAMID_OVERRIDE_CFG_HDAW 0x1A8 #define MC_STREAMID_OVERRIDE_CFG_MPCOREW 0x1C8 #define MC_STREAMID_OVERRIDE_CFG_SATAW 0x1E8 -#define MC_STREAMID_OVERRIDE_CFG_HDAW 0x1A8 #define MC_STREAMID_OVERRIDE_CFG_ISPRA 0x220 #define MC_STREAMID_OVERRIDE_CFG_ISPWA 0x230 #define MC_STREAMID_OVERRIDE_CFG_ISPWB 0x238 @@ -117,94 +120,9 @@ #define MC_STREAMID_OVERRIDE_CFG_NVDECSRD1 0x518 /******************************************************************************* - * Stream ID Security Config registers + * Macro to calculate Security cfg register addr from StreamID Override register ******************************************************************************/ -#define MC_STREAMID_SECURITY_CFG_PTCR 0x4 -#define MC_STREAMID_SECURITY_CFG_AFIR 0x74 -#define MC_STREAMID_SECURITY_CFG_HDAR 0xAC -#define MC_STREAMID_SECURITY_CFG_HOST1XDMAR 0xB4 -#define MC_STREAMID_SECURITY_CFG_NVENCSRD 0xE4 -#define MC_STREAMID_SECURITY_CFG_SATAR 0xFC -#define MC_STREAMID_SECURITY_CFG_HDAW 0x1AC -#define MC_STREAMID_SECURITY_CFG_MPCORER 0x13C -#define MC_STREAMID_SECURITY_CFG_NVENCSWR 0x15C -#define MC_STREAMID_SECURITY_CFG_AFIW 0x18C -#define MC_STREAMID_SECURITY_CFG_MPCOREW 0x1CC -#define MC_STREAMID_SECURITY_CFG_SATAW 0x1EC -#define MC_STREAMID_SECURITY_CFG_ISPRA 0x224 -#define MC_STREAMID_SECURITY_CFG_ISPWA 0x234 -#define MC_STREAMID_SECURITY_CFG_ISPWB 0x23C -#define MC_STREAMID_SECURITY_CFG_XUSB_HOSTR 0x254 -#define MC_STREAMID_SECURITY_CFG_XUSB_HOSTW 0x25C -#define MC_STREAMID_SECURITY_CFG_XUSB_DEVR 0x264 -#define MC_STREAMID_SECURITY_CFG_XUSB_DEVW 0x26C -#define MC_STREAMID_SECURITY_CFG_TSECSRD 0x2A4 -#define MC_STREAMID_SECURITY_CFG_TSECSWR 0x2AC -#define MC_STREAMID_SECURITY_CFG_GPUSRD 0x2C4 -#define MC_STREAMID_SECURITY_CFG_GPUSWR 0x2CC -#define MC_STREAMID_SECURITY_CFG_SDMMCRA 0x304 -#define MC_STREAMID_SECURITY_CFG_SDMMCRAA 0x30C -#define MC_STREAMID_SECURITY_CFG_SDMMCR 0x314 -#define MC_STREAMID_SECURITY_CFG_SDMMCRAB 0x31C -#define MC_STREAMID_SECURITY_CFG_SDMMCWA 0x324 -#define MC_STREAMID_SECURITY_CFG_SDMMCWAA 0x32C -#define MC_STREAMID_SECURITY_CFG_SDMMCW 0x334 -#define MC_STREAMID_SECURITY_CFG_SDMMCWAB 0x33C -#define MC_STREAMID_SECURITY_CFG_VICSRD 0x364 -#define 
MC_STREAMID_SECURITY_CFG_VICSWR 0x36C -#define MC_STREAMID_SECURITY_CFG_VIW 0x394 -#define MC_STREAMID_SECURITY_CFG_NVDECSRD 0x3C4 -#define MC_STREAMID_SECURITY_CFG_NVDECSWR 0x3CC -#define MC_STREAMID_SECURITY_CFG_APER 0x3D4 -#define MC_STREAMID_SECURITY_CFG_APEW 0x3DC -#define MC_STREAMID_SECURITY_CFG_NVJPGSRD 0x3F4 -#define MC_STREAMID_SECURITY_CFG_NVJPGSWR 0x3FC -#define MC_STREAMID_SECURITY_CFG_SESRD 0x404 -#define MC_STREAMID_SECURITY_CFG_SESWR 0x40C -#define MC_STREAMID_SECURITY_CFG_ETRR 0x424 -#define MC_STREAMID_SECURITY_CFG_ETRW 0x42C -#define MC_STREAMID_SECURITY_CFG_TSECSRDB 0x434 -#define MC_STREAMID_SECURITY_CFG_TSECSWRB 0x43C -#define MC_STREAMID_SECURITY_CFG_GPUSRD2 0x444 -#define MC_STREAMID_SECURITY_CFG_GPUSWR2 0x44C -#define MC_STREAMID_SECURITY_CFG_AXISR 0x464 -#define MC_STREAMID_SECURITY_CFG_AXISW 0x46C -#define MC_STREAMID_SECURITY_CFG_EQOSR 0x474 -#define MC_STREAMID_SECURITY_CFG_EQOSW 0x47C -#define MC_STREAMID_SECURITY_CFG_UFSHCR 0x484 -#define MC_STREAMID_SECURITY_CFG_UFSHCW 0x48C -#define MC_STREAMID_SECURITY_CFG_NVDISPLAYR 0x494 -#define MC_STREAMID_SECURITY_CFG_BPMPR 0x49C -#define MC_STREAMID_SECURITY_CFG_BPMPW 0x4A4 -#define MC_STREAMID_SECURITY_CFG_BPMPDMAR 0x4AC -#define MC_STREAMID_SECURITY_CFG_BPMPDMAW 0x4B4 -#define MC_STREAMID_SECURITY_CFG_AONR 0x4BC -#define MC_STREAMID_SECURITY_CFG_AONW 0x4C4 -#define MC_STREAMID_SECURITY_CFG_AONDMAR 0x4CC -#define MC_STREAMID_SECURITY_CFG_AONDMAW 0x4D4 -#define MC_STREAMID_SECURITY_CFG_SCER 0x4DC -#define MC_STREAMID_SECURITY_CFG_SCEW 0x4E4 -#define MC_STREAMID_SECURITY_CFG_SCEDMAR 0x4EC -#define MC_STREAMID_SECURITY_CFG_SCEDMAW 0x4F4 -#define MC_STREAMID_SECURITY_CFG_APEDMAR 0x4FC -#define MC_STREAMID_SECURITY_CFG_APEDMAW 0x504 -#define MC_STREAMID_SECURITY_CFG_NVDISPLAYR1 0x50C -#define MC_STREAMID_SECURITY_CFG_VICSRD1 0x514 -#define MC_STREAMID_SECURITY_CFG_NVDECSRD1 0x51C - -/******************************************************************************* - * Memory Controller SMMU Bypass config register - ******************************************************************************/ -#define MC_SMMU_BYPASS_CONFIG 0x1820 -#define MC_SMMU_BYPASS_CTRL_MASK 0x3 -#define MC_SMMU_BYPASS_CTRL_SHIFT 0 -#define MC_SMMU_CTRL_TBU_BYPASS_ALL (0 << MC_SMMU_BYPASS_CTRL_SHIFT) -#define MC_SMMU_CTRL_TBU_RSVD (1 << MC_SMMU_BYPASS_CTRL_SHIFT) -#define MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID (2 << MC_SMMU_BYPASS_CTRL_SHIFT) -#define MC_SMMU_CTRL_TBU_BYPASS_NONE (3 << MC_SMMU_BYPASS_CTRL_SHIFT) -#define MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT (1 << 31) -#define MC_SMMU_BYPASS_CONFIG_SETTINGS (MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT | \ - MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID) +#define MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(addr) (addr + sizeof(uint32_t)) /******************************************************************************* * Memory Controller transaction override config registers @@ -282,24 +200,6 @@ #define MC_TXN_OVERRIDE_CONFIG_AFIW 0x1188 #define MC_TXN_OVERRIDE_CONFIG_SCEW 0x14e0 -#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_CGID (1 << 0) -#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV (2 << 4) -#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT (1 << 12) - -/******************************************************************************* - * Non-SO_DEV transactions override values for CGID_TAG bitfield for the - * MC_TXN_OVERRIDE_CONFIG_{module} registers - ******************************************************************************/ -#define MC_TXN_OVERRIDE_CGID_TAG_DEFAULT 0 -#define 
MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID 1 -#define MC_TXN_OVERRIDE_CGID_TAG_ZERO 2 -#define MC_TXN_OVERRIDE_CGID_TAG_ADR 3 -#define MC_TXN_OVERRIDE_CGID_TAG_MASK 3 - -#ifndef __ASSEMBLY__ - -#include - /******************************************************************************* * Structure to hold the transaction override settings to use to override * client inputs @@ -342,16 +242,57 @@ typedef struct mc_streamid_security_cfg { #define CLIENT_INPUTS_NO_OVERRIDE 0 #define mc_make_sec_cfg(off, ns, ovrrd, access) \ - { \ - .name = # off, \ - .offset = MC_STREAMID_SECURITY_CFG_ ## off, \ - .override_client_ns_flag = CLIENT_FLAG_ ## ns, \ - .override_client_inputs = CLIENT_INPUTS_ ## ovrrd, \ - .override_enable = OVERRIDE_ ## access \ - } + { \ + .name = # off, \ + .offset = MC_STREAMID_OVERRIDE_TO_SECURITY_CFG( \ + MC_STREAMID_OVERRIDE_CFG_ ## off), \ + .override_client_ns_flag = CLIENT_FLAG_ ## ns, \ + .override_client_inputs = CLIENT_INPUTS_ ## ovrrd, \ + .override_enable = OVERRIDE_ ## access \ + } + +/******************************************************************************* + * Structure to hold Memory Controller's Configuration settings + ******************************************************************************/ +typedef struct tegra_mc_settings { + const uint32_t *streamid_override_cfg; + uint32_t num_streamid_override_cfgs; + const mc_streamid_security_cfg_t *streamid_security_cfg; + uint32_t num_streamid_security_cfgs; + const mc_txn_override_cfg_t *txn_override_cfg; + uint32_t num_txn_override_cfgs; +} tegra_mc_settings_t; #endif /* __ASSEMBLY__ */ +/******************************************************************************* + * Memory Controller SMMU Bypass config register + ******************************************************************************/ +#define MC_SMMU_BYPASS_CONFIG 0x1820 +#define MC_SMMU_BYPASS_CTRL_MASK 0x3 +#define MC_SMMU_BYPASS_CTRL_SHIFT 0 +#define MC_SMMU_CTRL_TBU_BYPASS_ALL (0 << MC_SMMU_BYPASS_CTRL_SHIFT) +#define MC_SMMU_CTRL_TBU_RSVD (1 << MC_SMMU_BYPASS_CTRL_SHIFT) +#define MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID (2 << MC_SMMU_BYPASS_CTRL_SHIFT) +#define MC_SMMU_CTRL_TBU_BYPASS_NONE (3 << MC_SMMU_BYPASS_CTRL_SHIFT) +#define MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT (1 << 31) +#define MC_SMMU_BYPASS_CONFIG_SETTINGS (MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT | \ + MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID) + +#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_CGID (1 << 0) +#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV (2 << 4) +#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT (1 << 12) + +/******************************************************************************* + * Non-SO_DEV transactions override values for CGID_TAG bitfield for the + * MC_TXN_OVERRIDE_CONFIG_{module} registers + ******************************************************************************/ +#define MC_TXN_OVERRIDE_CGID_TAG_DEFAULT 0 +#define MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID 1 +#define MC_TXN_OVERRIDE_CGID_TAG_ZERO 2 +#define MC_TXN_OVERRIDE_CGID_TAG_ADR 3 +#define MC_TXN_OVERRIDE_CGID_TAG_MASK 3 + /******************************************************************************* * Memory Controller Reset Control registers ******************************************************************************/ @@ -548,6 +489,14 @@ static inline void tegra_mc_streamid_write_32(uint32_t off, uint32_t val) MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_CGID | \ MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT); \ } + 
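[Editor's note] The long list of per-register MC_STREAMID_SECURITY_CFG_* defines removed above is replaced by MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(), which exploits the fact that each security config register sits one 32-bit word above its override config partner; mc_make_sec_cfg() and the new tegra_mc_settings_t/tegra_get_mc_settings() interface then build the SoC tables from the override offsets alone. The standalone snippet below just checks the address arithmetic against values visible in this hunk; the defines are copied locally so it compiles on its own.

	#include <assert.h>
	#include <stdint.h>

	/* offsets taken from this patch, reproduced here for a self-contained check */
	#define MC_STREAMID_OVERRIDE_CFG_HDAR			0x0A8
	#define MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(addr)	(addr + sizeof(uint32_t))

	int main(void)
	{
		/* 0x0A8 + 4 == 0x0AC, the old MC_STREAMID_SECURITY_CFG_HDAR value */
		assert(MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(
			MC_STREAMID_OVERRIDE_CFG_HDAR) == 0x0AC);
		return 0;
	}

A SoC table entry would then look roughly like mc_make_sec_cfg(HDAR, NON_SECURE, NO_OVERRIDE, DISABLE); the NON_SECURE/NO_OVERRIDE/DISABLE suffixes are assumed here, since their CLIENT_FLAG_*/CLIENT_INPUTS_*/OVERRIDE_* definitions sit outside this hunk.
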
+/******************************************************************************* + * Handler to read memory configuration settings + * + * Implemented by SoCs under tegra/soc/txxx + ******************************************************************************/ +tegra_mc_settings_t *tegra_get_mc_settings(void); + #endif /* __ASSMEBLY__ */ #endif /* __MEMCTRLV2_H__ */ diff --git a/plat/nvidia/tegra/include/drivers/pmc.h b/plat/nvidia/tegra/include/drivers/pmc.h index c0616d020..a5ab8f14c 100644 --- a/plat/nvidia/tegra/include/drivers/pmc.h +++ b/plat/nvidia/tegra/include/drivers/pmc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -34,19 +34,19 @@ #include #include -#define PMC_CONFIG 0x0 -#define PMC_PWRGATE_STATUS 0x38 -#define PMC_PWRGATE_TOGGLE 0x30 -#define PMC_TOGGLE_START 0x100 -#define PMC_SCRATCH39 0x138 -#define PMC_SECURE_DISABLE2 0x2c4 -#define PMC_SECURE_DISABLE2_WRITE22_ON (1 << 28) -#define PMC_SECURE_SCRATCH22 0x338 -#define PMC_SECURE_DISABLE3 0x2d8 -#define PMC_SECURE_DISABLE3_WRITE34_ON (1 << 20) -#define PMC_SECURE_DISABLE3_WRITE35_ON (1 << 22) -#define PMC_SECURE_SCRATCH34 0x368 -#define PMC_SECURE_SCRATCH35 0x36c +#define PMC_CONFIG 0x0U +#define PMC_PWRGATE_STATUS 0x38U +#define PMC_PWRGATE_TOGGLE 0x30U +#define PMC_TOGGLE_START 0x100U +#define PMC_SCRATCH39 0x138U +#define PMC_SECURE_DISABLE2 0x2c4U +#define PMC_SECURE_DISABLE2_WRITE22_ON (1U << 28) +#define PMC_SECURE_SCRATCH22 0x338U +#define PMC_SECURE_DISABLE3 0x2d8U +#define PMC_SECURE_DISABLE3_WRITE34_ON (1U << 20) +#define PMC_SECURE_DISABLE3_WRITE35_ON (1U << 22) +#define PMC_SECURE_SCRATCH34 0x368U +#define PMC_SECURE_SCRATCH35 0x36cU static inline uint32_t tegra_pmc_read_32(uint32_t off) { diff --git a/plat/nvidia/tegra/include/drivers/smmu.h b/plat/nvidia/tegra/include/drivers/smmu.h index 0640846af..1897aab64 100644 --- a/plat/nvidia/tegra/include/drivers/smmu.h +++ b/plat/nvidia/tegra/include/drivers/smmu.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -31,609 +31,702 @@ #ifndef __SMMU_H #define __SMMU_H +#include #include #include /******************************************************************************* * SMMU Register constants ******************************************************************************/ -#define SMMU_CBn_SCTLR (0x0) -#define SMMU_CBn_SCTLR_STAGE2 (0x0) -#define SMMU_CBn_ACTLR (0x4) -#define SMMU_CBn_RESUME (0x8) -#define SMMU_CBn_TCR2 (0x10) -#define SMMU_CBn_TTBR0_LO (0x20) -#define SMMU_CBn_TTBR0_HI (0x24) -#define SMMU_CBn_TTBR1_LO (0x28) -#define SMMU_CBn_TTBR1_HI (0x2c) -#define SMMU_CBn_TCR_LPAE (0x30) -#define SMMU_CBn_TCR (0x30) -#define SMMU_CBn_TCR_EAE_1 (0x30) -#define SMMU_CBn_TCR (0x30) -#define SMMU_CBn_CONTEXTIDR (0x34) -#define SMMU_CBn_CONTEXTIDR_EAE_1 (0x34) -#define SMMU_CBn_PRRR_MAIR0 (0x38) -#define SMMU_CBn_NMRR_MAIR1 (0x3c) -#define SMMU_CBn_SMMU_CBn_PAR (0x50) -#define SMMU_CBn_SMMU_CBn_PAR0 (0x50) -#define SMMU_CBn_SMMU_CBn_PAR1 (0x54) -/* SMMU_CBn_SMMU_CBn_PAR0_Fault (0x50) */ -/* SMMU_CBn_SMMU_CBn_PAR0_Fault (0x54) */ -#define SMMU_CBn_FSR (0x58) -#define SMMU_CBn_FSRRESTORE (0x5c) -#define SMMU_CBn_FAR_LO (0x60) -#define SMMU_CBn_FAR_HI (0x64) -#define SMMU_CBn_FSYNR0 (0x68) -#define SMMU_CBn_IPAFAR_LO (0x70) -#define SMMU_CBn_IPAFAR_HI (0x74) -#define SMMU_CBn_TLBIVA_LO (0x600) -#define SMMU_CBn_TLBIVA_HI (0x604) -#define SMMU_CBn_TLBIVA_AARCH_32 (0x600) -#define SMMU_CBn_TLBIVAA_LO (0x608) -#define SMMU_CBn_TLBIVAA_HI (0x60c) -#define SMMU_CBn_TLBIVAA_AARCH_32 (0x608) -#define SMMU_CBn_TLBIASID (0x610) -#define SMMU_CBn_TLBIALL (0x618) -#define SMMU_CBn_TLBIVAL_LO (0x620) -#define SMMU_CBn_TLBIVAL_HI (0x624) -#define SMMU_CBn_TLBIVAL_AARCH_32 (0x618) -#define SMMU_CBn_TLBIVAAL_LO (0x628) -#define SMMU_CBn_TLBIVAAL_HI (0x62c) -#define SMMU_CBn_TLBIVAAL_AARCH_32 (0x628) -#define SMMU_CBn_TLBIIPAS2_LO (0x630) -#define SMMU_CBn_TLBIIPAS2_HI (0x634) -#define SMMU_CBn_TLBIIPAS2L_LO (0x638) -#define SMMU_CBn_TLBIIPAS2L_HI (0x63c) -#define SMMU_CBn_TLBSYNC (0x7f0) -#define SMMU_CBn_TLBSTATUS (0x7f4) -#define SMMU_CBn_ATSR (0x800) -#define SMMU_CBn_PMEVCNTR0 (0xe00) -#define SMMU_CBn_PMEVCNTR1 (0xe04) -#define SMMU_CBn_PMEVCNTR2 (0xe08) -#define SMMU_CBn_PMEVCNTR3 (0xe0c) -#define SMMU_CBn_PMEVTYPER0 (0xe80) -#define SMMU_CBn_PMEVTYPER1 (0xe84) -#define SMMU_CBn_PMEVTYPER2 (0xe88) -#define SMMU_CBn_PMEVTYPER3 (0xe8c) -#define SMMU_CBn_PMCFGR (0xf00) -#define SMMU_CBn_PMCR (0xf04) -#define SMMU_CBn_PMCEID (0xf20) -#define SMMU_CBn_PMCNTENSE (0xf40) -#define SMMU_CBn_PMCNTENCLR (0xf44) -#define SMMU_CBn_PMCNTENSET (0xf48) -#define SMMU_CBn_PMINTENCLR (0xf4c) -#define SMMU_CBn_PMOVSCLR (0xf50) -#define SMMU_CBn_PMOVSSET (0xf58) -#define SMMU_CBn_PMAUTHSTATUS (0xfb8) -#define SMMU_GNSR0_CR0 (0x0) -#define SMMU_GNSR0_CR2 (0x8) -#define SMMU_GNSR0_ACR (0x10) -#define SMMU_GNSR0_IDR0 (0x20) -#define SMMU_GNSR0_IDR1 (0x24) -#define SMMU_GNSR0_IDR2 (0x28) -#define SMMU_GNSR0_IDR7 (0x3c) -#define SMMU_GNSR0_GFAR_LO (0x40) -#define SMMU_GNSR0_GFAR_HI (0x44) -#define SMMU_GNSR0_GFSR (0x48) -#define SMMU_GNSR0_GFSRRESTORE (0x4c) -#define SMMU_GNSR0_GFSYNR0 (0x50) -#define SMMU_GNSR0_GFSYNR1 (0x54) -#define SMMU_GNSR0_GFSYNR1_v2 (0x54) -#define SMMU_GNSR0_TLBIVMID (0x64) -#define SMMU_GNSR0_TLBIALLNSNH (0x68) -#define SMMU_GNSR0_TLBIALLH (0x6c) -#define SMMU_GNSR0_TLBGSYNC (0x70) -#define SMMU_GNSR0_TLBGSTATUS (0x74) -#define SMMU_GNSR0_TLBIVAH_LO 
(0x78) -#define SMMU_GNSR0_TLBIVALH64_LO (0xb0) -#define SMMU_GNSR0_TLBIVALH64_HI (0xb4) -#define SMMU_GNSR0_TLBIVMIDS1 (0xb8) -#define SMMU_GNSR0_TLBIVAH64_LO (0xc0) -#define SMMU_GNSR0_TLBIVAH64_HI (0xc4) -#define SMMU_GNSR0_SMR0 (0x800) -#define SMMU_GNSR0_SMRn (0x800) -#define SMMU_GNSR0_SMR1 (0x804) -#define SMMU_GNSR0_SMR2 (0x808) -#define SMMU_GNSR0_SMR3 (0x80c) -#define SMMU_GNSR0_SMR4 (0x810) -#define SMMU_GNSR0_SMR5 (0x814) -#define SMMU_GNSR0_SMR6 (0x818) -#define SMMU_GNSR0_SMR7 (0x81c) -#define SMMU_GNSR0_SMR8 (0x820) -#define SMMU_GNSR0_SMR9 (0x824) -#define SMMU_GNSR0_SMR10 (0x828) -#define SMMU_GNSR0_SMR11 (0x82c) -#define SMMU_GNSR0_SMR12 (0x830) -#define SMMU_GNSR0_SMR13 (0x834) -#define SMMU_GNSR0_SMR14 (0x838) -#define SMMU_GNSR0_SMR15 (0x83c) -#define SMMU_GNSR0_SMR16 (0x840) -#define SMMU_GNSR0_SMR17 (0x844) -#define SMMU_GNSR0_SMR18 (0x848) -#define SMMU_GNSR0_SMR19 (0x84c) -#define SMMU_GNSR0_SMR20 (0x850) -#define SMMU_GNSR0_SMR21 (0x854) -#define SMMU_GNSR0_SMR22 (0x858) -#define SMMU_GNSR0_SMR23 (0x85c) -#define SMMU_GNSR0_SMR24 (0x860) -#define SMMU_GNSR0_SMR25 (0x864) -#define SMMU_GNSR0_SMR26 (0x868) -#define SMMU_GNSR0_SMR27 (0x86c) -#define SMMU_GNSR0_SMR28 (0x870) -#define SMMU_GNSR0_SMR29 (0x874) -#define SMMU_GNSR0_SMR30 (0x878) -#define SMMU_GNSR0_SMR31 (0x87c) -#define SMMU_GNSR0_SMR32 (0x880) -#define SMMU_GNSR0_SMR33 (0x884) -#define SMMU_GNSR0_SMR34 (0x888) -#define SMMU_GNSR0_SMR35 (0x88c) -#define SMMU_GNSR0_SMR36 (0x890) -#define SMMU_GNSR0_SMR37 (0x894) -#define SMMU_GNSR0_SMR38 (0x898) -#define SMMU_GNSR0_SMR39 (0x89c) -#define SMMU_GNSR0_SMR40 (0x8a0) -#define SMMU_GNSR0_SMR41 (0x8a4) -#define SMMU_GNSR0_SMR42 (0x8a8) -#define SMMU_GNSR0_SMR43 (0x8ac) -#define SMMU_GNSR0_SMR44 (0x8b0) -#define SMMU_GNSR0_SMR45 (0x8b4) -#define SMMU_GNSR0_SMR46 (0x8b8) -#define SMMU_GNSR0_SMR47 (0x8bc) -#define SMMU_GNSR0_SMR48 (0x8c0) -#define SMMU_GNSR0_SMR49 (0x8c4) -#define SMMU_GNSR0_SMR50 (0x8c8) -#define SMMU_GNSR0_SMR51 (0x8cc) -#define SMMU_GNSR0_SMR52 (0x8d0) -#define SMMU_GNSR0_SMR53 (0x8d4) -#define SMMU_GNSR0_SMR54 (0x8d8) -#define SMMU_GNSR0_SMR55 (0x8dc) -#define SMMU_GNSR0_SMR56 (0x8e0) -#define SMMU_GNSR0_SMR57 (0x8e4) -#define SMMU_GNSR0_SMR58 (0x8e8) -#define SMMU_GNSR0_SMR59 (0x8ec) -#define SMMU_GNSR0_SMR60 (0x8f0) -#define SMMU_GNSR0_SMR61 (0x8f4) -#define SMMU_GNSR0_SMR62 (0x8f8) -#define SMMU_GNSR0_SMR63 (0x8fc) -#define SMMU_GNSR0_SMR64 (0x900) -#define SMMU_GNSR0_SMR65 (0x904) -#define SMMU_GNSR0_SMR66 (0x908) -#define SMMU_GNSR0_SMR67 (0x90c) -#define SMMU_GNSR0_SMR68 (0x910) -#define SMMU_GNSR0_SMR69 (0x914) -#define SMMU_GNSR0_SMR70 (0x918) -#define SMMU_GNSR0_SMR71 (0x91c) -#define SMMU_GNSR0_SMR72 (0x920) -#define SMMU_GNSR0_SMR73 (0x924) -#define SMMU_GNSR0_SMR74 (0x928) -#define SMMU_GNSR0_SMR75 (0x92c) -#define SMMU_GNSR0_SMR76 (0x930) -#define SMMU_GNSR0_SMR77 (0x934) -#define SMMU_GNSR0_SMR78 (0x938) -#define SMMU_GNSR0_SMR79 (0x93c) -#define SMMU_GNSR0_SMR80 (0x940) -#define SMMU_GNSR0_SMR81 (0x944) -#define SMMU_GNSR0_SMR82 (0x948) -#define SMMU_GNSR0_SMR83 (0x94c) -#define SMMU_GNSR0_SMR84 (0x950) -#define SMMU_GNSR0_SMR85 (0x954) -#define SMMU_GNSR0_SMR86 (0x958) -#define SMMU_GNSR0_SMR87 (0x95c) -#define SMMU_GNSR0_SMR88 (0x960) -#define SMMU_GNSR0_SMR89 (0x964) -#define SMMU_GNSR0_SMR90 (0x968) -#define SMMU_GNSR0_SMR91 (0x96c) -#define SMMU_GNSR0_SMR92 (0x970) -#define SMMU_GNSR0_SMR93 (0x974) -#define SMMU_GNSR0_SMR94 (0x978) -#define SMMU_GNSR0_SMR95 (0x97c) -#define SMMU_GNSR0_SMR96 (0x980) -#define SMMU_GNSR0_SMR97 
(0x984) -#define SMMU_GNSR0_SMR98 (0x988) -#define SMMU_GNSR0_SMR99 (0x98c) -#define SMMU_GNSR0_SMR100 (0x990) -#define SMMU_GNSR0_SMR101 (0x994) -#define SMMU_GNSR0_SMR102 (0x998) -#define SMMU_GNSR0_SMR103 (0x99c) -#define SMMU_GNSR0_SMR104 (0x9a0) -#define SMMU_GNSR0_SMR105 (0x9a4) -#define SMMU_GNSR0_SMR106 (0x9a8) -#define SMMU_GNSR0_SMR107 (0x9ac) -#define SMMU_GNSR0_SMR108 (0x9b0) -#define SMMU_GNSR0_SMR109 (0x9b4) -#define SMMU_GNSR0_SMR110 (0x9b8) -#define SMMU_GNSR0_SMR111 (0x9bc) -#define SMMU_GNSR0_SMR112 (0x9c0) -#define SMMU_GNSR0_SMR113 (0x9c4) -#define SMMU_GNSR0_SMR114 (0x9c8) -#define SMMU_GNSR0_SMR115 (0x9cc) -#define SMMU_GNSR0_SMR116 (0x9d0) -#define SMMU_GNSR0_SMR117 (0x9d4) -#define SMMU_GNSR0_SMR118 (0x9d8) -#define SMMU_GNSR0_SMR119 (0x9dc) -#define SMMU_GNSR0_SMR120 (0x9e0) -#define SMMU_GNSR0_SMR121 (0x9e4) -#define SMMU_GNSR0_SMR122 (0x9e8) -#define SMMU_GNSR0_SMR123 (0x9ec) -#define SMMU_GNSR0_SMR124 (0x9f0) -#define SMMU_GNSR0_SMR125 (0x9f4) -#define SMMU_GNSR0_SMR126 (0x9f8) -#define SMMU_GNSR0_SMR127 (0x9fc) -#define SMMU_GNSR0_S2CR0 (0xc00) -#define SMMU_GNSR0_S2CRn (0xc00) -#define SMMU_GNSR0_S2CRn (0xc00) -#define SMMU_GNSR0_S2CR1 (0xc04) -#define SMMU_GNSR0_S2CR2 (0xc08) -#define SMMU_GNSR0_S2CR3 (0xc0c) -#define SMMU_GNSR0_S2CR4 (0xc10) -#define SMMU_GNSR0_S2CR5 (0xc14) -#define SMMU_GNSR0_S2CR6 (0xc18) -#define SMMU_GNSR0_S2CR7 (0xc1c) -#define SMMU_GNSR0_S2CR8 (0xc20) -#define SMMU_GNSR0_S2CR9 (0xc24) -#define SMMU_GNSR0_S2CR10 (0xc28) -#define SMMU_GNSR0_S2CR11 (0xc2c) -#define SMMU_GNSR0_S2CR12 (0xc30) -#define SMMU_GNSR0_S2CR13 (0xc34) -#define SMMU_GNSR0_S2CR14 (0xc38) -#define SMMU_GNSR0_S2CR15 (0xc3c) -#define SMMU_GNSR0_S2CR16 (0xc40) -#define SMMU_GNSR0_S2CR17 (0xc44) -#define SMMU_GNSR0_S2CR18 (0xc48) -#define SMMU_GNSR0_S2CR19 (0xc4c) -#define SMMU_GNSR0_S2CR20 (0xc50) -#define SMMU_GNSR0_S2CR21 (0xc54) -#define SMMU_GNSR0_S2CR22 (0xc58) -#define SMMU_GNSR0_S2CR23 (0xc5c) -#define SMMU_GNSR0_S2CR24 (0xc60) -#define SMMU_GNSR0_S2CR25 (0xc64) -#define SMMU_GNSR0_S2CR26 (0xc68) -#define SMMU_GNSR0_S2CR27 (0xc6c) -#define SMMU_GNSR0_S2CR28 (0xc70) -#define SMMU_GNSR0_S2CR29 (0xc74) -#define SMMU_GNSR0_S2CR30 (0xc78) -#define SMMU_GNSR0_S2CR31 (0xc7c) -#define SMMU_GNSR0_S2CR32 (0xc80) -#define SMMU_GNSR0_S2CR33 (0xc84) -#define SMMU_GNSR0_S2CR34 (0xc88) -#define SMMU_GNSR0_S2CR35 (0xc8c) -#define SMMU_GNSR0_S2CR36 (0xc90) -#define SMMU_GNSR0_S2CR37 (0xc94) -#define SMMU_GNSR0_S2CR38 (0xc98) -#define SMMU_GNSR0_S2CR39 (0xc9c) -#define SMMU_GNSR0_S2CR40 (0xca0) -#define SMMU_GNSR0_S2CR41 (0xca4) -#define SMMU_GNSR0_S2CR42 (0xca8) -#define SMMU_GNSR0_S2CR43 (0xcac) -#define SMMU_GNSR0_S2CR44 (0xcb0) -#define SMMU_GNSR0_S2CR45 (0xcb4) -#define SMMU_GNSR0_S2CR46 (0xcb8) -#define SMMU_GNSR0_S2CR47 (0xcbc) -#define SMMU_GNSR0_S2CR48 (0xcc0) -#define SMMU_GNSR0_S2CR49 (0xcc4) -#define SMMU_GNSR0_S2CR50 (0xcc8) -#define SMMU_GNSR0_S2CR51 (0xccc) -#define SMMU_GNSR0_S2CR52 (0xcd0) -#define SMMU_GNSR0_S2CR53 (0xcd4) -#define SMMU_GNSR0_S2CR54 (0xcd8) -#define SMMU_GNSR0_S2CR55 (0xcdc) -#define SMMU_GNSR0_S2CR56 (0xce0) -#define SMMU_GNSR0_S2CR57 (0xce4) -#define SMMU_GNSR0_S2CR58 (0xce8) -#define SMMU_GNSR0_S2CR59 (0xcec) -#define SMMU_GNSR0_S2CR60 (0xcf0) -#define SMMU_GNSR0_S2CR61 (0xcf4) -#define SMMU_GNSR0_S2CR62 (0xcf8) -#define SMMU_GNSR0_S2CR63 (0xcfc) -#define SMMU_GNSR0_S2CR64 (0xd00) -#define SMMU_GNSR0_S2CR65 (0xd04) -#define SMMU_GNSR0_S2CR66 (0xd08) -#define SMMU_GNSR0_S2CR67 (0xd0c) -#define SMMU_GNSR0_S2CR68 (0xd10) -#define 
SMMU_GNSR0_S2CR69 (0xd14) -#define SMMU_GNSR0_S2CR70 (0xd18) -#define SMMU_GNSR0_S2CR71 (0xd1c) -#define SMMU_GNSR0_S2CR72 (0xd20) -#define SMMU_GNSR0_S2CR73 (0xd24) -#define SMMU_GNSR0_S2CR74 (0xd28) -#define SMMU_GNSR0_S2CR75 (0xd2c) -#define SMMU_GNSR0_S2CR76 (0xd30) -#define SMMU_GNSR0_S2CR77 (0xd34) -#define SMMU_GNSR0_S2CR78 (0xd38) -#define SMMU_GNSR0_S2CR79 (0xd3c) -#define SMMU_GNSR0_S2CR80 (0xd40) -#define SMMU_GNSR0_S2CR81 (0xd44) -#define SMMU_GNSR0_S2CR82 (0xd48) -#define SMMU_GNSR0_S2CR83 (0xd4c) -#define SMMU_GNSR0_S2CR84 (0xd50) -#define SMMU_GNSR0_S2CR85 (0xd54) -#define SMMU_GNSR0_S2CR86 (0xd58) -#define SMMU_GNSR0_S2CR87 (0xd5c) -#define SMMU_GNSR0_S2CR88 (0xd60) -#define SMMU_GNSR0_S2CR89 (0xd64) -#define SMMU_GNSR0_S2CR90 (0xd68) -#define SMMU_GNSR0_S2CR91 (0xd6c) -#define SMMU_GNSR0_S2CR92 (0xd70) -#define SMMU_GNSR0_S2CR93 (0xd74) -#define SMMU_GNSR0_S2CR94 (0xd78) -#define SMMU_GNSR0_S2CR95 (0xd7c) -#define SMMU_GNSR0_S2CR96 (0xd80) -#define SMMU_GNSR0_S2CR97 (0xd84) -#define SMMU_GNSR0_S2CR98 (0xd88) -#define SMMU_GNSR0_S2CR99 (0xd8c) -#define SMMU_GNSR0_S2CR100 (0xd90) -#define SMMU_GNSR0_S2CR101 (0xd94) -#define SMMU_GNSR0_S2CR102 (0xd98) -#define SMMU_GNSR0_S2CR103 (0xd9c) -#define SMMU_GNSR0_S2CR104 (0xda0) -#define SMMU_GNSR0_S2CR105 (0xda4) -#define SMMU_GNSR0_S2CR106 (0xda8) -#define SMMU_GNSR0_S2CR107 (0xdac) -#define SMMU_GNSR0_S2CR108 (0xdb0) -#define SMMU_GNSR0_S2CR109 (0xdb4) -#define SMMU_GNSR0_S2CR110 (0xdb8) -#define SMMU_GNSR0_S2CR111 (0xdbc) -#define SMMU_GNSR0_S2CR112 (0xdc0) -#define SMMU_GNSR0_S2CR113 (0xdc4) -#define SMMU_GNSR0_S2CR114 (0xdc8) -#define SMMU_GNSR0_S2CR115 (0xdcc) -#define SMMU_GNSR0_S2CR116 (0xdd0) -#define SMMU_GNSR0_S2CR117 (0xdd4) -#define SMMU_GNSR0_S2CR118 (0xdd8) -#define SMMU_GNSR0_S2CR119 (0xddc) -#define SMMU_GNSR0_S2CR120 (0xde0) -#define SMMU_GNSR0_S2CR121 (0xde4) -#define SMMU_GNSR0_S2CR122 (0xde8) -#define SMMU_GNSR0_S2CR123 (0xdec) -#define SMMU_GNSR0_S2CR124 (0xdf0) -#define SMMU_GNSR0_S2CR125 (0xdf4) -#define SMMU_GNSR0_S2CR126 (0xdf8) -#define SMMU_GNSR0_S2CR127 (0xdfc) -#define SMMU_GNSR0_PIDR0 (0xfe0) -#define SMMU_GNSR0_PIDR1 (0xfe4) -#define SMMU_GNSR0_PIDR2 (0xfe8) -#define SMMU_GNSR0_PIDR3 (0xfec) -#define SMMU_GNSR0_PIDR4 (0xfd0) -#define SMMU_GNSR0_PIDR5 (0xfd4) -#define SMMU_GNSR0_PIDR6 (0xfd8) -#define SMMU_GNSR0_PIDR7 (0xfdc) -#define SMMU_GNSR0_CIDR0 (0xff0) -#define SMMU_GNSR0_CIDR1 (0xff4) -#define SMMU_GNSR0_CIDR2 (0xff8) -#define SMMU_GNSR0_CIDR3 (0xffc) -#define SMMU_GNSR1_CBAR0 (0x0) -#define SMMU_GNSR1_CBARn (0x0) -#define SMMU_GNSR1_CBFRSYNRA0 (0x400) -#define SMMU_GNSR1_CBA2R0 (0x800) -#define SMMU_GNSR1_CBAR1 (0x4) -#define SMMU_GNSR1_CBFRSYNRA1 (0x404) -#define SMMU_GNSR1_CBA2R1 (0x804) -#define SMMU_GNSR1_CBAR2 (0x8) -#define SMMU_GNSR1_CBFRSYNRA2 (0x408) -#define SMMU_GNSR1_CBA2R2 (0x808) -#define SMMU_GNSR1_CBAR3 (0xc) -#define SMMU_GNSR1_CBFRSYNRA3 (0x40c) -#define SMMU_GNSR1_CBA2R3 (0x80c) -#define SMMU_GNSR1_CBAR4 (0x10) -#define SMMU_GNSR1_CBFRSYNRA4 (0x410) -#define SMMU_GNSR1_CBA2R4 (0x810) -#define SMMU_GNSR1_CBAR5 (0x14) -#define SMMU_GNSR1_CBFRSYNRA5 (0x414) -#define SMMU_GNSR1_CBA2R5 (0x814) -#define SMMU_GNSR1_CBAR6 (0x18) -#define SMMU_GNSR1_CBFRSYNRA6 (0x418) -#define SMMU_GNSR1_CBA2R6 (0x818) -#define SMMU_GNSR1_CBAR7 (0x1c) -#define SMMU_GNSR1_CBFRSYNRA7 (0x41c) -#define SMMU_GNSR1_CBA2R7 (0x81c) -#define SMMU_GNSR1_CBAR8 (0x20) -#define SMMU_GNSR1_CBFRSYNRA8 (0x420) -#define SMMU_GNSR1_CBA2R8 (0x820) -#define SMMU_GNSR1_CBAR9 (0x24) -#define SMMU_GNSR1_CBFRSYNRA9 (0x424) 
-#define SMMU_GNSR1_CBA2R9 (0x824) -#define SMMU_GNSR1_CBAR10 (0x28) -#define SMMU_GNSR1_CBFRSYNRA10 (0x428) -#define SMMU_GNSR1_CBA2R10 (0x828) -#define SMMU_GNSR1_CBAR11 (0x2c) -#define SMMU_GNSR1_CBFRSYNRA11 (0x42c) -#define SMMU_GNSR1_CBA2R11 (0x82c) -#define SMMU_GNSR1_CBAR12 (0x30) -#define SMMU_GNSR1_CBFRSYNRA12 (0x430) -#define SMMU_GNSR1_CBA2R12 (0x830) -#define SMMU_GNSR1_CBAR13 (0x34) -#define SMMU_GNSR1_CBFRSYNRA13 (0x434) -#define SMMU_GNSR1_CBA2R13 (0x834) -#define SMMU_GNSR1_CBAR14 (0x38) -#define SMMU_GNSR1_CBFRSYNRA14 (0x438) -#define SMMU_GNSR1_CBA2R14 (0x838) -#define SMMU_GNSR1_CBAR15 (0x3c) -#define SMMU_GNSR1_CBFRSYNRA15 (0x43c) -#define SMMU_GNSR1_CBA2R15 (0x83c) -#define SMMU_GNSR1_CBAR16 (0x40) -#define SMMU_GNSR1_CBFRSYNRA16 (0x440) -#define SMMU_GNSR1_CBA2R16 (0x840) -#define SMMU_GNSR1_CBAR17 (0x44) -#define SMMU_GNSR1_CBFRSYNRA17 (0x444) -#define SMMU_GNSR1_CBA2R17 (0x844) -#define SMMU_GNSR1_CBAR18 (0x48) -#define SMMU_GNSR1_CBFRSYNRA18 (0x448) -#define SMMU_GNSR1_CBA2R18 (0x848) -#define SMMU_GNSR1_CBAR19 (0x4c) -#define SMMU_GNSR1_CBFRSYNRA19 (0x44c) -#define SMMU_GNSR1_CBA2R19 (0x84c) -#define SMMU_GNSR1_CBAR20 (0x50) -#define SMMU_GNSR1_CBFRSYNRA20 (0x450) -#define SMMU_GNSR1_CBA2R20 (0x850) -#define SMMU_GNSR1_CBAR21 (0x54) -#define SMMU_GNSR1_CBFRSYNRA21 (0x454) -#define SMMU_GNSR1_CBA2R21 (0x854) -#define SMMU_GNSR1_CBAR22 (0x58) -#define SMMU_GNSR1_CBFRSYNRA22 (0x458) -#define SMMU_GNSR1_CBA2R22 (0x858) -#define SMMU_GNSR1_CBAR23 (0x5c) -#define SMMU_GNSR1_CBFRSYNRA23 (0x45c) -#define SMMU_GNSR1_CBA2R23 (0x85c) -#define SMMU_GNSR1_CBAR24 (0x60) -#define SMMU_GNSR1_CBFRSYNRA24 (0x460) -#define SMMU_GNSR1_CBA2R24 (0x860) -#define SMMU_GNSR1_CBAR25 (0x64) -#define SMMU_GNSR1_CBFRSYNRA25 (0x464) -#define SMMU_GNSR1_CBA2R25 (0x864) -#define SMMU_GNSR1_CBAR26 (0x68) -#define SMMU_GNSR1_CBFRSYNRA26 (0x468) -#define SMMU_GNSR1_CBA2R26 (0x868) -#define SMMU_GNSR1_CBAR27 (0x6c) -#define SMMU_GNSR1_CBFRSYNRA27 (0x46c) -#define SMMU_GNSR1_CBA2R27 (0x86c) -#define SMMU_GNSR1_CBAR28 (0x70) -#define SMMU_GNSR1_CBFRSYNRA28 (0x470) -#define SMMU_GNSR1_CBA2R28 (0x870) -#define SMMU_GNSR1_CBAR29 (0x74) -#define SMMU_GNSR1_CBFRSYNRA29 (0x474) -#define SMMU_GNSR1_CBA2R29 (0x874) -#define SMMU_GNSR1_CBAR30 (0x78) -#define SMMU_GNSR1_CBFRSYNRA30 (0x478) -#define SMMU_GNSR1_CBA2R30 (0x878) -#define SMMU_GNSR1_CBAR31 (0x7c) -#define SMMU_GNSR1_CBFRSYNRA31 (0x47c) -#define SMMU_GNSR1_CBA2R31 (0x87c) -#define SMMU_GNSR1_CBAR32 (0x80) -#define SMMU_GNSR1_CBFRSYNRA32 (0x480) -#define SMMU_GNSR1_CBA2R32 (0x880) -#define SMMU_GNSR1_CBAR33 (0x84) -#define SMMU_GNSR1_CBFRSYNRA33 (0x484) -#define SMMU_GNSR1_CBA2R33 (0x884) -#define SMMU_GNSR1_CBAR34 (0x88) -#define SMMU_GNSR1_CBFRSYNRA34 (0x488) -#define SMMU_GNSR1_CBA2R34 (0x888) -#define SMMU_GNSR1_CBAR35 (0x8c) -#define SMMU_GNSR1_CBFRSYNRA35 (0x48c) -#define SMMU_GNSR1_CBA2R35 (0x88c) -#define SMMU_GNSR1_CBAR36 (0x90) -#define SMMU_GNSR1_CBFRSYNRA36 (0x490) -#define SMMU_GNSR1_CBA2R36 (0x890) -#define SMMU_GNSR1_CBAR37 (0x94) -#define SMMU_GNSR1_CBFRSYNRA37 (0x494) -#define SMMU_GNSR1_CBA2R37 (0x894) -#define SMMU_GNSR1_CBAR38 (0x98) -#define SMMU_GNSR1_CBFRSYNRA38 (0x498) -#define SMMU_GNSR1_CBA2R38 (0x898) -#define SMMU_GNSR1_CBAR39 (0x9c) -#define SMMU_GNSR1_CBFRSYNRA39 (0x49c) -#define SMMU_GNSR1_CBA2R39 (0x89c) -#define SMMU_GNSR1_CBAR40 (0xa0) -#define SMMU_GNSR1_CBFRSYNRA40 (0x4a0) -#define SMMU_GNSR1_CBA2R40 (0x8a0) -#define SMMU_GNSR1_CBAR41 (0xa4) -#define SMMU_GNSR1_CBFRSYNRA41 (0x4a4) -#define SMMU_GNSR1_CBA2R41 (0x8a4) 
-#define SMMU_GNSR1_CBAR42 (0xa8) -#define SMMU_GNSR1_CBFRSYNRA42 (0x4a8) -#define SMMU_GNSR1_CBA2R42 (0x8a8) -#define SMMU_GNSR1_CBAR43 (0xac) -#define SMMU_GNSR1_CBFRSYNRA43 (0x4ac) -#define SMMU_GNSR1_CBA2R43 (0x8ac) -#define SMMU_GNSR1_CBAR44 (0xb0) -#define SMMU_GNSR1_CBFRSYNRA44 (0x4b0) -#define SMMU_GNSR1_CBA2R44 (0x8b0) -#define SMMU_GNSR1_CBAR45 (0xb4) -#define SMMU_GNSR1_CBFRSYNRA45 (0x4b4) -#define SMMU_GNSR1_CBA2R45 (0x8b4) -#define SMMU_GNSR1_CBAR46 (0xb8) -#define SMMU_GNSR1_CBFRSYNRA46 (0x4b8) -#define SMMU_GNSR1_CBA2R46 (0x8b8) -#define SMMU_GNSR1_CBAR47 (0xbc) -#define SMMU_GNSR1_CBFRSYNRA47 (0x4bc) -#define SMMU_GNSR1_CBA2R47 (0x8bc) -#define SMMU_GNSR1_CBAR48 (0xc0) -#define SMMU_GNSR1_CBFRSYNRA48 (0x4c0) -#define SMMU_GNSR1_CBA2R48 (0x8c0) -#define SMMU_GNSR1_CBAR49 (0xc4) -#define SMMU_GNSR1_CBFRSYNRA49 (0x4c4) -#define SMMU_GNSR1_CBA2R49 (0x8c4) -#define SMMU_GNSR1_CBAR50 (0xc8) -#define SMMU_GNSR1_CBFRSYNRA50 (0x4c8) -#define SMMU_GNSR1_CBA2R50 (0x8c8) -#define SMMU_GNSR1_CBAR51 (0xcc) -#define SMMU_GNSR1_CBFRSYNRA51 (0x4cc) -#define SMMU_GNSR1_CBA2R51 (0x8cc) -#define SMMU_GNSR1_CBAR52 (0xd0) -#define SMMU_GNSR1_CBFRSYNRA52 (0x4d0) -#define SMMU_GNSR1_CBA2R52 (0x8d0) -#define SMMU_GNSR1_CBAR53 (0xd4) -#define SMMU_GNSR1_CBFRSYNRA53 (0x4d4) -#define SMMU_GNSR1_CBA2R53 (0x8d4) -#define SMMU_GNSR1_CBAR54 (0xd8) -#define SMMU_GNSR1_CBFRSYNRA54 (0x4d8) -#define SMMU_GNSR1_CBA2R54 (0x8d8) -#define SMMU_GNSR1_CBAR55 (0xdc) -#define SMMU_GNSR1_CBFRSYNRA55 (0x4dc) -#define SMMU_GNSR1_CBA2R55 (0x8dc) -#define SMMU_GNSR1_CBAR56 (0xe0) -#define SMMU_GNSR1_CBFRSYNRA56 (0x4e0) -#define SMMU_GNSR1_CBA2R56 (0x8e0) -#define SMMU_GNSR1_CBAR57 (0xe4) -#define SMMU_GNSR1_CBFRSYNRA57 (0x4e4) -#define SMMU_GNSR1_CBA2R57 (0x8e4) -#define SMMU_GNSR1_CBAR58 (0xe8) -#define SMMU_GNSR1_CBFRSYNRA58 (0x4e8) -#define SMMU_GNSR1_CBA2R58 (0x8e8) -#define SMMU_GNSR1_CBAR59 (0xec) -#define SMMU_GNSR1_CBFRSYNRA59 (0x4ec) -#define SMMU_GNSR1_CBA2R59 (0x8ec) -#define SMMU_GNSR1_CBAR60 (0xf0) -#define SMMU_GNSR1_CBFRSYNRA60 (0x4f0) -#define SMMU_GNSR1_CBA2R60 (0x8f0) -#define SMMU_GNSR1_CBAR61 (0xf4) -#define SMMU_GNSR1_CBFRSYNRA61 (0x4f4) -#define SMMU_GNSR1_CBA2R61 (0x8f4) -#define SMMU_GNSR1_CBAR62 (0xf8) -#define SMMU_GNSR1_CBFRSYNRA62 (0x4f8) -#define SMMU_GNSR1_CBA2R62 (0x8f8) -#define SMMU_GNSR1_CBAR63 (0xfc) -#define SMMU_GNSR1_CBFRSYNRA63 (0x4fc) -#define SMMU_GNSR1_CBA2R63 (0x8fc) +#define SMMU_CBn_SCTLR (0x0U) +#define SMMU_CBn_SCTLR_STAGE2 (0x0U) +#define SMMU_CBn_ACTLR (0x4U) +#define SMMU_CBn_RESUME (0x8U) +#define SMMU_CBn_TCR2 (0x10U) +#define SMMU_CBn_TTBR0_LO (0x20U) +#define SMMU_CBn_TTBR0_HI (0x24U) +#define SMMU_CBn_TTBR1_LO (0x28U) +#define SMMU_CBn_TTBR1_HI (0x2cU) +#define SMMU_CBn_TCR_LPAE (0x30U) +#define SMMU_CBn_TCR (0x30U) +#define SMMU_CBn_TCR_EAE_1 (0x30U) +#define SMMU_CBn_TCR (0x30U) +#define SMMU_CBn_CONTEXTIDR (0x34U) +#define SMMU_CBn_CONTEXTIDR_EAE_1 (0x34U) +#define SMMU_CBn_PRRR_MAIR0 (0x38U) +#define SMMU_CBn_NMRR_MAIR1 (0x3cU) +#define SMMU_CBn_SMMU_CBn_PAR (0x50U) +#define SMMU_CBn_SMMU_CBn_PAR0 (0x50U) +#define SMMU_CBn_SMMU_CBn_PAR1 (0x54U) +/* SMMU_CBn_SMMU_CBn_PAR0_Fault (0x50U) */ +/* SMMU_CBn_SMMU_CBn_PAR0_Fault (0x54U) */ +#define SMMU_CBn_FSR (0x58U) +#define SMMU_CBn_FSRRESTORE (0x5cU) +#define SMMU_CBn_FAR_LO (0x60U) +#define SMMU_CBn_FAR_HI (0x64U) +#define SMMU_CBn_FSYNR0 (0x68U) +#define SMMU_CBn_IPAFAR_LO (0x70U) +#define SMMU_CBn_IPAFAR_HI (0x74U) +#define SMMU_CBn_TLBIVA_LO (0x600U) +#define SMMU_CBn_TLBIVA_HI (0x604U) +#define 
SMMU_CBn_TLBIVA_AARCH_32 (0x600U) +#define SMMU_CBn_TLBIVAA_LO (0x608U) +#define SMMU_CBn_TLBIVAA_HI (0x60cU) +#define SMMU_CBn_TLBIVAA_AARCH_32 (0x608U) +#define SMMU_CBn_TLBIASID (0x610U) +#define SMMU_CBn_TLBIALL (0x618U) +#define SMMU_CBn_TLBIVAL_LO (0x620U) +#define SMMU_CBn_TLBIVAL_HI (0x624U) +#define SMMU_CBn_TLBIVAL_AARCH_32 (0x618U) +#define SMMU_CBn_TLBIVAAL_LO (0x628U) +#define SMMU_CBn_TLBIVAAL_HI (0x62cU) +#define SMMU_CBn_TLBIVAAL_AARCH_32 (0x628U) +#define SMMU_CBn_TLBIIPAS2_LO (0x630U) +#define SMMU_CBn_TLBIIPAS2_HI (0x634U) +#define SMMU_CBn_TLBIIPAS2L_LO (0x638U) +#define SMMU_CBn_TLBIIPAS2L_HI (0x63cU) +#define SMMU_CBn_TLBSYNC (0x7f0U) +#define SMMU_CBn_TLBSTATUS (0x7f4U) +#define SMMU_CBn_ATSR (0x800U) +#define SMMU_CBn_PMEVCNTR0 (0xe00U) +#define SMMU_CBn_PMEVCNTR1 (0xe04U) +#define SMMU_CBn_PMEVCNTR2 (0xe08U) +#define SMMU_CBn_PMEVCNTR3 (0xe0cU) +#define SMMU_CBn_PMEVTYPER0 (0xe80U) +#define SMMU_CBn_PMEVTYPER1 (0xe84U) +#define SMMU_CBn_PMEVTYPER2 (0xe88U) +#define SMMU_CBn_PMEVTYPER3 (0xe8cU) +#define SMMU_CBn_PMCFGR (0xf00U) +#define SMMU_CBn_PMCR (0xf04U) +#define SMMU_CBn_PMCEID (0xf20U) +#define SMMU_CBn_PMCNTENSE (0xf40U) +#define SMMU_CBn_PMCNTENCLR (0xf44U) +#define SMMU_CBn_PMCNTENSET (0xf48U) +#define SMMU_CBn_PMINTENCLR (0xf4cU) +#define SMMU_CBn_PMOVSCLR (0xf50U) +#define SMMU_CBn_PMOVSSET (0xf58U) +#define SMMU_CBn_PMAUTHSTATUS (0xfb8U) +#define SMMU_GNSR0_CR0 (0x0U) +#define SMMU_GNSR0_CR2 (0x8U) +#define SMMU_GNSR0_ACR (0x10U) +#define SMMU_GNSR0_IDR0 (0x20U) +#define SMMU_GNSR0_IDR1 (0x24U) +#define SMMU_GNSR0_IDR2 (0x28U) +#define SMMU_GNSR0_IDR7 (0x3cU) +#define SMMU_GNSR0_GFAR_LO (0x40U) +#define SMMU_GNSR0_GFAR_HI (0x44U) +#define SMMU_GNSR0_GFSR (0x48U) +#define SMMU_GNSR0_GFSRRESTORE (0x4cU) +#define SMMU_GNSR0_GFSYNR0 (0x50U) +#define SMMU_GNSR0_GFSYNR1 (0x54U) +#define SMMU_GNSR0_GFSYNR1_v2 (0x54U) +#define SMMU_GNSR0_TLBIVMID (0x64U) +#define SMMU_GNSR0_TLBIALLNSNH (0x68U) +#define SMMU_GNSR0_TLBIALLH (0x6cU) +#define SMMU_GNSR0_TLBGSYNC (0x70U) +#define SMMU_GNSR0_TLBGSTATUS (0x74U) +#define SMMU_GNSR0_TLBIVAH_LO (0x78U) +#define SMMU_GNSR0_TLBIVALH64_LO (0xb0U) +#define SMMU_GNSR0_TLBIVALH64_HI (0xb4U) +#define SMMU_GNSR0_TLBIVMIDS1 (0xb8U) +#define SMMU_GNSR0_TLBIVAH64_LO (0xc0U) +#define SMMU_GNSR0_TLBIVAH64_HI (0xc4U) +#define SMMU_GNSR0_SMR0 (0x800U) +#define SMMU_GNSR0_SMRn (0x800U) +#define SMMU_GNSR0_SMR1 (0x804U) +#define SMMU_GNSR0_SMR2 (0x808U) +#define SMMU_GNSR0_SMR3 (0x80cU) +#define SMMU_GNSR0_SMR4 (0x810U) +#define SMMU_GNSR0_SMR5 (0x814U) +#define SMMU_GNSR0_SMR6 (0x818U) +#define SMMU_GNSR0_SMR7 (0x81cU) +#define SMMU_GNSR0_SMR8 (0x820U) +#define SMMU_GNSR0_SMR9 (0x824U) +#define SMMU_GNSR0_SMR10 (0x828U) +#define SMMU_GNSR0_SMR11 (0x82cU) +#define SMMU_GNSR0_SMR12 (0x830U) +#define SMMU_GNSR0_SMR13 (0x834U) +#define SMMU_GNSR0_SMR14 (0x838U) +#define SMMU_GNSR0_SMR15 (0x83cU) +#define SMMU_GNSR0_SMR16 (0x840U) +#define SMMU_GNSR0_SMR17 (0x844U) +#define SMMU_GNSR0_SMR18 (0x848U) +#define SMMU_GNSR0_SMR19 (0x84cU) +#define SMMU_GNSR0_SMR20 (0x850U) +#define SMMU_GNSR0_SMR21 (0x854U) +#define SMMU_GNSR0_SMR22 (0x858U) +#define SMMU_GNSR0_SMR23 (0x85cU) +#define SMMU_GNSR0_SMR24 (0x860U) +#define SMMU_GNSR0_SMR25 (0x864U) +#define SMMU_GNSR0_SMR26 (0x868U) +#define SMMU_GNSR0_SMR27 (0x86cU) +#define SMMU_GNSR0_SMR28 (0x870U) +#define SMMU_GNSR0_SMR29 (0x874U) +#define SMMU_GNSR0_SMR30 (0x878U) +#define SMMU_GNSR0_SMR31 (0x87cU) +#define SMMU_GNSR0_SMR32 (0x880U) +#define SMMU_GNSR0_SMR33 (0x884U) +#define SMMU_GNSR0_SMR34 
(0x888U) +#define SMMU_GNSR0_SMR35 (0x88cU) +#define SMMU_GNSR0_SMR36 (0x890U) +#define SMMU_GNSR0_SMR37 (0x894U) +#define SMMU_GNSR0_SMR38 (0x898U) +#define SMMU_GNSR0_SMR39 (0x89cU) +#define SMMU_GNSR0_SMR40 (0x8a0U) +#define SMMU_GNSR0_SMR41 (0x8a4U) +#define SMMU_GNSR0_SMR42 (0x8a8U) +#define SMMU_GNSR0_SMR43 (0x8acU) +#define SMMU_GNSR0_SMR44 (0x8b0U) +#define SMMU_GNSR0_SMR45 (0x8b4U) +#define SMMU_GNSR0_SMR46 (0x8b8U) +#define SMMU_GNSR0_SMR47 (0x8bcU) +#define SMMU_GNSR0_SMR48 (0x8c0U) +#define SMMU_GNSR0_SMR49 (0x8c4U) +#define SMMU_GNSR0_SMR50 (0x8c8U) +#define SMMU_GNSR0_SMR51 (0x8ccU) +#define SMMU_GNSR0_SMR52 (0x8d0U) +#define SMMU_GNSR0_SMR53 (0x8d4U) +#define SMMU_GNSR0_SMR54 (0x8d8U) +#define SMMU_GNSR0_SMR55 (0x8dcU) +#define SMMU_GNSR0_SMR56 (0x8e0U) +#define SMMU_GNSR0_SMR57 (0x8e4U) +#define SMMU_GNSR0_SMR58 (0x8e8U) +#define SMMU_GNSR0_SMR59 (0x8ecU) +#define SMMU_GNSR0_SMR60 (0x8f0U) +#define SMMU_GNSR0_SMR61 (0x8f4U) +#define SMMU_GNSR0_SMR62 (0x8f8U) +#define SMMU_GNSR0_SMR63 (0x8fcU) +#define SMMU_GNSR0_SMR64 (0x900U) +#define SMMU_GNSR0_SMR65 (0x904U) +#define SMMU_GNSR0_SMR66 (0x908U) +#define SMMU_GNSR0_SMR67 (0x90cU) +#define SMMU_GNSR0_SMR68 (0x910U) +#define SMMU_GNSR0_SMR69 (0x914U) +#define SMMU_GNSR0_SMR70 (0x918U) +#define SMMU_GNSR0_SMR71 (0x91cU) +#define SMMU_GNSR0_SMR72 (0x920U) +#define SMMU_GNSR0_SMR73 (0x924U) +#define SMMU_GNSR0_SMR74 (0x928U) +#define SMMU_GNSR0_SMR75 (0x92cU) +#define SMMU_GNSR0_SMR76 (0x930U) +#define SMMU_GNSR0_SMR77 (0x934U) +#define SMMU_GNSR0_SMR78 (0x938U) +#define SMMU_GNSR0_SMR79 (0x93cU) +#define SMMU_GNSR0_SMR80 (0x940U) +#define SMMU_GNSR0_SMR81 (0x944U) +#define SMMU_GNSR0_SMR82 (0x948U) +#define SMMU_GNSR0_SMR83 (0x94cU) +#define SMMU_GNSR0_SMR84 (0x950U) +#define SMMU_GNSR0_SMR85 (0x954U) +#define SMMU_GNSR0_SMR86 (0x958U) +#define SMMU_GNSR0_SMR87 (0x95cU) +#define SMMU_GNSR0_SMR88 (0x960U) +#define SMMU_GNSR0_SMR89 (0x964U) +#define SMMU_GNSR0_SMR90 (0x968U) +#define SMMU_GNSR0_SMR91 (0x96cU) +#define SMMU_GNSR0_SMR92 (0x970U) +#define SMMU_GNSR0_SMR93 (0x974U) +#define SMMU_GNSR0_SMR94 (0x978U) +#define SMMU_GNSR0_SMR95 (0x97cU) +#define SMMU_GNSR0_SMR96 (0x980U) +#define SMMU_GNSR0_SMR97 (0x984U) +#define SMMU_GNSR0_SMR98 (0x988U) +#define SMMU_GNSR0_SMR99 (0x98cU) +#define SMMU_GNSR0_SMR100 (0x990U) +#define SMMU_GNSR0_SMR101 (0x994U) +#define SMMU_GNSR0_SMR102 (0x998U) +#define SMMU_GNSR0_SMR103 (0x99cU) +#define SMMU_GNSR0_SMR104 (0x9a0U) +#define SMMU_GNSR0_SMR105 (0x9a4U) +#define SMMU_GNSR0_SMR106 (0x9a8U) +#define SMMU_GNSR0_SMR107 (0x9acU) +#define SMMU_GNSR0_SMR108 (0x9b0U) +#define SMMU_GNSR0_SMR109 (0x9b4U) +#define SMMU_GNSR0_SMR110 (0x9b8U) +#define SMMU_GNSR0_SMR111 (0x9bcU) +#define SMMU_GNSR0_SMR112 (0x9c0U) +#define SMMU_GNSR0_SMR113 (0x9c4U) +#define SMMU_GNSR0_SMR114 (0x9c8U) +#define SMMU_GNSR0_SMR115 (0x9ccU) +#define SMMU_GNSR0_SMR116 (0x9d0U) +#define SMMU_GNSR0_SMR117 (0x9d4U) +#define SMMU_GNSR0_SMR118 (0x9d8U) +#define SMMU_GNSR0_SMR119 (0x9dcU) +#define SMMU_GNSR0_SMR120 (0x9e0U) +#define SMMU_GNSR0_SMR121 (0x9e4U) +#define SMMU_GNSR0_SMR122 (0x9e8U) +#define SMMU_GNSR0_SMR123 (0x9ecU) +#define SMMU_GNSR0_SMR124 (0x9f0U) +#define SMMU_GNSR0_SMR125 (0x9f4U) +#define SMMU_GNSR0_SMR126 (0x9f8U) +#define SMMU_GNSR0_SMR127 (0x9fcU) +#define SMMU_GNSR0_S2CR0 (0xc00U) +#define SMMU_GNSR0_S2CRn (0xc00U) +#define SMMU_GNSR0_S2CRn (0xc00U) +#define SMMU_GNSR0_S2CR1 (0xc04U) +#define SMMU_GNSR0_S2CR2 (0xc08U) +#define SMMU_GNSR0_S2CR3 (0xc0cU) +#define SMMU_GNSR0_S2CR4 (0xc10U) +#define 
SMMU_GNSR0_S2CR5 (0xc14U) +#define SMMU_GNSR0_S2CR6 (0xc18U) +#define SMMU_GNSR0_S2CR7 (0xc1cU) +#define SMMU_GNSR0_S2CR8 (0xc20U) +#define SMMU_GNSR0_S2CR9 (0xc24U) +#define SMMU_GNSR0_S2CR10 (0xc28U) +#define SMMU_GNSR0_S2CR11 (0xc2cU) +#define SMMU_GNSR0_S2CR12 (0xc30U) +#define SMMU_GNSR0_S2CR13 (0xc34U) +#define SMMU_GNSR0_S2CR14 (0xc38U) +#define SMMU_GNSR0_S2CR15 (0xc3cU) +#define SMMU_GNSR0_S2CR16 (0xc40U) +#define SMMU_GNSR0_S2CR17 (0xc44U) +#define SMMU_GNSR0_S2CR18 (0xc48U) +#define SMMU_GNSR0_S2CR19 (0xc4cU) +#define SMMU_GNSR0_S2CR20 (0xc50U) +#define SMMU_GNSR0_S2CR21 (0xc54U) +#define SMMU_GNSR0_S2CR22 (0xc58U) +#define SMMU_GNSR0_S2CR23 (0xc5cU) +#define SMMU_GNSR0_S2CR24 (0xc60U) +#define SMMU_GNSR0_S2CR25 (0xc64U) +#define SMMU_GNSR0_S2CR26 (0xc68U) +#define SMMU_GNSR0_S2CR27 (0xc6cU) +#define SMMU_GNSR0_S2CR28 (0xc70U) +#define SMMU_GNSR0_S2CR29 (0xc74U) +#define SMMU_GNSR0_S2CR30 (0xc78U) +#define SMMU_GNSR0_S2CR31 (0xc7cU) +#define SMMU_GNSR0_S2CR32 (0xc80U) +#define SMMU_GNSR0_S2CR33 (0xc84U) +#define SMMU_GNSR0_S2CR34 (0xc88U) +#define SMMU_GNSR0_S2CR35 (0xc8cU) +#define SMMU_GNSR0_S2CR36 (0xc90U) +#define SMMU_GNSR0_S2CR37 (0xc94U) +#define SMMU_GNSR0_S2CR38 (0xc98U) +#define SMMU_GNSR0_S2CR39 (0xc9cU) +#define SMMU_GNSR0_S2CR40 (0xca0U) +#define SMMU_GNSR0_S2CR41 (0xca4U) +#define SMMU_GNSR0_S2CR42 (0xca8U) +#define SMMU_GNSR0_S2CR43 (0xcacU) +#define SMMU_GNSR0_S2CR44 (0xcb0U) +#define SMMU_GNSR0_S2CR45 (0xcb4U) +#define SMMU_GNSR0_S2CR46 (0xcb8U) +#define SMMU_GNSR0_S2CR47 (0xcbcU) +#define SMMU_GNSR0_S2CR48 (0xcc0U) +#define SMMU_GNSR0_S2CR49 (0xcc4U) +#define SMMU_GNSR0_S2CR50 (0xcc8U) +#define SMMU_GNSR0_S2CR51 (0xcccU) +#define SMMU_GNSR0_S2CR52 (0xcd0U) +#define SMMU_GNSR0_S2CR53 (0xcd4U) +#define SMMU_GNSR0_S2CR54 (0xcd8U) +#define SMMU_GNSR0_S2CR55 (0xcdcU) +#define SMMU_GNSR0_S2CR56 (0xce0U) +#define SMMU_GNSR0_S2CR57 (0xce4U) +#define SMMU_GNSR0_S2CR58 (0xce8U) +#define SMMU_GNSR0_S2CR59 (0xcecU) +#define SMMU_GNSR0_S2CR60 (0xcf0U) +#define SMMU_GNSR0_S2CR61 (0xcf4U) +#define SMMU_GNSR0_S2CR62 (0xcf8U) +#define SMMU_GNSR0_S2CR63 (0xcfcU) +#define SMMU_GNSR0_S2CR64 (0xd00U) +#define SMMU_GNSR0_S2CR65 (0xd04U) +#define SMMU_GNSR0_S2CR66 (0xd08U) +#define SMMU_GNSR0_S2CR67 (0xd0cU) +#define SMMU_GNSR0_S2CR68 (0xd10U) +#define SMMU_GNSR0_S2CR69 (0xd14U) +#define SMMU_GNSR0_S2CR70 (0xd18U) +#define SMMU_GNSR0_S2CR71 (0xd1cU) +#define SMMU_GNSR0_S2CR72 (0xd20U) +#define SMMU_GNSR0_S2CR73 (0xd24U) +#define SMMU_GNSR0_S2CR74 (0xd28U) +#define SMMU_GNSR0_S2CR75 (0xd2cU) +#define SMMU_GNSR0_S2CR76 (0xd30U) +#define SMMU_GNSR0_S2CR77 (0xd34U) +#define SMMU_GNSR0_S2CR78 (0xd38U) +#define SMMU_GNSR0_S2CR79 (0xd3cU) +#define SMMU_GNSR0_S2CR80 (0xd40U) +#define SMMU_GNSR0_S2CR81 (0xd44U) +#define SMMU_GNSR0_S2CR82 (0xd48U) +#define SMMU_GNSR0_S2CR83 (0xd4cU) +#define SMMU_GNSR0_S2CR84 (0xd50U) +#define SMMU_GNSR0_S2CR85 (0xd54U) +#define SMMU_GNSR0_S2CR86 (0xd58U) +#define SMMU_GNSR0_S2CR87 (0xd5cU) +#define SMMU_GNSR0_S2CR88 (0xd60U) +#define SMMU_GNSR0_S2CR89 (0xd64U) +#define SMMU_GNSR0_S2CR90 (0xd68U) +#define SMMU_GNSR0_S2CR91 (0xd6cU) +#define SMMU_GNSR0_S2CR92 (0xd70U) +#define SMMU_GNSR0_S2CR93 (0xd74U) +#define SMMU_GNSR0_S2CR94 (0xd78U) +#define SMMU_GNSR0_S2CR95 (0xd7cU) +#define SMMU_GNSR0_S2CR96 (0xd80U) +#define SMMU_GNSR0_S2CR97 (0xd84U) +#define SMMU_GNSR0_S2CR98 (0xd88U) +#define SMMU_GNSR0_S2CR99 (0xd8cU) +#define SMMU_GNSR0_S2CR100 (0xd90U) +#define SMMU_GNSR0_S2CR101 (0xd94U) +#define SMMU_GNSR0_S2CR102 (0xd98U) +#define SMMU_GNSR0_S2CR103 (0xd9cU) 
+#define SMMU_GNSR0_S2CR104 (0xda0U) +#define SMMU_GNSR0_S2CR105 (0xda4U) +#define SMMU_GNSR0_S2CR106 (0xda8U) +#define SMMU_GNSR0_S2CR107 (0xdacU) +#define SMMU_GNSR0_S2CR108 (0xdb0U) +#define SMMU_GNSR0_S2CR109 (0xdb4U) +#define SMMU_GNSR0_S2CR110 (0xdb8U) +#define SMMU_GNSR0_S2CR111 (0xdbcU) +#define SMMU_GNSR0_S2CR112 (0xdc0U) +#define SMMU_GNSR0_S2CR113 (0xdc4U) +#define SMMU_GNSR0_S2CR114 (0xdc8U) +#define SMMU_GNSR0_S2CR115 (0xdccU) +#define SMMU_GNSR0_S2CR116 (0xdd0U) +#define SMMU_GNSR0_S2CR117 (0xdd4U) +#define SMMU_GNSR0_S2CR118 (0xdd8U) +#define SMMU_GNSR0_S2CR119 (0xddcU) +#define SMMU_GNSR0_S2CR120 (0xde0U) +#define SMMU_GNSR0_S2CR121 (0xde4U) +#define SMMU_GNSR0_S2CR122 (0xde8U) +#define SMMU_GNSR0_S2CR123 (0xdecU) +#define SMMU_GNSR0_S2CR124 (0xdf0U) +#define SMMU_GNSR0_S2CR125 (0xdf4U) +#define SMMU_GNSR0_S2CR126 (0xdf8U) +#define SMMU_GNSR0_S2CR127 (0xdfcU) +#define SMMU_GNSR0_PIDR0 (0xfe0U) +#define SMMU_GNSR0_PIDR1 (0xfe4U) +#define SMMU_GNSR0_PIDR2 (0xfe8U) +#define SMMU_GNSR0_PIDR3 (0xfecU) +#define SMMU_GNSR0_PIDR4 (0xfd0U) +#define SMMU_GNSR0_PIDR5 (0xfd4U) +#define SMMU_GNSR0_PIDR6 (0xfd8U) +#define SMMU_GNSR0_PIDR7 (0xfdcU) +#define SMMU_GNSR0_CIDR0 (0xff0U) +#define SMMU_GNSR0_CIDR1 (0xff4U) +#define SMMU_GNSR0_CIDR2 (0xff8U) +#define SMMU_GNSR0_CIDR3 (0xffcU) +#define SMMU_GNSR1_CBAR0 (0x0U) +#define SMMU_GNSR1_CBARn (0x0U) +#define SMMU_GNSR1_CBFRSYNRA0 (0x400U) +#define SMMU_GNSR1_CBA2R0 (0x800U) +#define SMMU_GNSR1_CBAR1 (0x4U) +#define SMMU_GNSR1_CBFRSYNRA1 (0x404U) +#define SMMU_GNSR1_CBA2R1 (0x804U) +#define SMMU_GNSR1_CBAR2 (0x8U) +#define SMMU_GNSR1_CBFRSYNRA2 (0x408U) +#define SMMU_GNSR1_CBA2R2 (0x808U) +#define SMMU_GNSR1_CBAR3 (0xcU) +#define SMMU_GNSR1_CBFRSYNRA3 (0x40cU) +#define SMMU_GNSR1_CBA2R3 (0x80cU) +#define SMMU_GNSR1_CBAR4 (0x10U) +#define SMMU_GNSR1_CBFRSYNRA4 (0x410U) +#define SMMU_GNSR1_CBA2R4 (0x810U) +#define SMMU_GNSR1_CBAR5 (0x14U) +#define SMMU_GNSR1_CBFRSYNRA5 (0x414U) +#define SMMU_GNSR1_CBA2R5 (0x814U) +#define SMMU_GNSR1_CBAR6 (0x18U) +#define SMMU_GNSR1_CBFRSYNRA6 (0x418U) +#define SMMU_GNSR1_CBA2R6 (0x818U) +#define SMMU_GNSR1_CBAR7 (0x1cU) +#define SMMU_GNSR1_CBFRSYNRA7 (0x41cU) +#define SMMU_GNSR1_CBA2R7 (0x81cU) +#define SMMU_GNSR1_CBAR8 (0x20U) +#define SMMU_GNSR1_CBFRSYNRA8 (0x420U) +#define SMMU_GNSR1_CBA2R8 (0x820U) +#define SMMU_GNSR1_CBAR9 (0x24U) +#define SMMU_GNSR1_CBFRSYNRA9 (0x424U) +#define SMMU_GNSR1_CBA2R9 (0x824U) +#define SMMU_GNSR1_CBAR10 (0x28U) +#define SMMU_GNSR1_CBFRSYNRA10 (0x428U) +#define SMMU_GNSR1_CBA2R10 (0x828U) +#define SMMU_GNSR1_CBAR11 (0x2cU) +#define SMMU_GNSR1_CBFRSYNRA11 (0x42cU) +#define SMMU_GNSR1_CBA2R11 (0x82cU) +#define SMMU_GNSR1_CBAR12 (0x30U) +#define SMMU_GNSR1_CBFRSYNRA12 (0x430U) +#define SMMU_GNSR1_CBA2R12 (0x830U) +#define SMMU_GNSR1_CBAR13 (0x34U) +#define SMMU_GNSR1_CBFRSYNRA13 (0x434U) +#define SMMU_GNSR1_CBA2R13 (0x834U) +#define SMMU_GNSR1_CBAR14 (0x38U) +#define SMMU_GNSR1_CBFRSYNRA14 (0x438U) +#define SMMU_GNSR1_CBA2R14 (0x838U) +#define SMMU_GNSR1_CBAR15 (0x3cU) +#define SMMU_GNSR1_CBFRSYNRA15 (0x43cU) +#define SMMU_GNSR1_CBA2R15 (0x83cU) +#define SMMU_GNSR1_CBAR16 (0x40U) +#define SMMU_GNSR1_CBFRSYNRA16 (0x440U) +#define SMMU_GNSR1_CBA2R16 (0x840U) +#define SMMU_GNSR1_CBAR17 (0x44U) +#define SMMU_GNSR1_CBFRSYNRA17 (0x444U) +#define SMMU_GNSR1_CBA2R17 (0x844U) +#define SMMU_GNSR1_CBAR18 (0x48U) +#define SMMU_GNSR1_CBFRSYNRA18 (0x448U) +#define SMMU_GNSR1_CBA2R18 (0x848U) +#define SMMU_GNSR1_CBAR19 (0x4cU) +#define SMMU_GNSR1_CBFRSYNRA19 (0x44cU) +#define 
SMMU_GNSR1_CBA2R19 (0x84cU) +#define SMMU_GNSR1_CBAR20 (0x50U) +#define SMMU_GNSR1_CBFRSYNRA20 (0x450U) +#define SMMU_GNSR1_CBA2R20 (0x850U) +#define SMMU_GNSR1_CBAR21 (0x54U) +#define SMMU_GNSR1_CBFRSYNRA21 (0x454U) +#define SMMU_GNSR1_CBA2R21 (0x854U) +#define SMMU_GNSR1_CBAR22 (0x58U) +#define SMMU_GNSR1_CBFRSYNRA22 (0x458U) +#define SMMU_GNSR1_CBA2R22 (0x858U) +#define SMMU_GNSR1_CBAR23 (0x5cU) +#define SMMU_GNSR1_CBFRSYNRA23 (0x45cU) +#define SMMU_GNSR1_CBA2R23 (0x85cU) +#define SMMU_GNSR1_CBAR24 (0x60U) +#define SMMU_GNSR1_CBFRSYNRA24 (0x460U) +#define SMMU_GNSR1_CBA2R24 (0x860U) +#define SMMU_GNSR1_CBAR25 (0x64U) +#define SMMU_GNSR1_CBFRSYNRA25 (0x464U) +#define SMMU_GNSR1_CBA2R25 (0x864U) +#define SMMU_GNSR1_CBAR26 (0x68U) +#define SMMU_GNSR1_CBFRSYNRA26 (0x468U) +#define SMMU_GNSR1_CBA2R26 (0x868U) +#define SMMU_GNSR1_CBAR27 (0x6cU) +#define SMMU_GNSR1_CBFRSYNRA27 (0x46cU) +#define SMMU_GNSR1_CBA2R27 (0x86cU) +#define SMMU_GNSR1_CBAR28 (0x70U) +#define SMMU_GNSR1_CBFRSYNRA28 (0x470U) +#define SMMU_GNSR1_CBA2R28 (0x870U) +#define SMMU_GNSR1_CBAR29 (0x74U) +#define SMMU_GNSR1_CBFRSYNRA29 (0x474U) +#define SMMU_GNSR1_CBA2R29 (0x874U) +#define SMMU_GNSR1_CBAR30 (0x78U) +#define SMMU_GNSR1_CBFRSYNRA30 (0x478U) +#define SMMU_GNSR1_CBA2R30 (0x878U) +#define SMMU_GNSR1_CBAR31 (0x7cU) +#define SMMU_GNSR1_CBFRSYNRA31 (0x47cU) +#define SMMU_GNSR1_CBA2R31 (0x87cU) +#define SMMU_GNSR1_CBAR32 (0x80U) +#define SMMU_GNSR1_CBFRSYNRA32 (0x480U) +#define SMMU_GNSR1_CBA2R32 (0x880U) +#define SMMU_GNSR1_CBAR33 (0x84U) +#define SMMU_GNSR1_CBFRSYNRA33 (0x484U) +#define SMMU_GNSR1_CBA2R33 (0x884U) +#define SMMU_GNSR1_CBAR34 (0x88U) +#define SMMU_GNSR1_CBFRSYNRA34 (0x488U) +#define SMMU_GNSR1_CBA2R34 (0x888U) +#define SMMU_GNSR1_CBAR35 (0x8cU) +#define SMMU_GNSR1_CBFRSYNRA35 (0x48cU) +#define SMMU_GNSR1_CBA2R35 (0x88cU) +#define SMMU_GNSR1_CBAR36 (0x90U) +#define SMMU_GNSR1_CBFRSYNRA36 (0x490U) +#define SMMU_GNSR1_CBA2R36 (0x890U) +#define SMMU_GNSR1_CBAR37 (0x94U) +#define SMMU_GNSR1_CBFRSYNRA37 (0x494U) +#define SMMU_GNSR1_CBA2R37 (0x894U) +#define SMMU_GNSR1_CBAR38 (0x98U) +#define SMMU_GNSR1_CBFRSYNRA38 (0x498U) +#define SMMU_GNSR1_CBA2R38 (0x898U) +#define SMMU_GNSR1_CBAR39 (0x9cU) +#define SMMU_GNSR1_CBFRSYNRA39 (0x49cU) +#define SMMU_GNSR1_CBA2R39 (0x89cU) +#define SMMU_GNSR1_CBAR40 (0xa0U) +#define SMMU_GNSR1_CBFRSYNRA40 (0x4a0U) +#define SMMU_GNSR1_CBA2R40 (0x8a0U) +#define SMMU_GNSR1_CBAR41 (0xa4U) +#define SMMU_GNSR1_CBFRSYNRA41 (0x4a4U) +#define SMMU_GNSR1_CBA2R41 (0x8a4U) +#define SMMU_GNSR1_CBAR42 (0xa8U) +#define SMMU_GNSR1_CBFRSYNRA42 (0x4a8U) +#define SMMU_GNSR1_CBA2R42 (0x8a8U) +#define SMMU_GNSR1_CBAR43 (0xacU) +#define SMMU_GNSR1_CBFRSYNRA43 (0x4acU) +#define SMMU_GNSR1_CBA2R43 (0x8acU) +#define SMMU_GNSR1_CBAR44 (0xb0U) +#define SMMU_GNSR1_CBFRSYNRA44 (0x4b0U) +#define SMMU_GNSR1_CBA2R44 (0x8b0U) +#define SMMU_GNSR1_CBAR45 (0xb4U) +#define SMMU_GNSR1_CBFRSYNRA45 (0x4b4U) +#define SMMU_GNSR1_CBA2R45 (0x8b4U) +#define SMMU_GNSR1_CBAR46 (0xb8U) +#define SMMU_GNSR1_CBFRSYNRA46 (0x4b8U) +#define SMMU_GNSR1_CBA2R46 (0x8b8U) +#define SMMU_GNSR1_CBAR47 (0xbcU) +#define SMMU_GNSR1_CBFRSYNRA47 (0x4bcU) +#define SMMU_GNSR1_CBA2R47 (0x8bcU) +#define SMMU_GNSR1_CBAR48 (0xc0U) +#define SMMU_GNSR1_CBFRSYNRA48 (0x4c0U) +#define SMMU_GNSR1_CBA2R48 (0x8c0U) +#define SMMU_GNSR1_CBAR49 (0xc4U) +#define SMMU_GNSR1_CBFRSYNRA49 (0x4c4U) +#define SMMU_GNSR1_CBA2R49 (0x8c4U) +#define SMMU_GNSR1_CBAR50 (0xc8U) +#define SMMU_GNSR1_CBFRSYNRA50 (0x4c8U) +#define SMMU_GNSR1_CBA2R50 (0x8c8U) +#define 
SMMU_GNSR1_CBAR51 (0xccU) +#define SMMU_GNSR1_CBFRSYNRA51 (0x4ccU) +#define SMMU_GNSR1_CBA2R51 (0x8ccU) +#define SMMU_GNSR1_CBAR52 (0xd0U) +#define SMMU_GNSR1_CBFRSYNRA52 (0x4d0U) +#define SMMU_GNSR1_CBA2R52 (0x8d0U) +#define SMMU_GNSR1_CBAR53 (0xd4U) +#define SMMU_GNSR1_CBFRSYNRA53 (0x4d4U) +#define SMMU_GNSR1_CBA2R53 (0x8d4U) +#define SMMU_GNSR1_CBAR54 (0xd8U) +#define SMMU_GNSR1_CBFRSYNRA54 (0x4d8U) +#define SMMU_GNSR1_CBA2R54 (0x8d8U) +#define SMMU_GNSR1_CBAR55 (0xdcU) +#define SMMU_GNSR1_CBFRSYNRA55 (0x4dcU) +#define SMMU_GNSR1_CBA2R55 (0x8dcU) +#define SMMU_GNSR1_CBAR56 (0xe0U) +#define SMMU_GNSR1_CBFRSYNRA56 (0x4e0U) +#define SMMU_GNSR1_CBA2R56 (0x8e0U) +#define SMMU_GNSR1_CBAR57 (0xe4U) +#define SMMU_GNSR1_CBFRSYNRA57 (0x4e4U) +#define SMMU_GNSR1_CBA2R57 (0x8e4U) +#define SMMU_GNSR1_CBAR58 (0xe8U) +#define SMMU_GNSR1_CBFRSYNRA58 (0x4e8U) +#define SMMU_GNSR1_CBA2R58 (0x8e8U) +#define SMMU_GNSR1_CBAR59 (0xecU) +#define SMMU_GNSR1_CBFRSYNRA59 (0x4ecU) +#define SMMU_GNSR1_CBA2R59 (0x8ecU) +#define SMMU_GNSR1_CBAR60 (0xf0U) +#define SMMU_GNSR1_CBFRSYNRA60 (0x4f0U) +#define SMMU_GNSR1_CBA2R60 (0x8f0U) +#define SMMU_GNSR1_CBAR61 (0xf4U) +#define SMMU_GNSR1_CBFRSYNRA61 (0x4f4U) +#define SMMU_GNSR1_CBA2R61 (0x8f4U) +#define SMMU_GNSR1_CBAR62 (0xf8U) +#define SMMU_GNSR1_CBFRSYNRA62 (0x4f8U) +#define SMMU_GNSR1_CBA2R62 (0x8f8U) +#define SMMU_GNSR1_CBAR63 (0xfcU) +#define SMMU_GNSR1_CBFRSYNRA63 (0x4fcU) +#define SMMU_GNSR1_CBA2R63 (0x8fcU) /******************************************************************************* * SMMU Global Secure Aux. Configuration Register ******************************************************************************/ -#define SMMU_GSR0_SECURE_ACR 0x10 -#define SMMU_GNSR_ACR (SMMU_GSR0_SECURE_ACR + 0x400) -#define SMMU_GSR0_PGSIZE_SHIFT 16 -#define SMMU_GSR0_PGSIZE_4K (0 << SMMU_GSR0_PGSIZE_SHIFT) -#define SMMU_GSR0_PGSIZE_64K (1 << SMMU_GSR0_PGSIZE_SHIFT) -#define SMMU_ACR_CACHE_LOCK_ENABLE_BIT (1 << 26) +#define SMMU_GSR0_SECURE_ACR 0x10U +#define SMMU_GNSR_ACR (SMMU_GSR0_SECURE_ACR + 0x400U) +#define SMMU_GSR0_PGSIZE_SHIFT 16U +#define SMMU_GSR0_PGSIZE_4K (0U << SMMU_GSR0_PGSIZE_SHIFT) +#define SMMU_GSR0_PGSIZE_64K (1U << SMMU_GSR0_PGSIZE_SHIFT) +#define SMMU_ACR_CACHE_LOCK_ENABLE_BIT (1U << 26) /******************************************************************************* * SMMU Global Aux. 
Control Register ******************************************************************************/ -#define SMMU_CBn_ACTLR_CPRE_BIT (1 << 1) +#define SMMU_CBn_ACTLR_CPRE_BIT (1U << 1) /******************************************************************************* * SMMU configuration constants ******************************************************************************/ -#define ID1_PAGESIZE (1 << 31) -#define ID1_NUMPAGENDXB_SHIFT 28 -#define ID1_NUMPAGENDXB_MASK 7 -#define ID1_NUMS2CB_SHIFT 16 -#define ID1_NUMS2CB_MASK 0xff -#define ID1_NUMCB_SHIFT 0 -#define ID1_NUMCB_MASK 0xff -#define PGSHIFT 16 -#define CB_SIZE 0x800000 +#define ID1_PAGESIZE (1U << 31) +#define ID1_NUMPAGENDXB_SHIFT 28U +#define ID1_NUMPAGENDXB_MASK 7U +#define ID1_NUMS2CB_SHIFT 16U +#define ID1_NUMS2CB_MASK 0xffU +#define ID1_NUMCB_SHIFT 0U +#define ID1_NUMCB_MASK 0xffU +#define PGSHIFT 16U +#define CB_SIZE 0x800000U -static inline uint32_t tegra_smmu_read_32(uint32_t off) -{ - return mmio_read_32(TEGRA_SMMU_BASE + off); -} +typedef struct smmu_regs { + uint32_t reg; + uint32_t val; +} smmu_regs_t; + +#define mc_make_sid_override_cfg(name) \ + { \ + .reg = TEGRA_MC_STREAMID_BASE + MC_STREAMID_OVERRIDE_CFG_ ## name, \ + .val = 0x00000000U, \ + } + +#define mc_make_sid_security_cfg(name) \ + { \ + .reg = TEGRA_MC_STREAMID_BASE + MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(MC_STREAMID_OVERRIDE_CFG_ ## name), \ + .val = 0x00000000U, \ + } + +#define smmu_make_gnsr0_sec_cfg(name) \ + { \ + .reg = TEGRA_SMMU0_BASE + SMMU_GNSR0_ ## name, \ + .val = 0x00000000U, \ + } + +/* + * On ARM-SMMU, conditional offset to access secure aliases of non-secure registers + * is 0x400. So, add it to register address + */ +#define smmu_make_gnsr0_nsec_cfg(name) \ + { \ + .reg = TEGRA_SMMU0_BASE + 0x400U + SMMU_GNSR0_ ## name, \ + .val = 0x00000000U, \ + } + +#define smmu_make_gnsr0_smr_cfg(n) \ + { \ + .reg = TEGRA_SMMU0_BASE + SMMU_GNSR0_SMR ## n, \ + .val = 0x00000000U, \ + } + +#define smmu_make_gnsr0_s2cr_cfg(n) \ + { \ + .reg = TEGRA_SMMU0_BASE + SMMU_GNSR0_S2CR ## n, \ + .val = 0x00000000U, \ + } + +#define smmu_make_gnsr1_cbar_cfg(n) \ + { \ + .reg = TEGRA_SMMU0_BASE + (1U << PGSHIFT) + SMMU_GNSR1_CBAR ## n, \ + .val = 0x00000000U, \ + } + +#define smmu_make_gnsr1_cba2r_cfg(n) \ + { \ + .reg = TEGRA_SMMU0_BASE + (1U << PGSHIFT) + SMMU_GNSR1_CBA2R ## n, \ + .val = 0x00000000U, \ + } + +#define make_smmu_cb_cfg(name, n) \ + { \ + .reg = TEGRA_SMMU0_BASE + (CB_SIZE >> 1) + (n * (1 << PGSHIFT)) \ + + SMMU_CBn_ ## name, \ + .val = 0x00000000U, \ + } + +#define smmu_make_smrg_group(n) \ + smmu_make_gnsr0_smr_cfg(n), \ + smmu_make_gnsr0_s2cr_cfg(n), \ + smmu_make_gnsr1_cbar_cfg(n), \ + smmu_make_gnsr1_cba2r_cfg(n) /* don't put "," here. */ + +#define smmu_make_cb_group(n) \ + make_smmu_cb_cfg(SCTLR, n), \ + make_smmu_cb_cfg(TCR2, n), \ + make_smmu_cb_cfg(TTBR0_LO, n), \ + make_smmu_cb_cfg(TTBR0_HI, n), \ + make_smmu_cb_cfg(TCR, n), \ + make_smmu_cb_cfg(PRRR_MAIR0, n),\ + make_smmu_cb_cfg(FSR, n), \ + make_smmu_cb_cfg(FAR_LO, n), \ + make_smmu_cb_cfg(FAR_HI, n), \ + make_smmu_cb_cfg(FSYNR0, n) /* don't put "," here. 
*/ + +#define smmu_bypass_cfg \ + { \ + .reg = TEGRA_MC_BASE + MC_SMMU_BYPASS_CONFIG, \ + .val = 0x00000000U, \ + } + +#define _START_OF_TABLE_ \ + { \ + .reg = 0xCAFE05C7U, \ + .val = 0x00000000U, \ + } + +#define _END_OF_TABLE_ \ + { \ + .reg = 0xFFFFFFFFU, \ + .val = 0xFFFFFFFFU, \ + } -static inline void tegra_smmu_write_32(uint32_t off, uint32_t val) -{ - mmio_write_32(TEGRA_SMMU_BASE + off, val); -} void tegra_smmu_init(void); void tegra_smmu_save_context(uint64_t smmu_ctx_addr); +smmu_regs_t *plat_get_smmu_ctx(void); #endif /*__SMMU_H */ diff --git a/plat/nvidia/tegra/include/platform_def.h b/plat/nvidia/tegra/include/platform_def.h index 4df309d36..52e07bd83 100644 --- a/plat/nvidia/tegra/include/platform_def.h +++ b/plat/nvidia/tegra/include/platform_def.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -77,7 +77,8 @@ /******************************************************************************* * Platform specific page table and MMU setup constants ******************************************************************************/ -#define ADDR_SPACE_SIZE (1ull << 35) +#define PLAT_PHY_ADDR_SPACE_SIZE (1ull << 35) +#define PLAT_VIRT_ADDR_SPACE_SIZE (1ull << 35) /******************************************************************************* * Some data must be aligned on the biggest cache line size in the platform. diff --git a/plat/nvidia/tegra/include/t186/tegra_def.h b/plat/nvidia/tegra/include/t186/tegra_def.h index 6bac0d714..ae11d2810 100644 --- a/plat/nvidia/tegra/include/t186/tegra_def.h +++ b/plat/nvidia/tegra/include/t186/tegra_def.h @@ -182,8 +182,6 @@ #define TZRAM_ENABLE_TZ_LOCK_BIT (1 << 0) #define MC_TZRAM_CARVEOUT_CLIENT_ACCESS_CFG0 0x21A0 #define MC_TZRAM_CARVEOUT_CLIENT_ACCESS_CFG1 0x21A4 -#define TZRAM_CARVEOUT_CPU_WRITE_ACCESS_BIT (1 << 25) -#define TZRAM_CARVEOUT_CPU_READ_ACCESS_BIT (1 << 7) #define MC_TZRAM_CARVEOUT_CLIENT_ACCESS_CFG2 0x21A8 #define MC_TZRAM_CARVEOUT_CLIENT_ACCESS_CFG3 0x21AC #define MC_TZRAM_CARVEOUT_CLIENT_ACCESS_CFG4 0x21B0 @@ -249,6 +247,8 @@ * Tegra scratch registers constants ******************************************************************************/ #define TEGRA_SCRATCH_BASE 0x0C390000 +#define SECURE_SCRATCH_RSV1_LO 0x658 +#define SECURE_SCRATCH_RSV1_HI 0x65C #define SECURE_SCRATCH_RSV6 0x680 #define SECURE_SCRATCH_RSV11_LO 0x6A8 #define SECURE_SCRATCH_RSV11_HI 0x6AC @@ -259,14 +259,20 @@ #define SECURE_SCRATCH_RSV55_HI 0x80C /******************************************************************************* - * Tegra Memory Mapped Control Register Access Bus constants + * Tegra Memory Mapped Control Register Access constants ******************************************************************************/ #define TEGRA_MMCRAB_BASE 0x0E000000 +/******************************************************************************* + * Tegra Memory Mapped Activity Monitor Register Access constants + ******************************************************************************/ +#define TEGRA_ARM_ACTMON_CTR_BASE 0x0E060000 +#define TEGRA_DENVER_ACTMON_CTR_BASE 0x0E070000 + /******************************************************************************* * Tegra SMMU Controller constants ******************************************************************************/ 
-#define TEGRA_SMMU_BASE 0x12000000 +#define TEGRA_SMMU0_BASE 0x12000000 /******************************************************************************* * Tegra TZRAM constants diff --git a/plat/nvidia/tegra/include/tegra_platform.h b/plat/nvidia/tegra/include/tegra_platform.h index a2813a832..c06ce70ef 100644 --- a/plat/nvidia/tegra/include/tegra_platform.h +++ b/plat/nvidia/tegra/include/tegra_platform.h @@ -42,8 +42,10 @@ uint32_t tegra_get_chipid_minor(void); /* * Tegra chip identifiers */ -uint8_t tegra_is_t132(void); -uint8_t tegra_is_t210(void); +uint8_t tegra_chipid_is_t132(void); +uint8_t tegra_chipid_is_t210(void); +uint8_t tegra_chipid_is_t186(void); + /* * Tegra platform identifiers diff --git a/plat/nvidia/tegra/include/tegra_private.h b/plat/nvidia/tegra/include/tegra_private.h index 39006f6fa..10065db55 100644 --- a/plat/nvidia/tegra/include/tegra_private.h +++ b/plat/nvidia/tegra/include/tegra_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -39,8 +39,8 @@ /******************************************************************************* * Tegra DRAM memory base address ******************************************************************************/ -#define TEGRA_DRAM_BASE 0x80000000 -#define TEGRA_DRAM_END 0x27FFFFFFF +#define TEGRA_DRAM_BASE 0x80000000ULL +#define TEGRA_DRAM_END 0x27FFFFFFFULL /******************************************************************************* * Struct for parameters received from BL2 @@ -103,6 +103,8 @@ void tegra_security_setup(void); void tegra_security_setup_videomem(uintptr_t base, uint64_t size); /* Declarations for tegra_pm.c */ +extern uint8_t tegra_fake_system_suspend; + void tegra_pm_system_suspend_entry(void); void tegra_pm_system_suspend_exit(void); int tegra_system_suspended(void); diff --git a/plat/nvidia/tegra/platform.mk b/plat/nvidia/tegra/platform.mk index 2eeffca74..1f7a4dcdd 100644 --- a/plat/nvidia/tegra/platform.mk +++ b/plat/nvidia/tegra/platform.mk @@ -30,12 +30,29 @@ SOC_DIR := plat/nvidia/tegra/soc/${TARGET_SOC} -# Enable PSCI v1.0 extended state ID format -PSCI_EXTENDED_STATE_ID := 1 +# dump the state on crash console +CRASH_REPORTING := 1 +$(eval $(call add_define,CRASH_REPORTING)) + +# enable assert() for release/debug builds +ENABLE_ASSERTIONS := 1 # Disable the PSCI platform compatibility layer ENABLE_PLAT_COMPAT := 0 +# enable dynamic memory mapping +PLAT_XLAT_TABLES_DYNAMIC := 1 +$(eval $(call add_define,PLAT_XLAT_TABLES_DYNAMIC)) + +# Enable PSCI v1.0 extended state ID format +PSCI_EXTENDED_STATE_ID := 1 + +# code and read-only data should be put on separate memory pages +SEPARATE_CODE_AND_RODATA := 1 + +# do not use coherent memory +USE_COHERENT_MEM := 0 + include plat/nvidia/tegra/common/tegra_common.mk include ${SOC_DIR}/platform_${TARGET_SOC}.mk diff --git a/plat/nvidia/tegra/soc/t132/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t132/plat_psci_handlers.c index f05f3d0e9..af21c2869 100644 --- a/plat/nvidia/tegra/soc/t132/plat_psci_handlers.c +++ b/plat/nvidia/tegra/soc/t132/plat_psci_handlers.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -132,7 +132,7 @@ int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state) int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state) { -#if DEBUG +#if ENABLE_ASSERTIONS int cpu = read_mpidr() & MPIDR_CPU_MASK; /* SYSTEM_SUSPEND only on CPU0 */ diff --git a/plat/nvidia/tegra/soc/t186/drivers/include/mce.h b/plat/nvidia/tegra/soc/t186/drivers/include/mce_private.h similarity index 75% rename from plat/nvidia/tegra/soc/t186/drivers/include/mce.h rename to plat/nvidia/tegra/soc/t186/drivers/include/mce_private.h index 441a2c1b7..ac1cff6d6 100644 --- a/plat/nvidia/tegra/soc/t186/drivers/include/mce.h +++ b/plat/nvidia/tegra/soc/t186/drivers/include/mce_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -28,108 +28,56 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#ifndef __MCE_H__ -#define __MCE_H__ +#ifndef __MCE_PRIVATE_H__ +#define __MCE_PRIVATE_H__ #include #include -/******************************************************************************* - * MCE commands - ******************************************************************************/ -typedef enum mce_cmd { - MCE_CMD_ENTER_CSTATE = 0, - MCE_CMD_UPDATE_CSTATE_INFO = 1, - MCE_CMD_UPDATE_CROSSOVER_TIME = 2, - MCE_CMD_READ_CSTATE_STATS = 3, - MCE_CMD_WRITE_CSTATE_STATS = 4, - MCE_CMD_IS_SC7_ALLOWED = 5, - MCE_CMD_ONLINE_CORE = 6, - MCE_CMD_CC3_CTRL = 7, - MCE_CMD_ECHO_DATA = 8, - MCE_CMD_READ_VERSIONS = 9, - MCE_CMD_ENUM_FEATURES = 10, - MCE_CMD_ROC_FLUSH_CACHE_TRBITS = 11, - MCE_CMD_ENUM_READ_MCA = 12, - MCE_CMD_ENUM_WRITE_MCA = 13, - MCE_CMD_ROC_FLUSH_CACHE = 14, - MCE_CMD_ROC_CLEAN_CACHE = 15, - MCE_CMD_ENABLE_LATIC = 16, - MCE_CMD_UNCORE_PERFMON_REQ = 17, - MCE_CMD_MISC_CCPLEX = 18, - MCE_CMD_IS_CCX_ALLOWED = 0xFE, - MCE_CMD_MAX = 0xFF, -} mce_cmd_t; - -#define MCE_CMD_MASK 0xFF - -/******************************************************************************* - * Struct to prepare UPDATE_CSTATE_INFO request - ******************************************************************************/ -typedef struct mce_cstate_info { - /* cluster cstate value */ - uint32_t cluster; - /* ccplex cstate value */ - uint32_t ccplex; - /* system cstate value */ - uint32_t system; - /* force system state? */ - uint8_t system_state_force; - /* wake mask value */ - uint32_t wake_mask; - /* update the wake mask? 
*/ - uint8_t update_wake_mask; -} mce_cstate_info_t; - /******************************************************************************* * Macros to prepare CSTATE info request ******************************************************************************/ /* Description of the parameters for UPDATE_CSTATE_INFO request */ -#define CLUSTER_CSTATE_MASK 0x7 -#define CLUSTER_CSTATE_SHIFT 0 -#define CLUSTER_CSTATE_UPDATE_BIT (1 << 7) -#define CCPLEX_CSTATE_MASK 0x3 -#define CCPLEX_CSTATE_SHIFT 8 -#define CCPLEX_CSTATE_UPDATE_BIT (1 << 15) -#define SYSTEM_CSTATE_MASK 0xF -#define SYSTEM_CSTATE_SHIFT 16 -#define SYSTEM_CSTATE_FORCE_UPDATE_SHIFT 22 -#define SYSTEM_CSTATE_FORCE_UPDATE_BIT (1 << 22) -#define SYSTEM_CSTATE_UPDATE_BIT (1 << 23) -#define CSTATE_WAKE_MASK_UPDATE_BIT (1 << 31) -#define CSTATE_WAKE_MASK_SHIFT 32 -#define CSTATE_WAKE_MASK_CLEAR 0xFFFFFFFF +#define CLUSTER_CSTATE_MASK 0x7ULL +#define CLUSTER_CSTATE_SHIFT 0U +#define CLUSTER_CSTATE_UPDATE_BIT (1ULL << 7) +#define CCPLEX_CSTATE_MASK 0x3ULL +#define CCPLEX_CSTATE_SHIFT 8ULL +#define CCPLEX_CSTATE_UPDATE_BIT (1ULL << 15) +#define SYSTEM_CSTATE_MASK 0xFULL +#define SYSTEM_CSTATE_SHIFT 16ULL +#define SYSTEM_CSTATE_FORCE_UPDATE_SHIFT 22ULL +#define SYSTEM_CSTATE_FORCE_UPDATE_BIT (1ULL << 22) +#define SYSTEM_CSTATE_UPDATE_BIT (1ULL << 23) +#define CSTATE_WAKE_MASK_UPDATE_BIT (1ULL << 31) +#define CSTATE_WAKE_MASK_SHIFT 32ULL +#define CSTATE_WAKE_MASK_CLEAR 0xFFFFFFFFU /******************************************************************************* * Auto-CC3 control macros ******************************************************************************/ -#define MCE_AUTO_CC3_FREQ_MASK 0x1FF -#define MCE_AUTO_CC3_FREQ_SHIFT 0 -#define MCE_AUTO_CC3_VTG_MASK 0x7F -#define MCE_AUTO_CC3_VTG_SHIFT 16 -#define MCE_AUTO_CC3_ENABLE_BIT (1 << 31) +#define MCE_AUTO_CC3_FREQ_MASK 0x1FFU +#define MCE_AUTO_CC3_FREQ_SHIFT 0U +#define MCE_AUTO_CC3_VTG_MASK 0x7FU +#define MCE_AUTO_CC3_VTG_SHIFT 16U +#define MCE_AUTO_CC3_ENABLE_BIT (1U << 31) /******************************************************************************* * Macros for the 'IS_SC7_ALLOWED' command ******************************************************************************/ -#define MCE_SC7_ALLOWED_MASK 0x7 -#define MCE_SC7_WAKE_TIME_SHIFT 32 +#define MCE_SC7_ALLOWED_MASK 0x7U +#define MCE_SC7_WAKE_TIME_SHIFT 32U /******************************************************************************* * Macros for 'read/write ctats' commands ******************************************************************************/ -#define MCE_CSTATE_STATS_TYPE_SHIFT 32 -#define MCE_CSTATE_WRITE_DATA_LO_MASK 0xF +#define MCE_CSTATE_STATS_TYPE_SHIFT 32ULL +#define MCE_CSTATE_WRITE_DATA_LO_MASK 0xFU /******************************************************************************* * Macros for 'update crossover threshold' command ******************************************************************************/ -#define MCE_CROSSOVER_THRESHOLD_TIME_SHIFT 32 - -/******************************************************************************* - * Timeout value used to powerdown a core - ******************************************************************************/ -#define MCE_CORE_SLEEP_TIME_INFINITE 0xFFFFFFFF +#define MCE_CROSSOVER_THRESHOLD_TIME_SHIFT 32U /******************************************************************************* * MCA command struct @@ -152,9 +100,10 @@ typedef union mca_cmd { ******************************************************************************/ typedef union mca_arg { struct 
err { - uint64_t error:8; - uint64_t unused:48; - uint64_t finish:8; + uint32_t error:8; + uint32_t unused:24; + uint32_t unused2:24; + uint32_t finish:8; } err; struct arg { uint32_t low; @@ -171,45 +120,45 @@ typedef union uncore_perfmon_req { /* * Commands: 0 = READ, 1 = WRITE */ - uint64_t cmd:8; + uint32_t cmd:8; /* * The unit group: L2=0, L3=1, ROC=2, MC=3, IOB=4 */ - uint64_t grp:4; + uint32_t grp:4; /* * Unit selector: Selects the unit instance, with 0 = Unit * = (number of units in group) - 1. */ - uint64_t unit:4; + uint32_t unit:4; /* * Selects the uncore perfmon register to access */ - uint64_t reg:8; + uint32_t reg:8; /* * Counter number. Selects which counter to use for * registers NV_PMEVCNTR and NV_PMEVTYPER. */ - uint64_t counter:8; + uint32_t counter:8; } perfmon_command; struct perfmon_status { /* * Resulting command status */ - uint64_t val:8; - uint64_t unused:24; + uint32_t val:8; + uint32_t unused:24; } perfmon_status; uint64_t data; } uncore_perfmon_req_t; -#define UNCORE_PERFMON_CMD_READ 0 -#define UNCORE_PERFMON_CMD_WRITE 1 +#define UNCORE_PERFMON_CMD_READ 0U +#define UNCORE_PERFMON_CMD_WRITE 1U -#define UNCORE_PERFMON_CMD_MASK 0xFF -#define UNCORE_PERFMON_UNIT_GRP_MASK 0xF -#define UNCORE_PERFMON_SELECTOR_MASK 0xF -#define UNCORE_PERFMON_REG_MASK 0xFF -#define UNCORE_PERFMON_CTR_MASK 0xFF -#define UNCORE_PERFMON_RESP_STATUS_MASK 0xFF +#define UNCORE_PERFMON_CMD_MASK 0xFFU +#define UNCORE_PERFMON_UNIT_GRP_MASK 0xFU +#define UNCORE_PERFMON_SELECTOR_MASK 0xFU +#define UNCORE_PERFMON_REG_MASK 0xFFU +#define UNCORE_PERFMON_CTR_MASK 0xFFU +#define UNCORE_PERFMON_RESP_STATUS_MASK 0xFFU /******************************************************************************* * Structure populated by arch specific code to export routines which perform @@ -353,16 +302,6 @@ typedef struct arch_mce_ops { uint32_t value); } arch_mce_ops_t; -int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1, - uint64_t arg2); -int mce_update_reset_vector(void); -int mce_update_gsc_videomem(void); -int mce_update_gsc_tzdram(void); -int mce_update_gsc_tzram(void); -__dead2 void mce_enter_ccplex_state(uint32_t state_idx); -void mce_update_cstate_info(mce_cstate_info_t *cstate); -void mce_verify_firmware_version(void); - /* declarations for ARI/NVG handler functions */ int ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time); int ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex, @@ -399,4 +338,4 @@ int nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time); int nvg_online_core(uint32_t ari_base, uint32_t core); int nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable); -#endif /* __MCE_H__ */ +#endif /* __MCE_PRIVATE_H__ */ diff --git a/plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h b/plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h index cb48de62f..e01037fe5 100644 --- a/plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h +++ b/plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -40,410 +40,421 @@ */ enum { - TEGRA_ARI_VERSION_MAJOR = 3, - TEGRA_ARI_VERSION_MINOR = 0, + TEGRA_ARI_VERSION_MAJOR = 3U, + TEGRA_ARI_VERSION_MINOR = 1U, }; typedef enum { /* indexes below get the core lock */ - TEGRA_ARI_MISC = 0, + TEGRA_ARI_MISC = 0U, /* index 1 is deprecated */ /* index 2 is deprecated */ /* index 3 is deprecated */ - TEGRA_ARI_ONLINE_CORE = 4, + TEGRA_ARI_ONLINE_CORE = 4U, /* indexes below need cluster lock */ - TEGRA_ARI_MISC_CLUSTER = 41, - TEGRA_ARI_IS_CCX_ALLOWED = 42, - TEGRA_ARI_CC3_CTRL = 43, + TEGRA_ARI_MISC_CLUSTER = 41U, + TEGRA_ARI_IS_CCX_ALLOWED = 42U, + TEGRA_ARI_CC3_CTRL = 43U, /* indexes below need ccplex lock */ - TEGRA_ARI_ENTER_CSTATE = 80, - TEGRA_ARI_UPDATE_CSTATE_INFO = 81, - TEGRA_ARI_IS_SC7_ALLOWED = 82, + TEGRA_ARI_ENTER_CSTATE = 80U, + TEGRA_ARI_UPDATE_CSTATE_INFO = 81U, + TEGRA_ARI_IS_SC7_ALLOWED = 82U, /* index 83 is deprecated */ - TEGRA_ARI_PERFMON = 84, - TEGRA_ARI_UPDATE_CCPLEX_GSC = 85, + TEGRA_ARI_PERFMON = 84U, + TEGRA_ARI_UPDATE_CCPLEX_GSC = 85U, /* index 86 is depracated */ /* index 87 is deprecated */ - TEGRA_ARI_ROC_FLUSH_CACHE_ONLY = 88, - TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS = 89, - TEGRA_ARI_MISC_CCPLEX = 90, - TEGRA_ARI_MCA = 91, - TEGRA_ARI_UPDATE_CROSSOVER = 92, - TEGRA_ARI_CSTATE_STATS = 93, - TEGRA_ARI_WRITE_CSTATE_STATS = 94, - TEGRA_ARI_COPY_MISCREG_AA64_RST = 95, - TEGRA_ARI_ROC_CLEAN_CACHE_ONLY = 96, + TEGRA_ARI_ROC_FLUSH_CACHE_ONLY = 88U, + TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS = 89U, + TEGRA_ARI_MISC_CCPLEX = 90U, + TEGRA_ARI_MCA = 91U, + TEGRA_ARI_UPDATE_CROSSOVER = 92U, + TEGRA_ARI_CSTATE_STATS = 93U, + TEGRA_ARI_WRITE_CSTATE_STATS = 94U, + TEGRA_ARI_COPY_MISCREG_AA64_RST = 95U, + TEGRA_ARI_ROC_CLEAN_CACHE_ONLY = 96U, } tegra_ari_req_id_t; typedef enum { - TEGRA_ARI_MISC_ECHO = 0, - TEGRA_ARI_MISC_VERSION = 1, - TEGRA_ARI_MISC_FEATURE_LEAF_0 = 2, + TEGRA_ARI_MISC_ECHO = 0U, + TEGRA_ARI_MISC_VERSION = 1U, + TEGRA_ARI_MISC_FEATURE_LEAF_0 = 2U, } tegra_ari_misc_index_t; typedef enum { - TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF = 0, - TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT = 1, - TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL = 2, + TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF = 0U, + TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT = 1U, + TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL = 2U, + TEGRA_ARI_MISC_CCPLEX_EDBGREQ = 3U, } tegra_ari_misc_ccplex_index_t; typedef enum { - TEGRA_ARI_CORE_C0 = 0, - TEGRA_ARI_CORE_C1 = 1, - TEGRA_ARI_CORE_C6 = 6, - TEGRA_ARI_CORE_C7 = 7, - TEGRA_ARI_CORE_WARMRSTREQ = 8, + TEGRA_ARI_CORE_C0 = 0U, + TEGRA_ARI_CORE_C1 = 1U, + TEGRA_ARI_CORE_C6 = 6U, + TEGRA_ARI_CORE_C7 = 7U, + TEGRA_ARI_CORE_WARMRSTREQ = 8U, } tegra_ari_core_sleep_state_t; typedef enum { - TEGRA_ARI_CLUSTER_CC0 = 0, - TEGRA_ARI_CLUSTER_CC1 = 1, - TEGRA_ARI_CLUSTER_CC6 = 6, - TEGRA_ARI_CLUSTER_CC7 = 7, + TEGRA_ARI_CLUSTER_CC0 = 0U, + TEGRA_ARI_CLUSTER_CC1 = 1U, + TEGRA_ARI_CLUSTER_CC6 = 6U, + TEGRA_ARI_CLUSTER_CC7 = 7U, } tegra_ari_cluster_sleep_state_t; typedef enum { - TEGRA_ARI_CCPLEX_CCP0 = 0, - TEGRA_ARI_CCPLEX_CCP1 = 1, - TEGRA_ARI_CCPLEX_CCP3 = 3, /* obsoleted */ + TEGRA_ARI_CCPLEX_CCP0 = 0U, + TEGRA_ARI_CCPLEX_CCP1 = 1U, + TEGRA_ARI_CCPLEX_CCP3 = 3U, /* obsoleted */ } tegra_ari_ccplex_sleep_state_t; typedef enum { - TEGRA_ARI_SYSTEM_SC0 = 0, - TEGRA_ARI_SYSTEM_SC1 = 1, /* obsoleted */ - TEGRA_ARI_SYSTEM_SC2 = 2, /* obsoleted */ - TEGRA_ARI_SYSTEM_SC3 = 3, /* obsoleted */ - TEGRA_ARI_SYSTEM_SC4 = 4, 
/* obsoleted */ - TEGRA_ARI_SYSTEM_SC7 = 7, - TEGRA_ARI_SYSTEM_SC8 = 8, + TEGRA_ARI_SYSTEM_SC0 = 0U, + TEGRA_ARI_SYSTEM_SC1 = 1U, /* obsoleted */ + TEGRA_ARI_SYSTEM_SC2 = 2U, /* obsoleted */ + TEGRA_ARI_SYSTEM_SC3 = 3U, /* obsoleted */ + TEGRA_ARI_SYSTEM_SC4 = 4U, /* obsoleted */ + TEGRA_ARI_SYSTEM_SC7 = 7U, + TEGRA_ARI_SYSTEM_SC8 = 8U, } tegra_ari_system_sleep_state_t; typedef enum { - TEGRA_ARI_CROSSOVER_C1_C6 = 0, - TEGRA_ARI_CROSSOVER_CC1_CC6 = 1, - TEGRA_ARI_CROSSOVER_CC1_CC7 = 2, - TEGRA_ARI_CROSSOVER_CCP1_CCP3 = 3, /* obsoleted */ - TEGRA_ARI_CROSSOVER_CCP3_SC2 = 4, /* obsoleted */ - TEGRA_ARI_CROSSOVER_CCP3_SC3 = 5, /* obsoleted */ - TEGRA_ARI_CROSSOVER_CCP3_SC4 = 6, /* obsoleted */ - TEGRA_ARI_CROSSOVER_CCP3_SC7 = 7, /* obsoleted */ - TEGRA_ARI_CROSSOVER_SC0_SC7 = 7, - TEGRA_ARI_CROSSOVER_CCP3_SC1 = 8, /* obsoleted */ + TEGRA_ARI_CROSSOVER_C1_C6 = 0U, + TEGRA_ARI_CROSSOVER_CC1_CC6 = 1U, + TEGRA_ARI_CROSSOVER_CC1_CC7 = 2U, + TEGRA_ARI_CROSSOVER_CCP1_CCP3 = 3U, /* obsoleted */ + TEGRA_ARI_CROSSOVER_CCP3_SC2 = 4U, /* obsoleted */ + TEGRA_ARI_CROSSOVER_CCP3_SC3 = 5U, /* obsoleted */ + TEGRA_ARI_CROSSOVER_CCP3_SC4 = 6U, /* obsoleted */ + TEGRA_ARI_CROSSOVER_CCP3_SC7 = 7U, /* obsoleted */ + TEGRA_ARI_CROSSOVER_SC0_SC7 = 7U, + TEGRA_ARI_CROSSOVER_CCP3_SC1 = 8U, /* obsoleted */ } tegra_ari_crossover_index_t; typedef enum { - TEGRA_ARI_CSTATE_STATS_CLEAR = 0, - TEGRA_ARI_CSTATE_STATS_SC7_ENTRIES, - TEGRA_ARI_CSTATE_STATS_SC4_ENTRIES, /* obsoleted */ - TEGRA_ARI_CSTATE_STATS_SC3_ENTRIES, /* obsoleted */ - TEGRA_ARI_CSTATE_STATS_SC2_ENTRIES, /* obsoleted */ - TEGRA_ARI_CSTATE_STATS_CCP3_ENTRIES, /* obsoleted */ + TEGRA_ARI_CSTATE_STATS_CLEAR = 0U, + TEGRA_ARI_CSTATE_STATS_SC7_ENTRIES = 1U, + TEGRA_ARI_CSTATE_STATS_SC4_ENTRIES, /* obsoleted */ + TEGRA_ARI_CSTATE_STATS_SC3_ENTRIES, /* obsoleted */ + TEGRA_ARI_CSTATE_STATS_SC2_ENTRIES, /* obsoleted */ + TEGRA_ARI_CSTATE_STATS_CCP3_ENTRIES, /* obsoleted */ TEGRA_ARI_CSTATE_STATS_A57_CC6_ENTRIES, TEGRA_ARI_CSTATE_STATS_A57_CC7_ENTRIES, TEGRA_ARI_CSTATE_STATS_D15_CC6_ENTRIES, TEGRA_ARI_CSTATE_STATS_D15_CC7_ENTRIES, TEGRA_ARI_CSTATE_STATS_D15_0_C6_ENTRIES, TEGRA_ARI_CSTATE_STATS_D15_1_C6_ENTRIES, - TEGRA_ARI_CSTATE_STATS_D15_0_C7_ENTRIES = 14, + TEGRA_ARI_CSTATE_STATS_D15_0_C7_ENTRIES = 14U, TEGRA_ARI_CSTATE_STATS_D15_1_C7_ENTRIES, - TEGRA_ARI_CSTATE_STATS_A57_0_C7_ENTRIES = 18, + TEGRA_ARI_CSTATE_STATS_A57_0_C7_ENTRIES = 18U, TEGRA_ARI_CSTATE_STATS_A57_1_C7_ENTRIES, TEGRA_ARI_CSTATE_STATS_A57_2_C7_ENTRIES, TEGRA_ARI_CSTATE_STATS_A57_3_C7_ENTRIES, TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_0, TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_1, - TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_0 = 26, + TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_0 = 26U, TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_1, TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_2, TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_3, } tegra_ari_cstate_stats_index_t; typedef enum { - TEGRA_ARI_GSC_ALL = 0, - - TEGRA_ARI_GSC_BPMP = 6, - TEGRA_ARI_GSC_APE = 7, - TEGRA_ARI_GSC_SPE = 8, - TEGRA_ARI_GSC_SCE = 9, - TEGRA_ARI_GSC_APR = 10, - TEGRA_ARI_GSC_TZRAM = 11, - TEGRA_ARI_GSC_SE = 12, - - TEGRA_ARI_GSC_BPMP_TO_SPE = 16, - TEGRA_ARI_GSC_SPE_TO_BPMP = 17, - TEGRA_ARI_GSC_CPU_TZ_TO_BPMP = 18, - TEGRA_ARI_GSC_BPMP_TO_CPU_TZ = 19, - TEGRA_ARI_GSC_CPU_NS_TO_BPMP = 20, - TEGRA_ARI_GSC_BPMP_TO_CPU_NS = 21, - TEGRA_ARI_GSC_IPC_SE_SPE_SCE_BPMP = 22, - TEGRA_ARI_GSC_SC7_RESUME_FW = 23, - - TEGRA_ARI_GSC_TZ_DRAM_IDX = 34, - TEGRA_ARI_GSC_VPR_IDX = 35, + TEGRA_ARI_GSC_ALL = 0U, + TEGRA_ARI_GSC_BPMP = 
6U, + TEGRA_ARI_GSC_APE = 7U, + TEGRA_ARI_GSC_SPE = 8U, + TEGRA_ARI_GSC_SCE = 9U, + TEGRA_ARI_GSC_APR = 10U, + TEGRA_ARI_GSC_TZRAM = 11U, + TEGRA_ARI_GSC_SE = 12U, + TEGRA_ARI_GSC_BPMP_TO_SPE = 16U, + TEGRA_ARI_GSC_SPE_TO_BPMP = 17U, + TEGRA_ARI_GSC_CPU_TZ_TO_BPMP = 18U, + TEGRA_ARI_GSC_BPMP_TO_CPU_TZ = 19U, + TEGRA_ARI_GSC_CPU_NS_TO_BPMP = 20U, + TEGRA_ARI_GSC_BPMP_TO_CPU_NS = 21U, + TEGRA_ARI_GSC_IPC_SE_SPE_SCE_BPMP = 22U, + TEGRA_ARI_GSC_SC7_RESUME_FW = 23U, + TEGRA_ARI_GSC_TZ_DRAM_IDX = 34U, + TEGRA_ARI_GSC_VPR_IDX = 35U, } tegra_ari_gsc_index_t; /* This macro will produce enums for __name##_LSB, __name##_MSB and __name##_MSK */ #define TEGRA_ARI_ENUM_MASK_LSB_MSB(__name, __lsb, __msb) __name##_LSB = __lsb, __name##_MSB = __msb typedef enum { - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CLUSTER_CSTATE, 0, 2), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CLUSTER_CSTATE_PRESENT, 7, 7), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CCPLEX_CSTATE, 8, 9), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CCPLEX_CSTATE_PRESENT, 15, 15), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__SYSTEM_CSTATE, 16, 19), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__IGNORE_CROSSOVERS, 22, 22), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__SYSTEM_CSTATE_PRESENT, 23, 23), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__WAKE_MASK_PRESENT, 31, 31), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CLUSTER_CSTATE, 0U, 2U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CLUSTER_CSTATE_PRESENT, 7U, 7U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CCPLEX_CSTATE, 8U, 9U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CCPLEX_CSTATE_PRESENT, 15U, 15U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__SYSTEM_CSTATE, 16U, 19U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__IGNORE_CROSSOVERS, 22U, 22U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__SYSTEM_CSTATE_PRESENT, 23U, 23U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__WAKE_MASK_PRESENT, 31U, 31U), } tegra_ari_update_cstate_info_bitmasks_t; typedef enum { - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL__EN, 0, 0), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL__EN, 0U, 0U), } tegra_ari_misc_ccplex_bitmasks_t; typedef enum { - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__IDLE_FREQ, 0, 8), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__IDLE_VOLT, 16, 23), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__ENABLE, 31, 31), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__IDLE_FREQ, 0U, 8U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__IDLE_VOLT, 16U, 23U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__ENABLE, 31U, 31U), } tegra_ari_cc3_ctrl_bitmasks_t; typedef enum { - TEGRA_ARI_MCA_NOP = 0, - TEGRA_ARI_MCA_READ_SERR = 1, - TEGRA_ARI_MCA_WRITE_SERR = 2, - TEGRA_ARI_MCA_CLEAR_SERR = 4, - TEGRA_ARI_MCA_REPORT_SERR = 5, - TEGRA_ARI_MCA_READ_INTSTS = 6, - TEGRA_ARI_MCA_WRITE_INTSTS = 7, - TEGRA_ARI_MCA_READ_PREBOOT_SERR = 8, + TEGRA_ARI_MCA_NOP = 0U, + TEGRA_ARI_MCA_READ_SERR = 1U, + TEGRA_ARI_MCA_WRITE_SERR = 2U, + TEGRA_ARI_MCA_CLEAR_SERR = 4U, + TEGRA_ARI_MCA_REPORT_SERR = 5U, + TEGRA_ARI_MCA_READ_INTSTS = 6U, + TEGRA_ARI_MCA_WRITE_INTSTS = 7U, + TEGRA_ARI_MCA_READ_PREBOOT_SERR = 8U, } tegra_ari_mca_commands_t; typedef enum { - TEGRA_ARI_MCA_RD_WR_DPMU = 0, - TEGRA_ARI_MCA_RD_WR_IOB = 1, - 
TEGRA_ARI_MCA_RD_WR_MCB = 2, - TEGRA_ARI_MCA_RD_WR_CCE = 3, - TEGRA_ARI_MCA_RD_WR_CQX = 4, - TEGRA_ARI_MCA_RD_WR_CTU = 5, - TEGRA_ARI_MCA_RD_BANK_INFO = 0x0f, - TEGRA_ARI_MCA_RD_BANK_TEMPLATE = 0x10, - TEGRA_ARI_MCA_RD_WR_SECURE_ACCESS_REGISTER = 0x11, - TEGRA_ARI_MCA_RD_WR_GLOBAL_CONFIG_REGISTER = 0x12, + TEGRA_ARI_MCA_RD_WR_DPMU = 0U, + TEGRA_ARI_MCA_RD_WR_IOB = 1U, + TEGRA_ARI_MCA_RD_WR_MCB = 2U, + TEGRA_ARI_MCA_RD_WR_CCE = 3U, + TEGRA_ARI_MCA_RD_WR_CQX = 4U, + TEGRA_ARI_MCA_RD_WR_CTU = 5U, + TEGRA_ARI_MCA_RD_WR_JSR_MTS = 7U, + TEGRA_ARI_MCA_RD_BANK_INFO = 0x0fU, + TEGRA_ARI_MCA_RD_BANK_TEMPLATE = 0x10U, + TEGRA_ARI_MCA_RD_WR_SECURE_ACCESS_REGISTER = 0x11U, + TEGRA_ARI_MCA_RD_WR_GLOBAL_CONFIG_REGISTER = 0x12U, } tegra_ari_mca_rd_wr_indexes_t; typedef enum { - TEGRA_ARI_MCA_RD_WR_ASERRX_CTRL = 0, - TEGRA_ARI_MCA_RD_WR_ASERRX_STATUS = 1, - TEGRA_ARI_MCA_RD_WR_ASERRX_ADDR = 2, - TEGRA_ARI_MCA_RD_WR_ASERRX_MISC1 = 3, - TEGRA_ARI_MCA_RD_WR_ASERRX_MISC2 = 4, + TEGRA_ARI_MCA_RD_WR_ASERRX_CTRL = 0U, + TEGRA_ARI_MCA_RD_WR_ASERRX_STATUS = 1U, + TEGRA_ARI_MCA_RD_WR_ASERRX_ADDR = 2U, + TEGRA_ARI_MCA_RD_WR_ASERRX_MISC1 = 3U, + TEGRA_ARI_MCA_RD_WR_ASERRX_MISC2 = 4U, } tegra_ari_mca_read_asserx_subindexes_t; typedef enum { - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_SETTING_ENABLES_NS_PERMITTED, 0, 0), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_READING_STATUS_NS_PERMITTED, 1, 1), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_PENDING_MCA_ERRORS_NS_PERMITTED, 2, 2), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_CLEARING_MCA_INTERRUPTS_NS_PERMITTED, 3, 3), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_SETTING_ENABLES_NS_PERMITTED, 0U, 0U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_READING_STATUS_NS_PERMITTED, 1U, 1U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_PENDING_MCA_ERRORS_NS_PERMITTED, 2U, 2U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_CLEARING_MCA_INTERRUPTS_NS_PERMITTED, 3U, 3U), } tegra_ari_mca_secure_register_bitmasks_t; typedef enum { - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_SERR_ERR_CODE, 0, 15), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_PWM_ERR, 16, 16), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_CRAB_ERR, 17, 17), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_RD_WR_N, 18, 18), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_UCODE_ERR, 19, 19), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_PWM, 20, 23), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_AV, 58, 58), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_MV, 59, 59), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_EN, 60, 60), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_UC, 61, 61), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_OVF, 62, 62), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_VAL, 63, 63), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_SERR_ERR_CODE, 0U, 15U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_PWM_ERR, 16U, 16U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_CRAB_ERR, 17U, 17U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_RD_WR_N, 18U, 18U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_UCODE_ERR, 19U, 19U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_PWM, 20U, 23U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_AV, 58U, 58U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_MV, 59U, 
59U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_EN, 60U, 60U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_UC, 61U, 61U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_OVF, 62U, 62U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_VAL, 63U, 63U), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_ADDR_ADDR, 0, 41), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_ADDR_UCODE_ERRCD, 42, 52), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_ADDR_ADDR, 0U, 41U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_ADDR_UCODE_ERRCD, 42U, 52U), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_PWM_ERR, 0, 0), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_CRAB_ERR, 1, 1), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_UCODE_ERR, 3, 3), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_PWM_ERR, 0U, 0U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_CRAB_ERR, 1U, 1U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_UCODE_ERR, 3U, 3U), } tegra_ari_mca_aserr0_bitmasks_t; typedef enum { - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_SERR_ERR_CODE, 0, 15), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MSI_ERR, 16, 16), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_IHI_ERR, 17, 17), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CRI_ERR, 18, 18), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MMCRAB_ERR, 19, 19), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CSI_ERR, 20, 20), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_RD_WR_N, 21, 21), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_REQ_ERRT, 22, 23), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_RESP_ERRT, 24, 25), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_AV, 58, 58), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MV, 59, 59), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_EN, 60, 60), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_UC, 61, 61), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_OVF, 62, 62), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_VAL, 63, 63), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_SERR_ERR_CODE, 0U, 15U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MSI_ERR, 16U, 16U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_IHI_ERR, 17U, 17U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CRI_ERR, 18U, 18U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MMCRAB_ERR, 19U, 19U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CSI_ERR, 20U, 20U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_RD_WR_N, 21U, 21U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_REQ_ERRT, 22U, 23U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_RESP_ERRT, 24U, 25U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_AV, 58U, 58U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MV, 59U, 59U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_EN, 60U, 60U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_UC, 61U, 61U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_OVF, 62U, 62U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_VAL, 63U, 63U), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_AXI_ID, 0, 7), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_ID, 8, 27), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_CID, 28, 31), - 
TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_CMD, 32, 35), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_AXI_ID, 0U, 7U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_ID, 8U, 27U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_CID, 28U, 31U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_CMD, 32U, 35U), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_MSI_ERR, 0, 0), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_IHI_ERR, 1, 1), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_CRI_ERR, 2, 2), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_MMCRAB_ERR, 3, 3), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_CSI_ERR, 4, 4), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_MSI_ERR, 0U, 0U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_IHI_ERR, 1U, 1U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_CRI_ERR, 2U, 2U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_MMCRAB_ERR, 3U, 3U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_CSI_ERR, 4U, 4U), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_MISC_ADDR, 0, 41), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_MISC_ADDR, 0U, 41U), } tegra_ari_mca_aserr1_bitmasks_t; typedef enum { - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_SERR_ERR_CODE, 0, 15), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_MC_ERR, 16, 16), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_SYSRAM_ERR, 17, 17), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_CLIENT_ID, 18, 19), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_AV, 58, 58), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_MV, 59, 59), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_EN, 60, 60), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_UC, 61, 61), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_OVF, 62, 62), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_VAL, 63, 63), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_SERR_ERR_CODE, 0U, 15U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_MC_ERR, 16U, 16U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_SYSRAM_ERR, 17U, 17U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_CLIENT_ID, 18U, 19U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_AV, 58U, 58U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_MV, 59U, 59U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_EN, 60U, 60U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_UC, 61U, 61U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_OVF, 62U, 62U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_VAL, 63U, 63U), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_ID, 0, 17), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_CMD, 18, 21), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_ADDR, 22, 53), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_ID, 0U, 17U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_CMD, 18U, 21U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_ADDR, 22U, 53U), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_CTRL_EN_MC_ERR, 0, 0), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_CTRL_EN_MC_ERR, 0U, 0U), } tegra_ari_mca_aserr2_bitmasks_t; typedef enum { - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_SERR_ERR_CODE, 0, 15), - 
TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_TO_ERR, 16, 16), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_STAT_ERR, 17, 17), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_DST_ERR, 18, 18), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_UNC_ERR, 19, 19), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_MH_ERR, 20, 20), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_PERR, 21, 21), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_PSN_ERR, 22, 22), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_AV, 58, 58), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_MV, 59, 59), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_EN, 60, 60), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_UC, 61, 61), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_OVF, 62, 62), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_VAL, 63, 63), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_SERR_ERR_CODE, 0U, 15U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_TO_ERR, 16U, 16U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_STAT_ERR, 17U, 17U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_DST_ERR, 18U, 18U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_UNC_ERR, 19U, 19U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_MH_ERR, 20U, 20U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_PERR, 21U, 21U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_PSN_ERR, 22U, 22U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_AV, 58U, 58U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_MV, 59U, 59U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_EN, 60U, 60U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_UC, 61U, 61U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_OVF, 62U, 62U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_VAL, 63U, 63U), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_ADDR_CMD, 0, 5), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_ADDR_ADDR, 6, 47), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_ADDR_CMD, 0U, 5U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_ADDR_ADDR, 6U, 47U), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_TO, 0, 0), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_DIV4, 1, 1), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_TLIMIT, 2, 11), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_PSN_ERR_CORR_MSK, 12, 25), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_TO, 0U, 0U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_DIV4, 1U, 1U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_TLIMIT, 2U, 11U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_PSN_ERR_CORR_MSK, 12U, 25U), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_MORE_INFO, 0, 17), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_TO_INFO, 18, 43), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_SRC, 44, 45), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_TID, 46, 52), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_MORE_INFO, 0U, 17U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_TO_INFO, 18U, 43U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_SRC, 44U, 45U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_TID, 46U, 52U), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_TO_ERR, 0, 0), - 
TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_STAT_ERR, 1, 1), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_DST_ERR, 2, 2), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_UNC_ERR, 3, 3), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_MH_ERR, 4, 4), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_PERR, 5, 5), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_PSN_ERR, 6, 19), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_TO_ERR, 0U, 0U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_STAT_ERR, 1U, 1U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_DST_ERR, 2U, 2U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_UNC_ERR, 3U, 3U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_MH_ERR, 4U, 4U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_PERR, 5U, 5U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_PSN_ERR, 6U, 19U), } tegra_ari_mca_aserr3_bitmasks_t; typedef enum { - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_SERR_ERR_CODE, 0, 15), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_SRC_ERR, 16, 16), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_DST_ERR, 17, 17), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_REQ_ERR, 18, 18), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_RSP_ERR, 19, 19), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_AV, 58, 58), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_MV, 59, 59), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_EN, 60, 60), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_UC, 61, 61), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_OVF, 62, 62), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_VAL, 63, 63), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_SERR_ERR_CODE, 0U, 15U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_SRC_ERR, 16U, 16U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_DST_ERR, 17U, 17U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_REQ_ERR, 18U, 18U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_RSP_ERR, 19U, 19U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_AV, 58U, 58U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_MV, 59U, 59U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_EN, 60U, 60U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_UC, 61U, 61U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_OVF, 62U, 62U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_VAL, 63U, 63U), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_CTRL_EN_CPE_ERR, 0, 0), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_CTRL_EN_CPE_ERR, 0U, 0U), } tegra_ari_mca_aserr4_bitmasks_t; typedef enum { - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_SERR_ERR_CODE, 0, 15), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_CTUPAR, 16, 16), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_MULTI, 17, 17), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_AV, 58, 58), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_MV, 59, 59), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_EN, 60, 60), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_UC, 61, 61), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_OVF, 62, 62), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_VAL, 63, 63), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_SERR_ERR_CODE, 0U, 
15U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_CTUPAR, 16U, 16U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_MULTI, 17U, 17U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_AV, 58U, 58U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_MV, 59U, 59U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_EN, 60U, 60U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_UC, 61U, 61U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_OVF, 62U, 62U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_VAL, 63U, 63U), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_SRC, 0, 7), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_ID, 8, 15), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_DATA, 16, 26), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_CMD, 32, 35), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_ADDR, 36, 45), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_SRC, 0U, 7U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_ID, 8U, 15U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_DATA, 16U, 26U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_CMD, 32U, 35U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_ADDR, 36U, 45U), - TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_CTRL_EN_CTUPAR, 0, 0), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_CTRL_EN_CTUPAR, 0U, 0U), } tegra_ari_mca_aserr5_bitmasks_t; +typedef enum { + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_SERR_ERR_CODE, 0U, 15U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_AV, 58U, 58U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_MV, 59U, 59U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_EN, 60U, 60U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_UC, 61U, 61U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_OVF, 62U, 62U), + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_VAL, 63U, 63U), + + TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_ADDR_TBD_INFO, 0U, 63U), +} tegra_ari_mca_serr1_bitmasks_t; + #undef TEGRA_ARI_ENUM_MASK_LSB_MSB typedef enum { - TEGRA_NVG_CHANNEL_PMIC = 0, - TEGRA_NVG_CHANNEL_POWER_PERF = 1, - TEGRA_NVG_CHANNEL_POWER_MODES = 2, - TEGRA_NVG_CHANNEL_WAKE_TIME = 3, - TEGRA_NVG_CHANNEL_CSTATE_INFO = 4, - TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6 = 5, - TEGRA_NVG_CHANNEL_CROSSOVER_CC1_CC6 = 6, - TEGRA_NVG_CHANNEL_CROSSOVER_CC1_CC7 = 7, - TEGRA_NVG_CHANNEL_CROSSOVER_CCP1_CCP3 = 8, /* obsoleted */ - TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC2 = 9, /* obsoleted */ - TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC3 = 10, /* obsoleted */ - TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC4 = 11, /* obsoleted */ - TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC7 = 12, /* obsoleted */ - TEGRA_NVG_CHANNEL_CROSSOVER_SC0_SC7 = 12, - TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR = 13, - TEGRA_NVG_CHANNEL_CSTATE_STATS_SC7_ENTRIES = 14, - TEGRA_NVG_CHANNEL_CSTATE_STATS_SC4_ENTRIES = 15, /* obsoleted */ - TEGRA_NVG_CHANNEL_CSTATE_STATS_SC3_ENTRIES = 16, /* obsoleted */ - TEGRA_NVG_CHANNEL_CSTATE_STATS_SC2_ENTRIES = 17, /* obsoleted */ - TEGRA_NVG_CHANNEL_CSTATE_STATS_CCP3_ENTRIES = 18, /* obsoleted */ - TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_CC6_ENTRIES = 19, - TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_CC7_ENTRIES = 20, - TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_CC6_ENTRIES = 21, - TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_CC7_ENTRIES = 22, - TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_0_C6_ENTRIES = 23, - TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_1_C6_ENTRIES = 24, - TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_2_C6_ENTRIES = 
25, /* Reserved (for Denver15 core 2) */ - TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_3_C6_ENTRIES = 26, /* Reserved (for Denver15 core 3) */ - TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_0_C7_ENTRIES = 27, - TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_1_C7_ENTRIES = 28, - TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_2_C7_ENTRIES = 29, /* Reserved (for Denver15 core 2) */ - TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_3_C7_ENTRIES = 30, /* Reserved (for Denver15 core 3) */ - TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_0_C7_ENTRIES = 31, - TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_1_C7_ENTRIES = 32, - TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_2_C7_ENTRIES = 33, - TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_3_C7_ENTRIES = 34, - TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_0 = 35, - TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_1 = 36, - TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_2 = 37, /* Reserved (for Denver15 core 2) */ - TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_3 = 38, /* Reserved (for Denver15 core 3) */ - TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_0 = 39, - TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_1 = 40, - TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_2 = 41, - TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_3 = 42, - TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED = 43, - TEGRA_NVG_CHANNEL_ONLINE_CORE = 44, - TEGRA_NVG_CHANNEL_CC3_CTRL = 45, - TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC1 = 46, /* obsoleted */ + TEGRA_NVG_CHANNEL_PMIC = 0U, + TEGRA_NVG_CHANNEL_POWER_PERF = 1U, + TEGRA_NVG_CHANNEL_POWER_MODES = 2U, + TEGRA_NVG_CHANNEL_WAKE_TIME = 3U, + TEGRA_NVG_CHANNEL_CSTATE_INFO = 4U, + TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6 = 5U, + TEGRA_NVG_CHANNEL_CROSSOVER_CC1_CC6 = 6U, + TEGRA_NVG_CHANNEL_CROSSOVER_CC1_CC7 = 7U, + TEGRA_NVG_CHANNEL_CROSSOVER_CCP1_CCP3 = 8U, /* obsoleted */ + TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC2 = 9U, /* obsoleted */ + TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC3 = 10U, /* obsoleted */ + TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC4 = 11U, /* obsoleted */ + TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC7 = 12U, /* obsoleted */ + TEGRA_NVG_CHANNEL_CROSSOVER_SC0_SC7 = 12U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR = 13U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_SC7_ENTRIES = 14U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_SC4_ENTRIES = 15U, /* obsoleted */ + TEGRA_NVG_CHANNEL_CSTATE_STATS_SC3_ENTRIES = 16U, /* obsoleted */ + TEGRA_NVG_CHANNEL_CSTATE_STATS_SC2_ENTRIES = 17U, /* obsoleted */ + TEGRA_NVG_CHANNEL_CSTATE_STATS_CCP3_ENTRIES = 18U, /* obsoleted */ + TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_CC6_ENTRIES = 19U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_CC7_ENTRIES = 20U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_CC6_ENTRIES = 21U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_CC7_ENTRIES = 22U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_0_C6_ENTRIES = 23U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_1_C6_ENTRIES = 24U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_2_C6_ENTRIES = 25U, /* Reserved (for Denver15 core 2) */ + TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_3_C6_ENTRIES = 26U, /* Reserved (for Denver15 core 3) */ + TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_0_C7_ENTRIES = 27U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_1_C7_ENTRIES = 28U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_2_C7_ENTRIES = 29U, /* Reserved (for Denver15 core 2) */ + TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_3_C7_ENTRIES = 30U, /* Reserved (for Denver15 core 3) */ + TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_0_C7_ENTRIES = 31U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_1_C7_ENTRIES = 32U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_2_C7_ENTRIES = 33U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_3_C7_ENTRIES = 34U, + 
TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_0 = 35U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_1 = 36U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_2 = 37U, /* Reserved (for Denver15 core 2) */ + TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_3 = 38U, /* Reserved (for Denver15 core 3) */ + TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_0 = 39U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_1 = 40U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_2 = 41U, + TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_3 = 42U, + TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED = 43U, + TEGRA_NVG_CHANNEL_ONLINE_CORE = 44U, + TEGRA_NVG_CHANNEL_CC3_CTRL = 45U, + TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC1 = 46U, /* obsoleted */ TEGRA_NVG_CHANNEL_LAST_INDEX, } tegra_nvg_channel_id_t; diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c index 7597c12b6..7f711a726 100644 --- a/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c +++ b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include @@ -483,7 +483,7 @@ void ari_misc_ccplex(uint32_t ari_base, uint32_t index, uint32_t value) * used to enable/disable coresight clock gating. */ - if ((index > TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL) || + if ((index > TEGRA_ARI_MISC_CCPLEX_EDBGREQ) || ((index == TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL) && (value > 1))) { ERROR("%s: invalid parameters \n", __func__); diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c index 3a0edfb9e..0489f7943 100644 --- a/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c +++ b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -491,7 +492,7 @@ void mce_verify_firmware_version(void) uint32_t major, minor; /* - * MCE firmware is not running on simulation platforms. + * MCE firmware is not supported on simulation platforms. */ if (tegra_platform_is_emulation()) return; diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c index 25479a24b..7ddafcb94 100644 --- a/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c +++ b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include diff --git a/plat/nvidia/tegra/soc/t186/plat_memctrl.c b/plat/nvidia/tegra/soc/t186/plat_memctrl.c new file mode 100644 index 000000000..6fabaf2ee --- /dev/null +++ b/plat/nvidia/tegra/soc/t186/plat_memctrl.c @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include + +/******************************************************************************* + * Array to hold stream_id override config register offsets + ******************************************************************************/ +const static uint32_t tegra186_streamid_override_regs[] = { + MC_STREAMID_OVERRIDE_CFG_PTCR, + MC_STREAMID_OVERRIDE_CFG_AFIR, + MC_STREAMID_OVERRIDE_CFG_HDAR, + MC_STREAMID_OVERRIDE_CFG_HOST1XDMAR, + MC_STREAMID_OVERRIDE_CFG_NVENCSRD, + MC_STREAMID_OVERRIDE_CFG_SATAR, + MC_STREAMID_OVERRIDE_CFG_MPCORER, + MC_STREAMID_OVERRIDE_CFG_NVENCSWR, + MC_STREAMID_OVERRIDE_CFG_AFIW, + MC_STREAMID_OVERRIDE_CFG_HDAW, + MC_STREAMID_OVERRIDE_CFG_MPCOREW, + MC_STREAMID_OVERRIDE_CFG_SATAW, + MC_STREAMID_OVERRIDE_CFG_ISPRA, + MC_STREAMID_OVERRIDE_CFG_ISPWA, + MC_STREAMID_OVERRIDE_CFG_ISPWB, + MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTR, + MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTW, + MC_STREAMID_OVERRIDE_CFG_XUSB_DEVR, + MC_STREAMID_OVERRIDE_CFG_XUSB_DEVW, + MC_STREAMID_OVERRIDE_CFG_TSECSRD, + MC_STREAMID_OVERRIDE_CFG_TSECSWR, + MC_STREAMID_OVERRIDE_CFG_GPUSRD, + MC_STREAMID_OVERRIDE_CFG_GPUSWR, + MC_STREAMID_OVERRIDE_CFG_SDMMCRA, + MC_STREAMID_OVERRIDE_CFG_SDMMCRAA, + MC_STREAMID_OVERRIDE_CFG_SDMMCR, + MC_STREAMID_OVERRIDE_CFG_SDMMCRAB, + MC_STREAMID_OVERRIDE_CFG_SDMMCWA, + MC_STREAMID_OVERRIDE_CFG_SDMMCWAA, + MC_STREAMID_OVERRIDE_CFG_SDMMCW, + MC_STREAMID_OVERRIDE_CFG_SDMMCWAB, + MC_STREAMID_OVERRIDE_CFG_VICSRD, + MC_STREAMID_OVERRIDE_CFG_VICSWR, + MC_STREAMID_OVERRIDE_CFG_VIW, + MC_STREAMID_OVERRIDE_CFG_NVDECSRD, + MC_STREAMID_OVERRIDE_CFG_NVDECSWR, + MC_STREAMID_OVERRIDE_CFG_APER, + MC_STREAMID_OVERRIDE_CFG_APEW, + MC_STREAMID_OVERRIDE_CFG_NVJPGSRD, + MC_STREAMID_OVERRIDE_CFG_NVJPGSWR, + MC_STREAMID_OVERRIDE_CFG_SESRD, + MC_STREAMID_OVERRIDE_CFG_SESWR, + MC_STREAMID_OVERRIDE_CFG_ETRR, + MC_STREAMID_OVERRIDE_CFG_ETRW, + MC_STREAMID_OVERRIDE_CFG_TSECSRDB, + MC_STREAMID_OVERRIDE_CFG_TSECSWRB, + MC_STREAMID_OVERRIDE_CFG_GPUSRD2, + MC_STREAMID_OVERRIDE_CFG_GPUSWR2, + MC_STREAMID_OVERRIDE_CFG_AXISR, + MC_STREAMID_OVERRIDE_CFG_AXISW, + MC_STREAMID_OVERRIDE_CFG_EQOSR, + MC_STREAMID_OVERRIDE_CFG_EQOSW, + MC_STREAMID_OVERRIDE_CFG_UFSHCR, + MC_STREAMID_OVERRIDE_CFG_UFSHCW, + MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR, + MC_STREAMID_OVERRIDE_CFG_BPMPR, + MC_STREAMID_OVERRIDE_CFG_BPMPW, + MC_STREAMID_OVERRIDE_CFG_BPMPDMAR, + MC_STREAMID_OVERRIDE_CFG_BPMPDMAW, + MC_STREAMID_OVERRIDE_CFG_AONR, + MC_STREAMID_OVERRIDE_CFG_AONW, + MC_STREAMID_OVERRIDE_CFG_AONDMAR, + MC_STREAMID_OVERRIDE_CFG_AONDMAW, + MC_STREAMID_OVERRIDE_CFG_SCER, + MC_STREAMID_OVERRIDE_CFG_SCEW, + MC_STREAMID_OVERRIDE_CFG_SCEDMAR, + MC_STREAMID_OVERRIDE_CFG_SCEDMAW, + MC_STREAMID_OVERRIDE_CFG_APEDMAR, + 
MC_STREAMID_OVERRIDE_CFG_APEDMAW, + MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR1, + MC_STREAMID_OVERRIDE_CFG_VICSRD1, + MC_STREAMID_OVERRIDE_CFG_NVDECSRD1 +}; + +/******************************************************************************* + * Array to hold the security configs for stream IDs + ******************************************************************************/ +const static mc_streamid_security_cfg_t tegra186_streamid_sec_cfgs[] = { + mc_make_sec_cfg(SCEW, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(AFIR, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(AFIW, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(NVDISPLAYR1, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(XUSB_DEVR, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(VICSRD1, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(NVENCSWR, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(TSECSRDB, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(AXISW, SECURE, NO_OVERRIDE, DISABLE), + mc_make_sec_cfg(SDMMCWAB, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(AONDMAW, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(GPUSWR2, SECURE, NO_OVERRIDE, DISABLE), + mc_make_sec_cfg(SATAW, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(UFSHCW, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(SDMMCR, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(SCEDMAW, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(UFSHCR, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(SDMMCWAA, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(SESWR, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(MPCORER, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(PTCR, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(BPMPW, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(ETRW, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(GPUSRD, SECURE, NO_OVERRIDE, DISABLE), + mc_make_sec_cfg(VICSWR, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(SCEDMAR, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(HDAW, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(ISPWA, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(EQOSW, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(XUSB_HOSTW, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(TSECSWR, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(SDMMCRAA, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(VIW, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(AXISR, SECURE, NO_OVERRIDE, DISABLE), + mc_make_sec_cfg(SDMMCW, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(BPMPDMAW, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(ISPRA, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(NVDECSWR, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(XUSB_DEVW, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(NVDECSRD, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(MPCOREW, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(NVDISPLAYR, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(BPMPDMAR, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(NVJPGSWR, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(NVDECSRD1, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(TSECSRD, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(NVJPGSRD, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(SDMMCWA, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(SCER, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(XUSB_HOSTR, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(VICSRD, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(AONDMAR, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(AONW, NON_SECURE, 
NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(SDMMCRA, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(HOST1XDMAR, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(EQOSR, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(SATAR, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(BPMPR, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(HDAR, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(SDMMCRAB, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(ETRR, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(AONR, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(SESRD, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(NVENCSRD, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(GPUSWR, SECURE, NO_OVERRIDE, DISABLE), + mc_make_sec_cfg(TSECSWRB, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(ISPWB, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(GPUSRD2, SECURE, NO_OVERRIDE, DISABLE), + mc_make_sec_cfg(APEDMAW, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(APER, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(APEW, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(APEDMAR, NON_SECURE, NO_OVERRIDE, ENABLE), +}; + +/******************************************************************************* + * Array to hold the transaction override configs + ******************************************************************************/ +const static mc_txn_override_cfg_t tegra186_txn_override_cfgs[] = { + mc_make_txn_override_cfg(BPMPW, CGID_TAG_ADR), + mc_make_txn_override_cfg(EQOSW, CGID_TAG_ADR), + mc_make_txn_override_cfg(NVJPGSWR, CGID_TAG_ADR), + mc_make_txn_override_cfg(SDMMCWAA, CGID_TAG_ADR), + mc_make_txn_override_cfg(MPCOREW, CGID_TAG_ADR), + mc_make_txn_override_cfg(SCEDMAW, CGID_TAG_ADR), + mc_make_txn_override_cfg(SDMMCW, CGID_TAG_ADR), + mc_make_txn_override_cfg(AXISW, CGID_TAG_ADR), + mc_make_txn_override_cfg(TSECSWR, CGID_TAG_ADR), + mc_make_txn_override_cfg(GPUSWR, CGID_TAG_ADR), + mc_make_txn_override_cfg(XUSB_HOSTW, CGID_TAG_ADR), + mc_make_txn_override_cfg(TSECSWRB, CGID_TAG_ADR), + mc_make_txn_override_cfg(GPUSWR2, CGID_TAG_ADR), + mc_make_txn_override_cfg(AONDMAW, CGID_TAG_ADR), + mc_make_txn_override_cfg(AONW, CGID_TAG_ADR), + mc_make_txn_override_cfg(SESWR, CGID_TAG_ADR), + mc_make_txn_override_cfg(BPMPDMAW, CGID_TAG_ADR), + mc_make_txn_override_cfg(SDMMCWA, CGID_TAG_ADR), + mc_make_txn_override_cfg(HDAW, CGID_TAG_ADR), + mc_make_txn_override_cfg(NVDECSWR, CGID_TAG_ADR), + mc_make_txn_override_cfg(UFSHCW, CGID_TAG_ADR), + mc_make_txn_override_cfg(SATAW, CGID_TAG_ADR), + mc_make_txn_override_cfg(ETRW, CGID_TAG_ADR), + mc_make_txn_override_cfg(VICSWR, CGID_TAG_ADR), + mc_make_txn_override_cfg(NVENCSWR, CGID_TAG_ADR), + mc_make_txn_override_cfg(SDMMCWAB, CGID_TAG_ADR), + mc_make_txn_override_cfg(ISPWB, CGID_TAG_ADR), + mc_make_txn_override_cfg(APEW, CGID_TAG_ADR), + mc_make_txn_override_cfg(XUSB_DEVW, CGID_TAG_ADR), + mc_make_txn_override_cfg(AFIW, CGID_TAG_ADR), + mc_make_txn_override_cfg(SCEW, CGID_TAG_ADR), +}; + +/******************************************************************************* + * Struct to hold the memory controller settings + ******************************************************************************/ +static tegra_mc_settings_t tegra186_mc_settings = { + .streamid_override_cfg = tegra186_streamid_override_regs, + .num_streamid_override_cfgs = ARRAY_SIZE(tegra186_streamid_override_regs), + .streamid_security_cfg = tegra186_streamid_sec_cfgs, + .num_streamid_security_cfgs = ARRAY_SIZE(tegra186_streamid_sec_cfgs), + .txn_override_cfg = 
tegra186_txn_override_cfgs, + .num_txn_override_cfgs = ARRAY_SIZE(tegra186_txn_override_cfgs) +}; + +/******************************************************************************* + * Handler to return the pointer to the memory controller's settings struct + ******************************************************************************/ +tegra_mc_settings_t *tegra_get_mc_settings(void) +{ + return &tegra186_mc_settings; +} diff --git a/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c index a170b9947..9790b817a 100644 --- a/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c +++ b/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c @@ -46,11 +46,8 @@ extern void prepare_cpu_pwr_dwn(void); extern void tegra186_cpu_reset_handler(void); -extern uint32_t __tegra186_cpu_reset_handler_data, - __tegra186_cpu_reset_handler_end; - -/* TZDRAM offset for saving SMMU context */ -#define TEGRA186_SMMU_CTX_OFFSET 16 +extern uint32_t __tegra186_cpu_reset_handler_end, + __tegra186_smmu_context; /* state id mask */ #define TEGRA186_STATE_ID_MASK 0xF @@ -151,9 +148,8 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state) /* save SMMU context to TZDRAM */ smmu_ctx_base = params_from_bl2->tzdram_base + - ((uintptr_t)&__tegra186_cpu_reset_handler_data - - (uintptr_t)tegra186_cpu_reset_handler) + - TEGRA186_SMMU_CTX_OFFSET; + ((uintptr_t)&__tegra186_smmu_context - + (uintptr_t)tegra186_cpu_reset_handler); tegra_smmu_save_context((uintptr_t)smmu_ctx_base); /* Prepare for system suspend */ @@ -260,7 +256,7 @@ int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state) plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); unsigned int stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] & TEGRA186_STATE_ID_MASK; - uint32_t val; + uint64_t val; if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { /* diff --git a/plat/nvidia/tegra/soc/t186/plat_setup.c b/plat/nvidia/tegra/soc/t186/plat_setup.c index e848eabb7..e165df1a8 100644 --- a/plat/nvidia/tegra/soc/t186/plat_setup.c +++ b/plat/nvidia/tegra/soc/t186/plat_setup.c @@ -49,6 +49,13 @@ DEFINE_RENAME_SYSREG_RW_FUNCS(l2ctlr_el1, L2CTLR_EL1) extern uint64_t tegra_enable_l2_ecc_parity_prot; +/******************************************************************************* + * Tegra186 CPU numbers in cluster #0 + ******************************************************************************* + */ +#define TEGRA186_CLUSTER0_CORE2 2 +#define TEGRA186_CLUSTER0_CORE3 3 + /******************************************************************************* * The Tegra power domain tree has a single system level power domain i.e. a * single root node. 
The first entry in the power domain descriptor specifies @@ -102,7 +109,9 @@ static const mmap_region_t tegra_mmap[] = { MT_DEVICE | MT_RW | MT_SECURE), MAP_REGION_FLAT(TEGRA_MMCRAB_BASE, 0x60000, /* 384KB */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_SMMU_BASE, 0x1000000, /* 64KB */ + MAP_REGION_FLAT(TEGRA_ARM_ACTMON_CTR_BASE, 0x20000, /* 128KB - ARM/Denver */ + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(TEGRA_SMMU0_BASE, 0x1000000, /* 64KB */ MT_DEVICE | MT_RW | MT_SECURE), {0} }; @@ -252,3 +261,40 @@ plat_params_from_bl2_t *plat_get_bl31_plat_params(void) return (plat_params_from_bl2_t *)(uintptr_t)val; } + +/******************************************************************************* + * This function implements a part of the critical interface between the psci + * generic layer and the platform that allows the former to query the platform + * to convert an MPIDR to a unique linear index. An error code (-1) is returned + * in case the MPIDR is invalid. + ******************************************************************************/ +int plat_core_pos_by_mpidr(u_register_t mpidr) +{ + unsigned int cluster_id, cpu_id, pos; + + cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK; + cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK; + + /* + * Validate cluster_id by checking whether it represents + * one of the two clusters present on the platform. + */ + if (cluster_id >= PLATFORM_CLUSTER_COUNT) + return PSCI_E_NOT_PRESENT; + + /* + * Validate cpu_id by checking whether it represents a CPU in + * one of the two clusters present on the platform. + */ + if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER) + return PSCI_E_NOT_PRESENT; + + /* calculate the core position */ + pos = cpu_id + (cluster_id << 2); + + /* check for non-existent CPUs */ + if (pos == TEGRA186_CLUSTER0_CORE2 || pos == TEGRA186_CLUSTER0_CORE3) + return PSCI_E_NOT_PRESENT; + + return pos; +} diff --git a/plat/nvidia/tegra/soc/t186/plat_sip_calls.c b/plat/nvidia/tegra/soc/t186/plat_sip_calls.c index 31e903eba..51d363244 100644 --- a/plat/nvidia/tegra/soc/t186/plat_sip_calls.c +++ b/plat/nvidia/tegra/soc/t186/plat_sip_calls.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -43,30 +44,35 @@ extern uint32_t tegra186_system_powerdn_state; +/******************************************************************************* + * Offset to read the ref_clk counter value + ******************************************************************************/ +#define REF_CLK_OFFSET 4 + /******************************************************************************* * Tegra186 SiP SMCs ******************************************************************************/ -#define TEGRA_SIP_NEW_VIDEOMEM_REGION 0x82000003 -#define TEGRA_SIP_SYSTEM_SHUTDOWN_STATE 0x82FFFE01 -#define TEGRA_SIP_MCE_CMD_ENTER_CSTATE 0x82FFFF00 -#define TEGRA_SIP_MCE_CMD_UPDATE_CSTATE_INFO 0x82FFFF01 -#define TEGRA_SIP_MCE_CMD_UPDATE_CROSSOVER_TIME 0x82FFFF02 -#define TEGRA_SIP_MCE_CMD_READ_CSTATE_STATS 0x82FFFF03 -#define TEGRA_SIP_MCE_CMD_WRITE_CSTATE_STATS 0x82FFFF04 -#define TEGRA_SIP_MCE_CMD_IS_SC7_ALLOWED 0x82FFFF05 -#define TEGRA_SIP_MCE_CMD_ONLINE_CORE 0x82FFFF06 -#define TEGRA_SIP_MCE_CMD_CC3_CTRL 0x82FFFF07 -#define TEGRA_SIP_MCE_CMD_ECHO_DATA 0x82FFFF08 -#define TEGRA_SIP_MCE_CMD_READ_VERSIONS 0x82FFFF09 -#define TEGRA_SIP_MCE_CMD_ENUM_FEATURES 0x82FFFF0A -#define TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE_TRBITS 0x82FFFF0B -#define TEGRA_SIP_MCE_CMD_ENUM_READ_MCA 0x82FFFF0C -#define 
TEGRA_SIP_MCE_CMD_ENUM_WRITE_MCA 0x82FFFF0D -#define TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE 0x82FFFF0E -#define TEGRA_SIP_MCE_CMD_ROC_CLEAN_CACHE 0x82FFFF0F -#define TEGRA_SIP_MCE_CMD_ENABLE_LATIC 0x82FFFF10 -#define TEGRA_SIP_MCE_CMD_UNCORE_PERFMON_REQ 0x82FFFF11 -#define TEGRA_SIP_MCE_CMD_MISC_CCPLEX 0x82FFFF12 +#define TEGRA_SIP_SYSTEM_SHUTDOWN_STATE 0xC2FFFE01 +#define TEGRA_SIP_GET_ACTMON_CLK_COUNTERS 0xC2FFFE02 +#define TEGRA_SIP_MCE_CMD_ENTER_CSTATE 0xC2FFFF00 +#define TEGRA_SIP_MCE_CMD_UPDATE_CSTATE_INFO 0xC2FFFF01 +#define TEGRA_SIP_MCE_CMD_UPDATE_CROSSOVER_TIME 0xC2FFFF02 +#define TEGRA_SIP_MCE_CMD_READ_CSTATE_STATS 0xC2FFFF03 +#define TEGRA_SIP_MCE_CMD_WRITE_CSTATE_STATS 0xC2FFFF04 +#define TEGRA_SIP_MCE_CMD_IS_SC7_ALLOWED 0xC2FFFF05 +#define TEGRA_SIP_MCE_CMD_ONLINE_CORE 0xC2FFFF06 +#define TEGRA_SIP_MCE_CMD_CC3_CTRL 0xC2FFFF07 +#define TEGRA_SIP_MCE_CMD_ECHO_DATA 0xC2FFFF08 +#define TEGRA_SIP_MCE_CMD_READ_VERSIONS 0xC2FFFF09 +#define TEGRA_SIP_MCE_CMD_ENUM_FEATURES 0xC2FFFF0A +#define TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE_TRBITS 0xC2FFFF0B +#define TEGRA_SIP_MCE_CMD_ENUM_READ_MCA 0xC2FFFF0C +#define TEGRA_SIP_MCE_CMD_ENUM_WRITE_MCA 0xC2FFFF0D +#define TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE 0xC2FFFF0E +#define TEGRA_SIP_MCE_CMD_ROC_CLEAN_CACHE 0xC2FFFF0F +#define TEGRA_SIP_MCE_CMD_ENABLE_LATIC 0xC2FFFF10 +#define TEGRA_SIP_MCE_CMD_UNCORE_PERFMON_REQ 0xC2FFFF11 +#define TEGRA_SIP_MCE_CMD_MISC_CCPLEX 0xC2FFFF12 /******************************************************************************* * This function is responsible for handling all T186 SiP calls @@ -81,9 +87,23 @@ int plat_sip_handler(uint32_t smc_fid, uint64_t flags) { int mce_ret; + int impl, cpu; + uint32_t base, core_clk_ctr, ref_clk_ctr; + + if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) { + /* 32-bit function, clear top parameter bits */ + + x1 = (uint32_t)x1; + x2 = (uint32_t)x2; + x3 = (uint32_t)x3; + } + + /* + * Convert SMC FID to SMC64, to support SMC32/SMC64 configurations + */ + smc_fid |= (SMC_64 << FUNCID_CC_SHIFT); switch (smc_fid) { - /* * Micro Coded Engine (MCE) commands reside in the 0x82FFFF00 - * 0x82FFFFFF SiP SMC space @@ -112,7 +132,8 @@ int plat_sip_handler(uint32_t smc_fid, /* execute the command and store the result */ mce_ret = mce_command_handler(smc_fid, x1, x2, x3); - write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X0, mce_ret); + write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X0, + (uint64_t)mce_ret); return 0; @@ -143,6 +164,38 @@ int plat_sip_handler(uint32_t smc_fid, return 0; + /* + * This function ID reads the Activity monitor's core/ref clock + * counter values for a core/cluster. + * + * x1 = MPIDR of the target core + * x2 = MIDR of the target core + */ + case TEGRA_SIP_GET_ACTMON_CLK_COUNTERS: + + cpu = (uint32_t)x1 & MPIDR_CPU_MASK; + impl = ((uint32_t)x2 >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; + + /* sanity check target CPU number */ + if (cpu > PLATFORM_MAX_CPUS_PER_CLUSTER) + return -EINVAL; + + /* get the base address for the current CPU */ + base = (impl == DENVER_IMPL) ? 
TEGRA_DENVER_ACTMON_CTR_BASE : + TEGRA_ARM_ACTMON_CTR_BASE; + + /* read the clock counter values */ + core_clk_ctr = mmio_read_32(base + (8 * cpu)); + ref_clk_ctr = mmio_read_32(base + (8 * cpu) + REF_CLK_OFFSET); + + /* return the counter values as two different parameters */ + write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X1, + (uint64_t)core_clk_ctr); + write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X2, + (uint64_t)ref_clk_ctr); + + return 0; + default: break; } diff --git a/plat/nvidia/tegra/soc/t186/drivers/smmu/smmu.c b/plat/nvidia/tegra/soc/t186/plat_smmu.c similarity index 57% rename from plat/nvidia/tegra/soc/t186/drivers/smmu/smmu.c rename to plat/nvidia/tegra/soc/t186/plat_smmu.c index bca6f2ec3..4a8e1bee4 100644 --- a/plat/nvidia/tegra/soc/t186/drivers/smmu/smmu.c +++ b/plat/nvidia/tegra/soc/t186/plat_smmu.c @@ -1,143 +1,33 @@ /* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. */ -#include #include -#include -#include -#include #include -#include -#include +#include -typedef struct smmu_regs { - uint32_t reg; - uint32_t val; -} smmu_regs_t; - -#define mc_make_sid_override_cfg(name) \ - { \ - .reg = TEGRA_MC_STREAMID_BASE + MC_STREAMID_OVERRIDE_CFG_ ## name, \ - .val = 0x00000000, \ - } - -#define mc_make_sid_security_cfg(name) \ - { \ - .reg = TEGRA_MC_STREAMID_BASE + MC_STREAMID_SECURITY_CFG_ ## name, \ - .val = 0x00000000, \ - } - -#define smmu_make_gnsr0_sec_cfg(name) \ - { \ - .reg = TEGRA_SMMU_BASE + SMMU_GNSR0_ ## name, \ - .val = 0x00000000, \ - } - -/* - * On ARM-SMMU, conditional offset to access secure aliases of non-secure registers - * is 0x400. So, add it to register address - */ -#define smmu_make_gnsr0_nsec_cfg(name) \ - { \ - .reg = TEGRA_SMMU_BASE + 0x400 + SMMU_GNSR0_ ## name, \ - .val = 0x00000000, \ - } - -#define smmu_make_gnsr0_smr_cfg(n) \ - { \ - .reg = TEGRA_SMMU_BASE + SMMU_GNSR0_SMR ## n, \ - .val = 0x00000000, \ - } - -#define smmu_make_gnsr0_s2cr_cfg(n) \ - { \ - .reg = TEGRA_SMMU_BASE + SMMU_GNSR0_S2CR ## n, \ - .val = 0x00000000, \ - } - -#define smmu_make_gnsr1_cbar_cfg(n) \ - { \ - .reg = TEGRA_SMMU_BASE + (1 << PGSHIFT) + SMMU_GNSR1_CBAR ## n, \ - .val = 0x00000000, \ - } - -#define smmu_make_gnsr1_cba2r_cfg(n) \ - { \ - .reg = TEGRA_SMMU_BASE + (1 << PGSHIFT) + SMMU_GNSR1_CBA2R ## n, \ - .val = 0x00000000, \ - } - -#define make_smmu_cb_cfg(name, n) \ - { \ - .reg = TEGRA_SMMU_BASE + (CB_SIZE >> 1) + (n * (1 << PGSHIFT)) \ - + SMMU_CBn_ ## name, \ - .val = 0x00000000, \ - } - -#define smmu_make_smrg_group(n) \ - smmu_make_gnsr0_smr_cfg(n), \ - smmu_make_gnsr0_s2cr_cfg(n), \ - smmu_make_gnsr1_cbar_cfg(n), \ - smmu_make_gnsr1_cba2r_cfg(n) /* don't put "," here. */ - -#define smmu_make_cb_group(n) \ - make_smmu_cb_cfg(SCTLR, n), \ - make_smmu_cb_cfg(TCR2, n), \ - make_smmu_cb_cfg(TTBR0_LO, n), \ - make_smmu_cb_cfg(TTBR0_HI, n), \ - make_smmu_cb_cfg(TCR, n), \ - make_smmu_cb_cfg(PRRR_MAIR0, n),\ - make_smmu_cb_cfg(FSR, n), \ - make_smmu_cb_cfg(FAR_LO, n), \ - make_smmu_cb_cfg(FAR_HI, n), \ - make_smmu_cb_cfg(FSYNR0, n) /* don't put "," here. 
*/ - -#define smmu_bypass_cfg \ - { \ - .reg = TEGRA_MC_BASE + MC_SMMU_BYPASS_CONFIG, \ - .val = 0x00000000, \ - } - -#define _START_OF_TABLE_ \ - { \ - .reg = 0xCAFE05C7, \ - .val = 0x00000000, \ - } - -#define _END_OF_TABLE_ \ - { \ - .reg = 0xFFFFFFFF, \ - .val = 0xFFFFFFFF, \ - } - -static __attribute__((aligned(16))) smmu_regs_t smmu_ctx_regs[] = { +/******************************************************************************* + * Array to hold SMMU context for Tegra186 + ******************************************************************************/ +static __attribute__((aligned(16))) smmu_regs_t tegra186_smmu_context[] = { _START_OF_TABLE_, mc_make_sid_security_cfg(SCEW), mc_make_sid_security_cfg(AFIR), @@ -424,83 +314,13 @@ static __attribute__((aligned(16))) smmu_regs_t smmu_ctx_regs[] = { _END_OF_TABLE_, }; -/* - * Save SMMU settings before "System Suspend" to TZDRAM - */ -void tegra_smmu_save_context(uint64_t smmu_ctx_addr) +/******************************************************************************* + * Handler to return the pointer to the SMMU's context struct + ******************************************************************************/ +smmu_regs_t *plat_get_smmu_ctx(void) { - uint32_t i; -#if DEBUG - plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); - uint64_t tzdram_base = params_from_bl2->tzdram_base; - uint64_t tzdram_end = tzdram_base + params_from_bl2->tzdram_size; - uint32_t reg_id1, pgshift, cb_size; - - /* sanity check SMMU settings c*/ - reg_id1 = mmio_read_32((TEGRA_SMMU_BASE + SMMU_GNSR0_IDR1)); - pgshift = (reg_id1 & ID1_PAGESIZE) ? 16 : 12; - cb_size = (2 << pgshift) * \ - (1 << (((reg_id1 >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1)); - - assert(!((pgshift != PGSHIFT) || (cb_size != CB_SIZE))); -#endif - - assert((smmu_ctx_addr >= tzdram_base) && (smmu_ctx_addr <= tzdram_end)); - /* index of _END_OF_TABLE_ */ - smmu_ctx_regs[0].val = ARRAY_SIZE(smmu_ctx_regs) - 1; + tegra186_smmu_context[0].val = ARRAY_SIZE(tegra186_smmu_context) - 1; - /* save SMMU register values */ - for (i = 1; i < ARRAY_SIZE(smmu_ctx_regs) - 1; i++) - smmu_ctx_regs[i].val = mmio_read_32(smmu_ctx_regs[i].reg); - - /* Save SMMU config settings */ - memcpy16((void *)(uintptr_t)smmu_ctx_addr, (void *)smmu_ctx_regs, - sizeof(smmu_ctx_regs)); - - /* save the SMMU table address */ - mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV11_LO, - (uint32_t)smmu_ctx_addr); - mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV11_HI, - (uint32_t)(smmu_ctx_addr >> 32)); -} - -#define SMMU_NUM_CONTEXTS 64 -#define SMMU_CONTEXT_BANK_MAX_IDX 64 - -/* - * Init SMMU during boot or "System Suspend" exit - */ -void tegra_smmu_init(void) -{ - uint32_t val, i, ctx_base; - - /* Program the SMMU pagesize and reset CACHE_LOCK bit */ - val = tegra_smmu_read_32(SMMU_GSR0_SECURE_ACR); - val |= SMMU_GSR0_PGSIZE_64K; - val &= ~SMMU_ACR_CACHE_LOCK_ENABLE_BIT; - tegra_smmu_write_32(SMMU_GSR0_SECURE_ACR, val); - - /* reset CACHE LOCK bit for NS Aux. Config. Register */ - val = tegra_smmu_read_32(SMMU_GNSR_ACR); - val &= ~SMMU_ACR_CACHE_LOCK_ENABLE_BIT; - tegra_smmu_write_32(SMMU_GNSR_ACR, val); - - /* disable TCU prefetch for all contexts */ - ctx_base = (SMMU_GSR0_PGSIZE_64K * SMMU_NUM_CONTEXTS) + SMMU_CBn_ACTLR; - for (i = 0; i < SMMU_CONTEXT_BANK_MAX_IDX; i++) { - val = tegra_smmu_read_32(ctx_base + (SMMU_GSR0_PGSIZE_64K * i)); - val &= ~SMMU_CBn_ACTLR_CPRE_BIT; - tegra_smmu_write_32(ctx_base + (SMMU_GSR0_PGSIZE_64K * i), val); - } - - /* set CACHE LOCK bit for NS Aux. Config. 
Register */ - val = tegra_smmu_read_32(SMMU_GNSR_ACR); - val |= SMMU_ACR_CACHE_LOCK_ENABLE_BIT; - tegra_smmu_write_32(SMMU_GNSR_ACR, val); - - /* set CACHE LOCK bit for S Aux. Config. Register */ - val = tegra_smmu_read_32(SMMU_GSR0_SECURE_ACR); - val |= SMMU_ACR_CACHE_LOCK_ENABLE_BIT; - tegra_smmu_write_32(SMMU_GSR0_SECURE_ACR, val); + return tegra186_smmu_context; } diff --git a/plat/nvidia/tegra/soc/t186/plat_trampoline.S b/plat/nvidia/tegra/soc/t186/plat_trampoline.S index 21393d9b9..ba696f397 100644 --- a/plat/nvidia/tegra/soc/t186/plat_trampoline.S +++ b/plat/nvidia/tegra/soc/t186/plat_trampoline.S @@ -94,6 +94,8 @@ endfunc tegra186_cpu_reset_handler __tegra186_cpu_reset_handler_data: .quad tegra_secure_entrypoint .quad __BL31_END__ - BL31_BASE + .globl __tegra186_smmu_context +__tegra186_smmu_context: .rept TEGRA186_SMMU_CTX_SIZE .quad 0 .endr diff --git a/plat/nvidia/tegra/soc/t186/platform_t186.mk b/plat/nvidia/tegra/soc/t186/platform_t186.mk index b62d47da0..979dcb129 100644 --- a/plat/nvidia/tegra/soc/t186/platform_t186.mk +++ b/plat/nvidia/tegra/soc/t186/platform_t186.mk @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. +# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: @@ -29,6 +29,9 @@ # # platform configs +ENABLE_AFI_DEVICE := 1 +$(eval $(call add_define,ENABLE_AFI_DEVICE)) + ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS := 1 $(eval $(call add_define,ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS)) @@ -38,6 +41,12 @@ $(eval $(call add_define,RELOCATE_TO_BL31_BASE)) ENABLE_CHIP_VERIFICATION_HARNESS := 0 $(eval $(call add_define,ENABLE_CHIP_VERIFICATION_HARNESS)) +ENABLE_SMMU_DEVICE := 1 +$(eval $(call add_define,ENABLE_SMMU_DEVICE)) + +NUM_SMMU_DEVICES := 1 +$(eval $(call add_define,NUM_SMMU_DEVICES)) + RESET_TO_BL31 := 1 PROGRAMMABLE_RESET_ADDRESS := 1 @@ -54,10 +63,10 @@ $(eval $(call add_define,PLATFORM_CLUSTER_COUNT)) PLATFORM_MAX_CPUS_PER_CLUSTER := 4 $(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER)) -MAX_XLAT_TABLES := 20 +MAX_XLAT_TABLES := 24 $(eval $(call add_define,MAX_XLAT_TABLES)) -MAX_MMAP_REGIONS := 20 +MAX_MMAP_REGIONS := 24 $(eval $(call add_define,MAX_MMAP_REGIONS)) # platform files @@ -65,14 +74,17 @@ PLAT_INCLUDES += -I${SOC_DIR}/drivers/include BL31_SOURCES += lib/cpus/aarch64/denver.S \ lib/cpus/aarch64/cortex_a57.S \ - ${COMMON_DIR}/drivers/memctrl/memctrl_v2.c \ + ${COMMON_DIR}/drivers/memctrl/memctrl_v2.c \ + ${COMMON_DIR}/drivers/smmu/smmu.c \ ${SOC_DIR}/drivers/mce/mce.c \ ${SOC_DIR}/drivers/mce/ari.c \ ${SOC_DIR}/drivers/mce/nvg.c \ ${SOC_DIR}/drivers/mce/aarch64/nvg_helpers.S \ - ${SOC_DIR}/drivers/smmu/smmu.c \ + ${SOC_DIR}/plat_memctrl.c \ ${SOC_DIR}/plat_psci_handlers.c \ ${SOC_DIR}/plat_setup.c \ ${SOC_DIR}/plat_secondary.c \ ${SOC_DIR}/plat_sip_calls.c \ + ${SOC_DIR}/plat_smmu.c \ ${SOC_DIR}/plat_trampoline.S + diff --git a/plat/rockchip/rk3399/drivers/dram/dfs.c b/plat/rockchip/rk3399/drivers/dram/dfs.c index f589a8ad5..267398ff8 100644 --- a/plat/rockchip/rk3399/drivers/dram/dfs.c +++ b/plat/rockchip/rk3399/drivers/dram/dfs.c @@ -445,7 +445,7 @@ static uint32_t get_pi_tdfi_phy_rdlat(struct dram_timing_t *pdram_timing, } else if (timing_config->dram_type == LPDDR3) { mem_delay_ps = 5500; } else { - printf("get_pi_tdfi_phy_rdlat:dramtype unsupport\n"); + NOTICE("get_pi_tdfi_phy_rdlat:dramtype unsupport\n"); return 0; } 
round_trip_ps = 1100 + 500 + mem_delay_ps + 500 + 600; @@ -2009,21 +2009,6 @@ static uint32_t prepare_ddr_timing(uint32_t mhz) return index; } -void print_dram_status_info(void) -{ - uint32_t *p; - uint32_t i; - - p = (uint32_t *) &rk3399_dram_status.timing_config; - INFO("rk3399_dram_status.timing_config:\n"); - for (i = 0; i < sizeof(struct timing_related_config) / 4; i++) - tf_printf("%u\n", p[i]); - p = (uint32_t *) &rk3399_dram_status.drv_odt_lp_cfg; - INFO("rk3399_dram_status.drv_odt_lp_cfg:\n"); - for (i = 0; i < sizeof(struct drv_odt_lp_config) / 4; i++) - tf_printf("%u\n", p[i]); -} - uint32_t ddr_set_rate(uint32_t hz) { uint32_t low_power, index, ddr_index; diff --git a/plat/xilinx/zynqmp/platform.mk b/plat/xilinx/zynqmp/platform.mk index 9d612dc2a..044d9c970 100644 --- a/plat/xilinx/zynqmp/platform.mk +++ b/plat/xilinx/zynqmp/platform.mk @@ -27,6 +27,7 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. +override ERRATA_A53_855873 := 1 override ENABLE_PLAT_COMPAT := 0 override PROGRAMMABLE_RESET_ADDRESS := 1 PSCI_EXTENDED_STATE_ID := 1 diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c index ff515cca2..e5ec5cd22 100644 --- a/services/spd/tspd/tspd_main.c +++ b/services/spd/tspd/tspd_main.c @@ -631,7 +631,7 @@ uint64_t tspd_smc_handler(uint32_t smc_fid, cm_el1_sysregs_context_restore(NON_SECURE); cm_set_next_eret_context(NON_SECURE); - SMC_RET0(handle); + SMC_RET1(handle, SMC_OK); /* * Request from non secure world to resume the preempted diff --git a/tools/fiptool/fiptool.c b/tools/fiptool/fiptool.c index 542a94666..0503696b8 100644 --- a/tools/fiptool/fiptool.c +++ b/tools/fiptool/fiptool.c @@ -646,7 +646,7 @@ static unsigned long get_image_align(char *arg) unsigned long align; errno = 0; - align = strtoul(arg, &endptr, 10); + align = strtoul(arg, &endptr, 0); if (*endptr != '\0' || !is_power_of_2(align) || errno != 0) log_errx("Invalid alignment: %s", arg);