Merge pull request #1644 from soby-mathew/sm/pie_proto

Position Independent Executable (PIE) Support
This commit is contained in:
Soby Mathew 2018-10-29 10:56:30 +00:00 committed by GitHub
commit cf0886e2f1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
32 changed files with 376 additions and 58 deletions

View File

@ -205,11 +205,6 @@ TF_CFLAGS += $(CPPFLAGS) $(TF_CFLAGS_$(ARCH)) \
-Os -ffunction-sections -fdata-sections
GCC_V_OUTPUT := $(shell $(CC) -v 2>&1)
PIE_FOUND := $(findstring --enable-default-pie,${GCC_V_OUTPUT})
ifneq ($(PIE_FOUND),)
TF_CFLAGS += -fno-PIE
endif
# Force the compiler to include the frame pointer
ifeq (${ENABLE_BACKTRACE},1)
@ -335,6 +330,16 @@ ifeq (${ARM_ARCH_MAJOR},7)
include make_helpers/armv7-a-cpus.mk
endif
ifeq ($(ENABLE_PIE),1)
TF_CFLAGS += -fpie
TF_LDFLAGS += -pie
else
PIE_FOUND := $(findstring --enable-default-pie,${GCC_V_OUTPUT})
ifneq ($(PIE_FOUND),)
TF_CFLAGS += -fno-PIE
endif
endif
# Include the CPU specific operations makefile, which provides default
# values for all CPU errata workarounds and CPU specific optimisations.
# This can be overridden by the platform.
@ -565,6 +570,7 @@ $(eval $(call assert_boolean,ENABLE_AMU))
$(eval $(call assert_boolean,ENABLE_ASSERTIONS))
$(eval $(call assert_boolean,ENABLE_BACKTRACE))
$(eval $(call assert_boolean,ENABLE_MPAM_FOR_LOWER_ELS))
$(eval $(call assert_boolean,ENABLE_PIE))
$(eval $(call assert_boolean,ENABLE_PMF))
$(eval $(call assert_boolean,ENABLE_PSCI_STAT))
$(eval $(call assert_boolean,ENABLE_RUNTIME_INSTRUMENTATION))
@ -615,6 +621,7 @@ $(eval $(call add_define,ENABLE_AMU))
$(eval $(call add_define,ENABLE_ASSERTIONS))
$(eval $(call add_define,ENABLE_BACKTRACE))
$(eval $(call add_define,ENABLE_MPAM_FOR_LOWER_ELS))
$(eval $(call add_define,ENABLE_PIE))
$(eval $(call add_define,ENABLE_PMF))
$(eval $(call add_define,ENABLE_PSCI_STAT))
$(eval $(call add_define,ENABLE_RUNTIME_INSTRUMENTATION))

View File

@ -70,13 +70,19 @@ func bl2_entrypoint
* - the coherent memory section.
* ---------------------------------------------
*/
ldr x0, =__BSS_START__
ldr x1, =__BSS_SIZE__
adrp x0, __BSS_START__
add x0, x0, :lo12:__BSS_START__
adrp x1, __BSS_END__
add x1, x1, :lo12:__BSS_END__
sub x1, x1, x0
bl zeromem
#if USE_COHERENT_MEM
ldr x0, =__COHERENT_RAM_START__
ldr x1, =__COHERENT_RAM_UNALIGNED_SIZE__
adrp x0, __COHERENT_RAM_START__
add x0, x0, :lo12:__COHERENT_RAM_START__
adrp x1, __COHERENT_RAM_END_UNALIGNED__
add x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
sub x1, x1, x0
bl zeromem
#endif

View File

@ -7,6 +7,7 @@
#include <arch.h>
#include <bl_common.h>
#include <el3_common_macros.S>
#include <platform_def.h>
#include <pmf_asm_macros.S>
#include <runtime_instr.h>
#include <xlat_mmu_helpers.h>
@ -73,6 +74,18 @@ func bl31_entrypoint
mov x22, 0
mov x23, 0
#endif /* RESET_TO_BL31 */
/* --------------------------------------------------------------------
* If PIE is enabled, fixup the Global descriptor Table and dynamic
* relocations
* --------------------------------------------------------------------
*/
#if ENABLE_PIE
mov_imm x0, BL31_BASE
mov_imm x1, BL31_LIMIT
bl fixup_gdt_reloc
#endif /* ENABLE_PIE */
/* ---------------------------------------------
* Perform platform specific early arch. setup
* ---------------------------------------------

View File

@ -26,6 +26,8 @@ SECTIONS
ASSERT(. == ALIGN(PAGE_SIZE),
"BL31_BASE address is not aligned on a page boundary.")
__BL31_START__ = .;
#if SEPARATE_CODE_AND_RODATA
.text . : {
__TEXT_START__ = .;
@ -63,6 +65,16 @@ SECTIONS
KEEP(*(cpu_ops))
__CPU_OPS_END__ = .;
/*
* Keep the .got section in the RO section as it is patched
* prior to enabling the MMU and having the .got in RO is better for
* security.
*/
. = ALIGN(16);
__GOT_START__ = .;
*(.got)
__GOT_END__ = .;
/* Place pubsub sections for events */
. = ALIGN(8);
#include <pubsub_events.h>
@ -153,6 +165,16 @@ SECTIONS
__DATA_END__ = .;
} >RAM
. = ALIGN(16);
/*
* .rela.dyn needs to come after .data for the readelf utility to parse
* this section correctly.
*/
__RELA_START__ = .;
.rela.dyn . : {
} >RAM
__RELA_END__ = .;
#ifdef BL31_PROGBITS_LIMIT
ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif
@ -265,11 +287,5 @@ SECTIONS
__RW_END__ = .;
__BL31_END__ = .;
__BSS_SIZE__ = SIZEOF(.bss);
#if USE_COHERENT_MEM
__COHERENT_RAM_UNALIGNED_SIZE__ =
__COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif
ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
}

View File

@ -371,6 +371,10 @@ Common build options
partitioning in EL3, however. Platform initialisation code should configure
and use partitions in EL3 as required. This option defaults to ``0``.
- ``ENABLE_PIE``: Boolean option to enable Position Independent Executable (PIE)
support within generic code in TF-A. This option is currently only supported
in BL31. Default is 0.
- ``ENABLE_PMF``: Boolean option to enable support for optional Performance
Measurement Framework(PMF). Default is 0.

View File

@ -105,8 +105,9 @@
* Clobber: X30, X1, X2
*/
.macro get_my_mp_stack _name, _size
bl plat_my_core_pos
ldr x2, =(\_name + \_size)
bl plat_my_core_pos
adrp x2, (\_name + \_size)
add x2, x2, :lo12:(\_name + \_size)
mov x1, #\_size
madd x0, x0, x1, x2
.endm
@ -117,7 +118,8 @@
* Out: X0 = physical address of stack base
*/
.macro get_up_stack _name, _size
ldr x0, =(\_name + \_size)
adrp x0, (\_name + \_size)
add x0, x0, :lo12:(\_name + \_size)
.endm
/*

View File

@ -283,26 +283,38 @@
* an earlier boot loader stage.
* -------------------------------------------------------------
*/
ldr x0, =__RW_START__
ldr x1, =__RW_END__
adrp x0, __RW_START__
add x0, x0, :lo12:__RW_START__
adrp x1, __RW_END__
add x1, x1, :lo12:__RW_END__
sub x1, x1, x0
bl inv_dcache_range
#endif
adrp x0, __BSS_START__
add x0, x0, :lo12:__BSS_START__
ldr x0, =__BSS_START__
ldr x1, =__BSS_SIZE__
adrp x1, __BSS_END__
add x1, x1, :lo12:__BSS_END__
sub x1, x1, x0
bl zeromem
#if USE_COHERENT_MEM
ldr x0, =__COHERENT_RAM_START__
ldr x1, =__COHERENT_RAM_UNALIGNED_SIZE__
adrp x0, __COHERENT_RAM_START__
add x0, x0, :lo12:__COHERENT_RAM_START__
adrp x1, __COHERENT_RAM_END_UNALIGNED__
add x1, x1, :lo12: __COHERENT_RAM_END_UNALIGNED__
sub x1, x1, x0
bl zeromem
#endif
#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_IN_XIP_MEM)
ldr x0, =__DATA_RAM_START__
ldr x1, =__DATA_ROM_START__
ldr x2, =__DATA_SIZE__
adrp x0, __DATA_RAM_START__
add x0, x0, :lo12:__DATA_RAM_START__
adrp x1, __DATA_ROM_START__
add x1, x1, :lo12:__DATA_ROM_START__
adrp x2, __DATA_RAM_END__
add x2, x2, :lo12:__DATA_RAM_END__
sub x2, x2, x0
bl memcpy16
#endif
.endif /* _init_c_runtime */

View File

@ -83,6 +83,7 @@ IMPORT_SYM(unsigned long, __BL2_END__, BL2_END);
#elif defined(IMAGE_BL2U)
IMPORT_SYM(unsigned long, __BL2U_END__, BL2U_END);
#elif defined(IMAGE_BL31)
IMPORT_SYM(unsigned long, __BL31_START__, BL31_START);
IMPORT_SYM(unsigned long, __BL31_END__, BL31_END);
#elif defined(IMAGE_BL32)
IMPORT_SYM(unsigned long, __BL32_END__, BL32_END);

View File

@ -161,10 +161,9 @@
.endif
/*
* Weakly-bound, optional errata status printing function for CPUs of
* Mandatory errata status printing function for CPUs of
* this class.
*/
.weak \_name\()_errata_report
.word \_name\()_errata_report
#ifdef IMAGE_BL32

View File

@ -183,10 +183,9 @@
.endif
/*
* Weakly-bound, optional errata status printing function for CPUs of
* Mandatory errata status printing function for CPUs of
* this class.
*/
.weak \_name\()_errata_report
.quad \_name\()_errata_report
#ifdef IMAGE_BL31

View File

@ -18,10 +18,12 @@
mov x9, x30
bl plat_my_core_pos
mov x30, x9
ldr x1, =__PERCPU_TIMESTAMP_SIZE__
adr x2, __PMF_PERCPU_TIMESTAMP_END__
adr x1, __PMF_TIMESTAMP_START__
sub x1, x2, x1
mov x2, #(\_tid * PMF_TS_SIZE)
madd x0, x0, x1, x2
ldr x1, =pmf_ts_mem_\_name
adr x1, pmf_ts_mem_\_name
add x0, x0, x1
.endm

View File

@ -67,6 +67,29 @@ void zero_normalmem(void *mem, u_register_t length);
* zeroing.
*/
void zeromem(void *mem, u_register_t length);
/*
* Utility function to return the address of a symbol. By default, the
* compiler generates adr/adrp instruction pair to return the reference
* to the symbol and this utility is used to override this compiler
* generated to code to use `ldr` instruction.
*
* This helps when Position Independent Executable needs to reference a symbol
* which is constant and does not depend on the execute address of the binary.
*/
#define DEFINE_LOAD_SYM_ADDR(_name) \
static inline u_register_t load_addr_## _name(void) \
{ \
u_register_t v; \
/* Create a void reference to silence compiler */ \
(void) _name; \
__asm__ volatile ("ldr %0, =" #_name : "=r" (v)); \
return v; \
}
/* Helper to invoke the function defined by DEFINE_LOAD_SYM_ADDR() */
#define LOAD_ADDR_OF(_name) (typeof(_name) *) load_addr_## _name()
#endif /* !(defined(__LINKER__) || defined(__ASSEMBLY__)) */
#endif /* __UTILS_H__ */

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -7,6 +7,7 @@
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <xlat_tables_defs.h>
.globl get_afflvl_shift
.globl mpidr_mask_lower_afflvls
@ -23,6 +24,8 @@
.globl disable_mmu_icache_el1
.globl disable_mmu_icache_el3
.globl fixup_gdt_reloc
#if SUPPORT_VFP
.globl enable_vfp
#endif
@ -497,3 +500,114 @@ func enable_vfp
ret
endfunc enable_vfp
#endif
/* ---------------------------------------------------------------------------
 * Helper to fix up the Global Offset Table (GOT) and dynamic relocations
 * (.rela.dyn) at runtime.
 *
 * This function is meant to be used when the firmware is compiled with -fpie
 * and linked with -pie options. We rely on the linker script exporting
 * appropriate markers for start and end of the section. For the GOT, we
 * expect __GOT_START__ and __GOT_END__. Similarly for .rela.dyn, we expect
 * __RELA_START__ and __RELA_END__.
 *
 * The function takes the limits of the memory to apply fixups to as
 * arguments (which is usually the limits of the relocatable BL image).
 * x0 - the start of the fixup region
 * x1 - the limit of the fixup region
 * These addresses have to be page (4KB) aligned.
 * Clobbers: x0 - x4, x6, x7, flags.
 * ---------------------------------------------------------------------------
 */
func fixup_gdt_reloc
	mov	x6, x0
	mov	x7, x1

	/* Test if the limits are 4K aligned */
#if ENABLE_ASSERTIONS
	orr	x0, x0, x1
	tst	x0, #(PAGE_SIZE - 1)
	ASM_ASSERT(eq)
#endif
	/*
	 * Calculate the offset based on the return address in x30.
	 * Assume that this function is called within a page of the start
	 * of the fixup region.
	 */
	and	x2, x30, #~(PAGE_SIZE - 1)
	sub	x0, x2, x6	/* Diff(S) = Current Address - Compiled Address */

	adrp	x1, __GOT_START__
	add	x1, x1, :lo12:__GOT_START__
	adrp	x2, __GOT_END__
	add	x2, x2, :lo12:__GOT_END__

	/*
	 * The GOT is an array of 64-bit addresses which must be fixed up as
	 * new_addr = old_addr + Diff(S).
	 * The new_addr is the address the binary is currently executing from
	 * and old_addr is the address at compile time.
	 */
1:
	ldr	x3, [x1]
	/* Skip adding the offset if the address is < lower limit */
	cmp	x3, x6
	b.lo	2f
	/*
	 * Skip adding the offset if the address is >= upper limit.
	 * Addresses are unsigned, so use an unsigned (HS) rather than a
	 * signed (GE) comparison.
	 */
	cmp	x3, x7
	b.hs	2f
	add	x3, x3, x0
	str	x3, [x1]
2:
	add	x1, x1, #8
	cmp	x1, x2
	b.lo	1b

	/* Starting dynamic relocations. Use adrp/add to get RELA_START and END */
	adrp	x1, __RELA_START__
	add	x1, x1, :lo12:__RELA_START__
	adrp	x2, __RELA_END__
	add	x2, x2, :lo12:__RELA_END__
	/*
	 * According to the ELF-64 specification, the RELA data structure is as
	 * follows:
	 *	typedef struct {
	 *		Elf64_Addr r_offset;
	 *		Elf64_Xword r_info;
	 *		Elf64_Sxword r_addend;
	 *	} Elf64_Rela;
	 *
	 * r_offset is the address of the reference.
	 * r_info is the symbol index and type of relocation (in this case
	 * 0x403, which corresponds to R_AARCH64_RELATIVE).
	 * r_addend is the constant part of the expression.
	 *
	 * The size of the Elf64_Rela structure is 24 bytes.
	 */
1:
	/* Assert that the relocation type is R_AARCH64_RELATIVE */
#if ENABLE_ASSERTIONS
	ldr	x3, [x1, #8]
	cmp	x3, #0x403
	ASM_ASSERT(eq)
#endif
	ldr	x3, [x1]	/* r_offset */
	add	x3, x0, x3	/* runtime address of the reference */
	ldr	x4, [x1, #16]	/* r_addend */

	/* Skip adding the offset if r_addend is < lower limit */
	cmp	x4, x6
	b.lo	2f
	/* Skip adding the offset if r_addend is >= upper limit (unsigned compare) */
	cmp	x4, x7
	b.hs	2f

	add	x4, x0, x4	/* Diff(S) + r_addend */
	str	x4, [x3]
2:	add	x1, x1, #24
	cmp	x1, x2
	b.lo	1b

	ret
endfunc fixup_gdt_reloc

View File

@ -40,6 +40,15 @@ func aem_generic_cluster_pwr_dwn
b dcsw_op_all
endfunc aem_generic_cluster_pwr_dwn
#if REPORT_ERRATA
/*
* Errata printing function for AEM. Must follow AAPCS.
*/
func aem_generic_errata_report
/* No errata workarounds are implemented for the AEM, so there is nothing to print. */
bx lr
endfunc aem_generic_errata_report
#endif
/* cpu_ops for Base AEM FVP */
declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
aem_generic_core_pwr_dwn, \

View File

@ -69,6 +69,15 @@ func cortex_a12_cluster_pwr_dwn
b cortex_a12_disable_smp
endfunc cortex_a12_cluster_pwr_dwn
#if REPORT_ERRATA
/*
* Errata printing function for Cortex-A12. Must follow AAPCS.
*/
func cortex_a12_errata_report
/* No errata workarounds are implemented for Cortex-A12, so there is nothing to print. */
bx lr
endfunc cortex_a12_errata_report
#endif
declare_cpu_ops cortex_a12, CORTEX_A12_MIDR, \
cortex_a12_reset_func, \
cortex_a12_core_pwr_dwn, \

View File

@ -117,6 +117,15 @@ func cortex_a32_cluster_pwr_dwn
b cortex_a32_disable_smp
endfunc cortex_a32_cluster_pwr_dwn
#if REPORT_ERRATA
/*
* Errata printing function for Cortex-A32. Must follow AAPCS.
*/
func cortex_a32_errata_report
/* No errata workarounds are implemented for Cortex-A32, so there is nothing to print. */
bx lr
endfunc cortex_a32_errata_report
#endif
declare_cpu_ops cortex_a32, CORTEX_A32_MIDR, \
cortex_a32_reset_func, \
cortex_a32_core_pwr_dwn, \

View File

@ -69,6 +69,15 @@ func cortex_a5_cluster_pwr_dwn
b cortex_a5_disable_smp
endfunc cortex_a5_cluster_pwr_dwn
#if REPORT_ERRATA
/*
* Errata printing function for Cortex-A5. Must follow AAPCS.
*/
func cortex_a5_errata_report
/* No errata workarounds are implemented for Cortex-A5, so there is nothing to print. */
bx lr
endfunc cortex_a5_errata_report
#endif
declare_cpu_ops cortex_a5, CORTEX_A5_MIDR, \
cortex_a5_reset_func, \
cortex_a5_core_pwr_dwn, \

View File

@ -69,6 +69,15 @@ func cortex_a7_cluster_pwr_dwn
b cortex_a7_disable_smp
endfunc cortex_a7_cluster_pwr_dwn
#if REPORT_ERRATA
/*
* Errata printing function for Cortex-A7. Must follow AAPCS.
*/
func cortex_a7_errata_report
/* No errata workarounds are implemented for Cortex-A7, so there is nothing to print. */
bx lr
endfunc cortex_a7_errata_report
#endif
declare_cpu_ops cortex_a7, CORTEX_A7_MIDR, \
cortex_a7_reset_func, \
cortex_a7_core_pwr_dwn, \

View File

@ -46,6 +46,15 @@ func aem_generic_cluster_pwr_dwn
b dcsw_op_all
endfunc aem_generic_cluster_pwr_dwn
#if REPORT_ERRATA
/*
* Errata printing function for AEM. Must follow AAPCS.
*/
func aem_generic_errata_report
/* No errata workarounds are implemented for the AEM, so there is nothing to print. */
ret
endfunc aem_generic_errata_report
#endif
/* ---------------------------------------------
* This function provides cpu specific
* register information for crash reporting.

View File

@ -114,6 +114,16 @@ func cortex_a35_cluster_pwr_dwn
b cortex_a35_disable_smp
endfunc cortex_a35_cluster_pwr_dwn
#if REPORT_ERRATA
/*
* Errata printing function for Cortex A35. Must follow AAPCS.
*/
func cortex_a35_errata_report
/* No errata workarounds are implemented for Cortex-A35, so there is nothing to print. */
ret
endfunc cortex_a35_errata_report
#endif
/* ---------------------------------------------
* This function provides cortex_a35 specific
* register information for crash reporting.

View File

@ -27,6 +27,16 @@ func cortex_deimos_core_pwr_dwn
ret
endfunc cortex_deimos_core_pwr_dwn
#if REPORT_ERRATA
/*
* Errata printing function for Cortex Deimos. Must follow AAPCS.
*/
func cortex_deimos_errata_report
/* No errata workarounds are implemented for Cortex Deimos, so there is nothing to print. */
ret
endfunc cortex_deimos_errata_report
#endif
/* ---------------------------------------------
* This function provides Cortex-Deimos specific
* register information for crash reporting.

View File

@ -19,6 +19,16 @@ func cortex_helios_cpu_pwr_dwn
ret
endfunc cortex_helios_cpu_pwr_dwn
#if REPORT_ERRATA
/*
* Errata printing function for Cortex Helios. Must follow AAPCS.
*/
func cortex_helios_errata_report
/* No errata workarounds are implemented for Cortex Helios, so there is nothing to print. */
ret
endfunc cortex_helios_errata_report
#endif
.section .rodata.cortex_helios_regs, "aS"
cortex_helios_regs: /* The ascii list of register names to be reported */
.asciz "cpuectlr_el1", ""

View File

@ -25,9 +25,10 @@
IMPORT_SYM(uintptr_t, __PMF_SVC_DESCS_START__, PMF_SVC_DESCS_START);
IMPORT_SYM(uintptr_t, __PMF_SVC_DESCS_END__, PMF_SVC_DESCS_END);
IMPORT_SYM(uintptr_t, __PERCPU_TIMESTAMP_SIZE__, PMF_PERCPU_TIMESTAMP_SIZE);
IMPORT_SYM(uintptr_t, __PMF_PERCPU_TIMESTAMP_END__, PMF_PERCPU_TIMESTAMP_END);
IMPORT_SYM(intptr_t, __PMF_TIMESTAMP_START__, PMF_TIMESTAMP_ARRAY_START);
IMPORT_SYM(uintptr_t, __PMF_TIMESTAMP_END__, PMF_TIMESTAMP_ARRAY_END);
#define PMF_PERCPU_TIMESTAMP_SIZE (PMF_PERCPU_TIMESTAMP_END - PMF_TIMESTAMP_ARRAY_START)
#define PMF_SVC_DESCS_MAX 10

View File

@ -5,7 +5,7 @@
*/
.globl rom_lib_init
.extern __DATA_RAM_START__, __DATA_ROM_START__, __DATA_SIZE__
.extern __DATA_RAM_START__, __DATA_ROM_START__, __DATA_RAM_END__
.extern memset, memcpy
rom_lib_init:
@ -16,13 +16,19 @@ rom_lib_init:
1: stp x29, x30, [sp, #-16]!
adrp x0, __DATA_RAM_START__
ldr x1,= __DATA_ROM_START__
ldr x2, =__DATA_SIZE__
adrp x1, __DATA_ROM_START__
add x1, x1, :lo12:__DATA_ROM_START__
adrp x2, __DATA_RAM_END__
add x2, x2, :lo12:__DATA_RAM_END__
sub x2, x2, x0
bl memcpy
ldr x0, =__BSS_START__
adrp x0,__BSS_START__
add x0, x0, :lo12:__BSS_START__
mov x1, #0
ldr x2, =__BSS_SIZE__
adrp x2, __BSS_END__
add x2, x2, :lo12:__BSS_END__
sub x2, x2, x0
bl memset
ldp x29, x30, [sp], #16

View File

@ -45,7 +45,8 @@
tlbi_invalidate_all \el
mov x7, x0
ldr x0, =mmu_cfg_params
adrp x0, mmu_cfg_params
add x0, x0, :lo12:mmu_cfg_params
/* MAIR */
ldr x1, [x0, #(MMU_CFG_MAIR << 3)]

View File

@ -64,6 +64,9 @@ DYN_DISABLE_AUTH := 0
# Build option to enable MPAM for lower ELs
ENABLE_MPAM_FOR_LOWER_ELS := 0
# Flag to enable Position Independent Executable (PIE) support
ENABLE_PIE := 0
# Flag to enable Performance Measurement Framework
ENABLE_PMF := 0

View File

@ -19,7 +19,7 @@
.globl plat_arm_calc_core_pos
.macro fvp_choose_gicmmap param1, param2, x_tmp, w_tmp, res
ldr \x_tmp, =V2M_SYSREGS_BASE + V2M_SYS_ID
mov_imm \x_tmp, V2M_SYSREGS_BASE + V2M_SYS_ID
ldr \w_tmp, [\x_tmp]
ubfx \w_tmp, \w_tmp, #V2M_SYS_ID_BLD_SHIFT, #V2M_SYS_ID_BLD_LENGTH
cmp \w_tmp, #BLD_GIC_VE_MMAP
@ -48,7 +48,7 @@ func plat_secondary_cold_boot_setup
* ---------------------------------------------
*/
mrs x0, mpidr_el1
ldr x1, =PWRC_BASE
mov_imm x1, PWRC_BASE
str w0, [x1, #PPOFFR_OFF]
/* ---------------------------------------------
@ -72,8 +72,8 @@ func plat_secondary_cold_boot_setup
b secondary_cold_boot_wait
gicv2_bypass_disable:
ldr x0, =VE_GICC_BASE
ldr x1, =BASE_GICC_BASE
mov_imm x0, VE_GICC_BASE
mov_imm x1, BASE_GICC_BASE
fvp_choose_gicmmap x0, x1, x2, w2, x1
mov w0, #(IRQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP1)
orr w0, w0, #(IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP0)
@ -128,7 +128,7 @@ func plat_get_my_entrypoint
* ---------------------------------------------------------------------
*/
mrs x2, mpidr_el1
ldr x1, =PWRC_BASE
mov_imm x1, PWRC_BASE
str w2, [x1, #PSYSR_OFF]
ldr w2, [x1, #PSYSR_OFF]
ubfx w2, w2, #PSYSR_WK_SHIFT, #PSYSR_WK_WIDTH
@ -171,7 +171,7 @@ endfunc plat_get_my_entrypoint
*/
func plat_is_my_cpu_primary
mrs x0, mpidr_el1
ldr x1, =MPIDR_AFFINITY_MASK
mov_imm x1, MPIDR_AFFINITY_MASK
and x0, x0, x1
cmp x0, #FVP_PRIMARY_CPU
cset w0, eq

View File

@ -202,7 +202,9 @@ ENABLE_AMU := 1
DYNAMIC_WORKAROUND_CVE_2018_3639 := 1
# Enable reclaiming of BL31 initialisation code for secondary cores stacks for FVP
ifneq (${RESET_TO_BL31},1)
RECLAIM_INIT_CODE := 1
endif
ifeq (${ENABLE_AMU},1)
BL31_SOURCES += lib/cpus/aarch64/cortex_a75_pubsub.c \

View File

@ -25,11 +25,13 @@
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;
#if !RESET_TO_BL31
/*
* Check that BL31_BASE is above ARM_TB_FW_CONFIG_LIMIT. The reserved page
* is required for SOC_FW_CONFIG/TOS_FW_CONFIG passed from BL2.
*/
CASSERT(BL31_BASE >= ARM_TB_FW_CONFIG_LIMIT, assert_bl31_base_overflows);
#endif
/* Weak definitions may be overridden in specific ARM standard platform */
#pragma weak bl31_early_platform_setup2
@ -38,8 +40,8 @@ CASSERT(BL31_BASE >= ARM_TB_FW_CONFIG_LIMIT, assert_bl31_base_overflows);
#pragma weak bl31_plat_get_next_image_ep_info
#define MAP_BL31_TOTAL MAP_REGION_FLAT( \
BL31_BASE, \
BL31_END - BL31_BASE, \
BL31_START, \
BL31_END - BL31_START, \
MT_MEMORY | MT_RW | MT_SECURE)
#if RECLAIM_INIT_CODE
IMPORT_SYM(unsigned long, __INIT_CODE_START__, BL_INIT_CODE_BASE);

View File

@ -130,6 +130,11 @@ ARM_CRYPTOCELL_INTEG := 0
$(eval $(call assert_boolean,ARM_CRYPTOCELL_INTEG))
$(eval $(call add_define,ARM_CRYPTOCELL_INTEG))
# Enable PIE support for RESET_TO_BL31 case
ifeq (${RESET_TO_BL31},1)
ENABLE_PIE := 1
endif
# CryptoCell integration relies on coherent buffers for passing data from
# the AP CPU to the CryptoCell
ifeq (${ARM_CRYPTOCELL_INTEG},1)

View File

@ -10,6 +10,7 @@
#include <plat_arm.h>
#include <platform.h>
#include <platform_def.h>
#include <utils.h>
/******************************************************************************
* The following functions are defined as weak to allow a platform to override
@ -33,10 +34,16 @@ static const interrupt_prop_t arm_interrupt_props[] = {
/*
* We save and restore the GICv3 context on system suspend. Allocate the
* data in the designated EL3 Secure carve-out memory
* data in the designated EL3 Secure carve-out memory. The `volatile`
* is used to prevent the compiler from removing the gicv3 contexts even
* though the DEFINE_LOAD_SYM_ADDR creates a dummy reference to it.
*/
static gicv3_redist_ctx_t rdist_ctx __section("arm_el3_tzc_dram");
static gicv3_dist_ctx_t dist_ctx __section("arm_el3_tzc_dram");
static volatile gicv3_redist_ctx_t rdist_ctx __section("arm_el3_tzc_dram");
static volatile gicv3_dist_ctx_t dist_ctx __section("arm_el3_tzc_dram");
/* Define accessor function to get reference to the GICv3 context */
DEFINE_LOAD_SYM_ADDR(rdist_ctx)
DEFINE_LOAD_SYM_ADDR(dist_ctx)
/*
* MPIDR hashing function for translating MPIDRs read from GICR_TYPER register
@ -134,6 +141,10 @@ void plat_arm_gic_redistif_off(void)
*****************************************************************************/
void plat_arm_gic_save(void)
{
gicv3_redist_ctx_t * const rdist_context =
(gicv3_redist_ctx_t *)LOAD_ADDR_OF(rdist_ctx);
gicv3_dist_ctx_t * const dist_context =
(gicv3_dist_ctx_t *)LOAD_ADDR_OF(dist_ctx);
/*
* If an ITS is available, save its context before
@ -149,10 +160,10 @@ void plat_arm_gic_save(void)
* we only need to save the context of the CPU that is issuing
* the SYSTEM SUSPEND call, i.e. the current CPU.
*/
gicv3_rdistif_save(plat_my_core_pos(), &rdist_ctx);
gicv3_rdistif_save(plat_my_core_pos(), rdist_context);
/* Save the GIC Distributor context */
gicv3_distif_save(&dist_ctx);
gicv3_distif_save(dist_context);
/*
* From here, all the components of the GIC can be safely powered down
@ -163,8 +174,13 @@ void plat_arm_gic_save(void)
void plat_arm_gic_resume(void)
{
const gicv3_redist_ctx_t *rdist_context =
(gicv3_redist_ctx_t *)LOAD_ADDR_OF(rdist_ctx);
const gicv3_dist_ctx_t *dist_context =
(gicv3_dist_ctx_t *)LOAD_ADDR_OF(dist_ctx);
/* Restore the GIC Distributor context */
gicv3_distif_init_restore(&dist_ctx);
gicv3_distif_init_restore(dist_context);
/*
* Restore the GIC Redistributor and ITS contexts after the
@ -172,7 +188,7 @@ void plat_arm_gic_resume(void)
* we only need to restore the context of the CPU that issued
* the SYSTEM SUSPEND call.
*/
gicv3_rdistif_init_restore(plat_my_core_pos(), &rdist_ctx);
gicv3_rdistif_init_restore(plat_my_core_pos(), rdist_context);
/*
* If an ITS is available, restore its context after

View File

@ -108,7 +108,7 @@ endfunc plat_is_my_cpu_primary
func plat_is_my_cpu_primary
mov x9, x30
bl plat_my_core_pos
ldr x1, =SCP_BOOT_CFG_ADDR
mov_imm x1, SCP_BOOT_CFG_ADDR
ldr x1, [x1]
ubfx x1, x1, #PLAT_CSS_PRIMARY_CPU_SHIFT, \
#PLAT_CSS_PRIMARY_CPU_BIT_WIDTH