Merge pull request #36 from athoelke/at/gc-sections-80

Using GCC --gc-sections to eliminate unused code and data
This commit is contained in:
danh-arm 2014-04-15 09:39:47 +01:00
commit 9c2c763d22
27 changed files with 270 additions and 272 deletions

View File

@ -150,8 +150,10 @@ ASFLAGS += -nostdinc -ffreestanding -Wa,--fatal-warnings \
CFLAGS := -nostdinc -pedantic -ffreestanding -Wall \
-Werror -mgeneral-regs-only -std=c99 -c -Os \
-DDEBUG=${DEBUG} ${INCLUDES} ${CFLAGS}
CFLAGS += -ffunction-sections -fdata-sections
LDFLAGS += --fatal-warnings -O1
LDFLAGS += --gc-sections
vpath %.ld.S bl1:bl2:bl31

View File

@ -29,13 +29,12 @@
*/
#include <arch.h>
#include <asm_macros.S>
.weak cpu_reset_handler
.section .text, "ax"; .align 3
cpu_reset_handler: ; .type cpu_reset_handler, %function
func cpu_reset_handler
mov x19, x30 // lr
/* ---------------------------------------------

View File

@ -28,6 +28,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm_macros.S>
.globl read_icc_sre_el1
.globl read_icc_sre_el2
.globl read_icc_sre_el3
@ -48,42 +50,40 @@
#define ICC_CTLR_EL3 S3_6_C12_C12_4
#define ICC_PMR_EL1 S3_0_C4_C6_0
.section .text, "ax"; .align 3
read_icc_sre_el1: ; .type read_icc_sre_el1, %function
func read_icc_sre_el1
mrs x0, ICC_SRE_EL1
ret
read_icc_sre_el2: ; .type read_icc_sre_el2, %function
func read_icc_sre_el2
mrs x0, ICC_SRE_EL2
ret
read_icc_sre_el3: ; .type read_icc_sre_el3, %function
func read_icc_sre_el3
mrs x0, ICC_SRE_EL3
ret
write_icc_sre_el1: ; .type write_icc_sre_el1, %function
func write_icc_sre_el1
msr ICC_SRE_EL1, x0
isb
ret
write_icc_sre_el2: ; .type write_icc_sre_el2, %function
func write_icc_sre_el2
msr ICC_SRE_EL2, x0
isb
ret
write_icc_sre_el3: ; .type write_icc_sre_el3, %function
func write_icc_sre_el3
msr ICC_SRE_EL3, x0
isb
ret
write_icc_pmr_el1: ; .type write_icc_pmr_el1, %function
func write_icc_pmr_el1
msr ICC_PMR_EL1, x0
isb
ret

View File

@ -29,12 +29,11 @@
*/
#include <arch.h>
#include <asm_macros.S>
.globl bl1_entrypoint
.section .text, "ax"; .align 3
/* -----------------------------------------------------
* bl1_entrypoint() is the entry point into the trusted
* firmware code when a cpu is released from warm or
@ -42,7 +41,7 @@
* -----------------------------------------------------
*/
bl1_entrypoint: ; .type bl1_entrypoint, %function
func bl1_entrypoint
/* ---------------------------------------------
* Perform any processor specific actions upon
* reset e.g. cache, tlb invalidations etc.

View File

@ -178,8 +178,7 @@ SErrorA32:
.align 7
.section .text, "ax"
process_exception:
func process_exception
sub sp, sp, #0x40
stp x0, x1, [sp, #0x0]
stp x2, x3, [sp, #0x10]

View File

@ -43,8 +43,8 @@ SECTIONS
{
ro : {
__RO_START__ = .;
*bl1_entrypoint.o(.text)
*(.text)
*bl1_entrypoint.o(.text*)
*(.text*)
*(.rodata*)
*(.vectors)
__RO_END__ = .;
@ -57,7 +57,7 @@ SECTIONS
. = NEXT(16); /* Align LMA */
.data : ALIGN(16) { /* Align VMA */
__DATA_RAM_START__ = .;
*(.data)
*(.data*)
__DATA_RAM_END__ = .;
} >RAM AT>ROM
@ -73,7 +73,7 @@ SECTIONS
*/
.bss : ALIGN(16) {
__BSS_START__ = .;
*(.bss)
*(.bss*)
*(COMMON)
__BSS_END__ = .;
} >RAM

View File

@ -30,15 +30,14 @@
#include <bl_common.h>
#include <arch.h>
#include <asm_macros.S>
.globl bl2_entrypoint
.section .text, "ax"; .align 3
bl2_entrypoint: ; .type bl2_entrypoint, %function
func bl2_entrypoint
/*---------------------------------------------
* Store the extents of the tzram available to
* BL2 for future use. Use the opcode param to

View File

@ -47,8 +47,8 @@ SECTIONS
ro . : {
__RO_START__ = .;
*bl2_entrypoint.o(.text)
*(.text)
*bl2_entrypoint.o(.text*)
*(.text*)
*(.rodata*)
*(.vectors)
__RO_END_UNALIGNED__ = .;
@ -63,7 +63,7 @@ SECTIONS
.data . : {
__DATA_START__ = .;
*(.data)
*(.data*)
__DATA_END__ = .;
} >RAM
@ -79,7 +79,7 @@ SECTIONS
*/
.bss : ALIGN(16) {
__BSS_START__ = .;
*(SORT_BY_ALIGNMENT(.bss))
*(SORT_BY_ALIGNMENT(.bss*))
*(COMMON)
__BSS_END__ = .;
} >RAM

View File

@ -32,20 +32,19 @@
#include <platform.h>
#include <arch.h>
#include "cm_macros.S"
#include <asm_macros.S>
.globl bl31_entrypoint
.section .text, "ax"; .align 3
/* -----------------------------------------------------
* bl31_entrypoint() is the cold boot entrypoint,
* executed only by the primary cpu.
* -----------------------------------------------------
*/
bl31_entrypoint: ; .type bl31_entrypoint, %function
func bl31_entrypoint
/* ---------------------------------------------
* BL2 has populated x0 with the opcode
* indicating BL31 should be run, x3 with

View File

@ -29,6 +29,7 @@
*/
#include <context.h>
#include <asm_macros.S>
/* -----------------------------------------------------
* The following function strictly follows the AArch64
@ -39,7 +40,7 @@
* -----------------------------------------------------
*/
.global el3_sysregs_context_save
el3_sysregs_context_save:
func el3_sysregs_context_save
mrs x9, scr_el3
mrs x10, sctlr_el3
@ -75,7 +76,7 @@ el3_sysregs_context_save:
* -----------------------------------------------------
*/
.global el3_sysregs_context_restore
el3_sysregs_context_restore:
func el3_sysregs_context_restore
ldp x11, xzr, [x0, #CTX_CPTR_EL3]
msr cptr_el3, x11
@ -112,7 +113,7 @@ el3_sysregs_context_restore:
* -----------------------------------------------------
*/
.global el1_sysregs_context_save
el1_sysregs_context_save:
func el1_sysregs_context_save
mrs x9, spsr_el1
mrs x10, elr_el1
@ -193,7 +194,7 @@ el1_sysregs_context_save:
* -----------------------------------------------------
*/
.global el1_sysregs_context_restore
el1_sysregs_context_restore:
func el1_sysregs_context_restore
ldp x9, x10, [x0, #CTX_SPSR_EL1]
msr spsr_el1, x9
@ -284,7 +285,7 @@ el1_sysregs_context_restore:
* -----------------------------------------------------
*/
.global fpregs_context_save
fpregs_context_save:
func fpregs_context_save
stp q0, q1, [x0, #CTX_FP_Q0]
stp q2, q3, [x0, #CTX_FP_Q2]
stp q4, q5, [x0, #CTX_FP_Q4]
@ -327,7 +328,7 @@ fpregs_context_save:
* -----------------------------------------------------
*/
.global fpregs_context_restore
fpregs_context_restore:
func fpregs_context_restore
ldp q0, q1, [x0, #CTX_FP_Q0]
ldp q2, q3, [x0, #CTX_FP_Q2]
ldp q4, q5, [x0, #CTX_FP_Q4]

View File

@ -203,7 +203,6 @@ serror_aarch32:
.align 7
.section .text, "ax"
/* -----------------------------------------------------
* The following code handles secure monitor calls.
* Depending upon the execution state from where the SMC
@ -217,6 +216,7 @@ serror_aarch32:
* used here
* -----------------------------------------------------
*/
func smc_handler
smc_handler32:
/* Check whether aarch32 issued an SMC64 */
tbnz x0, #FUNCID_CC_SHIFT, smc_prohibited
@ -330,6 +330,9 @@ smc_handler64:
* This routine assumes that the SP_EL3 is pointing to
* a valid context structure from where the gp regs and
* other special registers can be retrieved.
*
* Keep it in the same section as smc_handler as this
* function uses a fall-through to el3_exit
* -----------------------------------------------------
*/
el3_exit: ; .type el3_exit, %function
@ -383,7 +386,7 @@ rt_svc_fw_critical_error:
* within the 32 instructions per exception vector.
* -----------------------------------------------------
*/
save_scratch_registers: ; .type save_scratch_registers, %function
func save_scratch_registers
stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
@ -397,7 +400,7 @@ save_scratch_registers: ; .type save_scratch_registers, %function
stp x18, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
ret
restore_scratch_registers: ; .type restore_scratch_registers, %function
func restore_scratch_registers
ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
@ -430,7 +433,7 @@ restore_scratch_registers_callee:
* reporting unhandled exceptions
* -----------------------------------------------------
*/
get_exception_stack: ; .type get_exception_stack, %function
func get_exception_stack
mov x10, x30 // lr
bl platform_get_core_pos
add x0, x0, #1

View File

@ -48,14 +48,14 @@ SECTIONS
ro . : {
__RO_START__ = .;
*bl31_entrypoint.o(.text)
*(.text)
*bl31_entrypoint.o(.text*)
*(.text*)
*(.rodata*)
/* Ensure 8-byte alignment for descriptors */
/* Ensure 8-byte alignment for descriptors and ensure inclusion */
. = ALIGN(8);
__RT_SVC_DESCS_START__ = .;
*(rt_svc_descs)
KEEP(*(rt_svc_descs))
__RT_SVC_DESCS_END__ = .;
*(.vectors)
@ -71,7 +71,7 @@ SECTIONS
.data . : {
__DATA_START__ = .;
*(.data)
*(.data*)
__DATA_END__ = .;
} >RAM
@ -87,7 +87,7 @@ SECTIONS
*/
.bss : ALIGN(16) {
__BSS_START__ = .;
*(.bss)
*(.bss*)
*(COMMON)
__BSS_END__ = .;
} >RAM

View File

@ -31,6 +31,7 @@
#include <bl_common.h>
#include <arch.h>
#include <tsp.h>
#include <asm_macros.S>
.globl tsp_entrypoint
@ -53,10 +54,8 @@
smc #0
.endm
.section .text, "ax"; .align 3
tsp_entrypoint: ; .type tsp_entrypoint, %function
func tsp_entrypoint
/*---------------------------------------------
* Store the extents of the tzram available to
* BL32 for future use.
@ -161,7 +160,7 @@ tsp_entrypoint_panic:
* here except for acknowledging the request.
* ---------------------------------------------
*/
tsp_cpu_off_entry: ; .type tsp_cpu_off_entry, %function
func tsp_cpu_off_entry
bl tsp_cpu_off_main
restore_args_call_smc
@ -176,7 +175,7 @@ tsp_cpu_off_entry: ; .type tsp_cpu_off_entry, %function
* will be aarch64 and exceptions masked.
* ---------------------------------------------
*/
tsp_cpu_on_entry: ; .type tsp_cpu_on_entry, %function
func tsp_cpu_on_entry
/* ---------------------------------------------
* Set the exception vector to something sane.
* ---------------------------------------------
@ -236,7 +235,7 @@ tsp_cpu_on_entry_panic:
* the EL1 state.
* ---------------------------------------------
*/
tsp_cpu_suspend_entry: ; .type tsp_cpu_suspend_entry, %function
func tsp_cpu_suspend_entry
bl tsp_cpu_suspend_main
restore_args_call_smc
@ -250,7 +249,7 @@ tsp_cpu_suspend_entry: ; .type tsp_cpu_suspend_entry, %function
* acknowledging the request.
* ---------------------------------------------
*/
tsp_cpu_resume_entry: ; .type tsp_cpu_resume_entry, %function
func tsp_cpu_resume_entry
bl tsp_cpu_resume_main
restore_args_call_smc
tsp_cpu_resume_panic:
@ -261,7 +260,7 @@ tsp_cpu_resume_panic:
* the TSP to service a fast smc request.
* ---------------------------------------------
*/
tsp_fast_smc_entry: ; .type tsp_fast_smc_entry, %function
func tsp_fast_smc_entry
bl tsp_fast_smc_handler
restore_args_call_smc
tsp_fast_smc_entry_panic:

View File

@ -29,18 +29,17 @@
*/
#include <tsp.h>
#include <asm_macros.S>
.globl tsp_get_magic
.section .text, "ax"; .align 3
/*
* This function raises an SMC to retrieve arguments from secure
* monitor/dispatcher, saves the returned arguments in the array received in x0,
* and then returns to the caller
*/
tsp_get_magic:
func tsp_get_magic
/* Save address to stack */
stp x0, xzr, [sp, #-16]!

View File

@ -48,8 +48,8 @@ SECTIONS
ro . : {
__RO_START__ = .;
*tsp_entrypoint.o(.text)
*(.text)
*tsp_entrypoint.o(.text*)
*(.text*)
*(.rodata*)
*(.vectors)
__RO_END_UNALIGNED__ = .;
@ -64,7 +64,7 @@ SECTIONS
.data . : {
__DATA_START__ = .;
*(.data)
*(.data*)
__DATA_END__ = .;
} >RAM
@ -80,7 +80,7 @@ SECTIONS
*/
.bss : ALIGN(16) {
__BSS_START__ = .;
*(SORT_BY_ALIGNMENT(.bss))
*(SORT_BY_ALIGNMENT(.bss*))
*(COMMON)
__BSS_END__ = .;
} >RAM

View File

@ -80,3 +80,14 @@
.error "Vector exceeds 32 instructions"
.endif
.endm
/*
* This macro is used to create a function label and place the
* code into a separate text section based on the function name
* to enable elimination of unused code during linking
*/
.macro func _name
.section .text.\_name, "ax"
.type \_name, %function
\_name:
.endm

View File

@ -44,58 +44,56 @@
.globl dcsw_op_louis
.globl dcsw_op_all
.section .text, "ax"; .align 3
dcisw: ; .type dcisw, %function
func dcisw
dc isw, x0
dsb sy
isb
ret
dccisw: ; .type dccisw, %function
func dccisw
dc cisw, x0
dsb sy
isb
ret
dccsw: ; .type dccsw, %function
func dccsw
dc csw, x0
dsb sy
isb
ret
dccvac: ; .type dccvac, %function
func dccvac
dc cvac, x0
dsb sy
isb
ret
dcivac: ; .type dcivac, %function
func dcivac
dc ivac, x0
dsb sy
isb
ret
dccivac: ; .type dccivac, %function
func dccivac
dc civac, x0
dsb sy
isb
ret
dccvau: ; .type dccvau, %function
func dccvau
dc cvau, x0
dsb sy
isb
ret
dczva: ; .type dczva, %function
func dczva
dc zva, x0
dsb sy
isb
@ -107,7 +105,7 @@ dczva: ; .type dczva, %function
* size. 'x0' = addr, 'x1' = size
* ------------------------------------------
*/
flush_dcache_range: ; .type flush_dcache_range, %function
func flush_dcache_range
dcache_line_size x2, x3
add x1, x0, x1
sub x3, x2, #1
@ -126,7 +124,7 @@ flush_loop:
* size. 'x0' = addr, 'x1' = size
* ------------------------------------------
*/
inv_dcache_range: ; .type inv_dcache_range, %function
func inv_dcache_range
dcache_line_size x2, x3
add x1, x0, x1
sub x3, x2, #1
@ -151,7 +149,7 @@ inv_loop:
* x14
* ----------------------------------
*/
dcsw_op: ; .type dcsw_op, %function
func dcsw_op
all_start_at_level:
add x2, x10, x10, lsr #1 // work out 3x current cache level
lsr x1, x0, x2 // extract cache type bits from clidr
@ -197,7 +195,7 @@ finished:
ret
do_dcsw_op: ; .type do_dcsw_op, %function
func do_dcsw_op
cbz x3, exit
cmp x0, #DCISW
b.eq dc_isw
@ -221,13 +219,13 @@ exit:
ret
dcsw_op_louis: ; .type dcsw_op_louis, %function
func dcsw_op_louis
dsb sy
setup_dcsw_op_args x10, x3, x9, #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
b do_dcsw_op
dcsw_op_all: ; .type dcsw_op_all, %function
func dcsw_op_all
dsb sy
setup_dcsw_op_args x10, x3, x9, #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
b do_dcsw_op

View File

@ -30,6 +30,7 @@
#include <arch_helpers.h>
#include <runtime_svc.h>
#include <asm_macros.S>
.globl enable_irq
.globl disable_irq
@ -79,16 +80,15 @@
.globl zeromem16
.globl memcpy16
.section .text, "ax"
get_afflvl_shift: ; .type get_afflvl_shift, %function
func get_afflvl_shift
cmp x0, #3
cinc x0, x0, eq
mov x1, #MPIDR_AFFLVL_SHIFT
lsl x0, x0, x1
ret
mpidr_mask_lower_afflvls: ; .type mpidr_mask_lower_afflvls, %function
func mpidr_mask_lower_afflvls
cmp x1, #3
cinc x1, x1, eq
mov x2, #MPIDR_AFFLVL_SHIFT
@ -101,57 +101,57 @@ mpidr_mask_lower_afflvls: ; .type mpidr_mask_lower_afflvls, %function
* Asynchronous exception manipulation accessors
* -----------------------------------------------------
*/
enable_irq: ; .type enable_irq, %function
func enable_irq
msr daifclr, #DAIF_IRQ_BIT
ret
enable_fiq: ; .type enable_fiq, %function
func enable_fiq
msr daifclr, #DAIF_FIQ_BIT
ret
enable_serror: ; .type enable_serror, %function
func enable_serror
msr daifclr, #DAIF_ABT_BIT
ret
enable_debug_exceptions:
func enable_debug_exceptions
msr daifclr, #DAIF_DBG_BIT
ret
disable_irq: ; .type disable_irq, %function
func disable_irq
msr daifset, #DAIF_IRQ_BIT
ret
disable_fiq: ; .type disable_fiq, %function
func disable_fiq
msr daifset, #DAIF_FIQ_BIT
ret
disable_serror: ; .type disable_serror, %function
func disable_serror
msr daifset, #DAIF_ABT_BIT
ret
disable_debug_exceptions:
func disable_debug_exceptions
msr daifset, #DAIF_DBG_BIT
ret
read_daif: ; .type read_daif, %function
func read_daif
mrs x0, daif
ret
write_daif: ; .type write_daif, %function
func write_daif
msr daif, x0
ret
read_spsr: ; .type read_spsr, %function
func read_spsr
mrs x0, CurrentEl
cmp x0, #(MODE_EL1 << MODE_EL_SHIFT)
b.eq read_spsr_el1
@ -161,22 +161,22 @@ read_spsr: ; .type read_spsr, %function
b.eq read_spsr_el3
read_spsr_el1: ; .type read_spsr_el1, %function
func read_spsr_el1
mrs x0, spsr_el1
ret
read_spsr_el2: ; .type read_spsr_el2, %function
func read_spsr_el2
mrs x0, spsr_el2
ret
read_spsr_el3: ; .type read_spsr_el3, %function
func read_spsr_el3
mrs x0, spsr_el3
ret
write_spsr: ; .type write_spsr, %function
func write_spsr
mrs x1, CurrentEl
cmp x1, #(MODE_EL1 << MODE_EL_SHIFT)
b.eq write_spsr_el1
@ -186,25 +186,25 @@ write_spsr: ; .type write_spsr, %function
b.eq write_spsr_el3
write_spsr_el1: ; .type write_spsr_el1, %function
func write_spsr_el1
msr spsr_el1, x0
isb
ret
write_spsr_el2: ; .type write_spsr_el2, %function
func write_spsr_el2
msr spsr_el2, x0
isb
ret
write_spsr_el3: ; .type write_spsr_el3, %function
func write_spsr_el3
msr spsr_el3, x0
isb
ret
read_elr: ; .type read_elr, %function
func read_elr
mrs x0, CurrentEl
cmp x0, #(MODE_EL1 << MODE_EL_SHIFT)
b.eq read_elr_el1
@ -214,22 +214,22 @@ read_elr: ; .type read_elr, %function
b.eq read_elr_el3
read_elr_el1: ; .type read_elr_el1, %function
func read_elr_el1
mrs x0, elr_el1
ret
read_elr_el2: ; .type read_elr_el2, %function
func read_elr_el2
mrs x0, elr_el2
ret
read_elr_el3: ; .type read_elr_el3, %function
func read_elr_el3
mrs x0, elr_el3
ret
write_elr: ; .type write_elr, %function
func write_elr
mrs x1, CurrentEl
cmp x1, #(MODE_EL1 << MODE_EL_SHIFT)
b.eq write_elr_el1
@ -239,54 +239,54 @@ write_elr: ; .type write_elr, %function
b.eq write_elr_el3
write_elr_el1: ; .type write_elr_el1, %function
func write_elr_el1
msr elr_el1, x0
isb
ret
write_elr_el2: ; .type write_elr_el2, %function
func write_elr_el2
msr elr_el2, x0
isb
ret
write_elr_el3: ; .type write_elr_el3, %function
func write_elr_el3
msr elr_el3, x0
isb
ret
dsb: ; .type dsb, %function
func dsb
dsb sy
ret
isb: ; .type isb, %function
func isb
isb
ret
sev: ; .type sev, %function
func sev
sev
ret
wfe: ; .type wfe, %function
func wfe
wfe
ret
wfi: ; .type wfi, %function
func wfi
wfi
ret
eret: ; .type eret, %function
func eret
eret
smc: ; .type smc, %function
func smc
smc #0
/* -----------------------------------------------------------------------
@ -296,7 +296,7 @@ smc: ; .type smc, %function
* The memory address must be 16-byte aligned.
* -----------------------------------------------------------------------
*/
zeromem16:
func zeromem16
add x2, x0, x1
/* zero 16 bytes at a time */
z_loop16:
@ -322,7 +322,7 @@ z_end: ret
* Destination and source addresses must be 16-byte aligned.
* --------------------------------------------------------------------------
*/
memcpy16:
func memcpy16
/* copy 16 bytes at a time */
m_loop16:
cmp x2, #16

View File

@ -29,6 +29,7 @@
*/
#include <arch_helpers.h>
#include <asm_macros.S>
.globl read_vbar_el1
.globl read_vbar_el2
@ -164,19 +165,17 @@
#endif
.section .text, "ax"
read_current_el: ; .type read_current_el, %function
func read_current_el
mrs x0, CurrentEl
ret
read_id_pfr1_el1: ; .type read_id_pfr1_el1, %function
func read_id_pfr1_el1
mrs x0, id_pfr1_el1
ret
read_id_aa64pfr0_el1: ; .type read_id_aa64pfr0_el1, %function
func read_id_aa64pfr0_el1
mrs x0, id_aa64pfr0_el1
ret
@ -185,34 +184,34 @@ read_id_aa64pfr0_el1: ; .type read_id_aa64pfr0_el1, %function
* VBAR accessors
* -----------------------------------------------------
*/
read_vbar_el1: ; .type read_vbar_el1, %function
func read_vbar_el1
mrs x0, vbar_el1
ret
read_vbar_el2: ; .type read_vbar_el2, %function
func read_vbar_el2
mrs x0, vbar_el2
ret
read_vbar_el3: ; .type read_vbar_el3, %function
func read_vbar_el3
mrs x0, vbar_el3
ret
write_vbar_el1: ; .type write_vbar_el1, %function
func write_vbar_el1
msr vbar_el1, x0
isb
ret
write_vbar_el2: ; .type write_vbar_el2, %function
func write_vbar_el2
msr vbar_el2, x0
isb
ret
write_vbar_el3: ; .type write_vbar_el3, %function
func write_vbar_el3
msr vbar_el3, x0
isb
ret
@ -222,34 +221,34 @@ write_vbar_el3: ; .type write_vbar_el3, %function
* AFSR0 accessors
* -----------------------------------------------------
*/
read_afsr0_el1: ; .type read_afsr0_el1, %function
func read_afsr0_el1
mrs x0, afsr0_el1
ret
read_afsr0_el2: ; .type read_afsr0_el2, %function
func read_afsr0_el2
mrs x0, afsr0_el2
ret
read_afsr0_el3: ; .type read_afsr0_el3, %function
func read_afsr0_el3
mrs x0, afsr0_el3
ret
write_afsr0_el1: ; .type write_afsr0_el1, %function
func write_afsr0_el1
msr afsr0_el1, x0
isb
ret
write_afsr0_el2: ; .type write_afsr0_el2, %function
func write_afsr0_el2
msr afsr0_el2, x0
isb
ret
write_afsr0_el3: ; .type write_afsr0_el3, %function
func write_afsr0_el3
msr afsr0_el3, x0
isb
ret
@ -259,34 +258,34 @@ write_afsr0_el3: ; .type write_afsr0_el3, %function
* FAR accessors
* -----------------------------------------------------
*/
read_far_el1: ; .type read_far_el1, %function
func read_far_el1
mrs x0, far_el1
ret
read_far_el2: ; .type read_far_el2, %function
func read_far_el2
mrs x0, far_el2
ret
read_far_el3: ; .type read_far_el3, %function
func read_far_el3
mrs x0, far_el3
ret
write_far_el1: ; .type write_far_el1, %function
func write_far_el1
msr far_el1, x0
isb
ret
write_far_el2: ; .type write_far_el2, %function
func write_far_el2
msr far_el2, x0
isb
ret
write_far_el3: ; .type write_far_el3, %function
func write_far_el3
msr far_el3, x0
isb
ret
@ -296,34 +295,34 @@ write_far_el3: ; .type write_far_el3, %function
* MAIR accessors
* -----------------------------------------------------
*/
read_mair_el1: ; .type read_mair_el1, %function
func read_mair_el1
mrs x0, mair_el1
ret
read_mair_el2: ; .type read_mair_el2, %function
func read_mair_el2
mrs x0, mair_el2
ret
read_mair_el3: ; .type read_mair_el3, %function
func read_mair_el3
mrs x0, mair_el3
ret
write_mair_el1: ; .type write_mair_el1, %function
func write_mair_el1
msr mair_el1, x0
isb
ret
write_mair_el2: ; .type write_mair_el2, %function
func write_mair_el2
msr mair_el2, x0
isb
ret
write_mair_el3: ; .type write_mair_el3, %function
func write_mair_el3
msr mair_el3, x0
isb
ret
@ -333,34 +332,34 @@ write_mair_el3: ; .type write_mair_el3, %function
* AMAIR accessors
* -----------------------------------------------------
*/
read_amair_el1: ; .type read_amair_el1, %function
func read_amair_el1
mrs x0, amair_el1
ret
read_amair_el2: ; .type read_amair_el2, %function
func read_amair_el2
mrs x0, amair_el2
ret
read_amair_el3: ; .type read_amair_el3, %function
func read_amair_el3
mrs x0, amair_el3
ret
write_amair_el1: ; .type write_amair_el1, %function
func write_amair_el1
msr amair_el1, x0
isb
ret
write_amair_el2: ; .type write_amair_el2, %function
func write_amair_el2
msr amair_el2, x0
isb
ret
write_amair_el3: ; .type write_amair_el3, %function
func write_amair_el3
msr amair_el3, x0
isb
ret
@ -370,17 +369,17 @@ write_amair_el3: ; .type write_amair_el3, %function
* RVBAR accessors
* -----------------------------------------------------
*/
read_rvbar_el1: ; .type read_rvbar_el1, %function
func read_rvbar_el1
mrs x0, rvbar_el1
ret
read_rvbar_el2: ; .type read_rvbar_el2, %function
func read_rvbar_el2
mrs x0, rvbar_el2
ret
read_rvbar_el3: ; .type read_rvbar_el3, %function
func read_rvbar_el3
mrs x0, rvbar_el3
ret
@ -389,34 +388,34 @@ read_rvbar_el3: ; .type read_rvbar_el3, %function
* RMR accessors
* -----------------------------------------------------
*/
read_rmr_el1: ; .type read_rmr_el1, %function
func read_rmr_el1
mrs x0, rmr_el1
ret
read_rmr_el2: ; .type read_rmr_el2, %function
func read_rmr_el2
mrs x0, rmr_el2
ret
read_rmr_el3: ; .type read_rmr_el3, %function
func read_rmr_el3
mrs x0, rmr_el3
ret
write_rmr_el1: ; .type write_rmr_el1, %function
func write_rmr_el1
msr rmr_el1, x0
isb
ret
write_rmr_el2: ; .type write_rmr_el2, %function
func write_rmr_el2
msr rmr_el2, x0
isb
ret
write_rmr_el3: ; .type write_rmr_el3, %function
func write_rmr_el3
msr rmr_el3, x0
isb
ret
@ -426,34 +425,34 @@ write_rmr_el3: ; .type write_rmr_el3, %function
* AFSR1 accessors
* -----------------------------------------------------
*/
read_afsr1_el1: ; .type read_afsr1_el1, %function
func read_afsr1_el1
mrs x0, afsr1_el1
ret
read_afsr1_el2: ; .type read_afsr1_el2, %function
func read_afsr1_el2
mrs x0, afsr1_el2
ret
read_afsr1_el3: ; .type read_afsr1_el3, %function
func read_afsr1_el3
mrs x0, afsr1_el3
ret
write_afsr1_el1: ; .type write_afsr1_el1, %function
func write_afsr1_el1
msr afsr1_el1, x0
isb
ret
write_afsr1_el2: ; .type write_afsr1_el2, %function
func write_afsr1_el2
msr afsr1_el2, x0
isb
ret
write_afsr1_el3: ; .type write_afsr1_el3, %function
func write_afsr1_el3
msr afsr1_el3, x0
isb
ret
@ -463,36 +462,36 @@ write_afsr1_el3: ; .type write_afsr1_el3, %function
* SCTLR accessors
* -----------------------------------------------------
*/
read_sctlr_el1: ; .type read_sctlr_el1, %function
func read_sctlr_el1
mrs x0, sctlr_el1
ret
read_sctlr_el2: ; .type read_sctlr_el2, %function
func read_sctlr_el2
mrs x0, sctlr_el2
ret
read_sctlr_el3: ; .type read_sctlr_el3, %function
func read_sctlr_el3
mrs x0, sctlr_el3
ret
write_sctlr_el1: ; .type write_sctlr_el1, %function
func write_sctlr_el1
msr sctlr_el1, x0
dsb sy
isb
ret
write_sctlr_el2: ; .type write_sctlr_el2, %function
func write_sctlr_el2
msr sctlr_el2, x0
dsb sy
isb
ret
write_sctlr_el3: ; .type write_sctlr_el3, %function
func write_sctlr_el3
msr sctlr_el3, x0
dsb sy
isb
@ -503,36 +502,36 @@ write_sctlr_el3: ; .type write_sctlr_el3, %function
* ACTLR accessors
* -----------------------------------------------------
*/
read_actlr_el1: ; .type read_actlr_el1, %function
func read_actlr_el1
mrs x0, actlr_el1
ret
read_actlr_el2: ; .type read_actlr_el2, %function
func read_actlr_el2
mrs x0, actlr_el2
ret
read_actlr_el3: ; .type read_actlr_el3, %function
func read_actlr_el3
mrs x0, actlr_el3
ret
write_actlr_el1: ; .type write_actlr_el1, %function
func write_actlr_el1
msr actlr_el1, x0
dsb sy
isb
ret
write_actlr_el2: ; .type write_actlr_el2, %function
func write_actlr_el2
msr actlr_el2, x0
dsb sy
isb
ret
write_actlr_el3: ; .type write_actlr_el3, %function
func write_actlr_el3
msr actlr_el3, x0
dsb sy
isb
@ -543,36 +542,36 @@ write_actlr_el3: ; .type write_actlr_el3, %function
* ESR accessors
* -----------------------------------------------------
*/
read_esr_el1: ; .type read_esr_el1, %function
func read_esr_el1
mrs x0, esr_el1
ret
read_esr_el2: ; .type read_esr_el2, %function
func read_esr_el2
mrs x0, esr_el2
ret
read_esr_el3: ; .type read_esr_el3, %function
func read_esr_el3
mrs x0, esr_el3
ret
write_esr_el1: ; .type write_esr_el1, %function
func write_esr_el1
msr esr_el1, x0
dsb sy
isb
ret
write_esr_el2: ; .type write_esr_el2, %function
func write_esr_el2
msr esr_el2, x0
dsb sy
isb
ret
write_esr_el3: ; .type write_esr_el3, %function
func write_esr_el3
msr esr_el3, x0
dsb sy
isb
@ -583,36 +582,36 @@ write_esr_el3: ; .type write_esr_el3, %function
* TCR accessors
* -----------------------------------------------------
*/
read_tcr_el1: ; .type read_tcr_el1, %function
func read_tcr_el1
mrs x0, tcr_el1
ret
read_tcr_el2: ; .type read_tcr_el2, %function
func read_tcr_el2
mrs x0, tcr_el2
ret
read_tcr_el3: ; .type read_tcr_el3, %function
func read_tcr_el3
mrs x0, tcr_el3
ret
write_tcr_el1: ; .type write_tcr_el1, %function
func write_tcr_el1
msr tcr_el1, x0
dsb sy
isb
ret
write_tcr_el2: ; .type write_tcr_el2, %function
func write_tcr_el2
msr tcr_el2, x0
dsb sy
isb
ret
write_tcr_el3: ; .type write_tcr_el3, %function
func write_tcr_el3
msr tcr_el3, x0
dsb sy
isb
@ -623,33 +622,33 @@ write_tcr_el3: ; .type write_tcr_el3, %function
* CPTR accessors
* -----------------------------------------------------
*/
read_cptr_el1: ; .type read_cptr_el1, %function
func read_cptr_el1
b read_cptr_el1
ret
read_cptr_el2: ; .type read_cptr_el2, %function
func read_cptr_el2
mrs x0, cptr_el2
ret
read_cptr_el3: ; .type read_cptr_el3, %function
func read_cptr_el3
mrs x0, cptr_el3
ret
write_cptr_el1: ; .type write_cptr_el1, %function
func write_cptr_el1
b write_cptr_el1
write_cptr_el2: ; .type write_cptr_el2, %function
func write_cptr_el2
msr cptr_el2, x0
dsb sy
isb
ret
write_cptr_el3: ; .type write_cptr_el3, %function
func write_cptr_el3
msr cptr_el3, x0
dsb sy
isb
@ -660,34 +659,34 @@ write_cptr_el3: ; .type write_cptr_el3, %function
* TTBR0 accessors
* -----------------------------------------------------
*/
read_ttbr0_el1: ; .type read_ttbr0_el1, %function
func read_ttbr0_el1
mrs x0, ttbr0_el1
ret
read_ttbr0_el2: ; .type read_ttbr0_el2, %function
func read_ttbr0_el2
mrs x0, ttbr0_el2
ret
read_ttbr0_el3: ; .type read_ttbr0_el3, %function
func read_ttbr0_el3
mrs x0, ttbr0_el3
ret
write_ttbr0_el1: ; .type write_ttbr0_el1, %function
func write_ttbr0_el1
msr ttbr0_el1, x0
isb
ret
write_ttbr0_el2: ; .type write_ttbr0_el2, %function
func write_ttbr0_el2
msr ttbr0_el2, x0
isb
ret
write_ttbr0_el3: ; .type write_ttbr0_el3, %function
func write_ttbr0_el3
msr ttbr0_el3, x0
isb
ret
@ -697,121 +696,121 @@ write_ttbr0_el3: ; .type write_ttbr0_el3, %function
* TTBR1 accessors
* -----------------------------------------------------
*/
read_ttbr1_el1: ; .type read_ttbr1_el1, %function
func read_ttbr1_el1
mrs x0, ttbr1_el1
ret
read_ttbr1_el2: ; .type read_ttbr1_el2, %function
func read_ttbr1_el2
b read_ttbr1_el2
read_ttbr1_el3: ; .type read_ttbr1_el3, %function
func read_ttbr1_el3
b read_ttbr1_el3
write_ttbr1_el1: ; .type write_ttbr1_el1, %function
func write_ttbr1_el1
msr ttbr1_el1, x0
isb
ret
write_ttbr1_el2: ; .type write_ttbr1_el2, %function
func write_ttbr1_el2
b write_ttbr1_el2
write_ttbr1_el3: ; .type write_ttbr1_el3, %function
func write_ttbr1_el3
b write_ttbr1_el3
read_hcr: ; .type read_hcr, %function
func read_hcr
mrs x0, hcr_el2
ret
write_hcr: ; .type write_hcr, %function
func write_hcr
msr hcr_el2, x0
dsb sy
isb
ret
read_cpacr: ; .type read_cpacr, %function
func read_cpacr
mrs x0, cpacr_el1
ret
write_cpacr: ; .type write_cpacr, %function
func write_cpacr
msr cpacr_el1, x0
ret
read_cntfrq_el0: ; .type read_cntfrq_el0, %function
func read_cntfrq_el0
mrs x0, cntfrq_el0
ret
write_cntfrq_el0: ; .type write_cntfrq_el0, %function
func write_cntfrq_el0
msr cntfrq_el0, x0
ret
read_cpuectlr: ; .type read_cpuectlr, %function
func read_cpuectlr
mrs x0, CPUECTLR_EL1
ret
write_cpuectlr: ; .type write_cpuectlr, %function
func write_cpuectlr
msr CPUECTLR_EL1, x0
dsb sy
isb
ret
read_cnthctl_el2: ; .type read_cnthctl_el2, %function
func read_cnthctl_el2
mrs x0, cnthctl_el2
ret
write_cnthctl_el2: ; .type write_cnthctl_el2, %function
func write_cnthctl_el2
msr cnthctl_el2, x0
ret
read_cntfrq: ; .type read_cntfrq, %function
func read_cntfrq
mrs x0, cntfrq_el0
ret
write_cntfrq: ; .type write_cntfrq, %function
func write_cntfrq
msr cntfrq_el0, x0
ret
write_scr: ; .type write_scr, %function
func write_scr
msr scr_el3, x0
dsb sy
isb
ret
read_scr: ; .type read_scr, %function
func read_scr
mrs x0, scr_el3
ret
read_midr: ; .type read_midr, %function
func read_midr
mrs x0, midr_el1
ret
read_mpidr: ; .type read_mpidr, %function
func read_mpidr
mrs x0, mpidr_el1
ret
#if SUPPORT_VFP
enable_vfp: ; .type enable_vfp, %function
func enable_vfp
mrs x0, cpacr_el1
orr x0, x0, #CPACR_VFP_BITS
msr cpacr_el1, x0
@ -822,14 +821,12 @@ enable_vfp: ; .type enable_vfp, %function
ret
// int read_fpexc(void)
read_fpexc: ; .type read_fpexc, %function
func read_fpexc
b read_fpexc
ret
// void write_fpexc(int fpexc)
write_fpexc: ; .type write_fpexc, %function
func write_fpexc
b write_fpexc
ret

View File

@ -29,6 +29,7 @@
*/
#include <arch_helpers.h>
#include <asm_macros.S>
.globl tlbialle1
.globl tlbialle1is
@ -39,50 +40,48 @@
.globl tlbivmalle1
.section .text, "ax"
tlbialle1: ; .type tlbialle1, %function
func tlbialle1
tlbi alle1
dsb sy
isb
ret
tlbialle1is: ; .type tlbialle1is, %function
func tlbialle1is
tlbi alle1is
dsb sy
isb
ret
tlbialle2: ; .type tlbialle2, %function
func tlbialle2
tlbi alle2
dsb sy
isb
ret
tlbialle2is: ; .type tlbialle2is, %function
func tlbialle2is
tlbi alle2is
dsb sy
isb
ret
tlbialle3: ; .type tlbialle3, %function
func tlbialle3
tlbi alle3
dsb sy
isb
ret
tlbialle3is: ; .type tlbialle3is, %function
func tlbialle3is
tlbi alle3is
dsb sy
isb
ret
tlbivmalle1: ; .type tlbivmalle1, %function
func tlbivmalle1
tlbi vmalle1
dsb sy
isb

View File

@ -28,10 +28,10 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm_macros.S>
.globl semihosting_call
.section .text, "ax"
semihosting_call: ; .type semihosting_call, %function
func semihosting_call
hlt #0xf000
ret

View File

@ -28,13 +28,13 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm_macros.S>
.globl spin_lock
.globl spin_unlock
.section .text, "ax";
spin_lock: ; .type spin_lock, %function
func spin_lock
mov w2, #1
sevl
l1: wfe
@ -45,6 +45,6 @@ l2: ldaxr w1, [x0]
ret
spin_unlock: ; .type spin_unlock, %function
func spin_unlock
stlr wzr, [x0]
ret

View File

@ -30,6 +30,7 @@
#include <arch.h>
#include <platform.h>
#include <asm_macros.S>
.globl pcpu_dv_mem_stack
@ -51,8 +52,6 @@
#define PCPU_DV_MEM_STACK_SIZE 0x300
#endif
.section .text, "ax"; .align 3
/* -----------------------------------------------------
* unsigned long long platform_set_coherent_stack
* (unsigned mpidr);
@ -62,7 +61,7 @@
* SCTLR.C bit e.g. while powering down a cpu
* -----------------------------------------------------
*/
platform_set_coherent_stack: ; .type platform_set_coherent_stack, %function
func platform_set_coherent_stack
mov x5, x30 // lr
bl platform_get_core_pos
add x0, x0, #1
@ -79,7 +78,7 @@ platform_set_coherent_stack: ; .type platform_set_coherent_stack, %function
* CoreId
* -----------------------------------------------------
*/
platform_get_core_pos: ; .type platform_get_core_pos, %function
func platform_get_core_pos
and x1, x0, #MPIDR_CPU_MASK
and x0, x0, #MPIDR_CLUSTER_MASK
add x0, x1, x0, LSR #6
@ -93,7 +92,7 @@ platform_get_core_pos: ; .type platform_get_core_pos, %function
* cpu (applicable ony after a cold boot)
* -----------------------------------------------------
*/
platform_is_primary_cpu: ; .type platform_is_primary_cpu, %function
func platform_is_primary_cpu
and x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
cmp x0, #PRIMARY_CPU
cset x0, eq
@ -103,7 +102,7 @@ platform_is_primary_cpu: ; .type platform_is_primary_cpu, %function
* void platform_get_stack (unsigned long mpidr)
* -----------------------------------------------------
*/
platform_get_stack: ; .type platform_get_stack, %function
func platform_get_stack
mov x10, x30 // lr
bl platform_get_core_pos
add x0, x0, #1
@ -117,7 +116,7 @@ platform_get_stack: ; .type platform_get_stack, %function
* void platform_set_stack (unsigned long mpidr)
* -----------------------------------------------------
*/
platform_set_stack: ; .type platform_set_stack, %function
func platform_set_stack
mov x9, x30 // lr
bl platform_get_stack
mov sp, x0
@ -128,7 +127,7 @@ platform_set_stack: ; .type platform_set_stack, %function
* each platform.
* -----------------------------------------------------
*/
platform_check_mpidr: ; .type platform_check_mpidr, %function
func platform_check_mpidr
mov x0, xzr
ret
@ -137,7 +136,7 @@ platform_check_mpidr: ; .type platform_check_mpidr, %function
* each platform.
* -----------------------------------------------------
*/
plat_report_exception:
func plat_report_exception
ret
/* -----------------------------------------------------

View File

@ -32,15 +32,13 @@
#include <platform.h>
#include <fvp_pwrc.h>
#include <gic.h>
#include <asm_macros.S>
.globl platform_get_entrypoint
.globl platform_cold_boot_init
.globl plat_secondary_cold_boot_setup
.section .text, "ax"; .align 3
.macro platform_choose_gicmmap param1, param2, x_tmp, w_tmp, res
ldr \x_tmp, =VE_SYSREGS_BASE + V2M_SYS_ID
ldr \w_tmp, [\x_tmp]
@ -60,7 +58,7 @@
* that the request has gone through.
* -----------------------------------------------------
*/
plat_secondary_cold_boot_setup: ; .type plat_secondary_cold_boot_setup, %function
func plat_secondary_cold_boot_setup
bl read_mpidr
mov x19, x0
bl platform_get_core_pos
@ -120,7 +118,7 @@ cb_panic:
* reset all cpus will read the same WK field
* -----------------------------------------------------
*/
platform_get_entrypoint: ; .type platform_get_entrypoint, %function
func platform_get_entrypoint
mov x9, x30 // lr
mov x2, x0
ldr x1, =PWRC_BASE
@ -160,7 +158,7 @@ _panic: b _panic
* BL1 will always read the mailboxes with the MMU off
* -----------------------------------------------------
*/
platform_mem_init: ; .type platform_mem_init, %function
func platform_mem_init
ldr x0, =TZDRAM_BASE + MBOX_OFF
stp xzr, xzr, [x0, #0]
stp xzr, xzr, [x0, #0x10]
@ -176,7 +174,7 @@ platform_mem_init: ; .type platform_mem_init, %function
* boot to perform early platform initialization
* -----------------------------------------------------
*/
platform_cold_boot_init: ; .type platform_cold_boot_init, %function
func platform_cold_boot_init
mov x20, x0
bl platform_mem_init
bl read_mpidr

View File

@ -30,11 +30,10 @@
#include <arch.h>
#include <platform.h>
#include <asm_macros.S>
.globl plat_report_exception
.section .text, "ax"
/* ---------------------------------------------
* void plat_report_exception(unsigned int type)
* Function to report an unhandled exception
@ -43,7 +42,7 @@
* to indicate where we are
* ---------------------------------------------
*/
plat_report_exception:
func plat_report_exception
mrs x1, CurrentEl
lsr x1, x1, #MODE_EL_SHIFT
lsl x1, x1, #SYS_LED_EL_SHIFT

View File

@ -44,7 +44,7 @@
* saved.
* ---------------------------------------------
*/
tspd_enter_sp:
func tspd_enter_sp
/* Make space for the registers that we're going to save */
mov x3, sp
str x3, [x0, #0]
@ -79,7 +79,7 @@ tspd_enter_sp:
* ---------------------------------------------
*/
.global tspd_exit_sp
tspd_exit_sp:
func tspd_exit_sp
/* Restore the previous stack */
mov sp, x0

View File

@ -41,8 +41,6 @@
.globl __psci_cpu_off
.globl __psci_cpu_suspend
.section .text, "ax"; .align 3
/* -----------------------------------------------------
* This cpu has been physically powered up. Depending
* upon whether it was resumed from suspend or simply
@ -55,7 +53,7 @@
* all this is done.
* -----------------------------------------------------
*/
psci_aff_on_finish_entry:
func psci_aff_on_finish_entry
adr x23, psci_afflvl_on_finishers
b psci_aff_common_finish_entry
@ -120,7 +118,7 @@ _panic:
* suffering from stack coherency issues
* -----------------------------------------------------
*/
__psci_cpu_off:
func __psci_cpu_off
func_prologue
sub sp, sp, #0x10
stp x19, x20, [sp, #0]
@ -137,7 +135,7 @@ __psci_cpu_off:
func_epilogue
ret
__psci_cpu_suspend:
func __psci_cpu_suspend
func_prologue
sub sp, sp, #0x20
stp x19, x20, [sp, #0]
@ -162,7 +160,7 @@ __psci_cpu_suspend:
func_epilogue
ret
final_wfi:
func final_wfi
dsb sy
wfi
wfi_spill: