Merge pull request #1722 from antonio-nino-diaz-arm/an/arch

Synchronize architectural headers with TF-A-Tests
Antonio Niño Díaz 2018-12-18 13:50:38 +01:00 committed by GitHub
commit bcc7ad76c6
4 changed files with 155 additions and 14 deletions

@@ -248,6 +248,19 @@ DEFINE_COPROCR_RW_FUNCS(cnthp_ctl_el2, CNTHP_CTL)
DEFINE_COPROCR_RW_FUNCS(cnthp_tval_el2, CNTHP_TVAL)
DEFINE_COPROCR_RW_FUNCS_64(cnthp_cval_el2, CNTHP_CVAL_64)
#define get_cntp_ctl_enable(x) (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
CNTP_CTL_ENABLE_MASK)
#define get_cntp_ctl_imask(x) (((x) >> CNTP_CTL_IMASK_SHIFT) & \
CNTP_CTL_IMASK_MASK)
#define get_cntp_ctl_istatus(x) (((x) >> CNTP_CTL_ISTATUS_SHIFT) & \
CNTP_CTL_ISTATUS_MASK)
#define set_cntp_ctl_enable(x) ((x) |= U(1) << CNTP_CTL_ENABLE_SHIFT)
#define set_cntp_ctl_imask(x) ((x) |= U(1) << CNTP_CTL_IMASK_SHIFT)
#define clr_cntp_ctl_enable(x) ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
#define clr_cntp_ctl_imask(x) ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
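These accessors operate on a value read out of CNTP_CTL rather than on the register itself. A minimal usage sketch (hypothetical, not part of the patch), assuming a read_cntp_ctl()/write_cntp_ctl() pair is generated elsewhere in this header:

/*
 * Hypothetical sketch: enable the EL1 physical timer with its interrupt
 * unmasked, assuming read_cntp_ctl()/write_cntp_ctl() accessors exist.
 */
static inline void cntp_enable_sketch(void)
{
	uint32_t ctl = read_cntp_ctl();

	clr_cntp_ctl_imask(ctl);	/* Unmask the timer interrupt. */
	set_cntp_ctl_enable(ctl);	/* Set the ENABLE bit. */
	write_cntp_ctl(ctl);

	/* ISTATUS can later be polled to see whether the timer has fired. */
}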
DEFINE_COPROCR_RW_FUNCS(icc_sre_el1, ICC_SRE)
DEFINE_COPROCR_RW_FUNCS(icc_sre_el2, ICC_HSRE)
DEFINE_COPROCR_RW_FUNCS(icc_sre_el3, ICC_MSRE)
@@ -378,4 +391,59 @@ static inline unsigned int get_current_el(void)
#define read_amcntenset0_el0() read_amcntenset0()
#define read_amcntenset1_el0() read_amcntenset1()
/* Helper functions to manipulate CPSR */
static inline void enable_irq(void)
{
/*
* The compiler memory barrier will prevent the compiler from
* scheduling non-volatile memory access after the write to the
* register.
*
* This could happen if some initialization code issues non-volatile
* accesses to an area used by an interrupt handler, on the assumption
* that this is safe because interrupts are disabled at that point
* (according to program order). However, non-volatile accesses are not
* necessarily ordered in program order relative to volatile inline
* assembly statements (and volatile accesses).
*/
COMPILER_BARRIER();
__asm__ volatile ("cpsie i");
isb();
}
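A minimal sketch of the hazard this comment guards against (the names below are hypothetical and not part of the patch): without COMPILER_BARRIER(), the compiler could sink the plain store past the inline assembly that unmasks interrupts.

static int handler_data;		/* Also read by an IRQ handler. */

static void init_then_unmask(void)
{
	handler_data = 42;	/* Non-volatile store to handler state. */
	enable_irq();		/* The barrier inside enable_irq() keeps the
				 * store above "cpsie i", so the handler never
				 * observes uninitialised data. */
}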
static inline void enable_serror(void)
{
COMPILER_BARRIER();
__asm__ volatile ("cpsie a");
isb();
}
static inline void enable_fiq(void)
{
COMPILER_BARRIER();
__asm__ volatile ("cpsie f");
isb();
}
static inline void disable_irq(void)
{
COMPILER_BARRIER();
__asm__ volatile ("cpsid i");
isb();
}
static inline void disable_serror(void)
{
COMPILER_BARRIER();
__asm__ volatile ("cpsid a");
isb();
}
static inline void disable_fiq(void)
{
COMPILER_BARRIER();
__asm__ volatile ("cpsid f");
isb();
}
#endif /* ARCH_HELPERS_H */

@@ -534,19 +534,6 @@
#define CNTP_CTL_IMASK_MASK U(1)
#define CNTP_CTL_ISTATUS_MASK U(1)
#define get_cntp_ctl_enable(x) (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
CNTP_CTL_ENABLE_MASK)
#define get_cntp_ctl_imask(x) (((x) >> CNTP_CTL_IMASK_SHIFT) & \
CNTP_CTL_IMASK_MASK)
#define get_cntp_ctl_istatus(x) (((x) >> CNTP_CTL_ISTATUS_SHIFT) & \
CNTP_CTL_ISTATUS_MASK)
#define set_cntp_ctl_enable(x) ((x) |= (U(1) << CNTP_CTL_ENABLE_SHIFT))
#define set_cntp_ctl_imask(x) ((x) |= (U(1) << CNTP_CTL_IMASK_SHIFT))
#define clr_cntp_ctl_enable(x) ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
#define clr_cntp_ctl_imask(x) ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
/* Exception Syndrome register bits and bobs */
#define ESR_EC_SHIFT U(26)
#define ESR_EC_MASK U(0x3f)

@@ -215,11 +215,81 @@ DEFINE_SYSOP_TYPE_FUNC(dmb, ishst)
DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
DEFINE_SYSOP_FUNC(isb)
static inline void enable_irq(void)
{
/*
* The compiler memory barrier will prevent the compiler from
* scheduling non-volatile memory access after the write to the
* register.
*
* This could happen if some initialization code issues non-volatile
* accesses to an area used by an interrupt handler, on the assumption
* that this is safe because interrupts are disabled at that point
* (according to program order). However, non-volatile accesses are not
* necessarily ordered in program order relative to volatile inline
* assembly statements (and volatile accesses).
*/
COMPILER_BARRIER();
write_daifclr(DAIF_IRQ_BIT);
isb();
}
static inline void enable_fiq(void)
{
COMPILER_BARRIER();
write_daifclr(DAIF_FIQ_BIT);
isb();
}
static inline void enable_serror(void)
{
COMPILER_BARRIER();
write_daifclr(DAIF_ABT_BIT);
isb();
}
static inline void enable_debug_exceptions(void)
{
COMPILER_BARRIER();
write_daifclr(DAIF_DBG_BIT);
isb();
}
static inline void disable_irq(void)
{
COMPILER_BARRIER();
write_daifset(DAIF_IRQ_BIT);
isb();
}
static inline void disable_fiq(void)
{
COMPILER_BARRIER();
write_daifset(DAIF_FIQ_BIT);
isb();
}
static inline void disable_serror(void)
{
COMPILER_BARRIER();
write_daifset(DAIF_ABT_BIT);
isb();
}
static inline void disable_debug_exceptions(void)
{
COMPILER_BARRIER();
write_daifset(DAIF_DBG_BIT);
isb();
}
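A short sketch, not part of the patch, of how these helpers are typically paired to protect an update shared with an interrupt handler:

/* Hypothetical example: mask IRQs around a store shared with a handler. */
static void update_shared_slot(volatile uint64_t *slot, uint64_t value)
{
	disable_irq();		/* Mask IRQs; effective after the isb(). */
	*slot = value;		/* Cannot be preempted by an IRQ on this CPU. */
	enable_irq();		/* Unmask IRQs again. */
}

A real caller would normally save and restore the prior DAIF state rather than unconditionally unmasking at the end.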
#if !ERROR_DEPRECATED
uint32_t get_afflvl_shift(uint32_t);
uint32_t mpidr_mask_lower_afflvls(uint64_t, uint32_t);
void __dead2 eret(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7);
#endif
void __dead2 smc(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7);
@@ -306,6 +376,19 @@ DEFINE_SYSREG_RW_FUNCS(cntp_cval_el0)
DEFINE_SYSREG_READ_FUNC(cntpct_el0)
DEFINE_SYSREG_RW_FUNCS(cnthctl_el2)
#define get_cntp_ctl_enable(x) (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
CNTP_CTL_ENABLE_MASK)
#define get_cntp_ctl_imask(x) (((x) >> CNTP_CTL_IMASK_SHIFT) & \
CNTP_CTL_IMASK_MASK)
#define get_cntp_ctl_istatus(x) (((x) >> CNTP_CTL_ISTATUS_SHIFT) & \
CNTP_CTL_ISTATUS_MASK)
#define set_cntp_ctl_enable(x) ((x) |= (U(1) << CNTP_CTL_ENABLE_SHIFT))
#define set_cntp_ctl_imask(x) ((x) |= (U(1) << CNTP_CTL_IMASK_SHIFT))
#define clr_cntp_ctl_enable(x) ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
#define clr_cntp_ctl_imask(x) ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
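As on the AArch32 side, these macros manipulate a local copy of the control value. A hypothetical sketch, assuming read_cntp_ctl_el0(), write_cntp_ctl_el0() and write_cntp_tval_el0() accessors are generated elsewhere in this header:

/* Hypothetical sketch: arm the EL1 physical timer for 'ticks' counter cycles. */
static inline void arm_cntp_el0_timer(uint32_t ticks)
{
	uint64_t ctl = read_cntp_ctl_el0();

	write_cntp_tval_el0(ticks);	/* Program the countdown value. */
	clr_cntp_ctl_imask(ctl);	/* Unmask the timer interrupt. */
	set_cntp_ctl_enable(ctl);	/* Enable the timer. */
	write_cntp_ctl_el0(ctl);
}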
DEFINE_SYSREG_RW_FUNCS(tpidr_el3)
DEFINE_SYSREG_RW_FUNCS(cntvoff_el2)

@@ -9,9 +9,11 @@
#include <assert_macros.S>
#include <xlat_tables_defs.h>
#if !ERROR_DEPRECATED
.globl get_afflvl_shift
.globl mpidr_mask_lower_afflvls
.globl eret
#endif /* ERROR_DEPRECATED */
.globl smc
.globl zero_normalmem
@@ -30,6 +32,7 @@
.globl enable_vfp
#endif
#if !ERROR_DEPRECATED
func get_afflvl_shift
cmp x0, #3
cinc x0, x0, eq
@@ -52,7 +55,7 @@ endfunc mpidr_mask_lower_afflvls
func eret
eret
endfunc eret
#endif /* ERROR_DEPRECATED */
func smc
smc #0