diff --git a/bl1/aarch64/bl1_arch_setup.c b/bl1/aarch64/bl1_arch_setup.c
index cf69ac7fe..eeaa24aff 100644
--- a/bl1/aarch64/bl1_arch_setup.c
+++ b/bl1/aarch64/bl1_arch_setup.c
@@ -37,20 +37,11 @@
  ******************************************************************************/
 void bl1_arch_setup(void)
 {
-	unsigned long tmp_reg = 0;
-
-	/* Enable alignment checks */
-	tmp_reg = read_sctlr_el3();
-	tmp_reg |= (SCTLR_A_BIT | SCTLR_SA_BIT);
-	write_sctlr_el3(tmp_reg);
-	isb();
-
 	/*
 	 * Set the next EL to be AArch64, route external abort and SError
 	 * interrupts to EL3
 	 */
-	tmp_reg = SCR_RES1_BITS | SCR_RW_BIT | SCR_EA_BIT;
-	write_scr(tmp_reg);
+	write_scr_el3(SCR_RES1_BITS | SCR_RW_BIT | SCR_EA_BIT);
 
 	/*
 	 * Enable SError and Debug exceptions
diff --git a/bl1/aarch64/bl1_entrypoint.S b/bl1/aarch64/bl1_entrypoint.S
index ac6d91331..dd7d78feb 100644
--- a/bl1/aarch64/bl1_entrypoint.S
+++ b/bl1/aarch64/bl1_entrypoint.S
@@ -44,7 +44,7 @@ func bl1_entrypoint
 
 	/* ---------------------------------------------
 	 * Set the CPU endianness before doing anything
-	 * that might involve memory reads or writes
+	 * that might involve memory reads or writes.
 	 * ---------------------------------------------
 	 */
 	mrs	x0, sctlr_el3
@@ -59,12 +59,14 @@
 	 */
 	bl	cpu_reset_handler
 
-	/* -------------------------------
-	 * Enable the instruction cache.
-	 * -------------------------------
+	/* ---------------------------------------------
+	 * Enable the instruction cache, stack pointer
+	 * and data access alignment checks
+	 * ---------------------------------------------
 	 */
+	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
 	mrs	x0, sctlr_el3
-	orr	x0, x0, #SCTLR_I_BIT
+	orr	x0, x0, x1
 	msr	sctlr_el3, x0
 	isb
 
diff --git a/bl2/aarch64/bl2_entrypoint.S b/bl2/aarch64/bl2_entrypoint.S
index c615baf60..6fcd0405b 100644
--- a/bl2/aarch64/bl2_entrypoint.S
+++ b/bl2/aarch64/bl2_entrypoint.S
@@ -65,11 +65,13 @@ func bl2_entrypoint
 	msr	vbar_el1, x0
 
 	/* ---------------------------------------------
-	 * Enable the instruction cache.
+	 * Enable the instruction cache, stack pointer
+	 * and data access alignment checks
 	 * ---------------------------------------------
 	 */
+	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
 	mrs	x0, sctlr_el1
-	orr	x0, x0, #SCTLR_I_BIT
+	orr	x0, x0, x1
 	msr	sctlr_el1, x0
 	isb
 
diff --git a/bl31/aarch64/bl31_arch_setup.c b/bl31/aarch64/bl31_arch_setup.c
index e0382b33f..f67881e6b 100644
--- a/bl31/aarch64/bl31_arch_setup.c
+++ b/bl31/aarch64/bl31_arch_setup.c
@@ -42,21 +42,12 @@
  ******************************************************************************/
 void bl31_arch_setup(void)
 {
-	unsigned long tmp_reg = 0;
-	uint64_t counter_freq;
-
-	/* Enable alignment checks */
-	tmp_reg = read_sctlr_el3();
-	tmp_reg |= (SCTLR_A_BIT | SCTLR_SA_BIT);
-	write_sctlr_el3(tmp_reg);
-
 	/*
 	 * Route external abort and SError interrupts to EL3
 	 * other SCR bits will be configured before exiting to a lower exception
 	 * level
 	 */
-	tmp_reg = SCR_RES1_BITS | SCR_EA_BIT;
-	write_scr(tmp_reg);
+	write_scr_el3(SCR_RES1_BITS | SCR_EA_BIT);
 
 	/*
 	 * Enable SError and Debug exceptions
@@ -65,6 +56,5 @@ void bl31_arch_setup(void)
 	enable_debug_exceptions();
 
 	/* Program the counter frequency */
-	counter_freq = plat_get_syscnt_freq();
-	write_cntfrq_el0(counter_freq);
+	write_cntfrq_el0(plat_get_syscnt_freq());
 }
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
index 102398377..69d224368 100644
--- a/bl31/aarch64/bl31_entrypoint.S
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -52,6 +52,15 @@ func bl31_entrypoint
 	mov	x20, x0
 	mov	x21, x1
 #else
+	/* ---------------------------------------------
+	 * Set the CPU endianness before doing anything
+	 * that might involve memory reads or writes.
+	 * ---------------------------------------------
+	 */
+	mrs	x0, sctlr_el3
+	bic	x0, x0, #SCTLR_EE_BIT
+	msr	sctlr_el3, x0
+	isb
 
 	/* -----------------------------------------------------
 	 * Perform any processor specific actions upon reset
@@ -61,14 +70,15 @@
 	 */
 	bl	cpu_reset_handler
 #endif
-
 	/* ---------------------------------------------
-	 * Enable the instruction cache.
+	 * Enable the instruction cache, stack pointer
+	 * and data access alignment checks
 	 * ---------------------------------------------
 	 */
-	mrs	x1, sctlr_el3
-	orr	x1, x1, #SCTLR_I_BIT
-	msr	sctlr_el3, x1
+	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+	mrs	x0, sctlr_el3
+	orr	x0, x0, x1
+	msr	sctlr_el3, x0
 	isb
 
 	/* ---------------------------------------------
diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S
index 7a1797eef..91b6128c5 100644
--- a/bl32/tsp/aarch64/tsp_entrypoint.S
+++ b/bl32/tsp/aarch64/tsp_entrypoint.S
@@ -89,11 +89,13 @@ func tsp_entrypoint
 	msr	vbar_el1, x0
 
 	/* ---------------------------------------------
-	 * Enable the instruction cache.
+	 * Enable the instruction cache, stack pointer
+	 * and data access alignment checks
 	 * ---------------------------------------------
 	 */
+	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
 	mrs	x0, sctlr_el1
-	orr	x0, x0, #SCTLR_I_BIT
+	orr	x0, x0, x1
 	msr	sctlr_el1, x0
 	isb
 
@@ -196,11 +198,13 @@ func tsp_cpu_on_entry
 	msr	vbar_el1, x0
 
 	/* ---------------------------------------------
-	 * Enable the instruction cache.
+	 * Enable the instruction cache, stack pointer
+	 * and data access alignment checks
 	 * ---------------------------------------------
 	 */
+	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
 	mrs	x0, sctlr_el1
-	orr	x0, x0, #SCTLR_I_BIT
+	orr	x0, x0, x1
 	msr	sctlr_el1, x0
 	isb
 
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index ff91efc41..042720823 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -129,11 +129,8 @@
 #define SCTLR_A_BIT		(1 << 1)
 #define SCTLR_C_BIT		(1 << 2)
 #define SCTLR_SA_BIT		(1 << 3)
-#define SCTLR_B_BIT		(1 << 7)
-#define SCTLR_Z_BIT		(1 << 11)
 #define SCTLR_I_BIT		(1 << 12)
 #define SCTLR_WXN_BIT		(1 << 19)
-#define SCTLR_EXCEPTION_BITS	(0x3 << 6)
 #define SCTLR_EE_BIT		(1 << 25)
 
 /* CPUECTLR definitions */
diff --git a/lib/aarch64/xlat_tables.c b/lib/aarch64/xlat_tables.c
index d49411297..ddc9ba88c 100644
--- a/lib/aarch64/xlat_tables.c
+++ b/lib/aarch64/xlat_tables.c
@@ -329,8 +329,7 @@ void init_xlat_tables(void)
 		isb();						\
 								\
 		sctlr = read_sctlr_el##_el();			\
-		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT; \
-		sctlr |= SCTLR_A_BIT;				\
+		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;		\
 								\
 		if (flags & DISABLE_DCACHE)			\
 			sctlr &= ~SCTLR_C_BIT;			\
diff --git a/services/std_svc/psci/psci_entry.S b/services/std_svc/psci/psci_entry.S
index 192b638c1..e9ad1305b 100644
--- a/services/std_svc/psci/psci_entry.S
+++ b/services/std_svc/psci/psci_entry.S
@@ -54,6 +54,25 @@ psci_aff_suspend_finish_entry:
 	adr	x23, psci_afflvl_suspend_finishers
 
 psci_aff_common_finish_entry:
+#if !RESET_TO_BL31
+	/* ---------------------------------------------
+	 * Enable the instruction cache, stack pointer
+	 * and data access alignment checks. Also, set
+	 * the EL3 exception endianness to little-endian.
+	 * It can be assumed that BL3-1 entrypoint code
+	 * will do this when RESET_TO_BL31 is set. The
+	 * same assumption cannot be made when another
+	 * boot loader executes before BL3-1 in the warm
+	 * boot path e.g. BL1.
+	 * ---------------------------------------------
+	 */
+	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+	mrs	x0, sctlr_el3
+	orr	x0, x0, x1
+	msr	sctlr_el3, x0
+	isb
+#endif
+
 	/* ---------------------------------------------
 	 * Initialise the pcpu cache pointer for the CPU
 	 * ---------------------------------------------
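
Note: every entrypoint hunk above performs the same read-modify-write sequence
on the relevant SCTLR register. As a rough C sketch of that sequence at EL3,
using the read_sctlr_el3()/write_sctlr_el3()/isb() accessors that the removed
bl1_arch_setup.c code already relied on (the helper name below is hypothetical,
for illustration only):

    #include <arch.h>		/* SCTLR_*_BIT definitions */
    #include <arch_helpers.h>	/* read_sctlr_el3(), write_sctlr_el3(), isb() */

    /* Hypothetical helper mirroring the new entrypoint assembly. */
    static void enable_icache_and_alignment_checks(void)
    {
    	unsigned long sctlr = read_sctlr_el3();

    	/* I = instruction cache, A = alignment check, SA = SP alignment check */
    	sctlr |= SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT;
    	write_sctlr_el3(sctlr);

    	/* Ensure the SCTLR_EL3 update takes effect before continuing */
    	isb();
    }

The patch deliberately keeps this logic in entrypoint assembly rather than C:
the bits are set before any stack or C code comes into play, which is why the
equivalent C fragments in bl1_arch_setup.c and bl31_arch_setup.c are deleted.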