fvp: Provide per-EL MMU setup functions

Instead of having a single version of the MMU setup functions, able to
execute either in EL3 or in EL1, shared by all bootloader images,
provide separate functions for EL1 and EL3. Each bootloader image can
then call the version appropriate to the exception level it runs at.
The aim is to reduce the amount of code compiled into each BL image by
embedding only what it needs (e.g. BL1 embeds only the EL3 variants).

Change-Id: Ib86831d5450cf778ae78c9c1f7553fe91274c2fa
Sandrine Bailleux 2014-05-09 11:35:36 +01:00
parent b3254e8547
commit b793e43166
8 changed files with 116 additions and 118 deletions


@@ -203,7 +203,7 @@ func tsp_cpu_on_entry
	 * Initialise the MMU
	 * ---------------------------------------------
	 */
-	bl	enable_mmu
+	bl	enable_mmu_el1

	/* ---------------------------------------------
	 * Give ourselves a stack allocated in Normal


@@ -47,80 +47,68 @@
 static unsigned long platform_config[CONFIG_LIMIT];
 
 /*******************************************************************************
- * Enable the MMU assuming that the pagetables have already been created
- *******************************************************************************/
-void enable_mmu()
-{
-	unsigned long mair, tcr, ttbr, sctlr;
-
-	/* Set the attributes in the right indices of the MAIR */
-	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
-	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
-			ATTR_IWBWA_OWBWA_NTR_INDEX);
-
-	/*
-	 * Set TCR bits as well. Inner & outer WBWA & shareable + T0SZ = 32
-	 */
-	tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |
-		TCR_RGN_INNER_WBA | TCR_T0SZ_4GB;
-
-	/* Set TTBR bits as well */
-	ttbr = (unsigned long) l1_xlation_table;
-
-	if (IS_IN_EL3()) {
-		assert((read_sctlr_el3() & SCTLR_M_BIT) == 0);
-
-		write_mair_el3(mair);
-		tcr |= TCR_EL3_RES1;
-		/* Invalidate EL3 TLBs */
-		tlbialle3();
-
-		write_tcr_el3(tcr);
-		write_ttbr0_el3(ttbr);
-
-		/* ensure all translation table writes have drained into memory,
-		 * the TLB invalidation is complete, and translation register
-		 * writes are committed before enabling the MMU
-		 */
-		dsb();
-		isb();
-
-		sctlr = read_sctlr_el3();
-		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;
-		sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;
-		write_sctlr_el3(sctlr);
-	} else {
-		assert((read_sctlr_el1() & SCTLR_M_BIT) == 0);
-
-		write_mair_el1(mair);
-		/* Invalidate EL1 TLBs */
-		tlbivmalle1();
-
-		write_tcr_el1(tcr);
-		write_ttbr0_el1(ttbr);
-
-		/* ensure all translation table writes have drained into memory,
-		 * the TLB invalidation is complete, and translation register
-		 * writes are committed before enabling the MMU
-		 */
-		dsb();
-		isb();
-
-		sctlr = read_sctlr_el1();
-		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;
-		sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;
-		write_sctlr_el1(sctlr);
-	}
-	/* ensure the MMU enable takes effect immediately */
-	isb();
-
-	return;
-}
+ * Macro generating the code for the function enabling the MMU in the given
+ * exception level, assuming that the pagetables have already been created.
+ *
+ *   _el:		Exception level at which the function will run
+ *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
+ *			be OR'ed with the default TCR value.
+ *   _tlbi_fct:		Function to invalidate the TLBs at the current
+ *			exception level
+ ******************************************************************************/
+#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
+	void enable_mmu_el##_el(void)					\
+	{								\
+		uint64_t mair, tcr, ttbr;				\
+		uint32_t sctlr;						\
+									\
+		assert(IS_IN_EL(_el));					\
+		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
+									\
+		/* Set attributes in the right indices of the MAIR */	\
+		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
+		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
+				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
+		write_mair_el##_el(mair);				\
+									\
+		/* Invalidate TLBs at the current exception level */	\
+		_tlbi_fct();						\
+									\
+		/* Set TCR bits as well. */				\
+		/* Inner & outer WBWA & shareable + T0SZ = 32 */	\
+		tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |	\
+			TCR_RGN_INNER_WBA | TCR_T0SZ_4GB;		\
+		tcr |= _tcr_extra;					\
+		write_tcr_el##_el(tcr);					\
+									\
+		/* Set TTBR bits as well */				\
+		ttbr = (uint64_t) l1_xlation_table;			\
+		write_ttbr0_el##_el(ttbr);				\
+									\
+		/* Ensure all translation table writes have drained */	\
+		/* into memory, the TLB invalidation is complete, */	\
+		/* and translation register writes are committed */	\
+		/* before enabling the MMU */				\
+		dsb();							\
+		isb();							\
+									\
+		sctlr = read_sctlr_el##_el();				\
+		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;	\
+		sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;			\
+		write_sctlr_el##_el(sctlr);				\
+									\
+		/* Ensure the MMU enable takes effect immediately */	\
+		isb();							\
+	}

+/* Define EL1 and EL3 variants of the function enabling the MMU */
+DEFINE_ENABLE_MMU_EL(1, 0, tlbivmalle1)
+DEFINE_ENABLE_MMU_EL(3, TCR_EL3_RES1, tlbialle3)
 
 /*
  * Table of regions to map using the MMU.
- * This doesn't include TZRAM as the 'mem_layout' argument passed to to
- * configure_mmu() will give the available subset of that,
+ * This doesn't include TZRAM as the 'mem_layout' argument passed to
+ * configure_mmu_elx() will give the available subset of that,
  */
 const mmap_region_t fvp_mmap[] = {
 	{ TZROM_BASE,	TZROM_SIZE,	MT_MEMORY | MT_RO | MT_SECURE },
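
For reference only, and not part of the patch: once the preprocessor has pasted `_el` into the system register accessor names and substituted `_tcr_extra` and `_tlbi_fct`, the EL3 instantiation above expands to roughly the following function (formatting simplified):

void enable_mmu_el3(void)
{
	uint64_t mair, tcr, ttbr;
	uint32_t sctlr;

	assert(IS_IN_EL(3));
	assert((read_sctlr_el3() & SCTLR_M_BIT) == 0);

	/* Set attributes in the right indices of the MAIR */
	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
	write_mair_el3(mair);

	/* Invalidate the EL3 TLBs (this is the _tlbi_fct argument) */
	tlbialle3();

	/* Inner & outer WBWA & shareable + T0SZ = 32, plus _tcr_extra */
	tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |
		TCR_RGN_INNER_WBA | TCR_T0SZ_4GB;
	tcr |= TCR_EL3_RES1;
	write_tcr_el3(tcr);

	ttbr = (uint64_t) l1_xlation_table;
	write_ttbr0_el3(ttbr);

	/* Drain table writes and complete the TLB invalidation before enabling */
	dsb();
	isb();

	sctlr = read_sctlr_el3();
	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;
	sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;
	write_sctlr_el3(sctlr);

	/* Ensure the MMU enable takes effect immediately */
	isb();
}

Each image then references only the variant for the exception level it runs at, as the per-image hunks below show.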
@@ -138,28 +126,32 @@ const mmap_region_t fvp_mmap[] = {
 };
 
 /*******************************************************************************
- * Setup the pagetables as per the platform memory map & initialize the mmu
- *******************************************************************************/
-void configure_mmu(meminfo_t *mem_layout,
-		   unsigned long ro_start,
-		   unsigned long ro_limit,
-		   unsigned long coh_start,
-		   unsigned long coh_limit)
-{
-	mmap_add_region(mem_layout->total_base, mem_layout->total_size,
-			MT_MEMORY | MT_RW | MT_SECURE);
-	mmap_add_region(ro_start, ro_limit - ro_start,
-			MT_MEMORY | MT_RO | MT_SECURE);
-	mmap_add_region(coh_start, coh_limit - coh_start,
-			MT_DEVICE | MT_RW | MT_SECURE);
-
-	mmap_add(fvp_mmap);
-
-	init_xlat_tables();
-
-	enable_mmu();
-	return;
-}
+ * Macro generating the code for the function setting up the pagetables as per
+ * the platform memory map & initialize the mmu, for the given exception level
+ ******************************************************************************/
+#define DEFINE_CONFIGURE_MMU_EL(_el)					\
+	void configure_mmu_el##_el(meminfo_t *mem_layout,		\
+				   unsigned long ro_start,		\
+				   unsigned long ro_limit,		\
+				   unsigned long coh_start,		\
+				   unsigned long coh_limit)		\
+	{								\
+		mmap_add_region(mem_layout->total_base,			\
+				mem_layout->total_size,			\
+				MT_MEMORY | MT_RW | MT_SECURE);		\
+		mmap_add_region(ro_start, ro_limit - ro_start,		\
+				MT_MEMORY | MT_RO | MT_SECURE);		\
+		mmap_add_region(coh_start, coh_limit - coh_start,	\
+				MT_DEVICE | MT_RW | MT_SECURE);		\
+		mmap_add(fvp_mmap);					\
+		init_xlat_tables();					\
+									\
+		enable_mmu_el##_el();					\
+	}

+/* Define EL1 and EL3 variants of the function initialising the MMU */
+DEFINE_CONFIGURE_MMU_EL(1)
+DEFINE_CONFIGURE_MMU_EL(3)
 
 /* Simple routine which returns a configuration variable value */
 unsigned long platform_get_cfgvar(unsigned int var_id)
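
Likewise, a sketch (not part of the patch) of what `DEFINE_CONFIGURE_MMU_EL(3)` produces after preprocessing; the only EL-specific part is the trailing call, which the `##` token pasting resolves to the matching enable function:

void configure_mmu_el3(meminfo_t *mem_layout,
		       unsigned long ro_start,
		       unsigned long ro_limit,
		       unsigned long coh_start,
		       unsigned long coh_limit)
{
	/* Map the image's RAM, its read-only code/rodata and its coherent memory */
	mmap_add_region(mem_layout->total_base, mem_layout->total_size,
			MT_MEMORY | MT_RW | MT_SECURE);
	mmap_add_region(ro_start, ro_limit - ro_start,
			MT_MEMORY | MT_RO | MT_SECURE);
	mmap_add_region(coh_start, coh_limit - coh_start,
			MT_DEVICE | MT_RW | MT_SECURE);
	mmap_add(fvp_mmap);
	init_xlat_tables();

	enable_mmu_el3();	/* enable_mmu_el##_el with _el = 3 */
}

BL1 and BL31 (which run at EL3) call configure_mmu_el3(), while BL2 and BL32 (which run at Secure-EL1) call configure_mmu_el1(), as the remaining hunks show.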


@@ -138,9 +138,9 @@ void bl1_plat_arch_setup(void)
 		cci_enable_coherency(read_mpidr());
 	}
 
-	configure_mmu(&bl1_tzram_layout,
-		      TZROM_BASE,
-		      TZROM_BASE + TZROM_SIZE,
-		      BL1_COHERENT_RAM_BASE,
-		      BL1_COHERENT_RAM_LIMIT);
+	configure_mmu_el3(&bl1_tzram_layout,
+			  TZROM_BASE,
+			  TZROM_BASE + TZROM_SIZE,
+			  BL1_COHERENT_RAM_BASE,
+			  BL1_COHERENT_RAM_LIMIT);
 }


@@ -172,9 +172,9 @@ void bl2_platform_setup()
  ******************************************************************************/
 void bl2_plat_arch_setup()
 {
-	configure_mmu(&bl2_tzram_layout,
-		      BL2_RO_BASE,
-		      BL2_RO_LIMIT,
-		      BL2_COHERENT_RAM_BASE,
-		      BL2_COHERENT_RAM_LIMIT);
+	configure_mmu_el1(&bl2_tzram_layout,
+			  BL2_RO_BASE,
+			  BL2_RO_LIMIT,
+			  BL2_COHERENT_RAM_BASE,
+			  BL2_COHERENT_RAM_LIMIT);
 }


@@ -172,9 +172,9 @@ void bl31_platform_setup()
  ******************************************************************************/
 void bl31_plat_arch_setup()
 {
-	configure_mmu(&bl2_to_bl31_args->bl31_meminfo,
-		      BL31_RO_BASE,
-		      BL31_RO_LIMIT,
-		      BL31_COHERENT_RAM_BASE,
-		      BL31_COHERENT_RAM_LIMIT);
+	configure_mmu_el3(&bl2_to_bl31_args->bl31_meminfo,
+			  BL31_RO_BASE,
+			  BL31_RO_LIMIT,
+			  BL31_COHERENT_RAM_BASE,
+			  BL31_COHERENT_RAM_LIMIT);
 }


@@ -111,9 +111,9 @@ void bl32_platform_setup()
  ******************************************************************************/
 void bl32_plat_arch_setup()
 {
-	configure_mmu(&bl32_tzdram_layout,
-		      BL32_RO_BASE,
-		      BL32_RO_LIMIT,
-		      BL32_COHERENT_RAM_BASE,
-		      BL32_COHERENT_RAM_LIMIT);
+	configure_mmu_el1(&bl32_tzdram_layout,
+			  BL32_RO_BASE,
+			  BL32_RO_LIMIT,
+			  BL32_COHERENT_RAM_BASE,
+			  BL32_COHERENT_RAM_LIMIT);
 }


@@ -370,12 +370,18 @@ extern void bl2_plat_arch_setup(void);
 extern void bl31_plat_arch_setup(void);
 extern int platform_setup_pm(const struct plat_pm_ops **);
 extern unsigned int platform_get_core_pos(unsigned long mpidr);
-extern void enable_mmu(void);
-extern void configure_mmu(struct meminfo *,
-			  unsigned long,
-			  unsigned long,
-			  unsigned long,
-			  unsigned long);
+extern void enable_mmu_el1(void);
+extern void enable_mmu_el3(void);
+extern void configure_mmu_el1(struct meminfo *mem_layout,
+			      unsigned long ro_start,
+			      unsigned long ro_limit,
+			      unsigned long coh_start,
+			      unsigned long coh_limit);
+extern void configure_mmu_el3(struct meminfo *mem_layout,
+			      unsigned long ro_start,
+			      unsigned long ro_limit,
+			      unsigned long coh_start,
+			      unsigned long coh_limit);
 extern unsigned long platform_get_cfgvar(unsigned int);
 extern int platform_config_setup(void);
 extern void plat_report_exception(unsigned long);


@@ -362,7 +362,7 @@ static unsigned int psci_afflvl0_on_finish(unsigned long mpidr,
 	/*
 	 * Arch. management: Turn on mmu & restore architectural state
 	 */
-	enable_mmu();
+	enable_mmu_el3();
 
 	/*
 	 * All the platform specific actions for turning this cpu