Merge pull request #60 from athoelke/disable-mmu-v2
Replace disable_mmu with assembler version v2
This commit is contained in:
commit
a1ec2f4c9a
|
@ -212,18 +212,10 @@ func process_exception
|
|||
/* ---------------------------------------------
|
||||
* If BL31 is to be executed in EL3 as well
|
||||
* then turn off the MMU so that it can perform
|
||||
* its own setup. TODO: Assuming flat mapped
|
||||
* translations here. Also all should go into a
|
||||
* separate MMU teardown function
|
||||
* its own setup.
|
||||
* ---------------------------------------------
|
||||
*/
|
||||
mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
|
||||
bl read_sctlr_el3
|
||||
bic x0, x0, x1
|
||||
bl write_sctlr_el3
|
||||
isb
|
||||
mov x0, #DCCISW
|
||||
bl dcsw_op_all
|
||||
bl disable_mmu_icache_el3
|
||||
bl tlbialle3
|
||||
skip_mmu_teardown:
|
||||
ldp x6, x7, [sp, #0x30]
|
||||
|
|
|
@ -78,6 +78,9 @@ extern void inv_dcache_range(unsigned long, unsigned long);
|
|||
extern void dcsw_op_louis(unsigned int);
|
||||
extern void dcsw_op_all(unsigned int);
|
||||
|
||||
extern void disable_mmu_el3(void);
|
||||
extern void disable_mmu_icache_el3(void);
|
||||
|
||||
/*******************************************************************************
|
||||
* Misc. accessor prototypes
|
||||
******************************************************************************/
|
||||
|
|
|
@ -79,6 +79,9 @@
|
|||
.globl zeromem16
|
||||
.globl memcpy16
|
||||
|
||||
.globl disable_mmu_el3
|
||||
.globl disable_mmu_icache_el3
|
||||
|
||||
|
||||
func get_afflvl_shift
|
||||
cmp x0, #3
|
||||
|
@ -332,3 +335,27 @@ m_loop1:
|
|||
subs x2, x2, #1
|
||||
b.ne m_loop1
|
||||
m_end: ret
|
||||
|
||||
/* ---------------------------------------------------------------------------
|
||||
* Disable the MMU at EL3
|
||||
* This is implemented in assembler to ensure that the data cache is cleaned
|
||||
* and invalidated after the MMU is disabled without any intervening cacheable
|
||||
* data accesses
|
||||
* ---------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
/* -----------------------------------------------------------------
 * void disable_mmu_el3(void)
 *
 * Turn off the MMU and data cache at EL3, then clean + invalidate
 * the data caches. Written in assembly so that no cacheable data
 * access can occur between the SCTLR_EL3 write and the cache flush.
 * Clobbers: x0, x1 (plus whatever dcsw_op_all uses).
 * -----------------------------------------------------------------
 */
func disable_mmu_el3
	/* Clear only M (MMU) and C (data cache); I-cache stays enabled */
	mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
	/* Shared tail: x1 holds the SCTLR_EL3 bits to clear */
do_disable_mmu:
	mrs x0, sctlr_el3
	bic x0, x0, x1
	msr sctlr_el3, x0
	isb // ensure MMU is off
	mov x0, #DCCISW // DCache clean and invalidate
	/* Tail call: dcsw_op_all returns directly to our caller */
	b dcsw_op_all
|
||||
|
||||
|
||||
/* -----------------------------------------------------------------
 * void disable_mmu_icache_el3(void)
 *
 * As disable_mmu_el3, but additionally disables the instruction
 * cache (clears the SCTLR_EL3 I bit as well as M and C).
 * Clobbers: x0, x1 (plus whatever dcsw_op_all uses).
 * -----------------------------------------------------------------
 */
func disable_mmu_icache_el3
	/* Clear M, C and I, then fall into the common do_disable_mmu path */
	mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b do_disable_mmu
|
||||
|
||||
|
|
|
@ -118,29 +118,6 @@ void enable_mmu()
|
|||
return;
|
||||
}
|
||||
|
||||
void disable_mmu(void)
|
||||
{
|
||||
unsigned long sctlr;
|
||||
unsigned long current_el = read_current_el();
|
||||
|
||||
if (GET_EL(current_el) == MODE_EL3) {
|
||||
sctlr = read_sctlr_el3();
|
||||
sctlr = sctlr & ~(SCTLR_M_BIT | SCTLR_C_BIT);
|
||||
write_sctlr_el3(sctlr);
|
||||
} else {
|
||||
sctlr = read_sctlr_el1();
|
||||
sctlr = sctlr & ~(SCTLR_M_BIT | SCTLR_C_BIT);
|
||||
write_sctlr_el1(sctlr);
|
||||
}
|
||||
/* ensure the MMU disable takes effect immediately */
|
||||
isb();
|
||||
|
||||
/* Flush the caches */
|
||||
dcsw_op_all(DCCISW);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Table of regions to map using the MMU.
|
||||
* This doesn't include TZRAM as the 'mem_layout' argument passed to to
|
||||
|
|
|
@ -370,7 +370,6 @@ extern void bl2_plat_arch_setup(void);
|
|||
extern void bl31_plat_arch_setup(void);
|
||||
extern int platform_setup_pm(const struct plat_pm_ops **);
|
||||
extern unsigned int platform_get_core_pos(unsigned long mpidr);
|
||||
extern void disable_mmu(void);
|
||||
extern void enable_mmu(void);
|
||||
extern void configure_mmu(struct meminfo *,
|
||||
unsigned long,
|
||||
|
|
Loading…
Reference in New Issue