fix(pie): invalidate data cache in the entire image range if PIE is enabled

Currently on image entry, the data cache in the RW address range is
invalidated before the MMU is enabled, to safeguard against potentially
stale data left behind by a previous firmware stage. If PIE is enabled,
however, RO sections, including the GOT, may also be modified during
PIE fixup. Therefore, to be on the safe side, invalidate the entire
image region if PIE is enabled.

Signed-off-by: Zelalem Aweke <zelalem.aweke@arm.com>
Change-Id: I7ee2a324fe4377b026e32f9ab842617ad4e09d89
commit 596d20d9e4 (parent cc808acb9d)
Zelalem Aweke, 2021-10-15 17:25:52 -05:00
3 changed files with 45 additions and 5 deletions

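For context, the PIE fixup step rebases each GOT entry from the
link-time base to the run-time load address, so its writes land in a
section the linker script places in the RO region. Below is a minimal
C sketch of that rebasing, assuming the __GOT_START__/__GOT_END__
linker symbols; fixup_got() is a purely illustrative name (the
in-tree fixup is done in assembly):

#include <stdint.h>

/* Linker-defined symbols bounding the GOT (assumed names). */
extern uintptr_t __GOT_START__[], __GOT_END__[];

/*
 * Illustrative C equivalent of the PIE fixup: slide every GOT entry
 * by the delta between the linked and loaded base addresses. Each
 * store dirties a cache line inside an otherwise read-only section,
 * which is why invalidating only the RW range is no longer enough.
 */
static void fixup_got(uintptr_t linked_base, uintptr_t loaded_base)
{
	for (uintptr_t *entry = __GOT_START__; entry < __GOT_END__; entry++)
		*entry += loaded_base - linked_base;
}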

@@ -100,11 +100,27 @@ func tsp_entrypoint _align=3
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
-	 * use by an earlier boot loader stage.
+	 * use by an earlier boot loader stage. If PIE
+	 * is enabled however, RO sections including the
+	 * GOT may be modified during pie fixup.
+	 * Therefore, to be on the safe side, invalidate
+	 * the entire image region if PIE is enabled.
	 * ---------------------------------------------
	 */
-	adr	x0, __RW_START__
-	adr	x1, __RW_END__
+#if ENABLE_PIE
+#if SEPARATE_CODE_AND_RODATA
+	adrp	x0, __TEXT_START__
+	add	x0, x0, :lo12:__TEXT_START__
+#else
+	adrp	x0, __RO_START__
+	add	x0, x0, :lo12:__RO_START__
+#endif /* SEPARATE_CODE_AND_RODATA */
+#else
+	adrp	x0, __RW_START__
+	add	x0, x0, :lo12:__RW_START__
+#endif /* ENABLE_PIE */
+	adrp	x1, __RW_END__
+	add	x1, x1, :lo12:__RW_END__
	sub	x1, x1, x0
	bl	inv_dcache_range

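The same start-address selection appears in all three hunks. Here is a
compact C sketch of the logic, reusing the linker symbols and the
ENABLE_PIE / SEPARATE_CODE_AND_RODATA build flags from the diff; the
wrapper name is hypothetical, while inv_dcache_range() is the helper
the assembly actually calls:

#include <stddef.h>
#include <stdint.h>

/* Linker-script symbols delimiting the image regions. */
extern char __TEXT_START__[], __RO_START__[], __RW_START__[], __RW_END__[];

void inv_dcache_range(uintptr_t addr, size_t size); /* TF-A cache helper */

static void invalidate_image_dcache(void)
{
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
	/* Code comes first in the image: cover everything from .text on. */
	uintptr_t start = (uintptr_t)__TEXT_START__;
#else
	/* A single RO region (code + rodata + GOT) precedes RW data. */
	uintptr_t start = (uintptr_t)__RO_START__;
#endif
#else
	/* Without PIE, only the RW range can hold stale data. */
	uintptr_t start = (uintptr_t)__RW_START__;
#endif
	inv_dcache_range(start, (uintptr_t)__RW_END__ - start);
}

Note that the AArch64 hunks also switch from adr to the
adrp/add :lo12: pair: adr reaches only ±1MB from the PC, while adrp
plus the low-12-bit add covers ±4GB, so the pair is the safer way to
take the address of a distant linker symbol while staying
position-independent.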
View File

@@ -380,10 +380,21 @@
	 * includes the data and NOBITS sections. This is done to
	 * safeguard against possible corruption of this memory by
	 * dirty cache lines in a system cache as a result of use by
-	 * an earlier boot loader stage.
+	 * an earlier boot loader stage. If PIE is enabled however,
+	 * RO sections including the GOT may be modified during
+	 * pie fixup. Therefore, to be on the safe side, invalidate
+	 * the entire image region if PIE is enabled.
	 * -----------------------------------------------------------------
	 */
+#if ENABLE_PIE
+#if SEPARATE_CODE_AND_RODATA
+	ldr	r0, =__TEXT_START__
+#else
+	ldr	r0, =__RO_START__
+#endif /* SEPARATE_CODE_AND_RODATA */
+#else
	ldr	r0, =__RW_START__
+#endif /* ENABLE_PIE */
	ldr	r1, =__RW_END__
	sub	r1, r1, r0
	bl	inv_dcache_range

@@ -430,11 +430,24 @@
	 * includes the data and NOBITS sections. This is done to
	 * safeguard against possible corruption of this memory by
	 * dirty cache lines in a system cache as a result of use by
-	 * an earlier boot loader stage.
+	 * an earlier boot loader stage. If PIE is enabled however,
+	 * RO sections including the GOT may be modified during
+	 * pie fixup. Therefore, to be on the safe side, invalidate
+	 * the entire image region if PIE is enabled.
	 * -------------------------------------------------------------
	 */
+#if ENABLE_PIE
+#if SEPARATE_CODE_AND_RODATA
+	adrp	x0, __TEXT_START__
+	add	x0, x0, :lo12:__TEXT_START__
+#else
+	adrp	x0, __RO_START__
+	add	x0, x0, :lo12:__RO_START__
+#endif /* SEPARATE_CODE_AND_RODATA */
+#else
	adrp	x0, __RW_START__
	add	x0, x0, :lo12:__RW_START__
+#endif /* ENABLE_PIE */
	adrp	x1, __RW_END__
	add	x1, x1, :lo12:__RW_END__
	sub	x1, x1, x0