2017-03-08 14:40:23 +00:00
|
|
|
/*
|
2019-01-11 11:20:10 +00:00
|
|
|
* Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
|
2017-03-08 14:40:23 +00:00
|
|
|
*
|
2017-05-03 09:38:09 +01:00
|
|
|
* SPDX-License-Identifier: BSD-3-Clause
|
2017-03-08 14:40:23 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <assert.h>
|
2018-08-02 09:57:29 +01:00
|
|
|
#include <stdbool.h>
|
2018-12-14 00:18:21 +00:00
|
|
|
|
|
|
|
#include <platform_def.h>
|
|
|
|
|
|
|
|
#include <arch.h>
|
2019-01-11 11:20:10 +00:00
|
|
|
#include <arch_features.h>
|
2018-12-14 00:18:21 +00:00
|
|
|
#include <arch_helpers.h>
|
|
|
|
#include <lib/cassert.h>
|
|
|
|
#include <lib/utils_def.h>
|
|
|
|
#include <lib/xlat_tables/xlat_tables_v2.h>
|
|
|
|
|
2017-03-08 14:40:23 +00:00
|
|
|
#include "../xlat_tables_private.h"
|
|
|
|
|
xlat: Fix MISRA defects
Fix defects of MISRA C-2012 rules 8.13, 10.1, 10.3, 10.4, 10.8, 11.6,
14.4, 15.7, 17.8, 20.10, 20.12, 21.1 and Directive 4.9.
Change-Id: I7ff61e71733908596dbafe2e99d99b4fce9765bd
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
2018-07-24 10:20:53 +01:00
|
|
|
#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
|
2017-11-08 12:53:47 +00:00
|
|
|
#error ARMv7 target does not support LPAE MMU descriptors
|
|
|
|
#endif
|
|
|
|
|
2018-06-11 13:40:32 +01:00
|
|
|
/*
|
2018-08-02 09:57:29 +01:00
|
|
|
* Returns true if the provided granule size is supported, false otherwise.
|
2018-06-11 13:40:32 +01:00
|
|
|
*/
|
2018-08-02 09:57:29 +01:00
|
|
|
bool xlat_arch_is_granule_size_supported(size_t size)
|
2018-06-11 13:40:32 +01:00
|
|
|
{
|
|
|
|
/*
|
2018-07-12 15:54:10 +01:00
|
|
|
* The library uses the long descriptor translation table format, which
|
|
|
|
* supports 4 KiB pages only.
|
2018-06-11 13:40:32 +01:00
|
|
|
*/
|
2018-08-02 09:57:29 +01:00
|
|
|
return size == PAGE_SIZE_4KB;
|
2018-06-11 13:40:32 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
size_t xlat_arch_get_max_supported_granule_size(void)
|
|
|
|
{
|
xlat: Fix MISRA defects
Fix defects of MISRA C-2012 rules 8.13, 10.1, 10.3, 10.4, 10.8, 11.6,
14.4, 15.7, 17.8, 20.10, 20.12, 21.1 and Directive 4.9.
Change-Id: I7ff61e71733908596dbafe2e99d99b4fce9765bd
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
2018-07-24 10:20:53 +01:00
|
|
|
return PAGE_SIZE_4KB;
|
2018-06-11 13:40:32 +01:00
|
|
|
}
|
|
|
|
|
2017-03-22 15:48:51 +00:00
|
|
|
#if ENABLE_ASSERTIONS
|
2017-05-31 13:31:48 +01:00
|
|
|
/*
 * Widest physical address supported by the long descriptor translation
 * table format: 40 bits, i.e. the highest addressable PA is 2^40 - 1.
 */
unsigned long long xlat_arch_get_max_supported_pa(void)
{
	const unsigned long long pa_limit = (1ULL << 40) - 1ULL;

	return pa_limit;
}
|
2019-01-25 11:36:01 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Return minimum virtual address space size supported by the architecture
|
|
|
|
*/
|
|
|
|
uintptr_t xlat_get_min_virt_addr_space_size(void)
|
|
|
|
{
|
|
|
|
return MIN_VIRT_ADDR_SPACE_SIZE;
|
|
|
|
}
|
2017-03-22 15:48:51 +00:00
|
|
|
#endif /* ENABLE_ASSERTIONS*/
|
2017-03-08 14:40:23 +00:00
|
|
|
|
2018-08-07 19:59:49 +01:00
|
|
|
bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
|
2017-03-08 14:40:23 +00:00
|
|
|
{
|
2018-08-07 19:59:49 +01:00
|
|
|
if (ctx->xlat_regime == EL1_EL0_REGIME) {
|
|
|
|
assert(xlat_arch_current_el() == 1U);
|
|
|
|
return (read_sctlr() & SCTLR_M_BIT) != 0U;
|
|
|
|
} else {
|
|
|
|
assert(ctx->xlat_regime == EL2_REGIME);
|
|
|
|
assert(xlat_arch_current_el() == 2U);
|
|
|
|
return (read_hsctlr() & HSCTLR_M_BIT) != 0U;
|
|
|
|
}
|
xlat v2: Flush xlat tables after being modified
During cold boot, the initial translation tables are created with data
caches disabled, so all modifications go to memory directly. After the
MMU is enabled and data cache is enabled, any modification to the tables
goes to data cache, and eventually may get flushed to memory.
If CPU0 modifies the tables while CPU1 is off, CPU0 will have the
modified tables in its data cache. When CPU1 is powered on, the MMU is
enabled, then it enables coherency, and then it enables the data cache.
Until this is done, CPU1 isn't in coherency, and the translation tables
it sees can be outdated if CPU0 still has some modified entries in its
data cache.
This can be a problem in some cases. For example, the warm boot code
uses only the tables mapped during cold boot, which don't normally
change. However, if they are modified (and a RO page is made RW, or a XN
page is made executable) the CPU will see the old attributes and crash
when it tries to access it.
This doesn't happen in systems with HW_ASSISTED_COHERENCY or
WARMBOOT_ENABLE_DCACHE_EARLY. In these systems, the data cache is
enabled at the same time as the MMU. As soon as this happens, the CPU is
in coherency.
There was an attempt of a fix in psci_helpers.S, but it didn't solve the
problem. That code has been deleted. The code was introduced in commit
<264410306381> ("Invalidate TLB entries during warm boot").
Now, during a map or unmap operation, the memory associated to each
modified table is flushed. Traversing a table will also flush it's
memory, as there is no way to tell in the current implementation if the
table that has been traversed has also been modified.
Change-Id: I4b520bca27502f1018878061bc5fb82af740bb92
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
2018-08-07 12:47:12 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
bool is_dcache_enabled(void)
|
|
|
|
{
|
2018-08-07 19:59:49 +01:00
|
|
|
if (IS_IN_EL2()) {
|
|
|
|
return (read_hsctlr() & HSCTLR_C_BIT) != 0U;
|
|
|
|
} else {
|
|
|
|
return (read_sctlr() & SCTLR_C_BIT) != 0U;
|
|
|
|
}
|
2017-03-08 14:40:23 +00:00
|
|
|
}
|
|
|
|
|
2018-08-07 19:59:49 +01:00
|
|
|
uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
|
2018-07-05 08:11:48 +01:00
|
|
|
{
|
2018-08-07 19:59:49 +01:00
|
|
|
if (xlat_regime == EL1_EL0_REGIME) {
|
|
|
|
return UPPER_ATTRS(XN) | UPPER_ATTRS(PXN);
|
|
|
|
} else {
|
|
|
|
assert(xlat_regime == EL2_REGIME);
|
|
|
|
return UPPER_ATTRS(XN);
|
|
|
|
}
|
2018-07-05 08:11:48 +01:00
|
|
|
}
|
|
|
|
|
2018-08-07 19:59:49 +01:00
|
|
|
void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
|
2017-09-25 15:23:22 +01:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Ensure the translation table write has drained into memory before
|
|
|
|
* invalidating the TLB entry.
|
|
|
|
*/
|
|
|
|
dsbishst();
|
|
|
|
|
2018-08-07 19:59:49 +01:00
|
|
|
if (xlat_regime == EL1_EL0_REGIME) {
|
|
|
|
tlbimvaais(TLBI_ADDR(va));
|
|
|
|
} else {
|
|
|
|
assert(xlat_regime == EL2_REGIME);
|
|
|
|
tlbimvahis(TLBI_ADDR(va));
|
|
|
|
}
|
2017-09-25 15:23:22 +01:00
|
|
|
}
|
|
|
|
|
2017-02-27 17:23:54 +00:00
|
|
|
/*
 * Synchronisation to be issued after TLB maintenance: waits until previously
 * issued TLB invalidations are complete and their effects are visible to
 * this PE. The BPIALLIS -> DSB ISH -> ISB ordering is architectural and must
 * not be changed.
 */
void xlat_arch_tlbi_va_sync(void)
{
	/* Invalidate all entries from branch predictors. */
	bpiallis();

	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
	 * "Ordering and completion of TLB maintenance instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}
|
|
|
|
|
xlat: Fix MISRA defects
Fix defects of MISRA C-2012 rules 8.13, 10.1, 10.3, 10.4, 10.8, 11.6,
14.4, 15.7, 17.8, 20.10, 20.12, 21.1 and Directive 4.9.
Change-Id: I7ff61e71733908596dbafe2e99d99b4fce9765bd
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
2018-07-24 10:20:53 +01:00
|
|
|
/*
 * Return the exception level this code is executing at: 2 in Hyp mode,
 * 1 otherwise (secure PL1 modes, including Monitor).
 */
unsigned int xlat_arch_current_el(void)
{
	if (IS_IN_HYP()) {
		return 2U;
	}

	assert(IS_IN_SVC() || IS_IN_MON());

	/*
	 * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
	 * SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
	 *
	 * The PL1&0 translation regime in AArch32 behaves like the EL1&0
	 * regime in AArch64 except for the XN bits, but we set and unset
	 * them at the same time, so there's no difference in practice.
	 */
	return 1U;
}
|
|
|
|
|
2017-03-08 14:40:23 +00:00
|
|
|
/*******************************************************************************
 * Compute the MMU configuration for PL1 or PL2, assuming that the page tables
 * have already been created. The register values are only computed and stored
 * into 'params' here; a separate routine programs them into the hardware.
 *
 * params      - Output array; MMU_CFG_MAIR, MMU_CFG_TCR and MMU_CFG_TTBR0
 *               entries are written.
 * flags       - XLAT_TABLE_NC selects non-cacheable table-walk attributes.
 * base_table  - Physical/virtual address of the base translation table,
 *               programmed into TTBR0.
 * max_pa      - Not used by this AArch32 implementation; presumably kept for
 *               interface parity with the AArch64 backend — confirm.
 * max_va      - Highest virtual address to be translated; bounds the T0SZ
 *               field when smaller than 32 bits.
 * xlat_regime - EL1_EL0_REGIME or EL2_REGIME.
 ******************************************************************************/
void setup_mmu_cfg(uint64_t *params, unsigned int flags,
		   const uint64_t *base_table, unsigned long long max_pa,
		   uintptr_t max_va, __unused int xlat_regime)
{
	uint64_t mair, ttbr0;
	uint32_t ttbcr;

	/* Set attributes in the right indices of the MAIR */
	mair = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);

	/*
	 * Configure the control register for stage 1 of the PL1&0 or EL2
	 * translation regimes.
	 */

	/* Use the Long-descriptor translation table format. */
	ttbcr = TTBCR_EAE_BIT;

	if (xlat_regime == EL1_EL0_REGIME) {
		assert(IS_IN_SVC() || IS_IN_MON());
		/*
		 * Disable translation table walk for addresses that are
		 * translated using TTBR1. Therefore, only TTBR0 is used.
		 */
		ttbcr |= TTBCR_EPD1_BIT;
	} else {
		assert(xlat_regime == EL2_REGIME);
		assert(IS_IN_HYP());

		/*
		 * Set HTCR bits as well. Set HTTBR table properties
		 * as Inner & outer WBWA & shareable.
		 */
		ttbcr |= HTCR_RES1 |
			HTCR_SH0_INNER_SHAREABLE | HTCR_RGN0_OUTER_WBA |
			HTCR_RGN0_INNER_WBA;
	}

	/*
	 * Limit the input address ranges and memory region sizes translated
	 * using TTBR0 to the given virtual address space size, if smaller than
	 * 32 bits.
	 */
	if (max_va != UINT32_MAX) {
		uintptr_t virtual_addr_space_size = max_va + 1U;

		assert(virtual_addr_space_size >=
			xlat_get_min_virt_addr_space_size());
		assert(virtual_addr_space_size <=
			MAX_VIRT_ADDR_SPACE_SIZE);
		assert(IS_POWER_OF_TWO(virtual_addr_space_size));

		/*
		 * __builtin_ctzll(0) is undefined but here we are guaranteed
		 * that virtual_addr_space_size is in the range [1, UINT32_MAX].
		 */
		int t0sz = 32 - __builtin_ctzll(virtual_addr_space_size);

		ttbcr |= (uint32_t) t0sz;
	}

	/*
	 * Set the cacheability and shareability attributes for memory
	 * associated with translation table walks using TTBR0.
	 */
	if ((flags & XLAT_TABLE_NC) != 0U) {
		/* Inner & outer non-cacheable non-shareable. */
		ttbcr |= TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
			TTBCR_RGN0_INNER_NC;
	} else {
		/* Inner & outer WBWA & shareable. */
		ttbcr |= TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
			TTBCR_RGN0_INNER_WBA;
	}

	/* Set TTBR0 bits as well */
	ttbr0 = (uint64_t)(uintptr_t) base_table;

	if (is_armv8_2_ttcnp_present()) {
		/* Enable CnP bit so as to share page tables with all PEs. */
		ttbr0 |= TTBR_CNP_BIT;
	}

	/* Now populate MMU configuration */
	params[MMU_CFG_MAIR] = mair;
	params[MMU_CFG_TCR] = (uint64_t) ttbcr;
	params[MMU_CFG_TTBR0] = ttbr0;
}
|