/*
 * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef UTILS_DEF_H
#define UTILS_DEF_H

#include <export/lib/utils_def_exp.h>

/* Compute the number of elements in the given array */
#define ARRAY_SIZE(a)	\
	(sizeof(a) / sizeof((a)[0]))
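
/*
 * Example: given "static int table[16];", ARRAY_SIZE(table) evaluates to 16.
 * Note that this only works on actual arrays, not on pointers.
 */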

#define IS_POWER_OF_TWO(x)			\
	(((x) & ((x) - 1)) == 0)

#define SIZE_FROM_LOG2_WORDS(n)		(4 << (n))
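
/*
 * Examples: IS_POWER_OF_TWO(64) is true (note that the test also accepts 0),
 * and SIZE_FROM_LOG2_WORDS(2) is 16, i.e. 4 words of 4 bytes each.
 */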

#define BIT_32(nr)			(U(1) << (nr))
#define BIT_64(nr)			(ULL(1) << (nr))

#ifdef __aarch64__
#define BIT				BIT_64
#else
#define BIT				BIT_32
#endif

/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h. For example
 * GENMASK_64(39, 21) gives us the 64bit vector 0x000000ffffe00000.
 */
#if defined(__LINKER__) || defined(__ASSEMBLER__)
#define GENMASK_32(h, l) \
	(((0xFFFFFFFF) << (l)) & (0xFFFFFFFF >> (32 - 1 - (h))))

#define GENMASK_64(h, l) \
	((~0 << (l)) & (~0 >> (64 - 1 - (h))))
#else
#define GENMASK_32(h, l) \
	(((~UINT32_C(0)) << (l)) & (~UINT32_C(0) >> (32 - 1 - (h))))

#define GENMASK_64(h, l) \
	(((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))
#endif

#ifdef __aarch64__
#define GENMASK				GENMASK_64
#else
#define GENMASK				GENMASK_32
#endif
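
/* For instance, GENMASK_32(15, 8) yields the 32-bit mask 0x0000ff00. */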

/*
 * This variant of div_round_up can be used in macro definition but should not
 * be used in C code as the `div` parameter is evaluated twice.
 */
#define DIV_ROUND_UP_2EVAL(n, d)	(((n) + (d) - 1) / (d))

#define div_round_up(val, div) __extension__ ({	\
	__typeof__(div) _div = (div);		\
	((val) + _div - (__typeof__(div)) 1) / _div;	\
})
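
/* Example: div_round_up(10U, 3U) and DIV_ROUND_UP_2EVAL(10U, 3U) both yield 4. */

/*
 * MIN(), MAX() and CLAMP() below evaluate each argument exactly once. The
 * dummy pointer comparisons such as (void)(&_x == &_y) make the compiler
 * warn when the two arguments have mismatched types.
 */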

#define MIN(x, y) __extension__ ({	\
	__typeof__(x) _x = (x);		\
	__typeof__(y) _y = (y);		\
	(void)(&_x == &_y);		\
	_x < _y ? _x : _y;		\
})

#define MAX(x, y) __extension__ ({	\
	__typeof__(x) _x = (x);		\
	__typeof__(y) _y = (y);		\
	(void)(&_x == &_y);		\
	_x > _y ? _x : _y;		\
})

#define CLAMP(x, min, max) __extension__ ({	\
	__typeof__(x) _x = (x);			\
	__typeof__(min) _min = (min);		\
	__typeof__(max) _max = (max);		\
	(void)(&_x == &_min);			\
	(void)(&_x == &_max);			\
	(_x > _max ? _max : (_x < _min ? _min : _x)); \
})
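
/* Example: CLAMP(7, 0, 5) yields 5 and CLAMP(-1, 0, 5) yields 0. */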

/*
 * The round_up() macro rounds up a value to the given boundary in a
 * type-agnostic yet type-safe manner. The boundary must be a power of two.
 * In other words, it computes the smallest multiple of boundary which is
 * greater than or equal to value.
 *
 * round_down() is similar but rounds the value down instead.
 */
#define round_boundary(value, boundary)		\
	((__typeof__(value))((boundary) - 1))

#define round_up(value, boundary)		\
	((((value) - 1) | round_boundary(value, boundary)) + 1)

#define round_down(value, boundary)		\
	((value) & ~round_boundary(value, boundary))
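
/*
 * Example: with a 4 KiB boundary, round_up(0x1234, 0x1000) gives 0x2000 and
 * round_down(0x1234, 0x1000) gives 0x1000.
 */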

/*
 * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
 * Both arguments must be unsigned pointer values (i.e. uintptr_t).
 */
#define check_uptr_overflow(_ptr, _inc)		\
	((_ptr) > (UINTPTR_MAX - (_inc)))

/*
 * Evaluates to 1 if (u32 + inc) overflows, 0 otherwise.
 * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
 */
#define check_u32_overflow(_u32, _inc) \
	((_u32) > (UINT32_MAX - (_inc)))
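
/*
 * Both overflow checks are phrased as a comparison against the type's maximum
 * so that the potentially wrapping addition itself is never evaluated.
 */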

/* Register size of the current architecture. */
#ifdef __aarch64__
#define REGSZ		U(8)
#else
#define REGSZ		U(4)
#endif

/*
 * Test for the current architecture version to be at least the version
 * expected.
 */
#define ARM_ARCH_AT_LEAST(_maj, _min) \
((ARM_ARCH_MAJOR > (_maj)) || \
((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))
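
/*
 * For example, a build with ARM_ARCH_MAJOR=8 and ARM_ARCH_MINOR=2 satisfies
 * ARM_ARCH_AT_LEAST(8, 1) but not ARM_ARCH_AT_LEAST(8, 3).
 */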

/*
 * Import an assembly or linker symbol as a C expression with the specified
 * type
 */
#define IMPORT_SYM(type, sym, name) \
	extern char sym[];\
	static const __attribute__((unused)) type name = (type) sym;
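
/*
 * Usage sketch (the linker symbol name below is purely illustrative):
 *
 *     IMPORT_SYM(uintptr_t, __EXAMPLE_RO_END__, EXAMPLE_RO_END);
 *
 * exposes the address of the symbol __EXAMPLE_RO_END__ as the C constant
 * EXAMPLE_RO_END.
 */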

/*
 * When the symbol is used to hold a pointer, its alignment can be asserted
 * with this macro. For example, if there is a linker symbol that is going to
 * be used as a 64-bit pointer, the value of the linker symbol must also be
 * aligned to 64 bit. This macro makes sure this is the case.
 */
#define ASSERT_SYM_PTR_ALIGN(sym) assert(((size_t)(sym) % __alignof__(*(sym))) == 0)

#define COMPILER_BARRIER() __asm__ volatile ("" ::: "memory")
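
/*
 * Note: COMPILER_BARRIER() only prevents the compiler from reordering memory
 * accesses across it; it does not emit any CPU barrier instruction.
 */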

/* Compiler builtin available in GCC >= 9 and planned for LLVM */
#ifdef __HAVE_SPECULATION_SAFE_VALUE
# define SPECULATION_SAFE_VALUE(var) __builtin_speculation_safe_value(var)
#else
# define SPECULATION_SAFE_VALUE(var) var
#endif
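
/*
 * Usage sketch (names are illustrative only): after a bounds check, sanitise
 * an attacker-controllable index before it is used to access memory, so that
 * the access cannot be exploited under mis-speculation:
 *
 *     if (idx < TABLE_SIZE) {
 *         val = table[SPECULATION_SAFE_VALUE(idx)];
 *     }
 */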

/*
 * Ticks elapsed in one second with a signal of 1 MHz
 */
#define MHZ_TICKS_PER_SEC	U(1000000)

#endif /* UTILS_DEF_H */