diff --git a/include/lib/cpus/aarch64/cpuamu.h b/include/lib/cpus/aarch64/cpuamu.h
index 960a52484..921abdbd4 100644
--- a/include/lib/cpus/aarch64/cpuamu.h
+++ b/include/lib/cpus/aarch64/cpuamu.h
@@ -4,8 +4,8 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#ifndef __CPUAMU_H__
-#define __CPUAMU_H__
+#ifndef CPUAMU_H
+#define CPUAMU_H
 
 /*******************************************************************************
  * CPU Activity Monitor Unit register specific definitions.
@@ -32,8 +32,8 @@
 #ifndef __ASSEMBLY__
 #include
 
-uint64_t cpuamu_cnt_read(int idx);
-void cpuamu_cnt_write(int idx, uint64_t val);
+uint64_t cpuamu_cnt_read(unsigned int idx);
+void cpuamu_cnt_write(unsigned int idx, uint64_t val);
 unsigned int cpuamu_read_cpuamcntenset_el0(void);
 unsigned int cpuamu_read_cpuamcntenclr_el0(void);
 void cpuamu_write_cpuamcntenset_el0(unsigned int mask);
@@ -45,4 +45,4 @@ void cpuamu_context_restore(unsigned int nr_counters);
 
 #endif /* __ASSEMBLY__ */
 
-#endif /* __CPUAMU_H__ */
+#endif /* CPUAMU_H */
diff --git a/include/lib/extensions/amu.h b/include/lib/extensions/amu.h
index 46d5e1593..1836fe5ae 100644
--- a/include/lib/extensions/amu.h
+++ b/include/lib/extensions/amu.h
@@ -4,33 +4,35 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#ifndef __AMU_H__
-#define __AMU_H__
+#ifndef AMU_H
+#define AMU_H
 
 #include
 #include
+#include
 #include
+#include
 
 /* All group 0 counters */
-#define AMU_GROUP0_COUNTERS_MASK	0xf
+#define AMU_GROUP0_COUNTERS_MASK	U(0xf)
 
 #ifdef PLAT_AMU_GROUP1_COUNTERS_MASK
 #define AMU_GROUP1_COUNTERS_MASK	PLAT_AMU_GROUP1_COUNTERS_MASK
 #else
-#define AMU_GROUP1_COUNTERS_MASK	0
+#define AMU_GROUP1_COUNTERS_MASK	U(0)
 #endif
 
 #ifdef PLAT_AMU_GROUP1_NR_COUNTERS
 #define AMU_GROUP1_NR_COUNTERS		PLAT_AMU_GROUP1_NR_COUNTERS
 #else
-#define AMU_GROUP1_NR_COUNTERS		0
+#define AMU_GROUP1_NR_COUNTERS		U(0)
 #endif
 
 CASSERT(AMU_GROUP1_COUNTERS_MASK <= 0xffff, invalid_amu_group1_counters_mask);
 CASSERT(AMU_GROUP1_NR_COUNTERS <= 16, invalid_amu_group1_nr_counters);
 
-int amu_supported(void);
-void amu_enable(int el2_unused);
+bool amu_supported(void);
+void amu_enable(bool el2_unused);
 
 /* Group 0 configuration helpers */
 uint64_t amu_group0_cnt_read(int idx);
@@ -41,4 +43,4 @@ uint64_t amu_group1_cnt_read(int idx);
 void amu_group1_cnt_write(int idx, uint64_t val);
 void amu_group1_set_evtype(int idx, unsigned int val);
 
-#endif /* __AMU_H__ */
+#endif /* AMU_H */
diff --git a/include/lib/extensions/amu_private.h b/include/lib/extensions/amu_private.h
index 0c660bb83..ab4e6aaba 100644
--- a/include/lib/extensions/amu_private.h
+++ b/include/lib/extensions/amu_private.h
@@ -1,19 +1,19 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#ifndef __AMU_PRIVATE_H__
-#define __AMU_PRIVATE_H__
+#ifndef AMU_PRIVATE_H
+#define AMU_PRIVATE_H
 
 #include
 
 uint64_t amu_group0_cnt_read_internal(int idx);
-void amu_group0_cnt_write_internal(int idx, uint64_t);
+void amu_group0_cnt_write_internal(int idx, uint64_t val);
 
 uint64_t amu_group1_cnt_read_internal(int idx);
-void amu_group1_cnt_write_internal(int idx, uint64_t);
+void amu_group1_cnt_write_internal(int idx, uint64_t val);
 void amu_group1_set_evtype_internal(int idx, unsigned int val);
 
-#endif /* __AMU_PRIVATE_H__ */
+#endif /* AMU_PRIVATE_H */
diff --git a/include/lib/extensions/mpam.h b/include/lib/extensions/mpam.h
index 571b96b65..ac8c00a43 100644
--- a/include/lib/extensions/mpam.h
+++ b/include/lib/extensions/mpam.h
@@ -10,6 +10,6 @@
 #include
 
 bool mpam_supported(void);
-void mpam_enable(int el2_unused);
+void mpam_enable(bool el2_unused);
 
 #endif /* MPAM_H */
diff --git a/include/lib/extensions/spe.h b/include/lib/extensions/spe.h
index b2b188ef5..d4b925fe4 100644
--- a/include/lib/extensions/spe.h
+++ b/include/lib/extensions/spe.h
@@ -4,11 +4,13 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#ifndef __SPE_H__
-#define __SPE_H__
+#ifndef SPE_H
+#define SPE_H
 
-int spe_supported(void);
-void spe_enable(int el2_unused);
+#include
+
+bool spe_supported(void);
+void spe_enable(bool el2_unused);
 void spe_disable(void);
 
-#endif /* __SPE_H__ */
+#endif /* SPE_H */
diff --git a/include/lib/extensions/sve.h b/include/lib/extensions/sve.h
index 9c7f37f26..83df1775e 100644
--- a/include/lib/extensions/sve.h
+++ b/include/lib/extensions/sve.h
@@ -4,10 +4,12 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#ifndef __SVE_H__
-#define __SVE_H__
+#ifndef SVE_H
+#define SVE_H
 
-int sve_supported(void);
-void sve_enable(int el2_unused);
+#include
 
-#endif /* __SVE_H__ */
+bool sve_supported(void);
+void sve_enable(bool el2_unused);
+
+#endif /* SVE_H */
diff --git a/lib/cpus/aarch64/cpuamu_helpers.S b/lib/cpus/aarch64/cpuamu_helpers.S
index 8965d6d09..79b72883b 100644
--- a/lib/cpus/aarch64/cpuamu_helpers.S
+++ b/lib/cpus/aarch64/cpuamu_helpers.S
@@ -16,7 +16,7 @@
 	.globl	cpuamu_write_cpuamcntenclr_el0
 
 /*
- * uint64_t cpuamu_cnt_read(int idx);
+ * uint64_t cpuamu_cnt_read(unsigned int idx);
  *
  * Given `idx`, read the corresponding AMU counter
  * and return it in `x0`.
@@ -41,7 +41,7 @@ func cpuamu_cnt_read
 endfunc cpuamu_cnt_read
 
 /*
- * void cpuamu_cnt_write(int idx, uint64_t val);
+ * void cpuamu_cnt_write(unsigned int idx, uint64_t val);
  *
  * Given `idx`, write `val` to the corresponding AMU counter.
  */
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
index 11ef6e5f4..80cea2847 100644
--- a/lib/el3_runtime/aarch32/context_mgmt.c
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -129,7 +130,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
  * When EL2 is implemented but unused `el2_unused` is non-zero, otherwise
  * it is zero.
 ******************************************************************************/
-static void enable_extensions_nonsecure(int el2_unused)
+static void enable_extensions_nonsecure(bool el2_unused)
 {
 #if IMAGE_BL32
 #if ENABLE_AMU
@@ -175,7 +176,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 {
 	uint32_t hsctlr, scr;
 	cpu_context_t *ctx = cm_get_context(security_state);
-	int el2_unused = 0;
+	bool el2_unused = false;
 
 	assert(ctx);
 
@@ -200,7 +201,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 		isb();
 	} else if (read_id_pfr1() &
 			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
-		el2_unused = 1;
+		el2_unused = true;
 
 		/*
 		 * Set the NS bit to access NS copies of certain banked
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index d3984a289..f037e18a4 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -231,7 +232,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
  * When EL2 is implemented but unused `el2_unused` is non-zero, otherwise
  * it is zero.
 ******************************************************************************/
-static void enable_extensions_nonsecure(int el2_unused)
+static void enable_extensions_nonsecure(bool el2_unused)
 {
 #if IMAGE_BL31
 #if ENABLE_SPE_FOR_LOWER_ELS
@@ -289,7 +290,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 {
 	uint32_t sctlr_elx, scr_el3, mdcr_el2;
 	cpu_context_t *ctx = cm_get_context(security_state);
-	int el2_unused = 0;
+	bool el2_unused = false;
 	uint64_t hcr_el2 = 0;
 
 	assert(ctx);
@@ -304,7 +305,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 		sctlr_elx |= SCTLR_EL2_RES1;
 		write_sctlr_el2(sctlr_elx);
 	} else if (EL_IMPLEMENTED(2)) {
-		el2_unused = 1;
+		el2_unused = true;
 
 		/*
 		 * EL2 present but unused, need to disable safely.
diff --git a/lib/extensions/amu/aarch32/amu.c b/lib/extensions/amu/aarch32/amu.c
index 05c98f1cd..585d908f7 100644
--- a/lib/extensions/amu/aarch32/amu.c
+++ b/lib/extensions/amu/aarch32/amu.c
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 
 #define AMU_GROUP0_NR_COUNTERS	4
 
@@ -20,17 +21,17 @@ struct amu_ctx {
 
 static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
 
-int amu_supported(void)
+bool amu_supported(void)
 {
 	uint64_t features;
 
 	features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
-	return (features & ID_PFR0_AMU_MASK) == 1;
+	return (features & ID_PFR0_AMU_MASK) == 1U;
 }
 
-void amu_enable(int el2_unused)
+void amu_enable(bool el2_unused)
 {
-	if (amu_supported() == 0)
+	if (!amu_supported())
 		return;
 
 	if (el2_unused) {
@@ -54,8 +55,8 @@ void amu_enable(int el2_unused)
 /* Read the group 0 counter identified by the given `idx`. */
 uint64_t amu_group0_cnt_read(int idx)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
 
 	return amu_group0_cnt_read_internal(idx);
 }
@@ -63,8 +64,8 @@ uint64_t amu_group0_cnt_read(int idx)
 /* Write the group 0 counter identified by the given `idx` with `val`. */
 void amu_group0_cnt_write(int idx, uint64_t val)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
 
 	amu_group0_cnt_write_internal(idx, val);
 	isb();
@@ -73,8 +74,8 @@ void amu_group0_cnt_write(int idx, uint64_t val)
 /* Read the group 1 counter identified by the given `idx`. */
 uint64_t amu_group1_cnt_read(int idx)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
 
 	return amu_group1_cnt_read_internal(idx);
 }
@@ -82,8 +83,8 @@ uint64_t amu_group1_cnt_read(int idx)
 /* Write the group 1 counter identified by the given `idx` with `val`. */
 void amu_group1_cnt_write(int idx, uint64_t val)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
 
 	amu_group1_cnt_write_internal(idx, val);
 	isb();
@@ -91,8 +92,8 @@ void amu_group1_cnt_write(int idx, uint64_t val)
 
 void amu_group1_set_evtype(int idx, unsigned int val)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
 
 	amu_group1_set_evtype_internal(idx, val);
 	isb();
@@ -103,7 +104,7 @@ static void *amu_context_save(const void *arg)
 	struct amu_ctx *ctx;
 	int i;
 
-	if (amu_supported() == 0)
+	if (!amu_supported())
 		return (void *)-1;
 
 	ctx = &amu_ctxs[plat_my_core_pos()];
@@ -126,7 +127,7 @@ static void *amu_context_save(const void *arg)
 	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
 		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
 
-	return 0;
+	return (void *)0;
 }
 
 static void *amu_context_restore(const void *arg)
@@ -134,13 +135,13 @@
 	struct amu_ctx *ctx;
 	int i;
 
-	if (amu_supported() == 0)
+	if (!amu_supported())
 		return (void *)-1;
 
 	ctx = &amu_ctxs[plat_my_core_pos()];
 
 	/* Counters were disabled in `amu_context_save()` */
-	assert(read_amcntenset0() == 0 && read_amcntenset1() == 0);
+	assert((read_amcntenset0() == 0U) && (read_amcntenset1() == 0U));
 
 	/* Restore group 0 counters */
 	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
@@ -153,7 +154,7 @@
 	/* Enable group 1 counters */
 	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
 
-	return 0;
+	return (void *)0;
 }
 
 SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index 5d556e5d3..1564e8402 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include
 
 #define AMU_GROUP0_NR_COUNTERS	4
 
@@ -21,23 +22,23 @@ struct amu_ctx {
 
 static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
 
-int amu_supported(void)
+bool amu_supported(void)
 {
 	uint64_t features;
 
 	features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
-	return (features & ID_AA64PFR0_AMU_MASK) == 1;
+	return (features & ID_AA64PFR0_AMU_MASK) == 1U;
 }
 
 /*
  * Enable counters. This function is meant to be invoked
  * by the context management library before exiting from EL3.
  */
-void amu_enable(int el2_unused)
+void amu_enable(bool el2_unused)
 {
 	uint64_t v;
 
-	if (amu_supported() == 0)
+	if (!amu_supported())
 		return;
 
 	if (el2_unused) {
@@ -67,8 +68,8 @@ void amu_enable(int el2_unused)
 /* Read the group 0 counter identified by the given `idx`. */
 uint64_t amu_group0_cnt_read(int idx)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
 
 	return amu_group0_cnt_read_internal(idx);
 }
@@ -76,8 +77,8 @@ uint64_t amu_group0_cnt_read(int idx)
 /* Write the group 0 counter identified by the given `idx` with `val`. */
 void amu_group0_cnt_write(int idx, uint64_t val)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
 
 	amu_group0_cnt_write_internal(idx, val);
 	isb();
@@ -86,8 +87,8 @@ void amu_group0_cnt_write(int idx, uint64_t val)
 /* Read the group 1 counter identified by the given `idx`. */
 uint64_t amu_group1_cnt_read(int idx)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
 
 	return amu_group1_cnt_read_internal(idx);
 }
@@ -95,8 +96,8 @@ uint64_t amu_group1_cnt_read(int idx)
 /* Write the group 1 counter identified by the given `idx` with `val`. */
 void amu_group1_cnt_write(int idx, uint64_t val)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
 
 	amu_group1_cnt_write_internal(idx, val);
 	isb();
@@ -108,8 +109,8 @@ void amu_group1_cnt_write(int idx, uint64_t val)
  */
 void amu_group1_set_evtype(int idx, unsigned int val)
 {
-	assert(amu_supported() != 0);
-	assert (idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
 
 	amu_group1_set_evtype_internal(idx, val);
 	isb();
@@ -120,14 +121,14 @@ static void *amu_context_save(const void *arg)
 	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
 	int i;
 
-	if (amu_supported() == 0)
+	if (!amu_supported())
 		return (void *)-1;
 
 	/* Assert that group 0/1 counter configuration is what we expect */
-	assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK &&
-	       read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
+	assert((read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK) &&
+	       (read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK));
 
-	assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
+	assert(((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
 		<= AMU_GROUP1_NR_COUNTERS);
 
 	/*
@@ -146,7 +147,7 @@ static void *amu_context_save(const void *arg)
 	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
 		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
 
-	return 0;
+	return (void *)0;
 }
 
 static void *amu_context_restore(const void *arg)
@@ -154,30 +155,30 @@
 	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
 	int i;
 
-	if (amu_supported() == 0)
+	if (!amu_supported())
 		return (void *)-1;
 
 	/* Counters were disabled in `amu_context_save()` */
-	assert(read_amcntenset0_el0() == 0 && read_amcntenset1_el0() == 0);
+	assert((read_amcntenset0_el0() == 0U) && (read_amcntenset1_el0() == 0U));
 
-	assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
+	assert(((sizeof(int) * 8U) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
 		<= AMU_GROUP1_NR_COUNTERS);
 
 	/* Restore group 0 counters */
 	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
-		if (AMU_GROUP0_COUNTERS_MASK & (1U << i))
+		if ((AMU_GROUP0_COUNTERS_MASK & (1U << i)) != 0U)
 			amu_group0_cnt_write(i, ctx->group0_cnts[i]);
 
 	/* Restore group 1 counters */
 	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
-		if (AMU_GROUP1_COUNTERS_MASK & (1U << i))
+		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U)
 			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
 
 	/* Restore group 0/1 counter configuration */
 	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
 	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
 
-	return 0;
+	return (void *)0;
 }
 
 SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
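
Aside (not part of the patch): every *_supported() helper touched in this series follows the same shape -- read an ID register, shift the field of interest down, mask it, and compare it as an unsigned value, now returning bool instead of int. Below is a minimal standalone sketch of that pattern; the helper name id_field_equals() is hypothetical, while read_id_aa64pfr0_el1(), ID_AA64PFR0_AMU_SHIFT and ID_AA64PFR0_AMU_MASK are the real TF-A names used in the diff above.

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical helper: returns true when the ID register field selected by
 * `shift`/`mask` holds `expected`, mirroring how amu_supported() now
 * reports feature presence as a bool.
 */
static bool id_field_equals(uint64_t idreg, unsigned int shift,
			    uint64_t mask, uint64_t expected)
{
	return ((idreg >> shift) & mask) == expected;
}

With such a helper, amu_supported() could be phrased as
id_field_equals(read_id_aa64pfr0_el1(), ID_AA64PFR0_AMU_SHIFT, ID_AA64PFR0_AMU_MASK, 1U).
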
diff --git a/lib/extensions/mpam/mpam.c b/lib/extensions/mpam/mpam.c
index e628827bf..d57bb470b 100644
--- a/lib/extensions/mpam/mpam.c
+++ b/lib/extensions/mpam/mpam.c
@@ -16,7 +16,7 @@ bool mpam_supported(void)
 	return ((features & ID_AA64PFR0_MPAM_MASK) != 0U);
 }
 
-void mpam_enable(int el2_unused)
+void mpam_enable(bool el2_unused)
 {
 	if (!mpam_supported())
 		return;
@@ -31,7 +31,7 @@ void mpam_enable(int el2_unused)
 	 * If EL2 is implemented but unused, disable trapping to EL2 when lower
 	 * ELs access their own MPAM registers.
 	 */
-	if (el2_unused != 0) {
+	if (el2_unused) {
 		write_mpam2_el2(0);
 
 		if ((read_mpamidr_el1() & MPAMIDR_HAS_HCR_BIT) != 0U)
diff --git a/lib/extensions/spe/spe.c b/lib/extensions/spe/spe.c
index dc3584037..e5df015b0 100644
--- a/lib/extensions/spe/spe.c
+++ b/lib/extensions/spe/spe.c
@@ -8,26 +8,30 @@
 #include
 #include
 #include
+#include
 
-/*
- * The assembler does not yet understand the psb csync mnemonic
- * so use the equivalent hint instruction.
- */
-#define psb_csync() asm volatile("hint #17")
+static inline void psb_csync(void)
+{
+	/*
+	 * The assembler does not yet understand the psb csync mnemonic
+	 * so use the equivalent hint instruction.
+	 */
+	__asm__ volatile("hint #17");
+}
 
-int spe_supported(void)
+bool spe_supported(void)
 {
 	uint64_t features;
 
 	features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
-	return (features & ID_AA64DFR0_PMS_MASK) == 1;
+	return (features & ID_AA64DFR0_PMS_MASK) == 1U;
 }
 
-void spe_enable(int el2_unused)
+void spe_enable(bool el2_unused)
 {
 	uint64_t v;
 
-	if (spe_supported() == 0)
+	if (!spe_supported())
 		return;
 
 	if (el2_unused) {
@@ -59,7 +63,7 @@ void spe_disable(void)
 {
 	uint64_t v;
 
-	if (spe_supported() == 0)
+	if (!spe_supported())
 		return;
 
 	/* Drain buffered data */
@@ -75,13 +79,14 @@ void spe_disable(void)
 
 static void *spe_drain_buffers_hook(const void *arg)
 {
-	if (spe_supported() == 0)
+	if (!spe_supported())
 		return (void *)-1;
 
 	/* Drain buffered data */
 	psb_csync();
 	dsbnsh();
-	return 0;
+
+	return (void *)0;
 }
 
 SUBSCRIBE_TO_EVENT(cm_entering_secure_world, spe_drain_buffers_hook);
diff --git a/lib/extensions/sve/sve.c b/lib/extensions/sve/sve.c
index 644248780..e031bf61b 100644
--- a/lib/extensions/sve/sve.c
+++ b/lib/extensions/sve/sve.c
@@ -7,21 +7,22 @@
 #include
 #include
 #include
+#include
 #include
 
-int sve_supported(void)
+bool sve_supported(void)
 {
 	uint64_t features;
 
 	features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
-	return (features & ID_AA64PFR0_SVE_MASK) == 1;
+	return (features & ID_AA64PFR0_SVE_MASK) == 1U;
 }
 
 static void *disable_sve_hook(const void *arg)
 {
 	uint64_t cptr;
 
-	if (sve_supported() == 0)
+	if (!sve_supported())
 		return (void *)-1;
 
 	/*
@@ -39,14 +40,14 @@ static void *disable_sve_hook(const void *arg)
 	 * No explicit ISB required here as ERET to switch to Secure
 	 * world covers it
 	 */
-	return 0;
+	return (void *)0;
 }
 
 static void *enable_sve_hook(const void *arg)
 {
 	uint64_t cptr;
 
-	if (sve_supported() == 0)
+	if (!sve_supported())
 		return (void *)-1;
 
 	/*
@@ -60,14 +61,14 @@ static void *enable_sve_hook(const void *arg)
 	 * No explicit ISB required here as ERET to switch to Non-secure
 	 * world covers it
 	 */
-	return 0;
+	return (void *)0;
 }
 
-void sve_enable(int el2_unused)
+void sve_enable(bool el2_unused)
 {
 	uint64_t cptr;
 
-	if (sve_supported() == 0)
+	if (!sve_supported())
 		return;
 
 #if CTX_INCLUDE_FPREGS
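
Aside (not part of the patch): the reworked prototypes make the EL2 handling explicit at the call site. The sketch below shows how the bool el2_unused flag introduced above flows from the context-management code into the extension drivers; the wrapper function enable_lower_el_extensions() is hypothetical, while the *_enable() prototypes are exactly those introduced in this series.

#include <stdbool.h>

void spe_enable(bool el2_unused);
void sve_enable(bool el2_unused);
void mpam_enable(bool el2_unused);

/*
 * Hypothetical caller, modelled on enable_extensions_nonsecure(): each
 * driver checks its own ID register and returns early when the feature is
 * absent, so the calls can be made unconditionally.
 */
void enable_lower_el_extensions(bool el2_unused)
{
	spe_enable(el2_unused);
	sve_enable(el2_unused);
	mpam_enable(el2_unused);
}
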