diff --git a/include/lib/utils_def.h b/include/lib/utils_def.h
index 7a7012d3d..198b890c7 100644
--- a/include/lib/utils_def.h
+++ b/include/lib/utils_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
  * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -104,6 +104,13 @@
 #define round_down(value, boundary)		\
 	((value) & ~round_boundary(value, boundary))
 
+/**
+ * Helper macro to check that a value is aligned to a given boundary.
+ */
+#define is_aligned(value, boundary)			\
+	(round_up((uintptr_t)(value), (boundary)) ==	\
+	 round_down((uintptr_t)(value), (boundary)))
+
 /*
  * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
  * Both arguments must be unsigned pointer values (i.e. uintptr_t).
diff --git a/include/plat/common/platform.h b/include/plat/common/platform.h
index 766450901..b62a63158 100644
--- a/include/plat/common/platform.h
+++ b/include/plat/common/platform.h
@@ -347,6 +347,10 @@ int plat_spm_sp_get_next_address(void **sp_base, size_t *sp_size,
 int plat_spm_core_manifest_load(spmc_manifest_attribute_t *manifest,
 				const void *pm_addr);
 #endif
+#if defined(SPMC_AT_EL3)
+int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size);
+#endif
+
 /*******************************************************************************
  * Mandatory BL image load functions(may be overridden).
  ******************************************************************************/
diff --git a/include/services/el3_spmc_ffa_memory.h b/include/services/el3_spmc_ffa_memory.h
new file mode 100644
index 000000000..2037ecadc
--- /dev/null
+++ b/include/services/el3_spmc_ffa_memory.h
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef EL3_SPMC_FFA_MEM_H
+#define EL3_SPMC_FFA_MEM_H
+
+#include <stdint.h>
+
+/*
+ * Subset of Arm Firmware Framework for Armv8-A
+ * (https://developer.arm.com/docs/den0077/a) needed for shared memory.
+ */
+
+/**
+ * typedef ffa_endpoint_id16_t - Endpoint ID
+ *
+ * The current implementation only supports VM IDs. The FF-A spec also
+ * supports stream endpoint IDs.
+ */
+typedef uint16_t ffa_endpoint_id16_t;
+
+/**
+ * struct ffa_cons_mrd - Constituent memory region descriptor
+ * @address:
+ *         Start address of contiguous memory region. Must be 4K page aligned.
+ * @page_count:
+ *         Number of 4K pages in region.
+ * @reserved_12_15:
+ *         Reserve bytes 12-15 to pad struct size to 16 bytes.
+ */
+struct ffa_cons_mrd {
+	uint64_t address;
+	uint32_t page_count;
+	uint32_t reserved_12_15;
+};
+CASSERT(sizeof(struct ffa_cons_mrd) == 16, assert_ffa_cons_mrd_size_mismatch);
+
+/**
+ * struct ffa_comp_mrd - Composite memory region descriptor
+ * @total_page_count:
+ *         Number of 4K pages in memory region. Must match sum of
+ *         @address_range_array[].page_count.
+ * @address_range_count:
+ *         Number of entries in @address_range_array.
+ * @reserved_8_15:
+ *         Reserve bytes 8-15 to pad struct size to 16 byte alignment and
+ *         make @address_range_array 16 byte aligned.
+ * @address_range_array:
+ *         Array of &struct ffa_cons_mrd entries.
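+ *
+ * Illustrative example only (not part of the interface): a composite region
+ * of three 4K pages split across two constituents could be populated as
+ * below; the buffer and addresses are hypothetical.
+ *
+ *	static uint8_t buf[sizeof(struct ffa_comp_mrd) +
+ *			   2 * sizeof(struct ffa_cons_mrd)] __aligned(16);
+ *	struct ffa_comp_mrd *comp = (struct ffa_comp_mrd *)buf;
+ *
+ *	comp->total_page_count = 3;
+ *	comp->address_range_count = 2;
+ *	comp->address_range_array[0] =
+ *		(struct ffa_cons_mrd){ .address = 0x88000000, .page_count = 2 };
+ *	comp->address_range_array[1] =
+ *		(struct ffa_cons_mrd){ .address = 0x89000000, .page_count = 1 };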
+ */
+struct ffa_comp_mrd {
+	uint32_t total_page_count;
+	uint32_t address_range_count;
+	uint64_t reserved_8_15;
+	struct ffa_cons_mrd address_range_array[];
+};
+CASSERT(sizeof(struct ffa_comp_mrd) == 16, assert_ffa_comp_mrd_size_mismatch);
+
+/**
+ * typedef ffa_mem_attr8_t - Memory region attributes v1.0.
+ * typedef ffa_mem_attr16_t - Memory region attributes v1.1.
+ *
+ * * @FFA_MEM_ATTR_NS_BIT:
+ *     Memory security state.
+ * * @FFA_MEM_ATTR_DEVICE_NGNRNE:
+ *     Device-nGnRnE.
+ * * @FFA_MEM_ATTR_DEVICE_NGNRE:
+ *     Device-nGnRE.
+ * * @FFA_MEM_ATTR_DEVICE_NGRE:
+ *     Device-nGRE.
+ * * @FFA_MEM_ATTR_DEVICE_GRE:
+ *     Device-GRE.
+ * * @FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED:
+ *     Normal memory. Non-cacheable.
+ * * @FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB:
+ *     Normal memory. Write-back cached.
+ * * @FFA_MEM_ATTR_NON_SHAREABLE:
+ *     Non-shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
+ * * @FFA_MEM_ATTR_OUTER_SHAREABLE:
+ *     Outer Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
+ * * @FFA_MEM_ATTR_INNER_SHAREABLE:
+ *     Inner Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
+ */
+typedef uint8_t ffa_mem_attr8_t;
+typedef uint16_t ffa_mem_attr16_t;
+#define FFA_MEM_ATTR_NS_BIT				(0x1U << 6)
+#define FFA_MEM_ATTR_DEVICE_NGNRNE			((1U << 4) | (0x0U << 2))
+#define FFA_MEM_ATTR_DEVICE_NGNRE			((1U << 4) | (0x1U << 2))
+#define FFA_MEM_ATTR_DEVICE_NGRE			((1U << 4) | (0x2U << 2))
+#define FFA_MEM_ATTR_DEVICE_GRE				((1U << 4) | (0x3U << 2))
+#define FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED		((2U << 4) | (0x1U << 2))
+#define FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB		((2U << 4) | (0x3U << 2))
+#define FFA_MEM_ATTR_NON_SHAREABLE			(0x0U << 0)
+#define FFA_MEM_ATTR_OUTER_SHAREABLE			(0x2U << 0)
+#define FFA_MEM_ATTR_INNER_SHAREABLE			(0x3U << 0)
+
+/**
+ * typedef ffa_mem_perm8_t - Memory access permissions
+ *
+ * * @FFA_MEM_PERM_RO:
+ *     Request or specify read-only mapping.
+ * * @FFA_MEM_PERM_RW:
+ *     Request or allow read-write mapping.
+ * * @FFA_MEM_PERM_NX:
+ *     Deny executable mapping.
+ * * @FFA_MEM_PERM_X:
+ *     Request executable mapping.
+ */
+typedef uint8_t ffa_mem_perm8_t;
+#define FFA_MEM_PERM_RO		(1U << 0)
+#define FFA_MEM_PERM_RW		(1U << 1)
+#define FFA_MEM_PERM_NX		(1U << 2)
+#define FFA_MEM_PERM_X		(1U << 3)
+
+/**
+ * typedef ffa_mem_flag8_t - Endpoint memory flags
+ *
+ * * @FFA_MEM_FLAG_NON_RETRIEVAL_BORROWER:
+ *     Non-retrieval Borrower. Memory region must not be or was not retrieved
+ *     on behalf of this endpoint.
+ */
+typedef uint8_t ffa_mem_flag8_t;
+#define FFA_MEM_FLAG_NON_RETRIEVAL_BORROWER	(1U << 0)
+
+/**
+ * typedef ffa_mtd_flag32_t - Memory transaction descriptor flags
+ *
+ * * @FFA_MTD_FLAG_ZERO_MEMORY:
+ *     Zero memory after unmapping from sender (must be 0 for share).
+ * * @FFA_MTD_FLAG_TIME_SLICING:
+ *     Not supported by this implementation.
+ * * @FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH:
+ *     Zero memory after unmapping from borrowers (must be 0 for share).
+ * * @FFA_MTD_FLAG_TYPE_MASK:
+ *     Bit-mask to extract memory management transaction type from flags.
+ * * @FFA_MTD_FLAG_TYPE_SHARE_MEMORY:
+ *     Share memory transaction flag.
+ *     Used by @SMC_FC_FFA_MEM_RETRIEVE_RESP to indicate that memory came from
+ *     @SMC_FC_FFA_MEM_SHARE and by @SMC_FC_FFA_MEM_RETRIEVE_REQ to specify
+ *     that it must have been shared.
+ * * @FFA_MTD_FLAG_TYPE_LEND_MEMORY:
+ *     Lend memory transaction flag.
+ * * @FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK:
+ *     Not supported by this implementation.
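+ *
+ * For illustration, the transaction type encoded in a flags value can be
+ * tested with the mask, e.g.
+ *
+ *	bool is_share = (flags & FFA_MTD_FLAG_TYPE_MASK) ==
+ *			FFA_MTD_FLAG_TYPE_SHARE_MEMORY;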
+ */
+typedef uint32_t ffa_mtd_flag32_t;
+#define FFA_MTD_FLAG_ZERO_MEMORY			(1U << 0)
+#define FFA_MTD_FLAG_TIME_SLICING			(1U << 1)
+#define FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH	(1U << 2)
+#define FFA_MTD_FLAG_TYPE_MASK				(3U << 3)
+#define FFA_MTD_FLAG_TYPE_SHARE_MEMORY			(1U << 3)
+#define FFA_MTD_FLAG_TYPE_LEND_MEMORY			(1U << 4)
+#define FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK	(0x1FU << 5)
+
+/**
+ * struct ffa_mapd - Memory access permissions descriptor
+ * @endpoint_id:
+ *         Endpoint id that @memory_access_permissions and @flags apply to.
+ *         (&typedef ffa_endpoint_id16_t).
+ * @memory_access_permissions:
+ *         FFA_MEM_PERM_* values or'ed together (&typedef ffa_mem_perm8_t).
+ * @flags:
+ *         FFA_MEM_FLAG_* values or'ed together (&typedef ffa_mem_flag8_t).
+ */
+struct ffa_mapd {
+	ffa_endpoint_id16_t endpoint_id;
+	ffa_mem_perm8_t memory_access_permissions;
+	ffa_mem_flag8_t flags;
+};
+CASSERT(sizeof(struct ffa_mapd) == 4, assert_ffa_mapd_size_mismatch);
+
+/**
+ * struct ffa_emad_v1_0 - Endpoint memory access descriptor.
+ * @mapd:  &struct ffa_mapd.
+ * @comp_mrd_offset:
+ *         Offset of &struct ffa_comp_mrd from start of &struct ffa_mtd_v1_0.
+ * @reserved_8_15:
+ *         Reserved bytes 8-15. Must be 0.
+ */
+struct ffa_emad_v1_0 {
+	struct ffa_mapd mapd;
+	uint32_t comp_mrd_offset;
+	uint64_t reserved_8_15;
+};
+CASSERT(sizeof(struct ffa_emad_v1_0) == 16, assert_ffa_emad_v1_0_size_mismatch);
+
+/**
+ * struct ffa_mtd_v1_0 - Memory transaction descriptor.
+ * @sender_id:
+ *         Sender endpoint id.
+ * @memory_region_attributes:
+ *         FFA_MEM_ATTR_* values or'ed together (&typedef ffa_mem_attr8_t).
+ * @reserved_3:
+ *         Reserved byte 3. Must be 0.
+ * @flags:
+ *         FFA_MTD_FLAG_* values or'ed together (&typedef ffa_mtd_flag32_t).
+ * @handle:
+ *         Id of shared memory object. Must be 0 for MEM_SHARE or MEM_LEND.
+ * @tag:   Client allocated tag. Must match original value.
+ * @reserved_24_27:
+ *         Reserved bytes 24-27. Must be 0.
+ * @emad_count:
+ *         Number of entries in @emad.
+ * @emad:
+ *         Endpoint memory access descriptor array (see @struct ffa_emad_v1_0).
+ */
+struct ffa_mtd_v1_0 {
+	ffa_endpoint_id16_t sender_id;
+	ffa_mem_attr8_t memory_region_attributes;
+	uint8_t reserved_3;
+	ffa_mtd_flag32_t flags;
+	uint64_t handle;
+	uint64_t tag;
+	uint32_t reserved_24_27;
+	uint32_t emad_count;
+	struct ffa_emad_v1_0 emad[];
+};
+CASSERT(sizeof(struct ffa_mtd_v1_0) == 32, assert_ffa_mtd_size_v1_0_mismatch);
+
+/**
+ * struct ffa_mtd - Memory transaction descriptor for FF-A v1.1.
+ * @sender_id:
+ *         Sender endpoint id.
+ * @memory_region_attributes:
+ *         FFA_MEM_ATTR_* values or'ed together (&typedef ffa_mem_attr16_t).
+ * @flags:
+ *         FFA_MTD_FLAG_* values or'ed together (&typedef ffa_mtd_flag32_t).
+ * @handle:
+ *         Id of shared memory object. Must be 0 for MEM_SHARE or MEM_LEND.
+ * @tag:   Client allocated tag. Must match original value.
+ * @emad_size:
+ *         Size of the emad descriptor.
+ * @emad_count:
+ *         Number of entries in the emad array.
+ * @emad_offset:
+ *         Offset from the beginning of the descriptor to the location of the
+ *         memory access descriptor array (see @struct ffa_emad_v1_0).
+ * @reserved_36_39:
+ *         Reserved bytes 36-39. Must be 0.
+ * @reserved_40_47:
+ *         Reserved bytes 40-47. Must be 0.
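+ *
+ * Illustrative sketch: since v1.1 locates the emad array by offset and size
+ * rather than as a trailing member, entry @i of a descriptor @mtd is found
+ * as
+ *
+ *	struct ffa_emad_v1_0 *emad = (struct ffa_emad_v1_0 *)
+ *		((uint8_t *)mtd + mtd->emad_offset + i * mtd->emad_size);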
+ */
+struct ffa_mtd {
+	ffa_endpoint_id16_t sender_id;
+	ffa_mem_attr16_t memory_region_attributes;
+	ffa_mtd_flag32_t flags;
+	uint64_t handle;
+	uint64_t tag;
+	uint32_t emad_size;
+	uint32_t emad_count;
+	uint32_t emad_offset;
+	uint32_t reserved_36_39;
+	uint64_t reserved_40_47;
+};
+CASSERT(sizeof(struct ffa_mtd) == 48, assert_ffa_mtd_size_mismatch);
+
+#endif /* EL3_SPMC_FFA_MEM_H */
diff --git a/include/services/ffa_svc.h b/include/services/ffa_svc.h
index 85844899d..da016fd46 100644
--- a/include/services/ffa_svc.h
+++ b/include/services/ffa_svc.h
@@ -271,4 +271,71 @@ static inline bool ffa_is_normal_world_id(uint16_t id)
 	return !ffa_is_secure_world_id(id);
 }
 
+/******************************************************************************
+ * Boot information protocol as per the FF-A v1.1 spec.
+ *****************************************************************************/
+#define FFA_INIT_DESC_SIGNATURE			0x00000FFA
+
+/* Boot information type. */
+#define FFA_BOOT_INFO_TYPE_STD			U(0x0)
+#define FFA_BOOT_INFO_TYPE_IMPL			U(0x1)
+
+#define FFA_BOOT_INFO_TYPE_MASK			U(0x1)
+#define FFA_BOOT_INFO_TYPE_SHIFT		U(0x7)
+#define FFA_BOOT_INFO_TYPE(type)		\
+	(((type) & FFA_BOOT_INFO_TYPE_MASK)	\
+	 << FFA_BOOT_INFO_TYPE_SHIFT)
+
+/* Boot information identifier. */
+#define FFA_BOOT_INFO_TYPE_ID_FDT		U(0x0)
+#define FFA_BOOT_INFO_TYPE_ID_HOB		U(0x1)
+
+#define FFA_BOOT_INFO_TYPE_ID_MASK		U(0x3F)
+#define FFA_BOOT_INFO_TYPE_ID_SHIFT		U(0x0)
+#define FFA_BOOT_INFO_TYPE_ID(type)		\
+	(((type) & FFA_BOOT_INFO_TYPE_ID_MASK)	\
+	 << FFA_BOOT_INFO_TYPE_ID_SHIFT)
+
+/* Format of Flags Name field. */
+#define FFA_BOOT_INFO_FLAG_NAME_STRING		U(0x0)
+#define FFA_BOOT_INFO_FLAG_NAME_UUID		U(0x1)
+
+#define FFA_BOOT_INFO_FLAG_NAME_MASK		U(0x3)
+#define FFA_BOOT_INFO_FLAG_NAME_SHIFT		U(0x0)
+#define FFA_BOOT_INFO_FLAG_NAME(type)		\
+	(((type) & FFA_BOOT_INFO_FLAG_NAME_MASK)\
+	 << FFA_BOOT_INFO_FLAG_NAME_SHIFT)
+
+/* Format of Flags Contents field. */
+#define FFA_BOOT_INFO_FLAG_CONTENT_ADR		U(0x0)
+#define FFA_BOOT_INFO_FLAG_CONTENT_VAL		U(0x1)
+
+#define FFA_BOOT_INFO_FLAG_CONTENT_MASK		U(0x1)
+#define FFA_BOOT_INFO_FLAG_CONTENT_SHIFT	U(0x2)
+#define FFA_BOOT_INFO_FLAG_CONTENT(content)	\
+	(((content) & FFA_BOOT_INFO_FLAG_CONTENT_MASK)	\
+	 << FFA_BOOT_INFO_FLAG_CONTENT_SHIFT)
+
+/* Boot information descriptor. */
+struct ffa_boot_info_desc {
+	uint8_t name[16];
+	uint8_t type;
+	uint8_t reserved;
+	uint16_t flags;
+	uint32_t size_boot_info;
+	uint64_t content;
+};
+
+/* Boot information header. */
+struct ffa_boot_info_header {
+	uint32_t signature; /* 0xFFA */
+	uint32_t version;
+	uint32_t size_boot_info_blob;
+	uint32_t size_boot_info_desc;
+	uint32_t count_boot_info_desc;
+	uint32_t offset_boot_info_desc;
+	uint64_t reserved;
+};
+
 #endif /* FFA_SVC_H */
diff --git a/plat/arm/board/fvp/fvp_el3_spmc.c b/plat/arm/board/fvp/fvp_el3_spmc.c
new file mode 100644
index 000000000..2b347ed62
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_el3_spmc.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <services/el3_spmc_ffa_memory.h>
+
+#include <platform_def.h>
+
+/*
+ * On the FVP platform, when using the EL3 SPMC implementation, allocate the
+ * datastore for tracking shared memory descriptors in the TZC DRAM section
+ * to ensure sufficient storage can be allocated.
+ * Provide an implementation of the accessor method to allow the datastore
+ * details to be retrieved by the SPMC.
+ * The SPMC will take care of initializing the memory region.
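+ *
+ * A minimal sketch of how another platform could provide the same hook,
+ * assuming a hypothetical statically allocated buffer:
+ *
+ *	static uint8_t my_datastore[64 * 1024] __aligned(8);
+ *
+ *	int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size)
+ *	{
+ *		*datastore = my_datastore;
+ *		*size = sizeof(my_datastore);
+ *		return 0;
+ *	}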
+ */
+
+#define PLAT_SPMC_SHMEM_DATASTORE_SIZE	(512 * 1024)
+
+__section("arm_el3_tzc_dram") static uint8_t
+plat_spmc_shmem_datastore[PLAT_SPMC_SHMEM_DATASTORE_SIZE];
+
+int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size)
+{
+	*datastore = plat_spmc_shmem_datastore;
+	*size = PLAT_SPMC_SHMEM_DATASTORE_SIZE;
+	return 0;
+}
+
+/*
+ * Dummy implementations of the memory management related platform hooks.
+ * These can be used to implement platform specific functionality to support
+ * a memory sharing/lending operation.
+ *
+ * Note: The hooks must be invoked as part of the initial share request and
+ * the final reclaim to prevent ordering dependencies with operations that may
+ * take place in the normal world without visibility of the SPMC.
+ */
+int plat_spmc_shmem_begin(struct ffa_mtd *desc)
+{
+	return 0;
+}
+
+int plat_spmc_shmem_reclaim(struct ffa_mtd *desc)
+{
+	return 0;
+}
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index 89ca18540..54c5e7545 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -425,3 +425,7 @@ ENABLE_SYS_REG_TRACE_FOR_NS	:=	1
 
 # enable trace filter control registers access to NS by default
 ENABLE_TRF_FOR_NS		:=	1
+
+ifeq (${SPMC_AT_EL3}, 1)
+PLAT_BL_COMMON_SOURCES	+=	plat/arm/board/fvp/fvp_el3_spmc.c
+endif
diff --git a/services/std_svc/spm/el3_spmc/spmc.h b/services/std_svc/spm/el3_spmc/spmc.h
index 0e08d2e38..d62be91eb 100644
--- a/services/std_svc/spm/el3_spmc/spmc.h
+++ b/services/std_svc/spm/el3_spmc/spmc.h
@@ -37,6 +37,7 @@
 #define FFA_ID_MASK			U(0xFFFF)
 #define FFA_PARTITION_ID_SHIFT		U(16)
 #define FFA_FEATURES_BIT31_MASK		U(0x1u << 31)
+#define FFA_FEATURES_RET_REQ_NS_BIT	U(0x1 << 1)
 
 #define FFA_RUN_EP_ID(ep_vcpu_ids) \
 		((ep_vcpu_ids >> FFA_PARTITION_ID_SHIFT) & FFA_ID_MASK)
@@ -170,6 +171,12 @@ struct secure_partition_desc {
 	 * Store whether the SP has subscribed to any power management messages.
 	 */
 	uint16_t pwr_mgmt_msgs;
+
+	/*
+	 * Store whether the SP has requested the use of the NS bit for memory
+	 * management transactions if it is using FF-A v1.0.
+	 */
+	bool ns_bit_requested;
 };
 
 /*
@@ -225,7 +232,8 @@ extern const spd_pm_ops_t spmc_pm;
 
 /* Setup Function for different SP types. */
 void spmc_sp_common_setup(struct secure_partition_desc *sp,
-			  entry_point_info_t *ep_info);
+			  entry_point_info_t *ep_info,
+			  int32_t boot_info_reg);
 void spmc_el1_sp_setup(struct secure_partition_desc *sp,
 		       entry_point_info_t *ep_info);
 void spmc_sp_common_ep_commit(struct secure_partition_desc *sp,
@@ -272,4 +280,16 @@ struct el3_lp_desc *get_el3_lp_array(void);
  */
 struct mailbox *spmc_get_mbox_desc(bool secure_origin);
 
+/*
+ * Helper function to obtain the context of an SP with a given partition ID.
+ */
+struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id);
+
+/*
+ * Helper function to obtain the FF-A version of the calling
+ * partition.
+ */
+uint32_t get_partition_ffa_version(bool secure_origin);
+
 #endif /* SPMC_H */
diff --git a/services/std_svc/spm/el3_spmc/spmc.mk b/services/std_svc/spm/el3_spmc/spmc.mk
index 9f82ccbf3..aa591d9f3 100644
--- a/services/std_svc/spm/el3_spmc/spmc.mk
+++ b/services/std_svc/spm/el3_spmc/spmc.mk
@@ -12,7 +12,8 @@ SPMC_SOURCES	:=	$(addprefix services/std_svc/spm/el3_spmc/,	\
 			spmc_main.c		\
 			spmc_setup.c		\
 			logical_sp.c		\
-			spmc_pm.c)
+			spmc_pm.c		\
+			spmc_shared_mem.c)
 
 # Specify platform specific logical partition implementation.
SPMC_LP_SOURCES := $(addprefix ${PLAT_DIR}/, \ diff --git a/services/std_svc/spm/el3_spmc/spmc_main.c b/services/std_svc/spm/el3_spmc/spmc_main.c index 342f55c0f..9b8621a73 100644 --- a/services/std_svc/spm/el3_spmc/spmc_main.c +++ b/services/std_svc/spm/el3_spmc/spmc_main.c @@ -26,6 +26,7 @@ #include #include #include "spmc.h" +#include "spmc_shared_mem.h" #include @@ -989,6 +990,53 @@ err: return spmc_ffa_error_return(handle, ret); } +static uint64_t ffa_feature_success(void *handle, uint32_t arg2) +{ + SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2); +} + +static uint64_t ffa_features_retrieve_request(bool secure_origin, + uint32_t input_properties, + void *handle) +{ + /* + * If we're called by the normal world we don't support any + * additional features. + */ + if (!secure_origin) { + if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) { + return spmc_ffa_error_return(handle, + FFA_ERROR_NOT_SUPPORTED); + } + + } else { + struct secure_partition_desc *sp = spmc_get_current_sp_ctx(); + /* + * If v1.1 the NS bit must be set otherwise it is an invalid + * call. If v1.0 check and store whether the SP has requested + * the use of the NS bit. + */ + if (sp->ffa_version == MAKE_FFA_VERSION(1, 1)) { + if ((input_properties & + FFA_FEATURES_RET_REQ_NS_BIT) == 0U) { + return spmc_ffa_error_return(handle, + FFA_ERROR_NOT_SUPPORTED); + } + return ffa_feature_success(handle, + FFA_FEATURES_RET_REQ_NS_BIT); + } else { + sp->ns_bit_requested = (input_properties & + FFA_FEATURES_RET_REQ_NS_BIT) != + 0U; + } + if (sp->ns_bit_requested) { + return ffa_feature_success(handle, + FFA_FEATURES_RET_REQ_NS_BIT); + } + } + SMC_RET1(handle, FFA_SUCCESS_SMC32); +} + static uint64_t ffa_features_handler(uint32_t smc_fid, bool secure_origin, uint64_t x1, @@ -1002,21 +1050,34 @@ static uint64_t ffa_features_handler(uint32_t smc_fid, uint32_t function_id = (uint32_t) x1; uint32_t input_properties = (uint32_t) x2; - /* - * We don't currently support any additional input properties - * for any ABI therefore ensure this value is always set to 0. - */ - if (input_properties != 0) { - return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); - } - /* Check if a Feature ID was requested. */ if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) { /* We currently don't support any additional features. */ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); } - /* Report if an FF-A ABI is supported. */ + /* + * Handle the cases where we have separate handlers due to additional + * properties. + */ + switch (function_id) { + case FFA_MEM_RETRIEVE_REQ_SMC32: + case FFA_MEM_RETRIEVE_REQ_SMC64: + return ffa_features_retrieve_request(secure_origin, + input_properties, + handle); + } + + /* + * We don't currently support additional input properties for these + * other ABIs therefore ensure this value is set to 0. + */ + if (input_properties != 0U) { + return spmc_ffa_error_return(handle, + FFA_ERROR_NOT_SUPPORTED); + } + + /* Report if any other FF-A ABI is supported. */ switch (function_id) { /* Supported features from both worlds. 
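+	 * (Illustrative note: an SP can probe the retrieve ABI together with
+	 * the NS-bit property by calling FFA_FEATURES with
+	 * x1 = FFA_MEM_RETRIEVE_REQ_SMC64 and
+	 * x2 = FFA_FEATURES_RET_REQ_NS_BIT; see
+	 * ffa_features_retrieve_request() above.)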
*/ case FFA_ERROR: @@ -1033,6 +1094,7 @@ static uint64_t ffa_features_handler(uint32_t smc_fid, case FFA_RXTX_MAP_SMC32: case FFA_RXTX_MAP_SMC64: case FFA_RXTX_UNMAP: + case FFA_MEM_FRAG_TX: case FFA_MSG_RUN: /* @@ -1050,9 +1112,25 @@ static uint64_t ffa_features_handler(uint32_t smc_fid, case FFA_SECONDARY_EP_REGISTER_SMC64: case FFA_MSG_SEND_DIRECT_RESP_SMC32: case FFA_MSG_SEND_DIRECT_RESP_SMC64: + case FFA_MEM_RELINQUISH: case FFA_MSG_WAIT: if (!secure_origin) { + return spmc_ffa_error_return(handle, + FFA_ERROR_NOT_SUPPORTED); + } + SMC_RET1(handle, FFA_SUCCESS_SMC32); + /* Execution stops here. */ + + /* Supported features only from the normal world. */ + case FFA_MEM_SHARE_SMC32: + case FFA_MEM_SHARE_SMC64: + case FFA_MEM_LEND_SMC32: + case FFA_MEM_LEND_SMC64: + case FFA_MEM_RECLAIM: + case FFA_MEM_FRAG_RX: + + if (secure_origin) { return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); } @@ -1316,7 +1394,8 @@ static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid, ******************************************************************************/ static int sp_manifest_parse(void *sp_manifest, int offset, struct secure_partition_desc *sp, - entry_point_info_t *ep_info) + entry_point_info_t *ep_info, + int32_t *boot_info_reg) { int32_t ret, node; uint32_t config_32; @@ -1433,6 +1512,20 @@ static int sp_manifest_parse(void *sp_manifest, int offset, sp->pwr_mgmt_msgs = config_32; } + ret = fdt_read_uint32(sp_manifest, node, + "gp-register-num", &config_32); + if (ret != 0) { + WARN("Missing boot information register.\n"); + } else { + /* Check if a register number between 0-3 is specified. */ + if (config_32 < 4) { + *boot_info_reg = config_32; + } else { + WARN("Incorrect boot information register (%u).\n", + config_32); + } + } + return 0; } @@ -1448,7 +1541,7 @@ static int find_and_prepare_sp_context(void) uintptr_t manifest_base; uintptr_t manifest_base_align; entry_point_info_t *next_image_ep_info; - int32_t ret; + int32_t ret, boot_info_reg = -1; struct secure_partition_desc *sp; next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE); @@ -1507,7 +1600,8 @@ static int find_and_prepare_sp_context(void) SECURE | EP_ST_ENABLE); /* Parse the SP manifest. */ - ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info); + ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info, + &boot_info_reg); if (ret != 0) { ERROR("Error in Secure Partition manifest parsing.\n"); return ret; @@ -1520,7 +1614,7 @@ static int find_and_prepare_sp_context(void) } /* Perform any common initialisation. */ - spmc_sp_common_setup(sp, next_image_ep_info); + spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg); /* Perform any initialisation specific to S-EL1 SPs. */ spmc_el1_sp_setup(sp, next_image_ep_info); @@ -1677,6 +1771,18 @@ int32_t spmc_setup(void) initalize_sp_descs(); initalize_ns_ep_descs(); + /* + * Retrieve the information of the datastore for tracking shared memory + * requests allocated by platform code and zero the region if available. + */ + ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data, + &spmc_shmem_obj_state.data_size); + if (ret != 0) { + ERROR("Failed to obtain memory descriptor backing store!\n"); + return ret; + } + memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size); + /* Setup logical SPs. 
 	 */
 	ret = logical_sp_init();
 	if (ret != 0) {
@@ -1799,6 +1905,35 @@ uint64_t spmc_smc_handler(uint32_t smc_fid,
 	case FFA_MSG_RUN:
 		return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
 				       cookie, handle, flags);
+
+	case FFA_MEM_SHARE_SMC32:
+	case FFA_MEM_SHARE_SMC64:
+	case FFA_MEM_LEND_SMC32:
+	case FFA_MEM_LEND_SMC64:
+		return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
+					 cookie, handle, flags);
+
+	case FFA_MEM_FRAG_TX:
+		return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
+					    x4, cookie, handle, flags);
+
+	case FFA_MEM_FRAG_RX:
+		return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
+					    x4, cookie, handle, flags);
+
+	case FFA_MEM_RETRIEVE_REQ_SMC32:
+	case FFA_MEM_RETRIEVE_REQ_SMC64:
+		return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
+						 x3, x4, cookie, handle, flags);
+
+	case FFA_MEM_RELINQUISH:
+		return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
+					       x3, x4, cookie, handle, flags);
+
+	case FFA_MEM_RECLAIM:
+		return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
+					    x4, cookie, handle, flags);
+
 	default:
 		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
 		break;
diff --git a/services/std_svc/spm/el3_spmc/spmc_setup.c b/services/std_svc/spm/el3_spmc/spmc_setup.c
index af5219d02..8ebae2852 100644
--- a/services/std_svc/spm/el3_spmc/spmc_setup.c
+++ b/services/std_svc/spm/el3_spmc/spmc_setup.c
@@ -10,18 +10,138 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include "spm_common.h"
 #include "spmc.h"
+#include
 #include
 
+/*
+ * Statically allocate a page of memory for passing boot information to an SP.
+ */
+static uint8_t ffa_boot_info_mem[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+/*
+ * This function creates an initialization descriptor in the memory reserved
+ * for passing boot information to an SP. It then copies the partition manifest
+ * into this region and ensures that its reference in the initialization
+ * descriptor is updated.
+ */
+static void spmc_create_boot_info(entry_point_info_t *ep_info,
+				  struct secure_partition_desc *sp)
+{
+	struct ffa_boot_info_header *boot_header;
+	struct ffa_boot_info_desc *boot_descriptor;
+	uintptr_t manifest_addr;
+
+	/*
+	 * Calculate the maximum size of the manifest that can be accommodated
+	 * in the boot information memory region.
+	 */
+	const unsigned int max_manifest_sz = sizeof(ffa_boot_info_mem) -
+		(sizeof(struct ffa_boot_info_header) +
+		 sizeof(struct ffa_boot_info_desc));
+
+	/*
+	 * The current implementation only supports the FF-A v1.1
+	 * implementation of the boot protocol, therefore check
+	 * that a v1.0 SP has not requested use of the protocol.
+	 */
+	if (sp->ffa_version == MAKE_FFA_VERSION(1, 0)) {
+		ERROR("FF-A boot protocol not supported for v1.0 clients\n");
+		return;
+	}
+
+	/*
+	 * Check that the manifest will fit into the boot info memory region,
+	 * else bail.
+	 */
+	if (ep_info->args.arg1 > max_manifest_sz) {
+		WARN("Unable to copy manifest into boot information. ");
+		WARN("Max sz = %u bytes. Manifest sz = %lu bytes\n",
+		     max_manifest_sz, ep_info->args.arg1);
+		return;
+	}
+
+	/* Zero the memory region before populating. */
+	memset(ffa_boot_info_mem, 0, PAGE_SIZE);
+
+	/*
+	 * Populate the ffa_boot_info_header at the start of the boot info
+	 * region.
+	 */
+	boot_header = (struct ffa_boot_info_header *) ffa_boot_info_mem;
+
+	/* Position the ffa_boot_info_desc after the ffa_boot_info_header.
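+	 * The resulting layout of ffa_boot_info_mem is, schematically:
+	 *
+	 *	+------------------------------+ offset 0
+	 *	| struct ffa_boot_info_header  |
+	 *	+------------------------------+ offset_boot_info_desc
+	 *	| struct ffa_boot_info_desc    |
+	 *	+------------------------------+ offset + size_boot_info_desc
+	 *	| SP manifest (FDT blob)       |
+	 *	+------------------------------+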
+	 */
+	boot_header->offset_boot_info_desc =
+		sizeof(struct ffa_boot_info_header);
+	boot_descriptor = (struct ffa_boot_info_desc *)
+			  (ffa_boot_info_mem +
+			   boot_header->offset_boot_info_desc);
+
+	/*
+	 * We must use the FF-A version corresponding to the version
+	 * implemented by the SP. Currently this can only be v1.1.
+	 */
+	boot_header->version = sp->ffa_version;
+
+	/* Populate the boot information header. */
+	boot_header->size_boot_info_desc = sizeof(struct ffa_boot_info_desc);
+
+	/* Set the signature "0xFFA". */
+	boot_header->signature = FFA_INIT_DESC_SIGNATURE;
+
+	/* Set the count. Currently 1 since only the manifest is specified. */
+	boot_header->count_boot_info_desc = 1;
+
+	/* Populate the boot information descriptor for the manifest. */
+	boot_descriptor->type =
+		FFA_BOOT_INFO_TYPE(FFA_BOOT_INFO_TYPE_STD) |
+		FFA_BOOT_INFO_TYPE_ID(FFA_BOOT_INFO_TYPE_ID_FDT);
+
+	boot_descriptor->flags =
+		FFA_BOOT_INFO_FLAG_NAME(FFA_BOOT_INFO_FLAG_NAME_UUID) |
+		FFA_BOOT_INFO_FLAG_CONTENT(FFA_BOOT_INFO_FLAG_CONTENT_ADR);
+
+	/*
+	 * Copy the manifest into boot info region after the boot information
+	 * descriptor.
+	 */
+	boot_descriptor->size_boot_info = (uint32_t) ep_info->args.arg1;
+
+	manifest_addr = (uintptr_t) (ffa_boot_info_mem +
+				     boot_header->offset_boot_info_desc +
+				     boot_header->size_boot_info_desc);
+
+	memcpy((void *) manifest_addr, (void *) ep_info->args.arg0,
+	       boot_descriptor->size_boot_info);
+
+	boot_descriptor->content = manifest_addr;
+
+	/* Calculate the size of the total boot info blob. */
+	boot_header->size_boot_info_blob = boot_header->offset_boot_info_desc +
+					   boot_descriptor->size_boot_info +
+					   (boot_header->count_boot_info_desc *
+					    boot_header->size_boot_info_desc);
+
+	INFO("SP boot info @ 0x%lx, size: %u bytes.\n",
+	     (uintptr_t) ffa_boot_info_mem,
+	     boot_header->size_boot_info_blob);
+	INFO("SP manifest @ 0x%lx, size: %u bytes.\n",
+	     boot_descriptor->content,
+	     boot_descriptor->size_boot_info);
+}
+
 /*
  * We are assuming that the index of the execution
  * context used is the linear index of the current physical cpu.
@@ -68,7 +188,8 @@ void spmc_el1_sp_setup(struct secure_partition_desc *sp,
 
 /* Common initialisation for all SPs. */
 void spmc_sp_common_setup(struct secure_partition_desc *sp,
-			  entry_point_info_t *ep_info)
+			  entry_point_info_t *ep_info,
+			  int32_t boot_info_reg)
 {
 	uint16_t sp_id;
 
@@ -96,11 +217,50 @@ void spmc_sp_common_setup(struct secure_partition_desc *sp,
 	 */
 	assert(sp->runtime_el == S_EL1);
 
-	/*
-	 * Clear the general purpose registers. These should be populated as
-	 * required.
-	 */
-	zeromem(&ep_info->args, sizeof(ep_info->args));
+	/* Check if the SP wants to use the FF-A boot protocol. */
+	if (boot_info_reg >= 0) {
+		/*
+		 * Create a boot information descriptor and copy the partition
+		 * manifest into the reserved memory region for consumption by
+		 * the SP.
+		 */
+		spmc_create_boot_info(ep_info, sp);
+
+		/*
+		 * We have consumed what we need from ep args so we can now
+		 * zero them before we start populating with new information
+		 * specifically for the SP.
+		 */
+		zeromem(&ep_info->args, sizeof(ep_info->args));
+
+		/*
+		 * Pass the address of the boot information in the
+		 * boot_info_reg.
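+		 * The register index comes from the "gp-register-num"
+		 * manifest property parsed in sp_manifest_parse(); e.g. an SP
+		 * selecting x0 would carry in its manifest:
+		 *
+		 *	gp-register-num = <0>;
+		 *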
+		 */
+		switch (boot_info_reg) {
+		case 0:
+			ep_info->args.arg0 = (uintptr_t) ffa_boot_info_mem;
+			break;
+		case 1:
+			ep_info->args.arg1 = (uintptr_t) ffa_boot_info_mem;
+			break;
+		case 2:
+			ep_info->args.arg2 = (uintptr_t) ffa_boot_info_mem;
+			break;
+		case 3:
+			ep_info->args.arg3 = (uintptr_t) ffa_boot_info_mem;
+			break;
+		default:
+			ERROR("Invalid value for \"gp-register-num\" %d.\n",
+			      boot_info_reg);
+		}
+	} else {
+		/*
+		 * We don't need any of the information that was populated
+		 * in ep_args so we can clear them.
+		 */
+		zeromem(&ep_info->args, sizeof(ep_info->args));
+	}
 }
 
 /*
diff --git a/services/std_svc/spm/el3_spmc/spmc_shared_mem.c b/services/std_svc/spm/el3_spmc/spmc_shared_mem.c
new file mode 100644
index 000000000..1602981bf
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc_shared_mem.c
@@ -0,0 +1,1812 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include "spmc.h"
+#include "spmc_shared_mem.h"
+
+#include
+
+/**
+ * struct spmc_shmem_obj - Shared memory object.
+ * @desc_size:      Size of @desc.
+ * @desc_filled:    Size of @desc already received.
+ * @in_use:         Number of clients that have called ffa_mem_retrieve_req
+ *                  without a matching ffa_mem_relinquish call.
+ * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
+ */
+struct spmc_shmem_obj {
+	size_t desc_size;
+	size_t desc_filled;
+	size_t in_use;
+	struct ffa_mtd desc;
+};
+
+/*
+ * Declare our data structure to store the metadata of memory share requests.
+ * The main datastore is allocated on a per platform basis to ensure enough
+ * storage can be made available.
+ * The address of the datastore will be populated by the SPMC during its
+ * initialization.
+ */
+struct spmc_shmem_obj_state spmc_shmem_obj_state = {
+	/* Set start value for handle so top 32 bits are needed quickly. */
+	.next_handle = 0xffffffc0U,
+};
+
+/**
+ * spmc_shmem_obj_size - Convert from descriptor size to object size.
+ * @desc_size: Size of struct ffa_memory_region_descriptor object.
+ *
+ * Return: Size of struct spmc_shmem_obj object.
+ */
+static size_t spmc_shmem_obj_size(size_t desc_size)
+{
+	return desc_size + offsetof(struct spmc_shmem_obj, desc);
+}
+
+/**
+ * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
+ * @state:      Global state.
+ * @desc_size:  Size of struct ffa_memory_region_descriptor object that
+ *              allocated object will hold.
+ *
+ * Return: Pointer to newly allocated object, or %NULL if there is not enough
+ *         space left. The returned pointer is only valid while @state is
+ *         locked; to use it again after unlocking @state,
+ *         spmc_shmem_obj_lookup must be called.
+ */
+static struct spmc_shmem_obj *
+spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
+{
+	struct spmc_shmem_obj *obj;
+	size_t free = state->data_size - state->allocated;
+
+	if (state->data == NULL) {
+		ERROR("Missing shmem datastore!\n");
+		return NULL;
+	}
+
+	if (spmc_shmem_obj_size(desc_size) > free) {
+		WARN("%s(0x%zx) failed, free 0x%zx\n",
+		     __func__, desc_size, free);
+		return NULL;
+	}
+	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
+	obj->desc = (struct ffa_mtd) {0};
+	obj->desc_size = desc_size;
+	obj->desc_filled = 0;
+	obj->in_use = 0;
+	state->allocated += spmc_shmem_obj_size(desc_size);
+	return obj;
+}
+
+/**
+ * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
+ * @state:      Global state.
+ * @obj:        Object to free.
+ *
+ * Release memory used by @obj. Other objects may move, so on return all
+ * pointers to struct spmc_shmem_obj objects should be considered invalid, not
+ * just @obj.
+ *
+ * The current implementation always compacts the remaining objects to simplify
+ * the allocator and to avoid fragmentation.
+ */
+static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
+				struct spmc_shmem_obj *obj)
+{
+	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
+	uint8_t *shift_dest = (uint8_t *)obj;
+	uint8_t *shift_src = shift_dest + free_size;
+	size_t shift_size = state->allocated - (shift_src - state->data);
+
+	if (shift_size != 0U) {
+		memmove(shift_dest, shift_src, shift_size);
+	}
+	state->allocated -= free_size;
+}
+
+/**
+ * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
+ * @state:      Global state.
+ * @handle:     Unique handle of object to return.
+ *
+ * Return: struct spmc_shmem_obj object with handle matching @handle.
+ *         %NULL if no object in @state->data has a matching handle.
+ */
+static struct spmc_shmem_obj *
+spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
+{
+	uint8_t *curr = state->data;
+
+	while (curr - state->data < state->allocated) {
+		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
+
+		if (obj->desc.handle == handle) {
+			return obj;
+		}
+		curr += spmc_shmem_obj_size(obj->desc_size);
+	}
+	return NULL;
+}
+
+/**
+ * spmc_shmem_obj_get_next - Get the next memory object from an offset.
+ * @state:      Global state.
+ * @offset:     Offset used to track which objects have previously been
+ *              returned.
+ *
+ * Return: the next struct spmc_shmem_obj object from the provided offset.
+ *         %NULL if there are no more objects.
+ */
+static struct spmc_shmem_obj *
+spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
+{
+	uint8_t *curr = state->data + *offset;
+
+	if (curr - state->data < state->allocated) {
+		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
+
+		*offset += spmc_shmem_obj_size(obj->desc_size);
+
+		return obj;
+	}
+	return NULL;
+}
+
+/*******************************************************************************
+ * FF-A memory descriptor helper functions.
+ ******************************************************************************/
+/**
+ * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
+ *                           client's FF-A version.
+ * @desc:         The memory transaction descriptor.
+ * @index:        The index of the emad element to be accessed.
+ * @ffa_version:  FF-A version of the provided structure.
+ * @emad_size:    Will be populated with the size of the returned emad
+ *                descriptor.
+ * Return: A pointer to the requested emad structure.
+ */
+static void *
+spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
+			uint32_t ffa_version, size_t *emad_size)
+{
+	uint8_t *emad;
+	/*
+	 * If the caller is using FF-A v1.0, interpret the descriptor as a v1.0
+	 * format, otherwise assume it is a v1.1 format.
+	 */
+	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+		/* Cast our descriptor to the v1.0 format.
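+		 * In v1.0 the emad entries form a trailing array of fixed
+		 * 16-byte structs, so entry @index is simply found at
+		 * emad + index * sizeof(struct ffa_emad_v1_0) by the common
+		 * return statement below.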
+		 */
+		struct ffa_mtd_v1_0 *mtd_v1_0 =
+			(struct ffa_mtd_v1_0 *) desc;
+		emad = (uint8_t *) &(mtd_v1_0->emad);
+		*emad_size = sizeof(struct ffa_emad_v1_0);
+	} else {
+		if (!is_aligned(desc->emad_offset, 16)) {
+			WARN("Emad offset is not aligned.\n");
+			return NULL;
+		}
+		emad = ((uint8_t *) desc + desc->emad_offset);
+		*emad_size = desc->emad_size;
+	}
+	return (emad + (*emad_size * index));
+}
+
+/**
+ * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
+ *                               FF-A version of the descriptor.
+ * @obj:          Object containing ffa_memory_region_descriptor.
+ * @ffa_version:  FF-A version of the provided descriptor.
+ *
+ * Return: struct ffa_comp_mrd object corresponding to the composite memory
+ *         region descriptor.
+ */
+static struct ffa_comp_mrd *
+spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
+{
+	size_t emad_size;
+	/*
+	 * The comp_mrd_offset field of the emad descriptor remains consistent
+	 * between FF-A versions therefore we can use the v1.0 descriptor here
+	 * in all cases.
+	 */
+	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
+							     ffa_version,
+							     &emad_size);
+	/* Ensure the emad array was found. */
+	if (emad == NULL) {
+		return NULL;
+	}
+
+	/* Ensure the composite descriptor offset is aligned. */
+	if (!is_aligned(emad->comp_mrd_offset, 8)) {
+		WARN("Unaligned composite memory region descriptor offset.\n");
+		return NULL;
+	}
+
+	return (struct ffa_comp_mrd *)
+	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
+}
+
+/**
+ * spmc_shmem_obj_ffa_constituent_size - Calculate variable size part of obj.
+ * @obj:          Object containing ffa_memory_region_descriptor.
+ * @ffa_version:  FF-A version of the provided descriptor.
+ *
+ * Return: Size of ffa_constituent_memory_region_descriptors in @obj.
+ */
+static size_t
+spmc_shmem_obj_ffa_constituent_size(struct spmc_shmem_obj *obj,
+				    uint32_t ffa_version)
+{
+	struct ffa_comp_mrd *comp_mrd;
+
+	comp_mrd = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
+	if (comp_mrd == NULL) {
+		return 0;
+	}
+	return comp_mrd->address_range_count * sizeof(struct ffa_cons_mrd);
+}
+
+/*
+ * Compare two memory regions to determine if any range overlaps with another
+ * ongoing memory transaction.
+ */
+static bool
+overlapping_memory_regions(struct ffa_comp_mrd *region1,
+			   struct ffa_comp_mrd *region2)
+{
+	uint64_t region1_start;
+	uint64_t region1_size;
+	uint64_t region1_end;
+	uint64_t region2_start;
+	uint64_t region2_size;
+	uint64_t region2_end;
+
+	assert(region1 != NULL);
+	assert(region2 != NULL);
+
+	if (region1 == region2) {
+		return true;
+	}
+
+	/*
+	 * Check each memory region in the request against existing
+	 * transactions.
+	 */
+	for (size_t i = 0; i < region1->address_range_count; i++) {
+
+		region1_start = region1->address_range_array[i].address;
+		region1_size =
+			region1->address_range_array[i].page_count *
+			PAGE_SIZE_4KB;
+		region1_end = region1_start + region1_size;
+
+		for (size_t j = 0; j < region2->address_range_count; j++) {
+
+			region2_start = region2->address_range_array[j].address;
+			region2_size =
+				region2->address_range_array[j].page_count *
+				PAGE_SIZE_4KB;
+			region2_end = region2_start + region2_size;
+
+			/*
+			 * The constituents are [start, end) ranges; two ranges
+			 * overlap iff each starts before the other ends. This
+			 * also catches the case where one range fully contains
+			 * the other.
+			 */
+			if ((region1_start < region2_end) &&
+			    (region2_start < region1_end)) {
+				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
+				     region1_start, region1_end,
+				     region2_start, region2_end);
+				return true;
+			}
+		}
+	}
+	return false;
+}
+
+/*******************************************************************************
+ * FF-A v1.0 Memory Descriptor Conversion Helpers.
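+ *
+ * A v1.1 descriptor produced from a v1.0 one is laid out as: struct ffa_mtd,
+ * then emad_count fixed-size emad entries, then the composite descriptor and
+ * its constituent array. For illustration, its total size is
+ *
+ *	sizeof(struct ffa_mtd) +
+ *	emad_count * sizeof(struct ffa_emad_v1_0) +
+ *	sizeof(struct ffa_comp_mrd) +
+ *	address_range_count * sizeof(struct ffa_cons_mrd)
+ *
+ * which is what spmc_shm_get_v1_1_descriptor_size() computes below.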
+ ******************************************************************************/
+/**
+ * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
+ *                                     converted descriptor.
+ * @orig:       The original v1.0 memory transaction descriptor.
+ * @desc_size:  The size of the original v1.0 memory transaction descriptor.
+ *
+ * Return: the size required to store the descriptor in the v1.1 format.
+ */
+static size_t
+spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
+{
+	size_t size = 0;
+	struct ffa_comp_mrd *mrd;
+	struct ffa_emad_v1_0 *emad_array = orig->emad;
+
+	/* Get the size of the v1.1 descriptor. */
+	size += sizeof(struct ffa_mtd);
+
+	/* Add the size of the emad descriptors. */
+	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
+
+	/* Add the size of the composite mrds. */
+	size += sizeof(struct ffa_comp_mrd);
+
+	/* Add the size of the constituent mrds. */
+	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
+	      emad_array[0].comp_mrd_offset);
+
+	/* Check the calculated address is within the memory descriptor. */
+	if ((uintptr_t) mrd >= (uintptr_t)((uint8_t *) orig + desc_size)) {
+		return 0;
+	}
+	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
+
+	return size;
+}
+
+/**
+ * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
+ *                                     converted descriptor.
+ * @orig:       The original v1.1 memory transaction descriptor.
+ * @desc_size:  The size of the original v1.1 memory transaction descriptor.
+ *
+ * Return: the size required to store the descriptor in the v1.0 format.
+ */
+static size_t
+spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
+{
+	size_t size = 0;
+	struct ffa_comp_mrd *mrd;
+	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
+					   ((uint8_t *) orig +
+					    orig->emad_offset);
+
+	/* Get the size of the v1.0 descriptor. */
+	size += sizeof(struct ffa_mtd_v1_0);
+
+	/* Add the size of the v1.0 emad descriptors. */
+	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
+
+	/* Add the size of the composite mrds. */
+	size += sizeof(struct ffa_comp_mrd);
+
+	/* Add the size of the constituent mrds. */
+	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
+	      emad_array[0].comp_mrd_offset);
+
+	/* Check the calculated address is within the memory descriptor. */
+	if ((uintptr_t) mrd >= (uintptr_t)((uint8_t *) orig + desc_size)) {
+		return 0;
+	}
+	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
+
+	return size;
+}
+
+/**
+ * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
+ * @out_obj:	The shared memory object to populate the converted descriptor.
+ * @orig:	The shared memory object containing the v1.0 descriptor.
+ *
+ * Return: true if the conversion is successful else false.
+ */
+static bool
+spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
+				     struct spmc_shmem_obj *orig)
+{
+	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
+	struct ffa_mtd *out = &out_obj->desc;
+	struct ffa_emad_v1_0 *emad_array_in;
+	struct ffa_emad_v1_0 *emad_array_out;
+	struct ffa_comp_mrd *mrd_in;
+	struct ffa_comp_mrd *mrd_out;
+
+	size_t mrd_in_offset;
+	size_t mrd_out_offset;
+	size_t mrd_size = 0;
+
+	/* Populate the new descriptor format from the v1.0 struct.
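+	 * Fixed-size header fields are copied one-for-one; the emad and mrd
+	 * blocks are then relocated, with each comp_mrd_offset rebased by the
+	 * delta between the output and input mrd offsets computed below.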
*/ + out->sender_id = mtd_orig->sender_id; + out->memory_region_attributes = mtd_orig->memory_region_attributes; + out->flags = mtd_orig->flags; + out->handle = mtd_orig->handle; + out->tag = mtd_orig->tag; + out->emad_count = mtd_orig->emad_count; + out->emad_size = sizeof(struct ffa_emad_v1_0); + + /* + * We will locate the emad descriptors directly after the ffa_mtd + * struct. This will be 8-byte aligned. + */ + out->emad_offset = sizeof(struct ffa_mtd); + + emad_array_in = mtd_orig->emad; + emad_array_out = (struct ffa_emad_v1_0 *) + ((uint8_t *) out + out->emad_offset); + + /* Copy across the emad structs. */ + for (unsigned int i = 0U; i < out->emad_count; i++) { + memcpy(&emad_array_out[i], &emad_array_in[i], + sizeof(struct ffa_emad_v1_0)); + } + + /* Place the mrd descriptors after the end of the emad descriptors.*/ + mrd_in_offset = emad_array_in->comp_mrd_offset; + mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count); + mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset); + + /* Add the size of the composite memory region descriptor. */ + mrd_size += sizeof(struct ffa_comp_mrd); + + /* Find the mrd descriptor. */ + mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset); + + /* Add the size of the constituent memory region descriptors. */ + mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd); + + /* + * Update the offset in the emads by the delta between the input and + * output addresses. + */ + for (unsigned int i = 0U; i < out->emad_count; i++) { + emad_array_out[i].comp_mrd_offset = + emad_array_in[i].comp_mrd_offset + + (mrd_out_offset - mrd_in_offset); + } + + /* Verify that we stay within bound of the memory descriptors. */ + if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) > + (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) || + ((uintptr_t)((uint8_t *) mrd_out + mrd_size) > + (uintptr_t)((uint8_t *) out + out_obj->desc_size))) { + ERROR("%s: Invalid mrd structure.\n", __func__); + return false; + } + + /* Copy the mrd descriptors directly. */ + memcpy(mrd_out, mrd_in, mrd_size); + + return true; +} + +/** + * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to + * v1.0 memory object. + * @out_obj: The shared memory object to populate the v1.0 descriptor. + * @orig: The shared memory object containing the v1.1 descriptor. + * + * Return: true if the conversion is successful else false. + */ +static bool +spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj, + struct spmc_shmem_obj *orig) +{ + struct ffa_mtd *mtd_orig = &orig->desc; + struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc; + struct ffa_emad_v1_0 *emad_in; + struct ffa_emad_v1_0 *emad_array_in; + struct ffa_emad_v1_0 *emad_array_out; + struct ffa_comp_mrd *mrd_in; + struct ffa_comp_mrd *mrd_out; + + size_t mrd_in_offset; + size_t mrd_out_offset; + size_t emad_out_array_size; + size_t mrd_size = 0; + + /* Populate the v1.0 descriptor format from the v1.1 struct. */ + out->sender_id = mtd_orig->sender_id; + out->memory_region_attributes = mtd_orig->memory_region_attributes; + out->flags = mtd_orig->flags; + out->handle = mtd_orig->handle; + out->tag = mtd_orig->tag; + out->emad_count = mtd_orig->emad_count; + + /* Determine the location of the emad array in both descriptors. */ + emad_array_in = (struct ffa_emad_v1_0 *) + ((uint8_t *) mtd_orig + mtd_orig->emad_offset); + emad_array_out = out->emad; + + /* Copy across the emad structs. 
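+	 * Note the stride: input entries are mtd_orig->emad_size bytes apart
+	 * (a v1.1 sender may use a larger emad), while each output entry is a
+	 * fixed sizeof(struct ffa_emad_v1_0).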
+	 */
+	emad_in = emad_array_in;
+	for (unsigned int i = 0U; i < out->emad_count; i++) {
+		memcpy(&emad_array_out[i], emad_in,
+		       sizeof(struct ffa_emad_v1_0));
+
+		/* emad_size is in bytes, so advance the pointer by bytes. */
+		emad_in = (struct ffa_emad_v1_0 *)
+			  ((uint8_t *) emad_in + mtd_orig->emad_size);
+	}
+
+	/* Place the mrd descriptors after the end of the emad descriptors. */
+	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;
+
+	mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
+			 emad_out_array_size;
+
+	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
+
+	mrd_in_offset = mtd_orig->emad_offset +
+			(mtd_orig->emad_size * mtd_orig->emad_count);
+
+	/* Add the size of the composite memory region descriptor. */
+	mrd_size += sizeof(struct ffa_comp_mrd);
+
+	/* Find the mrd descriptor. */
+	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
+
+	/* Add the size of the constituent memory region descriptors. */
+	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
+
+	/*
+	 * Update the offset in the emads by the delta between the input and
+	 * output addresses.
+	 */
+	emad_in = emad_array_in;
+
+	for (unsigned int i = 0U; i < out->emad_count; i++) {
+		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
+						    (mrd_out_offset -
+						     mrd_in_offset);
+		/* Advance by the input emad stride in bytes. */
+		emad_in = (struct ffa_emad_v1_0 *)
+			  ((uint8_t *) emad_in + mtd_orig->emad_size);
+	}
+
+	/* Verify that we stay within bound of the memory descriptors. */
+	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
+	    (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
+	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
+	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
+		ERROR("%s: Invalid mrd structure.\n", __func__);
+		return false;
+	}
+
+	/* Copy the mrd descriptors directly. */
+	memcpy(mrd_out, mrd_in, mrd_size);
+
+	return true;
+}
+
+/**
+ * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
+ *                                     the v1.0 format and populates the
+ *                                     provided buffer.
+ * @dst:	      Buffer to populate v1.0 ffa_memory_region_descriptor.
+ * @orig_obj:	      Object containing v1.1 ffa_memory_region_descriptor.
+ * @buf_size:	      Size of the buffer to populate.
+ * @offset:	      The offset of the converted descriptor to copy.
+ * @copy_size:	      Will be populated with the number of bytes copied.
+ * @v1_0_desc_size:   Will be populated with the total size of the v1.0
+ *                    descriptor.
+ *
+ * Return: 0 if conversion and population succeeded.
+ * Note: This function invalidates the reference to @orig_obj, therefore
+ * `spmc_shmem_obj_lookup` must be called if further usage is required.
+ */
+static uint32_t
+spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
+				  size_t buf_size, size_t offset,
+				  size_t *copy_size, size_t *v1_0_desc_size)
+{
+	struct spmc_shmem_obj *v1_0_obj;
+
+	/* Calculate the size that the v1.0 descriptor will require. */
+	*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
+			  &orig_obj->desc, orig_obj->desc_size);
+
+	if (*v1_0_desc_size == 0) {
+		ERROR("%s: cannot determine size of descriptor.\n",
+		      __func__);
+		return FFA_ERROR_INVALID_PARAMETER;
+	}
+
+	/* Get a new obj to store the v1.0 descriptor. */
+	v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
+					*v1_0_desc_size);
+
+	if (!v1_0_obj) {
+		return FFA_ERROR_NO_MEMORY;
+	}
+
+	/* Perform the conversion from v1.1 to v1.0.
*/ + if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) { + spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj); + return FFA_ERROR_INVALID_PARAMETER; + } + + *copy_size = MIN(v1_0_obj->desc_size - offset, buf_size); + memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size); + + /* + * We're finished with the v1.0 descriptor for now so free it. + * Note that this will invalidate any references to the v1.1 + * descriptor. + */ + spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj); + + return 0; +} + +/** + * spmc_shmem_check_obj - Check that counts in descriptor match overall size. + * @obj: Object containing ffa_memory_region_descriptor. + * @ffa_version: FF-A version of the provided descriptor. + * + * Return: 0 if object is valid, -EINVAL if constituent_memory_region_descriptor + * offset or count is invalid. + */ +static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj, + uint32_t ffa_version) +{ + uint32_t comp_mrd_offset = 0; + + if (obj->desc.emad_count == 0U) { + WARN("%s: unsupported attribute desc count %u.\n", + __func__, obj->desc.emad_count); + return -EINVAL; + } + + for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) { + size_t size; + size_t count; + size_t expected_size; + size_t total_page_count; + size_t emad_size; + size_t desc_size; + size_t header_emad_size; + uint32_t offset; + struct ffa_comp_mrd *comp; + struct ffa_emad_v1_0 *emad; + + emad = spmc_shmem_obj_get_emad(&obj->desc, emad_num, + ffa_version, &emad_size); + if (emad == NULL) { + WARN("%s: invalid emad structure.\n", __func__); + return -EINVAL; + } + + /* + * Validate the calculated emad address resides within the + * descriptor. + */ + if ((uintptr_t) emad >= + (uintptr_t)((uint8_t *) &obj->desc + obj->desc_size)) { + WARN("Invalid emad access.\n"); + return -EINVAL; + } + + offset = emad->comp_mrd_offset; + + if (ffa_version == MAKE_FFA_VERSION(1, 0)) { + desc_size = sizeof(struct ffa_mtd_v1_0); + } else { + desc_size = sizeof(struct ffa_mtd); + } + + header_emad_size = desc_size + + (obj->desc.emad_count * emad_size); + + if (offset < header_emad_size) { + WARN("%s: invalid object, offset %u < header + emad %zu\n", + __func__, offset, header_emad_size); + return -EINVAL; + } + + size = obj->desc_size; + + if (offset > size) { + WARN("%s: invalid object, offset %u > total size %zu\n", + __func__, offset, obj->desc_size); + return -EINVAL; + } + size -= offset; + + if (size < sizeof(struct ffa_comp_mrd)) { + WARN("%s: invalid object, offset %u, total size %zu, no header space.\n", + __func__, offset, obj->desc_size); + return -EINVAL; + } + size -= sizeof(struct ffa_comp_mrd); + + count = size / sizeof(struct ffa_cons_mrd); + + comp = spmc_shmem_obj_get_comp_mrd(obj, ffa_version); + + if (comp == NULL) { + WARN("%s: invalid comp_mrd offset\n", __func__); + return -EINVAL; + } + + if (comp->address_range_count != count) { + WARN("%s: invalid object, desc count %u != %zu\n", + __func__, comp->address_range_count, count); + return -EINVAL; + } + + expected_size = offset + sizeof(*comp) + + spmc_shmem_obj_ffa_constituent_size(obj, + ffa_version); + + if (expected_size != obj->desc_size) { + WARN("%s: invalid object, computed size %zu != size %zu\n", + __func__, expected_size, obj->desc_size); + return -EINVAL; + } + + if (obj->desc_filled < obj->desc_size) { + /* + * The whole descriptor has not yet been received. + * Skip final checks. 
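+			 * (Remaining fragments arrive via FFA_MEM_FRAG_TX and
+			 * are appended by spmc_ffa_fill_desc() until
+			 * desc_filled reaches desc_size.)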
+			 */
+			return 0;
+		}
+
+		/*
+		 * The offset provided to the composite memory region descriptor
+		 * should be consistent across endpoint descriptors. Store the
+		 * first entry and compare against subsequent entries.
+		 */
+		if (comp_mrd_offset == 0) {
+			comp_mrd_offset = offset;
+		} else {
+			if (comp_mrd_offset != offset) {
+				ERROR("%s: mismatching offsets provided, %u != %u\n",
+				      __func__, offset, comp_mrd_offset);
+				return -EINVAL;
+			}
+		}
+
+		total_page_count = 0;
+
+		for (size_t i = 0; i < count; i++) {
+			total_page_count +=
+				comp->address_range_array[i].page_count;
+		}
+		if (comp->total_page_count != total_page_count) {
+			WARN("%s: invalid object, desc total_page_count %u != %zu\n",
+			     __func__, comp->total_page_count,
+			     total_page_count);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/**
+ * spmc_shmem_check_state_obj - Check if the descriptor describes memory
+ *                              regions that are currently involved in an
+ *                              existing memory transaction. This implies that
+ *                              the memory is not in a valid state for lending.
+ * @obj:          Object containing ffa_memory_region_descriptor.
+ * @ffa_version:  FF-A version of the provided descriptor.
+ *
+ * Return: 0 if object is valid, -EINVAL if invalid memory state.
+ */
+static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
+				      uint32_t ffa_version)
+{
+	size_t obj_offset = 0;
+	struct spmc_shmem_obj *inflight_obj;
+
+	struct ffa_comp_mrd *other_mrd;
+	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
+								 ffa_version);
+
+	if (requested_mrd == NULL) {
+		return -EINVAL;
+	}
+
+	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
+					       &obj_offset);
+
+	while (inflight_obj != NULL) {
+		/*
+		 * Don't compare the transaction to itself or to partially
+		 * transmitted descriptors.
+		 */
+		if ((obj->desc.handle != inflight_obj->desc.handle) &&
+		    (inflight_obj->desc_size == inflight_obj->desc_filled)) {
+			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
+								ffa_version);
+			if (other_mrd == NULL) {
+				return -EINVAL;
+			}
+			if (overlapping_memory_regions(requested_mrd,
+						       other_mrd)) {
+				return -EINVAL;
+			}
+		}
+
+		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
+						       &obj_offset);
+	}
+	return 0;
+}
+
+static long spmc_ffa_fill_desc(struct mailbox *mbox,
+			       struct spmc_shmem_obj *obj,
+			       uint32_t fragment_length,
+			       ffa_mtd_flag32_t mtd_flag,
+			       uint32_t ffa_version,
+			       void *smc_handle)
+{
+	int ret;
+	size_t emad_size;
+	uint32_t handle_low;
+	uint32_t handle_high;
+	struct ffa_emad_v1_0 *emad;
+	struct ffa_emad_v1_0 *other_emad;
+
+	if (mbox->rxtx_page_count == 0U) {
+		WARN("%s: buffer pair not registered.\n", __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_arg;
+	}
+
+	if (fragment_length > mbox->rxtx_page_count * PAGE_SIZE_4KB) {
+		WARN("%s: bad fragment size %u > %u buffer size\n", __func__,
+		     fragment_length, mbox->rxtx_page_count * PAGE_SIZE_4KB);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_arg;
+	}
+
+	/*
+	 * Check that the fragment fits in the remaining space of the
+	 * descriptor before copying it in, so an oversized fragment cannot
+	 * overflow the object.
+	 */
+	if (fragment_length > obj->desc_size - obj->desc_filled) {
+		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
+		     fragment_length, obj->desc_size - obj->desc_filled);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_arg;
+	}
+
+	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
+	       (uint8_t *) mbox->tx_buffer, fragment_length);
+
+	/* Ensure that the sender ID resides in the normal world. */
+	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
+		WARN("%s: Invalid sender ID 0x%x.\n",
+		     __func__, obj->desc.sender_id);
+		ret = FFA_ERROR_DENIED;
+		goto err_arg;
+	}
+
+	/* Ensure the NS bit is set to 0.
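+	 * For a transmitted descriptor the NS attribute must be zero; whether
+	 * a borrower later sees the NS bit set on retrieval depends on the
+	 * version/property negotiation handled in
+	 * ffa_features_retrieve_request().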
+	if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
+		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_arg;
+	}
+
+	/*
+	 * We don't currently support any optional flags so ensure none are
+	 * requested.
+	 */
+	if (obj->desc.flags != 0U && mtd_flag != 0U &&
+	    (obj->desc.flags != mtd_flag)) {
+		WARN("%s: invalid memory transaction flags %u != %u\n",
+		     __func__, obj->desc.flags, mtd_flag);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_arg;
+	}
+
+	if (obj->desc_filled == 0U) {
+		/* First fragment, descriptor header has been copied */
+		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
+		obj->desc.flags |= mtd_flag;
+	}
+
+	obj->desc_filled += fragment_length;
+	ret = spmc_shmem_check_obj(obj, ffa_version);
+	if (ret != 0) {
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_bad_desc;
+	}
+
+	handle_low = (uint32_t)obj->desc.handle;
+	handle_high = obj->desc.handle >> 32;
+
+	if (obj->desc_filled != obj->desc_size) {
+		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
+			 handle_high, obj->desc_filled,
+			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
+	}
+
+	/* The full descriptor has been received, perform any final checks. */
+
+	/*
+	 * If a partition ID resides in the secure world validate that the
+	 * partition ID is for a known partition. Ignore any partition ID
+	 * belonging to the normal world as it is assumed the Hypervisor will
+	 * have validated these.
+	 */
+	for (size_t i = 0; i < obj->desc.emad_count; i++) {
+		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
+					       &emad_size);
+		if (emad == NULL) {
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_bad_desc;
+		}
+
+		ffa_endpoint_id16_t ep_id = emad->mapd.endpoint_id;
+
+		if (ffa_is_secure_world_id(ep_id)) {
+			if (spmc_get_sp_ctx(ep_id) == NULL) {
+				WARN("%s: Invalid receiver id 0x%x\n",
+				     __func__, ep_id);
+				ret = FFA_ERROR_INVALID_PARAMETER;
+				goto err_bad_desc;
+			}
+		}
+	}
+
+	/* Ensure partition IDs are not duplicated. */
+	for (size_t i = 0; i < obj->desc.emad_count; i++) {
+		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
+					       &emad_size);
+		if (emad == NULL) {
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_bad_desc;
+		}
+		for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
+			other_emad = spmc_shmem_obj_get_emad(&obj->desc, j,
+							     ffa_version,
+							     &emad_size);
+			if (other_emad == NULL) {
+				ret = FFA_ERROR_INVALID_PARAMETER;
+				goto err_bad_desc;
+			}
+
+			if (emad->mapd.endpoint_id ==
+			    other_emad->mapd.endpoint_id) {
+				WARN("%s: Duplicated endpoint id 0x%x\n",
+				     __func__, emad->mapd.endpoint_id);
+				ret = FFA_ERROR_INVALID_PARAMETER;
+				goto err_bad_desc;
+			}
+		}
+	}
+
+	ret = spmc_shmem_check_state_obj(obj, ffa_version);
+	if (ret) {
+		ERROR("%s: invalid memory region descriptor.\n", __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_bad_desc;
+	}
+
+	/*
+	 * Everything checks out. If the sender was using FF-A v1.0, convert
+	 * the descriptor format to use the v1.1 structures.
+	 */
+	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+		struct spmc_shmem_obj *v1_1_obj;
+		uint64_t mem_handle;
+
+		/* Calculate the size that the v1.1 descriptor will require. */
+		size_t v1_1_desc_size =
+		    spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
+						      fragment_length);
+
+		if (v1_1_desc_size == 0U) {
+			ERROR("%s: cannot determine size of descriptor.\n",
+			      __func__);
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_arg;
+		}
+
+		/* Get a new obj to store the v1.1 descriptor. */
+		v1_1_obj =
+		    spmc_shmem_obj_alloc(&spmc_shmem_obj_state, v1_1_desc_size);
+
+		if (v1_1_obj == NULL) {
+			ret = FFA_ERROR_NO_MEMORY;
+			goto err_arg;
+		}
+
+		/* Perform the conversion from v1.0 to v1.1. */
+		v1_1_obj->desc_size = v1_1_desc_size;
+		v1_1_obj->desc_filled = v1_1_desc_size;
+		if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
+			ERROR("%s: Could not convert mtd!\n", __func__);
+			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_arg;
+		}
+
+		/*
+		 * We're finished with the v1.0 descriptor so free it
+		 * and continue our checks with the new v1.1 descriptor.
+		 */
+		mem_handle = obj->desc.handle;
+		spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
+		obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
+		if (obj == NULL) {
+			ERROR("%s: Failed to find converted descriptor.\n",
+			      __func__);
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			return spmc_ffa_error_return(smc_handle, ret);
+		}
+	}
+
+	/* Allow for platform specific operations to be performed. */
+	ret = plat_spmc_shmem_begin(&obj->desc);
+	if (ret != 0) {
+		goto err_arg;
+	}
+
+	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
+		 0, 0, 0);
+
+err_bad_desc:
+err_arg:
+	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
+	return spmc_ffa_error_return(smc_handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
+ * @client:		Client state.
+ * @total_length:	Total length of shared memory descriptor.
+ * @fragment_length:	Length of fragment of shared memory descriptor passed in
+ *			this call.
+ * @address:		Not supported, must be 0.
+ * @page_count:		Not supported, must be 0.
+ * @smc_handle:		Handle passed to smc call. Used to return
+ *			FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
+ *
+ * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
+ * to share or lend memory from non-secure os to secure os (with no stream
+ * endpoints).
+ *
+ * Return: 0 on success, error code on failure.
+ */
+long spmc_ffa_mem_send(uint32_t smc_fid,
+		       bool secure_origin,
+		       uint64_t total_length,
+		       uint32_t fragment_length,
+		       uint64_t address,
+		       uint32_t page_count,
+		       void *cookie,
+		       void *handle,
+		       uint64_t flags)
+{
+	long ret;
+	struct spmc_shmem_obj *obj;
+	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+	ffa_mtd_flag32_t mtd_flag;
+	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+
+	if (address != 0U || page_count != 0U) {
+		WARN("%s: custom memory region for message not supported.\n",
+		     __func__);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	if (secure_origin) {
+		WARN("%s: unsupported share direction.\n", __func__);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	/*
+	 * Check if the descriptor is smaller than the v1.0 descriptor. No
+	 * valid descriptor can be smaller than this structure.
+ */ + if (fragment_length < sizeof(struct ffa_mtd_v1_0)) { + WARN("%s: bad first fragment size %u < %zu\n", + __func__, fragment_length, sizeof(struct ffa_mtd_v1_0)); + return spmc_ffa_error_return(handle, + FFA_ERROR_INVALID_PARAMETER); + } + + if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) { + mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY; + } else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) { + mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY; + } else { + WARN("%s: invalid memory management operation.\n", __func__); + return spmc_ffa_error_return(handle, + FFA_ERROR_INVALID_PARAMETER); + } + + spin_lock(&spmc_shmem_obj_state.lock); + obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length); + if (obj == NULL) { + ret = FFA_ERROR_NO_MEMORY; + goto err_unlock; + } + + spin_lock(&mbox->lock); + ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag, + ffa_version, handle); + spin_unlock(&mbox->lock); + + spin_unlock(&spmc_shmem_obj_state.lock); + return ret; + +err_unlock: + spin_unlock(&spmc_shmem_obj_state.lock); + return spmc_ffa_error_return(handle, ret); +} + +/** + * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation. + * @client: Client state. + * @handle_low: Handle_low value returned from FFA_MEM_FRAG_RX. + * @handle_high: Handle_high value returned from FFA_MEM_FRAG_RX. + * @fragment_length: Length of fragments transmitted. + * @sender_id: Vmid of sender in bits [31:16] + * @smc_handle: Handle passed to smc call. Used to return + * FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS. + * + * Return: @smc_handle on success, error code on failure. + */ +long spmc_ffa_mem_frag_tx(uint32_t smc_fid, + bool secure_origin, + uint64_t handle_low, + uint64_t handle_high, + uint32_t fragment_length, + uint32_t sender_id, + void *cookie, + void *handle, + uint64_t flags) +{ + long ret; + uint32_t desc_sender_id; + uint32_t ffa_version = get_partition_ffa_version(secure_origin); + struct mailbox *mbox = spmc_get_mbox_desc(secure_origin); + + struct spmc_shmem_obj *obj; + uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32); + + spin_lock(&spmc_shmem_obj_state.lock); + + obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle); + if (obj == NULL) { + WARN("%s: invalid handle, 0x%lx, not a valid handle.\n", + __func__, mem_handle); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock; + } + + desc_sender_id = (uint32_t)obj->desc.sender_id << 16; + if (sender_id != desc_sender_id) { + WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__, + sender_id, desc_sender_id); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock; + } + + if (obj->desc_filled == obj->desc_size) { + WARN("%s: object desc already filled, %zu\n", __func__, + obj->desc_filled); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock; + } + + spin_lock(&mbox->lock); + ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version, + handle); + spin_unlock(&mbox->lock); + + spin_unlock(&spmc_shmem_obj_state.lock); + return ret; + +err_unlock: + spin_unlock(&spmc_shmem_obj_state.lock); + return spmc_ffa_error_return(handle, ret); +} + +/** + * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor + * if the caller implements a version greater + * than FF-A 1.0 or if they have requested + * the functionality. + * TODO: We are assuming that the caller is + * an SP. To support retrieval from the + * normal world this function will need to be + * expanded accordingly. + * @resp: Descriptor populated in callers RX buffer. + * @sp_ctx: Context of the calling SP. 
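+ *
+ * For example (illustrative only): a v1.1 SP retrieving memory shared
+ * from the normal world would observe FFA_MEM_ATTR_NS_BIT OR'd into
+ * memory_region_attributes in its copy of the response descriptor.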
+ */
+void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
+				      struct secure_partition_desc *sp_ctx)
+{
+	if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
+	    sp_ctx->ns_bit_requested) {
+		/*
+		 * Currently memory senders must reside in the normal
+		 * world, and we do not have the functionality to change
+		 * the state of memory dynamically. Therefore we can always set
+		 * the NS bit to 1.
+		 */
+		resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
+	}
+}
+
+/**
+ * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
+ * @smc_fid:		FID of SMC
+ * @total_length:	Total length of retrieve request descriptor if this is
+ *			the first call. Otherwise (unsupported) must be 0.
+ * @fragment_length:	Length of fragment of retrieve request descriptor passed
+ *			in this call. Only @fragment_length == @total_length is
+ *			supported by this implementation.
+ * @address:		Not supported, must be 0.
+ * @page_count:		Not supported, must be 0.
+ * @smc_handle:		Handle passed to smc call. Used to return
+ *			FFA_MEM_RETRIEVE_RESP.
+ *
+ * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
+ * Used by secure os to retrieve memory already shared by non-secure os.
+ * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
+ * the client must call FFA_MEM_FRAG_RX until the full response has been
+ * received.
+ *
+ * Return: @handle on success, error code on failure.
+ */
+long spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
+			       bool secure_origin,
+			       uint32_t total_length,
+			       uint32_t fragment_length,
+			       uint64_t address,
+			       uint32_t page_count,
+			       void *cookie,
+			       void *handle,
+			       uint64_t flags)
+{
+	int ret;
+	size_t buf_size;
+	size_t copy_size = 0;
+	size_t min_desc_size;
+	size_t out_desc_size = 0;
+
+	/*
+	 * Currently we are only accessing fields that are the same in both the
+	 * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
+	 * here. We only need to validate against the appropriate struct size.
+	 */
+	struct ffa_mtd *resp;
+	const struct ffa_mtd *req;
+	struct spmc_shmem_obj *obj = NULL;
+	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
+
+	if (!secure_origin) {
+		WARN("%s: unsupported retrieve req direction.\n", __func__);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	if (address != 0U || page_count != 0U) {
+		WARN("%s: custom memory region not supported.\n", __func__);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	spin_lock(&mbox->lock);
+
+	req = mbox->tx_buffer;
+	resp = mbox->rx_buffer;
+	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
+
+	if (mbox->rxtx_page_count == 0U) {
+		WARN("%s: buffer pair not registered.\n", __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_mailbox;
+	}
+
+	if (mbox->state != MAILBOX_STATE_EMPTY) {
+		WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
+		ret = FFA_ERROR_DENIED;
+		goto err_unlock_mailbox;
+	}
+
+	if (fragment_length != total_length) {
+		WARN("%s: fragmented retrieve request not supported.\n",
+		     __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_mailbox;
+	}
+
+	if (req->emad_count == 0U) {
+		WARN("%s: unsupported attribute desc count %u.\n",
+		     __func__, req->emad_count);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_mailbox;
+	}
+
+	/* Determine the appropriate minimum descriptor size. */
+	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+		min_desc_size = sizeof(struct ffa_mtd_v1_0);
+	} else {
+		min_desc_size = sizeof(struct ffa_mtd);
+	}
+	if (total_length < min_desc_size) {
+		WARN("%s: invalid length %u < %zu\n", __func__, total_length,
+		     min_desc_size);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_mailbox;
+	}
+
+	spin_lock(&spmc_shmem_obj_state.lock);
+
+	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
+	if (obj == NULL) {
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	if (obj->desc_filled != obj->desc_size) {
+		WARN("%s: incomplete object desc filled %zu < size %zu\n",
+		     __func__, obj->desc_filled, obj->desc_size);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
+		WARN("%s: wrong sender id 0x%x != 0x%x\n",
+		     __func__, req->sender_id, obj->desc.sender_id);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	if (req->emad_count != 0U && req->tag != obj->desc.tag) {
+		WARN("%s: wrong tag 0x%lx != 0x%lx\n",
+		     __func__, req->tag, obj->desc.tag);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
+		WARN("%s: mismatch of endpoint counts %u != %u\n",
+		     __func__, req->emad_count, obj->desc.emad_count);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	/* Ensure the NS bit is set to 0 in the request. */
+	if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
+		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	if (req->flags != 0U) {
+		if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
+		    (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
+			/*
+			 * If the retrieve request specifies the memory
+			 * transaction type, ensure it matches what we expect.
+			 */
+			WARN("%s: wrong mem transaction flags %x != %x\n",
+			     __func__, req->flags, obj->desc.flags);
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_unlock_all;
+		}
+
+		if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
+		    req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
+			/*
+			 * Current implementation does not support donate and
+			 * it supports no other flags.
+			 */
+			WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_unlock_all;
+		}
+	}
+
+	/* Validate that the provided emad offset and structure are valid. */
+	for (size_t i = 0; i < req->emad_count; i++) {
+		size_t emad_size;
+		struct ffa_emad_v1_0 *emad;
+
+		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
+					       &emad_size);
+		if (emad == NULL) {
+			WARN("%s: invalid emad structure.\n", __func__);
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_unlock_all;
+		}
+
+		if ((uintptr_t) emad >= (uintptr_t)
+					((uint8_t *) req + total_length)) {
+			WARN("Invalid emad access.\n");
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_unlock_all;
+		}
+	}
+
+	/*
+	 * Validate all the endpoints match in the case of multiple
+	 * borrowers. We don't mandate that the order of the borrowers
+	 * must match in the descriptors therefore check to see if the
+	 * endpoints match in any order.
+	 */
+	for (size_t i = 0; i < req->emad_count; i++) {
+		bool found = false;
+		size_t emad_size;
+		struct ffa_emad_v1_0 *emad;
+		struct ffa_emad_v1_0 *other_emad;
+
+		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
+					       &emad_size);
+		if (emad == NULL) {
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_unlock_all;
+		}
+
+		for (size_t j = 0; j < obj->desc.emad_count; j++) {
+			other_emad = spmc_shmem_obj_get_emad(
+					&obj->desc, j, MAKE_FFA_VERSION(1, 1),
+					&emad_size);
+
+			if (other_emad == NULL) {
+				ret = FFA_ERROR_INVALID_PARAMETER;
+				goto err_unlock_all;
+			}
+
+			if (req->emad_count &&
+			    emad->mapd.endpoint_id ==
+			    other_emad->mapd.endpoint_id) {
+				found = true;
+				break;
+			}
+		}
+
+		if (!found) {
+			WARN("%s: invalid receiver id (0x%x).\n",
+			     __func__, emad->mapd.endpoint_id);
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_unlock_all;
+		}
+	}
+
+	mbox->state = MAILBOX_STATE_FULL;
+
+	if (req->emad_count != 0U) {
+		obj->in_use++;
+	}
+
+	/*
+	 * If the caller is v1.0 convert the descriptor, otherwise copy
+	 * directly.
+	 */
+	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+		ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
+							&copy_size,
+							&out_desc_size);
+		if (ret != 0U) {
+			ERROR("%s: Failed to process descriptor.\n", __func__);
+			goto err_unlock_all;
+		}
+	} else {
+		copy_size = MIN(obj->desc_size, buf_size);
+		out_desc_size = obj->desc_size;
+
+		memcpy(resp, &obj->desc, copy_size);
+	}
+
+	/* Set the NS bit in the response if applicable. */
+	spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);
+
+	spin_unlock(&spmc_shmem_obj_state.lock);
+	spin_unlock(&mbox->lock);
+
+	SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
+		 copy_size, 0, 0, 0, 0, 0);
+
+err_unlock_all:
+	spin_unlock(&spmc_shmem_obj_state.lock);
+err_unlock_mailbox:
+	spin_unlock(&mbox->lock);
+	return spmc_ffa_error_return(handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
+ * @client:		Client state.
+ * @handle_low:		Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
+ * @handle_high:	Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
+ * @fragment_offset:	Byte offset in descriptor to resume at.
+ * @sender_id:		Bit[31:16]: Endpoint id of sender if client is a
+ *			hypervisor. 0 otherwise.
+ * @smc_handle:		Handle passed to smc call. Used to return
+ *			FFA_MEM_FRAG_TX.
+ *
+ * Return: @smc_handle on success, error code on failure.
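+ *
+ * Example (illustrative sketch, assuming a hypothetical SP-side
+ * ffa_call() helper that returns the SMC result registers; not part of
+ * this patch): a borrower whose RX buffer is smaller than the descriptor
+ * can drain the remainder with:
+ *
+ *	offset = copy_size;	// from the FFA_MEM_RETRIEVE_RESP
+ *	while (offset < out_desc_size) {
+ *		ret = ffa_call(FFA_MEM_FRAG_RX, handle_low, handle_high,
+ *			       offset, 0, 0, 0, 0);
+ *		// FFA_MEM_FRAG_TX carries the fragment length in w3.
+ *		offset += ret.r3;
+ *		// Consume the fragment, then release the RX buffer.
+ *	}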
+ */
+long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
+			  bool secure_origin,
+			  uint32_t handle_low,
+			  uint32_t handle_high,
+			  uint32_t fragment_offset,
+			  uint32_t sender_id,
+			  void *cookie,
+			  void *handle,
+			  uint64_t flags)
+{
+	int ret;
+	void *src;
+	size_t buf_size;
+	size_t copy_size;
+	size_t full_copy_size;
+	uint32_t desc_sender_id;
+	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
+	struct spmc_shmem_obj *obj;
+	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+
+	if (!secure_origin) {
+		WARN("%s: can only be called from swld.\n",
+		     __func__);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	spin_lock(&spmc_shmem_obj_state.lock);
+
+	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
+	if (obj == NULL) {
+		WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
+		     __func__, mem_handle);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_shmem;
+	}
+
+	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
+	if (sender_id != 0U && sender_id != desc_sender_id) {
+		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
+		     sender_id, desc_sender_id);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_shmem;
+	}
+
+	if (fragment_offset >= obj->desc_size) {
+		WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
+		     __func__, fragment_offset, obj->desc_size);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_shmem;
+	}
+
+	spin_lock(&mbox->lock);
+
+	if (mbox->rxtx_page_count == 0U) {
+		WARN("%s: buffer pair not registered.\n", __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	if (mbox->state != MAILBOX_STATE_EMPTY) {
+		WARN("%s: RX Buffer is full!\n", __func__);
+		ret = FFA_ERROR_DENIED;
+		goto err_unlock_all;
+	}
+
+	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
+
+	mbox->state = MAILBOX_STATE_FULL;
+
+	/*
+	 * If the caller is v1.0 convert the descriptor, otherwise copy
+	 * directly.
+	 */
+	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+		size_t out_desc_size;
+
+		ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
+							buf_size,
+							fragment_offset,
+							&copy_size,
+							&out_desc_size);
+		if (ret != 0U) {
+			ERROR("%s: Failed to process descriptor.\n", __func__);
+			goto err_unlock_all;
+		}
+	} else {
+		full_copy_size = obj->desc_size - fragment_offset;
+		copy_size = MIN(full_copy_size, buf_size);
+
+		src = &obj->desc;
+
+		memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
+	}
+
+	spin_unlock(&mbox->lock);
+	spin_unlock(&spmc_shmem_obj_state.lock);
+
+	SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
+		 copy_size, sender_id, 0, 0, 0);
+
+err_unlock_all:
+	spin_unlock(&mbox->lock);
+err_unlock_shmem:
+	spin_unlock(&spmc_shmem_obj_state.lock);
+	return spmc_ffa_error_return(handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
+ * @client:		Client state.
+ *
+ * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
+ * Used by secure os to release previously shared memory to non-secure os.
+ *
+ * The handle to release must be in the client's (secure os's) transmit buffer.
+ *
+ * Return: 0 on success, error code on failure.
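+ *
+ * Example TX buffer contents for a single borrower (illustrative only;
+ * mem_handle and own_id stand in for values the SP already holds):
+ *
+ *	struct ffa_mem_relinquish_descriptor *req = tx_buffer;
+ *
+ *	req->handle = mem_handle;	// from FFA_MEM_RETRIEVE_RESP
+ *	req->flags = 0;			// no zeroing, no time slicing
+ *	req->endpoint_count = 1;
+ *	req->endpoint_array[0] = own_id;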
+ */
+int spmc_ffa_mem_relinquish(uint32_t smc_fid,
+			    bool secure_origin,
+			    uint32_t handle_low,
+			    uint32_t handle_high,
+			    uint32_t fragment_offset,
+			    uint32_t sender_id,
+			    void *cookie,
+			    void *handle,
+			    uint64_t flags)
+{
+	int ret;
+	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+	struct spmc_shmem_obj *obj;
+	const struct ffa_mem_relinquish_descriptor *req;
+
+	if (!secure_origin) {
+		WARN("%s: unsupported relinquish direction.\n", __func__);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	spin_lock(&mbox->lock);
+
+	if (mbox->rxtx_page_count == 0U) {
+		WARN("%s: buffer pair not registered.\n", __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_mailbox;
+	}
+
+	req = mbox->tx_buffer;
+
+	if (req->flags != 0U) {
+		WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_mailbox;
+	}
+
+	if (req->endpoint_count == 0) {
+		WARN("%s: endpoint count cannot be 0.\n", __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_mailbox;
+	}
+
+	spin_lock(&spmc_shmem_obj_state.lock);
+
+	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
+	if (obj == NULL) {
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	if (obj->desc.emad_count != req->endpoint_count) {
+		WARN("%s: mismatch of endpoint count %u != %u\n", __func__,
+		     obj->desc.emad_count, req->endpoint_count);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	/* Validate requested endpoint IDs match descriptor. */
+	for (size_t i = 0; i < req->endpoint_count; i++) {
+		bool found = false;
+		size_t emad_size;
+		struct ffa_emad_v1_0 *emad;
+
+		for (unsigned int j = 0; j < obj->desc.emad_count; j++) {
+			emad = spmc_shmem_obj_get_emad(&obj->desc, j,
+						       MAKE_FFA_VERSION(1, 1),
+						       &emad_size);
+			if (req->endpoint_array[i] ==
+			    emad->mapd.endpoint_id) {
+				found = true;
+				break;
+			}
+		}
+
+		if (!found) {
+			WARN("%s: Invalid endpoint ID (0x%x).\n",
+			     __func__, req->endpoint_array[i]);
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_unlock_all;
+		}
+	}
+
+	if (obj->in_use == 0U) {
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+	obj->in_use--;
+
+	spin_unlock(&spmc_shmem_obj_state.lock);
+	spin_unlock(&mbox->lock);
+
+	SMC_RET1(handle, FFA_SUCCESS_SMC32);
+
+err_unlock_all:
+	spin_unlock(&spmc_shmem_obj_state.lock);
+err_unlock_mailbox:
+	spin_unlock(&mbox->lock);
+	return spmc_ffa_error_return(handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
+ * @client:		Client state.
+ * @handle_low:		Unique handle of shared memory object to reclaim.
+ *			Bit[31:0].
+ * @handle_high:	Unique handle of shared memory object to reclaim.
+ *			Bit[63:32].
+ * @flags:		Unsupported, ignored.
+ *
+ * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
+ * Used by non-secure os to reclaim memory previously shared with secure os.
+ *
+ * Return: 0 on success, error code on failure.
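+ *
+ * Example (illustrative only): once every borrower has relinquished the
+ * region, the owner reclaims it by handle from the normal world:
+ *
+ *	smc(FFA_MEM_RECLAIM, (uint32_t)mem_handle,
+ *	    (uint32_t)(mem_handle >> 32), 0, 0, 0, 0, 0);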
+ */
+int spmc_ffa_mem_reclaim(uint32_t smc_fid,
+			 bool secure_origin,
+			 uint32_t handle_low,
+			 uint32_t handle_high,
+			 uint32_t mem_flags,
+			 uint64_t x4,
+			 void *cookie,
+			 void *handle,
+			 uint64_t flags)
+{
+	int ret;
+	struct spmc_shmem_obj *obj;
+	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
+
+	if (secure_origin) {
+		WARN("%s: unsupported reclaim direction.\n", __func__);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	if (mem_flags != 0U) {
+		WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	spin_lock(&spmc_shmem_obj_state.lock);
+
+	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
+	if (obj == NULL) {
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock;
+	}
+	if (obj->in_use != 0U) {
+		ret = FFA_ERROR_DENIED;
+		goto err_unlock;
+	}
+
+	/* Allow for platform specific operations to be performed. */
+	ret = plat_spmc_shmem_reclaim(&obj->desc);
+	if (ret != 0) {
+		goto err_unlock;
+	}
+
+	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
+	spin_unlock(&spmc_shmem_obj_state.lock);
+
+	SMC_RET1(handle, FFA_SUCCESS_SMC32);
+
+err_unlock:
+	spin_unlock(&spmc_shmem_obj_state.lock);
+	return spmc_ffa_error_return(handle, ret);
+}
diff --git a/services/std_svc/spm/el3_spmc/spmc_shared_mem.h b/services/std_svc/spm/el3_spmc/spmc_shared_mem.h
new file mode 100644
index 000000000..839f7a140
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc_shared_mem.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPMC_SHARED_MEM_H
+#define SPMC_SHARED_MEM_H
+
+#include <services/el3_spmc_ffa_memory.h>
+
+/**
+ * struct ffa_mem_relinquish_descriptor - Relinquish request descriptor.
+ * @handle:
+ *         ID of shared memory object to relinquish.
+ * @flags:
+ *         If bit 0 is set, clear memory after unmapping from borrower. Must be
+ *         0 for share. Bit[1]: Time slicing. Not supported, must be 0. All
+ *         other bits are reserved 0.
+ * @endpoint_count:
+ *         Number of entries in @endpoint_array.
+ * @endpoint_array:
+ *         Array of endpoint ids.
+ */
+struct ffa_mem_relinquish_descriptor {
+	uint64_t handle;
+	uint32_t flags;
+	uint32_t endpoint_count;
+	ffa_endpoint_id16_t endpoint_array[];
+};
+CASSERT(sizeof(struct ffa_mem_relinquish_descriptor) == 16,
+	assert_ffa_mem_relinquish_descriptor_size_mismatch);
+
+/**
+ * struct spmc_shmem_obj_state - Global state.
+ * @data:		Backing store for spmc_shmem_obj objects.
+ * @data_size:		The size allocated for the backing store.
+ * @allocated:		Number of bytes allocated in @data.
+ * @next_handle:	Handle used for next allocated object.
+ * @lock:		Lock protecting all state in this file.
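+ *
+ * The backing store in @data is expected to be supplied by the platform
+ * through plat_spmc_shmem_datastore_get(). A minimal hook might look as
+ * follows (illustrative sketch only; the store size is an arbitrary
+ * platform choice):
+ *
+ *	static uint8_t plat_shmem_datastore[PAGE_SIZE_4KB * 16U];
+ *
+ *	int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size)
+ *	{
+ *		*datastore = plat_shmem_datastore;
+ *		*size = sizeof(plat_shmem_datastore);
+ *		return 0;
+ *	}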
+ */ +struct spmc_shmem_obj_state { + uint8_t *data; + size_t data_size; + size_t allocated; + uint64_t next_handle; + spinlock_t lock; +}; + +extern struct spmc_shmem_obj_state spmc_shmem_obj_state; +extern int plat_spmc_shmem_begin(struct ffa_mtd *desc); +extern int plat_spmc_shmem_reclaim(struct ffa_mtd *desc); + +long spmc_ffa_mem_send(uint32_t smc_fid, + bool secure_origin, + uint64_t total_length, + uint32_t fragment_length, + uint64_t address, + uint32_t page_count, + void *cookie, + void *handle, + uint64_t flags); + +long spmc_ffa_mem_frag_tx(uint32_t smc_fid, + bool secure_origin, + uint64_t handle_low, + uint64_t handle_high, + uint32_t fragment_length, + uint32_t sender_id, + void *cookie, + void *handle, + uint64_t flags); + +long spmc_ffa_mem_retrieve_req(uint32_t smc_fid, + bool secure_origin, + uint32_t total_length, + uint32_t fragment_length, + uint64_t address, + uint32_t page_count, + void *cookie, + void *handle, + uint64_t flags); + +long spmc_ffa_mem_frag_rx(uint32_t smc_fid, + bool secure_origin, + uint32_t handle_low, + uint32_t handle_high, + uint32_t fragment_offset, + uint32_t sender_id, + void *cookie, + void *handle, + uint64_t flags); + + +int spmc_ffa_mem_relinquish(uint32_t smc_fid, + bool secure_origin, + uint32_t handle_low, + uint32_t handle_high, + uint32_t fragment_offset, + uint32_t sender_id, + void *cookie, + void *handle, + uint64_t flags); + +int spmc_ffa_mem_reclaim(uint32_t smc_fid, + bool secure_origin, + uint32_t handle_low, + uint32_t handle_high, + uint32_t mem_flags, + uint64_t x4, + void *cookie, + void *handle, + uint64_t flags); + +#endif /* SPMC_SHARED_MEM_H */