diff --git a/include/services/el3_spmc_ffa_memory.h b/include/services/el3_spmc_ffa_memory.h new file mode 100644 index 000000000..d4738a1bd --- /dev/null +++ b/include/services/el3_spmc_ffa_memory.h @@ -0,0 +1,217 @@ +/* + * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef EL3_SPMC_FFA_MEM_H +#define EL3_SPMC_FFA_MEM_H + +#include + +/* + * Subset of Arm Firmware Framework for Armv8-A + * (https://developer.arm.com/docs/den0077/a) needed for shared memory. + */ + +/** + * typedef ffa_endpoint_id16_t - Endpoint ID + * + * Current implementation only supports VM IDs. FF-A spec also support stream + * endpoint ids. + */ +typedef uint16_t ffa_endpoint_id16_t; + +/** + * struct ffa_cons_mrd - Constituent memory region descriptor + * @address: + * Start address of contiguous memory region. Must be 4K page aligned. + * @page_count: + * Number of 4K pages in region. + * @reserved_12_15: + * Reserve bytes 12-15 to pad struct size to 16 bytes. + */ +struct ffa_cons_mrd { + uint64_t address; + uint32_t page_count; + uint32_t reserved_12_15; +}; +CASSERT(sizeof(struct ffa_cons_mrd) == 16, assert_ffa_cons_mrd_size_mismatch); + +/** + * struct ffa_comp_mrd - Composite memory region descriptor + * @total_page_count: + * Number of 4k pages in memory region. Must match sum of + * @address_range_array[].page_count. + * @address_range_count: + * Number of entries in @address_range_array. + * @reserved_8_15: + * Reserve bytes 8-15 to pad struct size to 16 byte alignment and + * make @address_range_array 16 byte aligned. + * @address_range_array: + * Array of &struct ffa_cons_mrd entries. + */ +struct ffa_comp_mrd { + uint32_t total_page_count; + uint32_t address_range_count; + uint64_t reserved_8_15; + struct ffa_cons_mrd address_range_array[]; +}; +CASSERT(sizeof(struct ffa_comp_mrd) == 16, assert_ffa_comp_mrd_size_mismatch); + +/** + * typedef ffa_mem_attr8_t - Memory region attributes + * + * * @FFA_MEM_ATTR_DEVICE_NGNRNE: + * Device-nGnRnE. + * * @FFA_MEM_ATTR_DEVICE_NGNRE: + * Device-nGnRE. + * * @FFA_MEM_ATTR_DEVICE_NGRE: + * Device-nGRE. + * * @FFA_MEM_ATTR_DEVICE_GRE: + * Device-GRE. + * * @FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED + * Normal memory. Non-cacheable. + * * @FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB + * Normal memory. Write-back cached. + * * @FFA_MEM_ATTR_NON_SHAREABLE + * Non-shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*. + * * @FFA_MEM_ATTR_OUTER_SHAREABLE + * Outer Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*. + * * @FFA_MEM_ATTR_INNER_SHAREABLE + * Inner Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*. + */ +typedef uint8_t ffa_mem_attr8_t; +#define FFA_MEM_ATTR_DEVICE_NGNRNE ((1U << 4) | (0x0U << 2)) +#define FFA_MEM_ATTR_DEVICE_NGNRE ((1U << 4) | (0x1U << 2)) +#define FFA_MEM_ATTR_DEVICE_NGRE ((1U << 4) | (0x2U << 2)) +#define FFA_MEM_ATTR_DEVICE_GRE ((1U << 4) | (0x3U << 2)) +#define FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED ((2U << 4) | (0x1U << 2)) +#define FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB ((2U << 4) | (0x3U << 2)) +#define FFA_MEM_ATTR_NON_SHAREABLE (0x0U << 0) +#define FFA_MEM_ATTR_OUTER_SHAREABLE (0x2U << 0) +#define FFA_MEM_ATTR_INNER_SHAREABLE (0x3U << 0) + +/** + * typedef ffa_mem_perm8_t - Memory access permissions + * + * * @FFA_MEM_ATTR_RO + * Request or specify read-only mapping. + * * @FFA_MEM_ATTR_RW + * Request or allow read-write mapping. + * * @FFA_MEM_PERM_NX + * Deny executable mapping. + * * @FFA_MEM_PERM_X + * Request executable mapping. 
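+ *
+ * Illustrative combination (example only, using the defines below and
+ * the FFA_MEM_ATTR_* defines above): a Normal, Write-Back cached,
+ * Inner Shareable region that a borrower may map read-write but not
+ * execute would be described with
+ *   memory_region_attributes  = FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB |
+ *                               FFA_MEM_ATTR_INNER_SHAREABLE;
+ *   memory_access_permissions = FFA_MEM_PERM_RW | FFA_MEM_PERM_NX;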
+ */ +typedef uint8_t ffa_mem_perm8_t; +#define FFA_MEM_PERM_RO (1U << 0) +#define FFA_MEM_PERM_RW (1U << 1) +#define FFA_MEM_PERM_NX (1U << 2) +#define FFA_MEM_PERM_X (1U << 3) + +/** + * typedef ffa_mem_flag8_t - Endpoint memory flags + * + * * @FFA_MEM_FLAG_NON_RETRIEVAL_BORROWER + * Non-retrieval Borrower. Memory region must not be or was not retrieved on + * behalf of this endpoint. + */ +typedef uint8_t ffa_mem_flag8_t; +#define FFA_MEM_FLAG_NON_RETRIEVAL_BORROWER (1U << 0) + +/** + * typedef ffa_mtd_flag32_t - Memory transaction descriptor flags + * + * * @FFA_MTD_FLAG_ZERO_MEMORY + * Zero memory after unmapping from sender (must be 0 for share). + * * @FFA_MTD_FLAG_TIME_SLICING + * Not supported by this implementation. + * * @FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH + * Zero memory after unmapping from borrowers (must be 0 for share). + * * @FFA_MTD_FLAG_TYPE_MASK + * Bit-mask to extract memory management transaction type from flags. + * * @FFA_MTD_FLAG_TYPE_SHARE_MEMORY + * Share memory transaction flag. + * Used by @SMC_FC_FFA_MEM_RETRIEVE_RESP to indicate that memory came from + * @SMC_FC_FFA_MEM_SHARE and by @SMC_FC_FFA_MEM_RETRIEVE_REQ to specify that + * it must have. + * * @FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK + * Not supported by this implementation. + */ +typedef uint32_t ffa_mtd_flag32_t; +#define FFA_MTD_FLAG_ZERO_MEMORY (1U << 0) +#define FFA_MTD_FLAG_TIME_SLICING (1U << 1) +#define FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH (1U << 2) +#define FFA_MTD_FLAG_TYPE_MASK (3U << 3) +#define FFA_MTD_FLAG_TYPE_SHARE_MEMORY (1U << 3) +#define FFA_MTD_FLAG_TYPE_LEND_MEMORY (1U << 4) +#define FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK (0x1FU << 5) + +/** + * struct ffa_mapd - Memory access permissions descriptor + * @endpoint_id: + * Endpoint id that @memory_access_permissions and @flags apply to. + * (&typedef ffa_endpoint_id16_t). + * @memory_access_permissions: + * FFA_MEM_PERM_* values or'ed together (&typedef ffa_mem_perm8_t). + * @flags: + * FFA_MEM_FLAG_* values or'ed together (&typedef ffa_mem_flag8_t). + */ +struct ffa_mapd { + ffa_endpoint_id16_t endpoint_id; + ffa_mem_perm8_t memory_access_permissions; + ffa_mem_flag8_t flags; +}; +CASSERT(sizeof(struct ffa_mapd) == 4, assert_ffa_mapd_size_mismatch); + +/** + * struct ffa_emad_v1_0 - Endpoint memory access descriptor. + * @mapd: &struct ffa_mapd. + * @comp_mrd_offset: + * Offset of &struct ffa_comp_mrd from start of &struct ffa_mtd_v1_0. + * @reserved_8_15: + * Reserved bytes 8-15. Must be 0. + */ +struct ffa_emad_v1_0 { + struct ffa_mapd mapd; + uint32_t comp_mrd_offset; + uint64_t reserved_8_15; +}; +CASSERT(sizeof(struct ffa_emad_v1_0) == 16, assert_ffa_emad_v1_0_size_mismatch); + +/** + * struct ffa_mtd_v1_0 - Memory transaction descriptor. + * @sender_id: + * Sender endpoint id. + * @memory_region_attributes: + * FFA_MEM_ATTR_* values or'ed together (&typedef ffa_mem_attr8_t). + * @reserved_3: + * Reserved bytes 3. Must be 0. + * @flags: + * FFA_MTD_FLAG_* values or'ed together (&typedef ffa_mtd_flag32_t). + * @handle: + * Id of shared memory object. Must be 0 for MEM_SHARE or MEM_LEND. + * @tag: Client allocated tag. Must match original value. + * @reserved_24_27: + * Reserved bytes 24-27. Must be 0. + * @emad_count: + * Number of entries in @emad. Must be 1 in current implementation. + * FFA spec allows more entries. + * @emad: + * Endpoint memory access descriptor array (see @struct ffa_emad_v1_0). 
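+ *
+ * Layout example (illustrative, single borrower, single constituent):
+ * the fixed header occupies bytes 0-31 and the one ffa_emad_v1_0 entry
+ * bytes 32-47, so emad[0].comp_mrd_offset is typically 48; the
+ * ffa_comp_mrd header then covers bytes 48-63 and its single
+ * ffa_cons_mrd entry bytes 64-79, for a total descriptor size of
+ * 80 bytes.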
+ */ +struct ffa_mtd_v1_0 { + ffa_endpoint_id16_t sender_id; + ffa_mem_attr8_t memory_region_attributes; + uint8_t reserved_3; + ffa_mtd_flag32_t flags; + uint64_t handle; + uint64_t tag; + uint32_t reserved_24_27; + uint32_t emad_count; + struct ffa_emad_v1_0 emad[]; +}; +CASSERT(sizeof(struct ffa_mtd_v1_0) == 32, assert_ffa_mtd_size_v1_0_mismatch); + +#endif /* EL3_SPMC_FFA_MEM_H */ diff --git a/services/std_svc/spm/el3_spmc/spmc.mk b/services/std_svc/spm/el3_spmc/spmc.mk index 9f82ccbf3..aa591d9f3 100644 --- a/services/std_svc/spm/el3_spmc/spmc.mk +++ b/services/std_svc/spm/el3_spmc/spmc.mk @@ -12,7 +12,8 @@ SPMC_SOURCES := $(addprefix services/std_svc/spm/el3_spmc/, \ spmc_main.c \ spmc_setup.c \ logical_sp.c \ - spmc_pm.c) + spmc_pm.c \ + spmc_shared_mem.c) # Specify platform specific logical partition implementation. SPMC_LP_SOURCES := $(addprefix ${PLAT_DIR}/, \ diff --git a/services/std_svc/spm/el3_spmc/spmc_main.c b/services/std_svc/spm/el3_spmc/spmc_main.c index 342f55c0f..74c00da0e 100644 --- a/services/std_svc/spm/el3_spmc/spmc_main.c +++ b/services/std_svc/spm/el3_spmc/spmc_main.c @@ -26,6 +26,7 @@ #include #include #include "spmc.h" +#include "spmc_shared_mem.h" #include @@ -1033,6 +1034,7 @@ static uint64_t ffa_features_handler(uint32_t smc_fid, case FFA_RXTX_MAP_SMC32: case FFA_RXTX_MAP_SMC64: case FFA_RXTX_UNMAP: + case FFA_MEM_FRAG_TX: case FFA_MSG_RUN: /* @@ -1050,9 +1052,27 @@ static uint64_t ffa_features_handler(uint32_t smc_fid, case FFA_SECONDARY_EP_REGISTER_SMC64: case FFA_MSG_SEND_DIRECT_RESP_SMC32: case FFA_MSG_SEND_DIRECT_RESP_SMC64: + case FFA_MEM_RETRIEVE_REQ_SMC32: + case FFA_MEM_RETRIEVE_REQ_SMC64: + case FFA_MEM_RELINQUISH: case FFA_MSG_WAIT: if (!secure_origin) { + return spmc_ffa_error_return(handle, + FFA_ERROR_NOT_SUPPORTED); + } + SMC_RET1(handle, FFA_SUCCESS_SMC32); + /* Execution stops here. */ + + /* Supported features only from the normal world. */ + case FFA_MEM_SHARE_SMC32: + case FFA_MEM_SHARE_SMC64: + case FFA_MEM_LEND_SMC32: + case FFA_MEM_LEND_SMC64: + case FFA_MEM_RECLAIM: + case FFA_MEM_FRAG_RX: + + if (secure_origin) { return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); } @@ -1677,6 +1697,18 @@ int32_t spmc_setup(void) initalize_sp_descs(); initalize_ns_ep_descs(); + /* + * Retrieve the information of the datastore for tracking shared memory + * requests allocated by platform code and zero the region if available. + */ + ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data, + &spmc_shmem_obj_state.data_size); + if (ret != 0) { + ERROR("Failed to obtain memory descriptor backing store!\n"); + return ret; + } + memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size); + /* Setup logical SPs. 
*/ ret = logical_sp_init(); if (ret != 0) { @@ -1799,6 +1831,35 @@ uint64_t spmc_smc_handler(uint32_t smc_fid, case FFA_MSG_RUN: return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4, cookie, handle, flags); + + case FFA_MEM_SHARE_SMC32: + case FFA_MEM_SHARE_SMC64: + case FFA_MEM_LEND_SMC32: + case FFA_MEM_LEND_SMC64: + return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4, + cookie, handle, flags); + + case FFA_MEM_FRAG_TX: + return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3, + x4, cookie, handle, flags); + + case FFA_MEM_FRAG_RX: + return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3, + x4, cookie, handle, flags); + + case FFA_MEM_RETRIEVE_REQ_SMC32: + case FFA_MEM_RETRIEVE_REQ_SMC64: + return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2, + x3, x4, cookie, handle, flags); + + case FFA_MEM_RELINQUISH: + return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2, + x3, x4, cookie, handle, flags); + + case FFA_MEM_RECLAIM: + return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3, + x4, cookie, handle, flags); + default: WARN("Unsupported FF-A call 0x%08x.\n", smc_fid); break; diff --git a/services/std_svc/spm/el3_spmc/spmc_shared_mem.c b/services/std_svc/spm/el3_spmc/spmc_shared_mem.c new file mode 100644 index 000000000..b9ca2fe55 --- /dev/null +++ b/services/std_svc/spm/el3_spmc/spmc_shared_mem.c @@ -0,0 +1,920 @@ +/* + * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include + +#include +#include +#include +#include +#include +#include +#include "spmc.h" +#include "spmc_shared_mem.h" + +#include + +/** + * struct spmc_shmem_obj - Shared memory object. + * @desc_size: Size of @desc. + * @desc_filled: Size of @desc already received. + * @in_use: Number of clients that have called ffa_mem_retrieve_req + * without a matching ffa_mem_relinquish call. + * @desc: FF-A memory region descriptor passed in ffa_mem_share. + */ +struct spmc_shmem_obj { + size_t desc_size; + size_t desc_filled; + size_t in_use; + struct ffa_mtd_v1_0 desc; +}; + +/* + * Declare our data structure to store the metadata of memory share requests. + * The main datastore is allocated on a per platform basis to ensure enough + * storage can be made available. + * The address of the data store will be populated by the SPMC during its + * initialization. + */ + +struct spmc_shmem_obj_state spmc_shmem_obj_state = { + /* Set start value for handle so top 32 bits are needed quickly. */ + .next_handle = 0xffffffc0U, +}; + +/** + * spmc_shmem_obj_size - Convert from descriptor size to object size. + * @desc_size: Size of struct ffa_memory_region_descriptor object. + * + * Return: Size of struct spmc_shmem_obj object. + */ +static size_t spmc_shmem_obj_size(size_t desc_size) +{ + return desc_size + offsetof(struct spmc_shmem_obj, desc); +} + +/** + * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj. + * @state: Global state. + * @desc_size: Size of struct ffa_memory_region_descriptor object that + * allocated object will hold. + * + * Return: Pointer to newly allocated object, or %NULL if there not enough space + * left. The returned pointer is only valid while @state is locked, to + * used it again after unlocking @state, spmc_shmem_obj_lookup must be + * called. 
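+ *
+ * Typical call pattern (illustrative, mirrors spmc_ffa_mem_send()):
+ *
+ *   spin_lock(&state->lock);
+ *   obj = spmc_shmem_obj_alloc(state, total_length);
+ *   ...copy descriptor fragments into obj->desc...
+ *   spin_unlock(&state->lock);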
+ */ +static struct spmc_shmem_obj * +spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size) +{ + struct spmc_shmem_obj *obj; + size_t free = state->data_size - state->allocated; + + if (state->data == NULL) { + ERROR("Missing shmem datastore!\n"); + return NULL; + } + + if (spmc_shmem_obj_size(desc_size) > free) { + WARN("%s(0x%zx) failed, free 0x%zx\n", + __func__, desc_size, free); + return NULL; + } + obj = (struct spmc_shmem_obj *)(state->data + state->allocated); + obj->desc = (struct ffa_mtd_v1_0) {0}; + obj->desc_size = desc_size; + obj->desc_filled = 0; + obj->in_use = 0; + state->allocated += spmc_shmem_obj_size(desc_size); + return obj; +} + +/** + * spmc_shmem_obj_free - Free struct spmc_shmem_obj. + * @state: Global state. + * @obj: Object to free. + * + * Release memory used by @obj. Other objects may move, so on return all + * pointers to struct spmc_shmem_obj object should be considered invalid, not + * just @obj. + * + * The current implementation always compacts the remaining objects to simplify + * the allocator and to avoid fragmentation. + */ + +static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state, + struct spmc_shmem_obj *obj) +{ + size_t free_size = spmc_shmem_obj_size(obj->desc_size); + uint8_t *shift_dest = (uint8_t *)obj; + uint8_t *shift_src = shift_dest + free_size; + size_t shift_size = state->allocated - (shift_src - state->data); + + if (shift_size != 0U) { + memmove(shift_dest, shift_src, shift_size); + } + state->allocated -= free_size; +} + +/** + * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle. + * @state: Global state. + * @handle: Unique handle of object to return. + * + * Return: struct spmc_shmem_obj_state object with handle matching @handle. + * %NULL, if not object in @state->data has a matching handle. + */ +static struct spmc_shmem_obj * +spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle) +{ + uint8_t *curr = state->data; + + while (curr - state->data < state->allocated) { + struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr; + + if (obj->desc.handle == handle) { + return obj; + } + curr += spmc_shmem_obj_size(obj->desc_size); + } + return NULL; +} + +static struct ffa_comp_mrd * +spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj) +{ + return (struct ffa_comp_mrd *) + ((uint8_t *)(&obj->desc) + obj->desc.emad[0].comp_mrd_offset); +} + +/** + * spmc_shmem_obj_ffa_constituent_size - Calculate variable size part of obj. + * @obj: Object containing ffa_memory_region_descriptor. + * + * Return: Size of ffa_constituent_memory_region_descriptors in @obj. + */ +static size_t +spmc_shmem_obj_ffa_constituent_size(struct spmc_shmem_obj *obj) +{ + return spmc_shmem_obj_get_comp_mrd(obj)->address_range_count * + sizeof(struct ffa_cons_mrd); +} + +/** + * spmc_shmem_check_obj - Check that counts in descriptor match overall size. + * @obj: Object containing ffa_memory_region_descriptor. + * + * Return: 0 if object is valid, -EINVAL if memory region attributes count is + * not 1, -EINVAL if constituent_memory_region_descriptor offset or count is + * invalid. 
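+ *
+ * Worked example (illustrative): for a fully received descriptor with
+ * one emad entry and two constituents, the header plus emad array take
+ * 32 + 16 = 48 bytes, so comp_mrd_offset must be at least 48, and the
+ * object is accepted only if comp_mrd_offset + 16 + (2 * 16) equals
+ * desc_size and the two page_count values sum to total_page_count.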
+ */ +static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj) +{ + if (obj->desc.emad_count != 1) { + WARN("%s: unsupported attribute desc count %u != 1\n", + __func__, obj->desc.emad_count); + return -EINVAL; + } + + for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) { + size_t size; + size_t count; + size_t expected_size; + size_t total_page_count; + struct ffa_comp_mrd *comp; + + uint32_t offset = obj->desc.emad[emad_num].comp_mrd_offset; + size_t header_emad_size = sizeof(obj->desc) + + obj->desc.emad_count * sizeof(obj->desc.emad[emad_num]); + + if (offset < header_emad_size) { + WARN("%s: invalid object, offset %u < header + emad %zu\n", + __func__, offset, header_emad_size); + return -EINVAL; + } + + size = obj->desc_size; + + if (offset > size) { + WARN("%s: invalid object, offset %u > total size %zu\n", + __func__, offset, obj->desc_size); + return -EINVAL; + } + size -= offset; + + if (size < sizeof(struct ffa_comp_mrd)) { + WARN("%s: invalid object, offset %u, total size %zu, no header space.\n", + __func__, offset, obj->desc_size); + return -EINVAL; + } + size -= sizeof(struct ffa_comp_mrd); + + count = size / sizeof(struct ffa_cons_mrd); + + comp = spmc_shmem_obj_get_comp_mrd(obj); + + if (comp->address_range_count != count) { + WARN("%s: invalid object, desc count %u != %zu\n", + __func__, comp->address_range_count, count); + return -EINVAL; + } + + expected_size = offset + sizeof(*comp) + + spmc_shmem_obj_ffa_constituent_size(obj); + if (expected_size != obj->desc_size) { + WARN("%s: invalid object, computed size %zu != size %zu\n", + __func__, expected_size, obj->desc_size); + return -EINVAL; + } + + if (obj->desc_filled < obj->desc_size) { + /* + * The whole descriptor has not yet been received. + * Skip final checks. + */ + return 0; + } + + total_page_count = 0; + + for (size_t i = 0; i < count; i++) { + total_page_count += + comp->address_range_array[i].page_count; + } + if (comp->total_page_count != total_page_count) { + WARN("%s: invalid object, desc total_page_count %u != %zu\n", + __func__, comp->total_page_count, + total_page_count); + return -EINVAL; + } + } + + return 0; +} + +static long spmc_ffa_fill_desc(struct mailbox *mbox, + struct spmc_shmem_obj *obj, + uint32_t fragment_length, + ffa_mtd_flag32_t mtd_flag, + void *smc_handle) +{ + int ret; + uint32_t handle_low; + uint32_t handle_high; + + if (mbox->rxtx_page_count == 0U) { + WARN("%s: buffer pair not registered.\n", __func__); + ret = -EINVAL; + goto err_arg; + } + + if (fragment_length > mbox->rxtx_page_count * PAGE_SIZE_4KB) { + WARN("%s: bad fragment size %u > %u buffer size\n", __func__, + fragment_length, mbox->rxtx_page_count * PAGE_SIZE_4KB); + ret = -EINVAL; + goto err_arg; + } + + memcpy((uint8_t *)&obj->desc + obj->desc_filled, + (uint8_t *) mbox->tx_buffer, + fragment_length); + + if (fragment_length > obj->desc_size - obj->desc_filled) { + WARN("%s: bad fragment size %u > %zu remaining\n", __func__, + fragment_length, obj->desc_size - obj->desc_filled); + ret = -EINVAL; + goto err_arg; + } + + /* Ensure that the sender ID resides in the normal world. */ + if (ffa_is_secure_world_id(obj->desc.sender_id)) { + WARN("%s: Invalid sender ID 0x%x.\n", + __func__, obj->desc.sender_id); + ret = FFA_ERROR_DENIED; + goto err_arg; + } + + /* + * We don't currently support any optional flags so ensure none are + * requested. 
+ */ + if (obj->desc.flags != 0U && mtd_flag != 0U && + (obj->desc.flags != mtd_flag)) { + WARN("%s: invalid memory transaction flags %u != %u\n", + __func__, obj->desc.flags, mtd_flag); + ret = -EINVAL; + goto err_arg; + } + + if (obj->desc_filled == 0U) { + /* First fragment, descriptor header has been copied */ + obj->desc.handle = spmc_shmem_obj_state.next_handle++; + obj->desc.flags |= mtd_flag; + } + + obj->desc_filled += fragment_length; + + ret = spmc_shmem_check_obj(obj); + if (ret != 0) { + goto err_bad_desc; + } + + handle_low = (uint32_t)obj->desc.handle; + handle_high = obj->desc.handle >> 32; + + if (obj->desc_filled != obj->desc_size) { + SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low, + handle_high, obj->desc_filled, + (uint32_t)obj->desc.sender_id << 16, 0, 0, 0); + } + + SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0, + 0, 0, 0); + +err_bad_desc: +err_arg: + spmc_shmem_obj_free(&spmc_shmem_obj_state, obj); + return spmc_ffa_error_return(smc_handle, FFA_ERROR_INVALID_PARAMETER); +} + +/** + * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation. + * @client: Client state. + * @total_length: Total length of shared memory descriptor. + * @fragment_length: Length of fragment of shared memory descriptor passed in + * this call. + * @address: Not supported, must be 0. + * @page_count: Not supported, must be 0. + * @smc_handle: Handle passed to smc call. Used to return + * FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS. + * + * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed + * to share or lend memory from non-secure os to secure os (with no stream + * endpoints). + * + * Return: 0 on success, error code on failure. + */ +long spmc_ffa_mem_send(uint32_t smc_fid, + bool secure_origin, + uint64_t total_length, + uint32_t fragment_length, + uint64_t address, + uint32_t page_count, + void *cookie, + void *handle, + uint64_t flags) + +{ + long ret; + struct spmc_shmem_obj *obj; + struct mailbox *mbox = spmc_get_mbox_desc(secure_origin); + ffa_mtd_flag32_t mtd_flag; + + if (address != 0U || page_count != 0U) { + WARN("%s: custom memory region for message not supported.\n", + __func__); + return spmc_ffa_error_return(handle, + FFA_ERROR_INVALID_PARAMETER); + } + + if (secure_origin) { + WARN("%s: unsupported share direction.\n", __func__); + return spmc_ffa_error_return(handle, + FFA_ERROR_INVALID_PARAMETER); + } + + if (fragment_length < sizeof(obj->desc)) { + WARN("%s: bad first fragment size %u < %zu\n", + __func__, fragment_length, sizeof(obj->desc)); + return spmc_ffa_error_return(handle, + FFA_ERROR_INVALID_PARAMETER); + } + + if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) { + mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY; + } else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) { + mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY; + } else { + WARN("%s: invalid memory management operation.\n", __func__); + return spmc_ffa_error_return(handle, + FFA_ERROR_INVALID_PARAMETER); + } + + spin_lock(&spmc_shmem_obj_state.lock); + + obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length); + if (obj == NULL) { + ret = FFA_ERROR_NO_MEMORY; + goto err_unlock; + } + + spin_lock(&mbox->lock); + ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag, handle); + spin_unlock(&mbox->lock); + + spin_unlock(&spmc_shmem_obj_state.lock); + return ret; + +err_unlock: + spin_unlock(&spmc_shmem_obj_state.lock); + return spmc_ffa_error_return(handle, ret); +} + +/** + * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation. 
+ * @client: Client state. + * @handle_low: Handle_low value returned from FFA_MEM_FRAG_RX. + * @handle_high: Handle_high value returned from FFA_MEM_FRAG_RX. + * @fragment_length: Length of fragments transmitted. + * @sender_id: Vmid of sender in bits [31:16] + * @smc_handle: Handle passed to smc call. Used to return + * FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS. + * + * Return: @smc_handle on success, error code on failure. + */ +long spmc_ffa_mem_frag_tx(uint32_t smc_fid, + bool secure_origin, + uint64_t handle_low, + uint64_t handle_high, + uint32_t fragment_length, + uint32_t sender_id, + void *cookie, + void *handle, + uint64_t flags) +{ + long ret; + uint32_t desc_sender_id; + struct mailbox *mbox = spmc_get_mbox_desc(secure_origin); + + struct spmc_shmem_obj *obj; + uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32); + + spin_lock(&spmc_shmem_obj_state.lock); + + obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle); + if (obj == NULL) { + WARN("%s: invalid handle, 0x%lx, not a valid handle.\n", + __func__, mem_handle); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock; + } + + desc_sender_id = (uint32_t)obj->desc.sender_id << 16; + if (sender_id != desc_sender_id) { + WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__, + sender_id, desc_sender_id); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock; + } + + if (obj->desc_filled == obj->desc_size) { + WARN("%s: object desc already filled, %zu\n", __func__, + obj->desc_filled); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock; + } + + spin_lock(&mbox->lock); + ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, handle); + spin_unlock(&mbox->lock); + + spin_unlock(&spmc_shmem_obj_state.lock); + return ret; + +err_unlock: + spin_unlock(&spmc_shmem_obj_state.lock); + return spmc_ffa_error_return(handle, ret); +} + +/** + * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation. + * @smc_fid: FID of SMC + * @total_length: Total length of retrieve request descriptor if this is + * the first call. Otherwise (unsupported) must be 0. + * @fragment_length: Length of fragment of retrieve request descriptor passed + * in this call. Only @fragment_length == @length is + * supported by this implementation. + * @address: Not supported, must be 0. + * @page_count: Not supported, must be 0. + * @smc_handle: Handle passed to smc call. Used to return + * FFA_MEM_RETRIEVE_RESP. + * + * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call. + * Used by secure os to retrieve memory already shared by non-secure os. + * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message, + * the client must call FFA_MEM_FRAG_RX until the full response has been + * received. + * + * Return: @handle on success, error code on failure. 
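+ *
+ * Illustrative caller-side sequence when the descriptor does not fit
+ * in one response (sketch only, not implemented in this file):
+ *
+ *   FFA_MEM_RETRIEVE_REQ            -> FFA_MEM_RETRIEVE_RESP(total, frag)
+ *   FFA_MEM_FRAG_RX(handle, offset) -> FFA_MEM_FRAG_TX(next fragment)
+ *   ...repeat until the full descriptor has been received...
+ *   FFA_MEM_RELINQUISH              -> FFA_SUCCESS, once done with the
+ *                                      memory.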
+ */ +long +spmc_ffa_mem_retrieve_req(uint32_t smc_fid, + bool secure_origin, + uint32_t total_length, + uint32_t fragment_length, + uint64_t address, + uint32_t page_count, + void *cookie, + void *handle, + uint64_t flags) +{ + int ret; + size_t buf_size; + size_t copy_size; + struct ffa_mtd_v1_0 *resp; + const struct ffa_mtd_v1_0 *req; + struct spmc_shmem_obj *obj = NULL; + struct mailbox *mbox = spmc_get_mbox_desc(secure_origin); + + if (!secure_origin) { + WARN("%s: unsupported retrieve req direction.\n", __func__); + return spmc_ffa_error_return(handle, + FFA_ERROR_INVALID_PARAMETER); + } + + if (address != 0U || page_count != 0U) { + WARN("%s: custom memory region not supported.\n", __func__); + return spmc_ffa_error_return(handle, + FFA_ERROR_INVALID_PARAMETER); + } + + spin_lock(&mbox->lock); + + req = mbox->tx_buffer; + resp = mbox->rx_buffer; + buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE; + + if (mbox->rxtx_page_count == 0U) { + WARN("%s: buffer pair not registered.\n", __func__); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_mailbox; + } + + if (mbox->state != MAILBOX_STATE_EMPTY) { + WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state); + ret = FFA_ERROR_DENIED; + goto err_unlock_mailbox; + } + + if (fragment_length != total_length) { + WARN("%s: fragmented retrieve request not supported.\n", + __func__); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_mailbox; + } + + /* + * Ensure endpoint count is 1, additional receivers not currently + * supported. + */ + if (req->emad_count != 1U) { + WARN("%s: unsupported retrieve descriptor count: %u\n", + __func__, req->emad_count); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_mailbox; + } + + if (total_length < sizeof(*req)) { + WARN("%s: invalid length %u < %zu\n", __func__, total_length, + sizeof(*req)); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_mailbox; + } + + spin_lock(&spmc_shmem_obj_state.lock); + + obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle); + if (obj == NULL) { + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_all; + } + + if (obj->desc_filled != obj->desc_size) { + WARN("%s: incomplete object desc filled %zu < size %zu\n", + __func__, obj->desc_filled, obj->desc_size); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_all; + } + + if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) { + WARN("%s: wrong sender id 0x%x != 0x%x\n", + __func__, req->sender_id, obj->desc.sender_id); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_all; + } + + if (req->emad_count != 0U && req->tag != obj->desc.tag) { + WARN("%s: wrong tag 0x%lx != 0x%lx\n", + __func__, req->tag, obj->desc.tag); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_all; + } + + if (req->flags != 0U) { + if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) != + (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) { + /* + * If the retrieve request specifies the memory + * transaction ensure it matches what we expect. + */ + WARN("%s: wrong mem transaction flags %x != %x\n", + __func__, req->flags, obj->desc.flags); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_all; + } + + if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY && + req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) { + /* + * Current implementation does not support donate and + * it supports no other flags. + */ + WARN("%s: invalid flags 0x%x\n", __func__, req->flags); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_all; + } + } + + /* TODO: support more than one endpoint ids. 
*/ + if (req->emad_count != 0U && + req->emad[0].mapd.endpoint_id != + obj->desc.emad[0].mapd.endpoint_id) { + WARN("%s: wrong receiver id 0x%x != 0x%x\n", + __func__, req->emad[0].mapd.endpoint_id, + obj->desc.emad[0].mapd.endpoint_id); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_all; + } + + mbox->state = MAILBOX_STATE_FULL; + + if (req->emad_count != 0U) { + obj->in_use++; + } + + copy_size = MIN(obj->desc_size, buf_size); + + memcpy(resp, &obj->desc, copy_size); + + spin_unlock(&spmc_shmem_obj_state.lock); + spin_unlock(&mbox->lock); + + SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, obj->desc_size, + copy_size, 0, 0, 0, 0, 0); + +err_unlock_all: + spin_unlock(&spmc_shmem_obj_state.lock); +err_unlock_mailbox: + spin_unlock(&mbox->lock); + return spmc_ffa_error_return(handle, ret); +} + +/** + * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation. + * @client: Client state. + * @handle_low: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0]. + * @handle_high: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32]. + * @fragment_offset: Byte offset in descriptor to resume at. + * @sender_id: Bit[31:16]: Endpoint id of sender if client is a + * hypervisor. 0 otherwise. + * @smc_handle: Handle passed to smc call. Used to return + * FFA_MEM_FRAG_TX. + * + * Return: @smc_handle on success, error code on failure. + */ +long spmc_ffa_mem_frag_rx(uint32_t smc_fid, + bool secure_origin, + uint32_t handle_low, + uint32_t handle_high, + uint32_t fragment_offset, + uint32_t sender_id, + void *cookie, + void *handle, + uint64_t flags) +{ + int ret; + void *src; + size_t buf_size; + size_t copy_size; + size_t full_copy_size; + uint32_t desc_sender_id; + struct mailbox *mbox = spmc_get_mbox_desc(secure_origin); + uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32); + struct spmc_shmem_obj *obj; + + if (!secure_origin) { + WARN("%s: can only be called from swld.\n", + __func__); + return spmc_ffa_error_return(handle, + FFA_ERROR_INVALID_PARAMETER); + } + + spin_lock(&spmc_shmem_obj_state.lock); + + obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle); + if (obj == NULL) { + WARN("%s: invalid handle, 0x%lx, not a valid handle.\n", + __func__, mem_handle); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_shmem; + } + + desc_sender_id = (uint32_t)obj->desc.sender_id << 16; + if (sender_id != 0U && sender_id != desc_sender_id) { + WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__, + sender_id, desc_sender_id); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_shmem; + } + + if (fragment_offset >= obj->desc_size) { + WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n", + __func__, fragment_offset, obj->desc_size); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_shmem; + } + + spin_lock(&mbox->lock); + + if (mbox->rxtx_page_count == 0U) { + WARN("%s: buffer pair not registered.\n", __func__); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_all; + } + + if (mbox->state != MAILBOX_STATE_EMPTY) { + WARN("%s: RX Buffer is full!\n", __func__); + ret = FFA_ERROR_DENIED; + goto err_unlock_all; + } + + buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE; + + mbox->state = MAILBOX_STATE_FULL; + + full_copy_size = obj->desc_size - fragment_offset; + copy_size = MIN(full_copy_size, buf_size); + + src = &obj->desc; + + memcpy(mbox->rx_buffer, src + fragment_offset, copy_size); + + spin_unlock(&mbox->lock); + spin_unlock(&spmc_shmem_obj_state.lock); + + SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high, + copy_size, sender_id, 0, 0, 0); + +err_unlock_all: + 
spin_unlock(&mbox->lock); +err_unlock_shmem: + spin_unlock(&spmc_shmem_obj_state.lock); + return spmc_ffa_error_return(handle, ret); +} + +/** + * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation. + * @client: Client state. + * + * Implements a subset of the FF-A FFA_MEM_RELINQUISH call. + * Used by secure os release previously shared memory to non-secure os. + * + * The handle to release must be in the client's (secure os's) transmit buffer. + * + * Return: 0 on success, error code on failure. + */ +int spmc_ffa_mem_relinquish(uint32_t smc_fid, + bool secure_origin, + uint32_t handle_low, + uint32_t handle_high, + uint32_t fragment_offset, + uint32_t sender_id, + void *cookie, + void *handle, + uint64_t flags) +{ + int ret; + struct mailbox *mbox = spmc_get_mbox_desc(secure_origin); + struct spmc_shmem_obj *obj; + const struct ffa_mem_relinquish_descriptor *req; + + if (!secure_origin) { + WARN("%s: unsupported relinquish direction.\n", __func__); + return spmc_ffa_error_return(handle, + FFA_ERROR_INVALID_PARAMETER); + } + + spin_lock(&mbox->lock); + + if (mbox->rxtx_page_count == 0U) { + WARN("%s: buffer pair not registered.\n", __func__); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_mailbox; + } + + req = mbox->tx_buffer; + + if (req->flags != 0U) { + WARN("%s: unsupported flags 0x%x\n", __func__, req->flags); + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_mailbox; + } + + spin_lock(&spmc_shmem_obj_state.lock); + + obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle); + if (obj == NULL) { + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_all; + } + + if (obj->desc.emad_count != req->endpoint_count) { + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_all; + } + for (size_t i = 0; i < req->endpoint_count; i++) { + if (req->endpoint_array[i] != + obj->desc.emad[i].mapd.endpoint_id) { + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_all; + } + } + if (obj->in_use == 0U) { + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock_all; + } + obj->in_use--; + + spin_unlock(&spmc_shmem_obj_state.lock); + spin_unlock(&mbox->lock); + + SMC_RET1(handle, FFA_SUCCESS_SMC32); + +err_unlock_all: + spin_unlock(&spmc_shmem_obj_state.lock); +err_unlock_mailbox: + spin_unlock(&mbox->lock); + return spmc_ffa_error_return(handle, ret); +} + +/** + * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation. + * @client: Client state. + * @handle_low: Unique handle of shared memory object to reclaim. Bit[31:0]. + * @handle_high: Unique handle of shared memory object to reclaim. + * Bit[63:32]. + * @flags: Unsupported, ignored. + * + * Implements a subset of the FF-A FFA_MEM_RECLAIM call. + * Used by non-secure os reclaim memory previously shared with secure os. + * + * Return: 0 on success, error code on failure. 
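+ *
+ * Illustrative lifecycle from the normal world's point of view:
+ *
+ *   FFA_MEM_SHARE/LEND (+ FFA_MEM_FRAG_TX while fragmented) -> handle
+ *   ...SPs retrieve and later relinquish the region...
+ *   FFA_MEM_RECLAIM(handle) -> FFA_SUCCESS once no borrower still has
+ *   the region retrieved, FFA_ERROR_DENIED otherwise.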
+ */ +int spmc_ffa_mem_reclaim(uint32_t smc_fid, + bool secure_origin, + uint32_t handle_low, + uint32_t handle_high, + uint32_t mem_flags, + uint64_t x4, + void *cookie, + void *handle, + uint64_t flags) +{ + int ret; + struct spmc_shmem_obj *obj; + uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32); + + if (secure_origin) { + WARN("%s: unsupported reclaim direction.\n", __func__); + return spmc_ffa_error_return(handle, + FFA_ERROR_INVALID_PARAMETER); + } + + if (mem_flags != 0U) { + WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags); + return spmc_ffa_error_return(handle, + FFA_ERROR_INVALID_PARAMETER); + } + + spin_lock(&spmc_shmem_obj_state.lock); + + obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle); + if (obj == NULL) { + ret = FFA_ERROR_INVALID_PARAMETER; + goto err_unlock; + } + if (obj->in_use != 0U) { + ret = FFA_ERROR_DENIED; + goto err_unlock; + } + spmc_shmem_obj_free(&spmc_shmem_obj_state, obj); + spin_unlock(&spmc_shmem_obj_state.lock); + + SMC_RET1(handle, FFA_SUCCESS_SMC32); + +err_unlock: + spin_unlock(&spmc_shmem_obj_state.lock); + return spmc_ffa_error_return(handle, ret); +} diff --git a/services/std_svc/spm/el3_spmc/spmc_shared_mem.h b/services/std_svc/spm/el3_spmc/spmc_shared_mem.h new file mode 100644 index 000000000..8571e9436 --- /dev/null +++ b/services/std_svc/spm/el3_spmc/spmc_shared_mem.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SPMC_SHARED_MEM_H +#define SPMC_SHARED_MEM_H + +#include + +/** + * struct ffa_mem_relinquish_descriptor - Relinquish request descriptor. + * @handle: + * Id of shared memory object to relinquish. + * @flags: + * If bit 0 is set clear memory after unmapping from borrower. Must be 0 + * for share. Bit[1]: Time slicing. Not supported, must be 0. All other + * bits are reserved 0. + * @endpoint_count: + * Number of entries in @endpoint_array. + * @endpoint_array: + * Array of endpoint ids. + */ +struct ffa_mem_relinquish_descriptor { + uint64_t handle; + uint32_t flags; + uint32_t endpoint_count; + ffa_endpoint_id16_t endpoint_array[]; +}; +CASSERT(sizeof(struct ffa_mem_relinquish_descriptor) == 16, + assert_ffa_mem_relinquish_descriptor_size_mismatch); + +/** + * struct spmc_shmem_obj_state - Global state. + * @data: Backing store for spmc_shmem_obj objects. + * @data_size: The size allocated for the backing store. + * @allocated: Number of bytes allocated in @data. + * @next_handle: Handle used for next allocated object. + * @lock: Lock protecting all state in this file. 
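+ *
+ * Note: @next_handle is initialised to 0xffffffc0 by spmc_shmem_obj_state
+ * in spmc_shared_mem.c, so after 64 allocations handles no longer fit in
+ * 32 bits and callers' handling of the high handle word is exercised
+ * early.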
+ */ +struct spmc_shmem_obj_state { + uint8_t *data; + size_t data_size; + size_t allocated; + uint64_t next_handle; + spinlock_t lock; +}; + +extern struct spmc_shmem_obj_state spmc_shmem_obj_state; + +long spmc_ffa_mem_send(uint32_t smc_fid, + bool secure_origin, + uint64_t total_length, + uint32_t fragment_length, + uint64_t address, + uint32_t page_count, + void *cookie, + void *handle, + uint64_t flags); + +long spmc_ffa_mem_frag_tx(uint32_t smc_fid, + bool secure_origin, + uint64_t handle_low, + uint64_t handle_high, + uint32_t fragment_length, + uint32_t sender_id, + void *cookie, + void *handle, + uint64_t flags); + +long spmc_ffa_mem_retrieve_req(uint32_t smc_fid, + bool secure_origin, + uint32_t total_length, + uint32_t fragment_length, + uint64_t address, + uint32_t page_count, + void *cookie, + void *handle, + uint64_t flags); + +long spmc_ffa_mem_frag_rx(uint32_t smc_fid, + bool secure_origin, + uint32_t handle_low, + uint32_t handle_high, + uint32_t fragment_offset, + uint32_t sender_id, + void *cookie, + void *handle, + uint64_t flags); + + +int spmc_ffa_mem_relinquish(uint32_t smc_fid, + bool secure_origin, + uint32_t handle_low, + uint32_t handle_high, + uint32_t fragment_offset, + uint32_t sender_id, + void *cookie, + void *handle, + uint64_t flags); + +int spmc_ffa_mem_reclaim(uint32_t smc_fid, + bool secure_origin, + uint32_t handle_low, + uint32_t handle_high, + uint32_t mem_flags, + uint64_t x4, + void *cookie, + void *handle, + uint64_t flags); + +#endif /* SPMC_SHARED_MEM_H */
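Not part of the patch: spmc_setup() above requires the platform to provide
plat_spmc_shmem_datastore_get(). A minimal sketch of that hook is shown
below, assuming only the signature implied by the call site; the buffer
name and its 512-byte size are arbitrary example values, and real platforms
would size the pool for their expected number of concurrent memory
transactions.

#include <stddef.h>
#include <stdint.h>

/* Example backing store for spmc_shmem_obj allocations. */
static uint8_t plat_spmc_shmem_datastore[512];

int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size)
{
	*datastore = plat_spmc_shmem_datastore;
	*size = sizeof(plat_spmc_shmem_datastore);

	return 0;
}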