Merge branch 'upstream/master' into main

Change-Id: Id6f67665a9ee7f90cf5114727c525727a6c4cb19
diff --git a/plat/qemu/common/qemu_bl2_setup.c b/plat/qemu/common/qemu_bl2_setup.c
index 3e289fc..586704e 100644
--- a/plat/qemu/common/qemu_bl2_setup.c
+++ b/plat/qemu/common/qemu_bl2_setup.c
@@ -48,6 +48,214 @@
 	 */
 }
 
+#ifdef SPD_trusty
+
+#define GIC_SPI 0
+#define GIC_PPI 1
+
+static int spd_add_dt_node(void *fdt)
+{
+	int offs, trusty_offs, root_offs;
+	int gic, ipi;
+	int len;
+	const uint32_t *prop;
+
+	if (fdt_path_offset(fdt, "/trusty") >= 0) {
+		WARN("Trusty Device Tree node already exists!\n");
+		return 0;
+	}
+
+	offs = fdt_node_offset_by_compatible(fdt, -1, "arm,cortex-a15-gic");
+	if (offs < 0)
+		offs = fdt_node_offset_by_compatible(fdt, -1, "arm,gic-v3");
+
+	if (offs < 0)
+		return -1;
+	gic = fdt_get_phandle(fdt, offs);
+	if (!gic) {
+		WARN("Failed to get gic phandle\n");
+		return -1;
+	}
+	INFO("Found gic phandle 0x%x\n", gic);
+
+	offs = fdt_path_offset(fdt, "/");
+	if (offs < 0)
+		return -1;
+	root_offs = offs;
+
+	/* CustomIPI node for pre 5.10 linux driver */
+	offs = fdt_add_subnode(fdt, offs, "interrupt-controller");
+	if (offs < 0)
+		return -1;
+	ipi = fdt_get_max_phandle(fdt) + 1;
+	if (fdt_setprop_u32(fdt, offs, "phandle", ipi))
+		return -1;
+	INFO("Found ipi phandle 0x%x\n", ipi);
+
+	ipi = fdt_get_phandle(fdt, offs);
+	if (!ipi) {
+		WARN("Failed to get ipi phandle\n");
+		return -1;
+	}
+
+	if (fdt_appendprop_string(fdt, offs, "compatible", "android,CustomIPI"))
+		return -1;
+	if (fdt_setprop_u32(fdt, offs, "#interrupt-cells", 1))
+		return -1;
+	if (fdt_setprop_u32(fdt, offs, "interrupt-controller", 0))
+		return -1;
+
+	offs = fdt_add_subnode(fdt, root_offs, "trusty");
+	if (offs < 0)
+		return -1;
+	trusty_offs = offs;
+
+	if (fdt_appendprop_string(fdt, offs, "compatible", "android,trusty-smc-v1"))
+		return -1;
+	if (fdt_setprop_u32(fdt, offs, "ranges", 0))
+		return -1;
+	if (fdt_setprop_u32(fdt, offs, "#address-cells", 2))
+		return -1;
+	if (fdt_setprop_u32(fdt, offs, "#size-cells", 2))
+		return -1;
+
+	offs = fdt_add_subnode(fdt, trusty_offs, "irq");
+	if (offs < 0)
+		return -1;
+	if (fdt_appendprop_string(fdt, offs, "compatible", "android,trusty-irq-v1"))
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", ipi))
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", 0))
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", gic))
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", 1))
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", GIC_PPI))
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", 4))
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", gic))
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", 1))
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", GIC_SPI))
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", 4))
+		return -1;
+
+	/* CustomIPI range for pre 5.10 linux driver */
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 0))
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 15))
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 0))
+		return -1;
+
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 16))
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 31))
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 1))
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 32))
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 63))
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 2))
+		return -1;
+
+	if (fdt_appendprop_u32(fdt, offs, "ipi-range", 8))  /* beg */
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "ipi-range", 15)) /* end */
+		return -1;
+	if (fdt_appendprop_u32(fdt, offs, "ipi-range", 8))  /* ipi_base */
+		return -1;
+
+	offs = fdt_add_subnode(fdt, trusty_offs, "log");
+	if (offs < 0)
+		return -1;
+	if (fdt_appendprop_string(fdt, offs, "compatible", "android,trusty-log-v1"))
+		return -1;
+
+	offs = fdt_add_subnode(fdt, trusty_offs, "test");
+	if (offs < 0)
+		return -1;
+	if (fdt_appendprop_string(fdt, offs, "compatible", "android,trusty-test-v1"))
+		return -1;
+
+	offs = fdt_add_subnode(fdt, trusty_offs, "virtio");
+	if (offs < 0)
+		return -1;
+	if (fdt_appendprop_string(fdt, offs, "compatible", "android,trusty-virtio-v1"))
+		return -1;
+
+	offs = fdt_node_offset_by_compatible(fdt, -1, "arm,armv8-timer");
+	if (offs < 0)
+		offs = fdt_node_offset_by_compatible(fdt, -1, "arm,armv7-timer");
+	if (offs < 0)
+		return -1;
+
+	prop = fdt_getprop(fdt, offs, "interrupts", &len);
+	if (fdt_setprop_inplace_namelen_partial(fdt, offs, "interrupts",
+	                                        strlen("interrupts"), 0,
+	                                        prop + len / 4 / 2, len / 4))
+		return -1;
+
+	return 0;
+}
+
+#else
+
+static int spd_add_dt_node(void *fdt)
+{
+	return 0;
+}
+
+#endif
+
+static int qemu_dt_fixup_securemem(void *fdt)
+{
+	/*
+	 * QEMU adds a device tree node for secure memory. Linux fails to ignore
+	 * it and will crash when it allocates memory out of this secure memory
+	 * region. We currently don't use this node for anything, remove it.
+	 */
+
+	int offs;
+	const char *prop;
+	const char memory_device_type[] = "memory";
+
+	offs = -1;
+	while (true) {
+		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
+		                                     memory_device_type,
+		                                     sizeof(memory_device_type)
+		                                     );
+		if (offs < 0)
+			break;
+
+		prop = fdt_getprop(fdt, offs, "status", NULL);
+		if (prop == NULL)
+			continue;
+		if ((strcmp(prop, "disabled") != 0))
+			continue;
+		prop = fdt_getprop(fdt, offs, "secure-status", NULL);
+		if (prop == NULL)
+			continue;
+		if ((strcmp(prop, "okay") != 0))
+			continue;
+
+		if (fdt_del_node(fdt, offs)) {
+			return -1;
+		}
+		INFO("Removed secure memory node\n");
+	}
+
+	return 0;
+}
+
 static void update_dt(void)
 {
 	int ret;
@@ -59,6 +267,11 @@
 		return;
 	}
 
+	if (qemu_dt_fixup_securemem(fdt)) {
+		ERROR("Failed to fixup secure-mem Device Tree node\n");
+		return;
+	}
+
 	if (dt_add_psci_node(fdt)) {
 		ERROR("Failed to add PSCI Device Tree node\n");
 		return;
@@ -69,6 +282,11 @@
 		return;
 	}
 
+	if (spd_add_dt_node(fdt)) {
+		ERROR("Failed to add SPD Device Tree node\n");
+		return;
+	}
+
 	ret = fdt_pack(fdt);
 	if (ret < 0)
 		ERROR("Failed to pack Device Tree at %p: error %d\n", fdt, ret);
diff --git a/plat/qemu/qemu/include/platform_def.h b/plat/qemu/qemu/include/platform_def.h
index c02eff9..dbc47eb 100644
--- a/plat/qemu/qemu/include/platform_def.h
+++ b/plat/qemu/qemu/include/platform_def.h
@@ -84,10 +84,10 @@
 #define NS_DRAM0_SIZE			ULL(0xc0000000)
 
 #define SEC_SRAM_BASE			0x0e000000
-#define SEC_SRAM_SIZE			0x00060000
+#define SEC_SRAM_SIZE			0x00100000
 
 #define SEC_DRAM_BASE			0x0e100000
-#define SEC_DRAM_SIZE			0x00f00000
+#define SEC_DRAM_SIZE			0x2ff00000
 
 #define SECURE_GPIO_BASE		0x090b0000
 #define SECURE_GPIO_SIZE		0x00001000
@@ -137,7 +137,7 @@
  * Put BL2 just below BL3-1. BL2_BASE is calculated using the current BL2 debug
  * size plus a little space for growth.
  */
-#define BL2_BASE			(BL31_BASE - 0x25000)
+#define BL2_BASE			(BL31_BASE - 0x15000)
 #define BL2_LIMIT			BL31_BASE
 
 /*
@@ -146,7 +146,7 @@
  * Put BL3-1 at the top of the Trusted SRAM. BL31_BASE is calculated using the
  * current BL3-1 debug size plus a little space for growth.
  */
-#define BL31_BASE			(BL31_LIMIT - 0x20000)
+#define BL31_BASE			(BL31_LIMIT - 0xd0000)
 #define BL31_LIMIT			(BL_RAM_BASE + BL_RAM_SIZE)
 #define BL31_PROGBITS_LIMIT		BL1_RW_BASE
 
diff --git a/services/spd/trusty/shared-mem-smcall.c b/services/spd/trusty/shared-mem-smcall.c
new file mode 100644
index 0000000..854f60a
--- /dev/null
+++ b/services/spd/trusty/shared-mem-smcall.c
@@ -0,0 +1,1061 @@
+/*
+ * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <errno.h>
+#include <lib/object_pool.h>
+#include <lib/spinlock.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <platform_def.h>
+
+#include "shared-mem-smcall.h"
+
+/*
+ * Use a 512KB buffer by default for shared memory descriptors. Set
+ * TRUSTY_SHARED_MEMORY_OBJ_SIZE in platform_def.h to use a different value.
+ */
+#ifndef TRUSTY_SHARED_MEMORY_OBJ_SIZE
+#define TRUSTY_SHARED_MEMORY_OBJ_SIZE (512 * 1024)
+#endif
+
+/**
+ * struct trusty_shmem_obj - Shared memory object.
+ * @desc_size:      Size of @desc.
+ * @desc_filled:    Size of @desc already received.
+ * @in_use:         Number of clients that have called ffa_mem_retrieve_req
+ *                  without a matching ffa_mem_relinquish call.
+ * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
+ */
+struct trusty_shmem_obj {
+	size_t desc_size;
+	size_t desc_filled;
+	size_t in_use;
+	struct ffa_mtd desc;
+};
+
+/**
+ * struct trusty_shmem_obj_state - Global state.
+ * @data:           Backing store for trusty_shmem_obj objects.
+ * @allocated:      Number of bytes allocated in @data.
+ * @next_handle:    Handle used for next allocated object.
+ * @lock:           Lock protecting all state in this file.
+ */
+struct trusty_shmem_obj_state {
+	uint8_t data[TRUSTY_SHARED_MEMORY_OBJ_SIZE];
+	size_t allocated;
+	uint64_t next_handle;
+	struct spinlock lock;
+};
+
+/**
+ * struct trusty_shmem_client_state - Per client state.
+ * @tx_buf:             Client's transmit buffer.
+ * @rx_buf:             Client's receive buffer.
+ * @buf_size:           Size of @tx_buf and @rx_buf.
+ * @secure:             If %true, the client is the secure os.
+ * @identity_mapped:    If %true, all client memory is identity mapped.
+ * @receiver:           If %true, the client is allowed to receive memory.
+ *                      If %false, the client is allowed to send memory.
+ */
+struct trusty_shmem_client_state {
+	const void *tx_buf;
+	void *rx_buf;
+	size_t buf_size;
+	const bool secure;
+	const bool identity_mapped;
+	const bool receiver;
+};
+
+static struct trusty_shmem_obj_state trusty_shmem_obj_state = {
+	/* Set start value for handle so top 32 bits are needed quickly */
+	.next_handle = 0xffffffc0,
+};
+
+static struct trusty_shmem_client_state trusty_shmem_client_state[2] = {
+	[true].secure = true,
+	[true].identity_mapped = true,
+	[true].receiver = true,
+};
+
+/**
+ * trusty_shmem_obj_size - Convert from descriptor size to object size.
+ * @desc_size:  Size of struct ffa_memory_region_descriptor object.
+ *
+ * Return: Size of struct trusty_shmem_obj object.
+ */
+static size_t trusty_shmem_obj_size(size_t desc_size)
+{
+	return desc_size + offsetof(struct trusty_shmem_obj, desc);
+}
+
+/**
+ * trusty_shmem_obj_alloc - Allocate struct trusty_shmem_obj.
+ * @state:      Global state.
+ * @desc_size:  Size of struct ffa_memory_region_descriptor object that
+ *              allocated object will hold.
+ *
+ * Return: Pointer to newly allocated object, or %NULL if there is not enough
+ *         space left. The returned pointer is only valid while @state is
+ *         locked; to use it again after unlocking @state,
+ *         trusty_shmem_obj_lookup must be called.
+ */
+static struct trusty_shmem_obj *
+trusty_shmem_obj_alloc(struct trusty_shmem_obj_state *state, size_t desc_size)
+{
+	struct trusty_shmem_obj *obj;
+	size_t free = sizeof(state->data) - state->allocated;
+	if (trusty_shmem_obj_size(desc_size) > free) {
+		NOTICE("%s(0x%zx) failed, free 0x%zx\n",
+		       __func__, desc_size, free);
+		return NULL;
+	}
+	obj = (struct trusty_shmem_obj *)(state->data + state->allocated);
+	obj->desc_size = desc_size;
+	obj->desc_filled = 0;
+	obj->in_use = 0;
+	state->allocated += trusty_shmem_obj_size(desc_size);
+	return obj;
+}
+
+/**
+ * trusty_shmem_obj_free - Free struct trusty_shmem_obj.
+ * @state:      Global state.
+ * @obj:        Object to free.
+ *
+ * Release memory used by @obj. Other objects may move, so on return all
+ * pointers to struct trusty_shmem_obj object should be considered invalid, not
+ * just @obj.
+ *
+ * The current implementation always compacts the remaining objects to simplify
+ * the allocator and to avoid fragmentation.
+ */
+
+static void trusty_shmem_obj_free(struct trusty_shmem_obj_state *state,
+				  struct trusty_shmem_obj *obj)
+{
+	size_t free_size = trusty_shmem_obj_size(obj->desc_size);
+	uint8_t *shift_dest = (uint8_t *)obj;
+	uint8_t *shift_src = shift_dest + free_size;
+	size_t shift_size = state->allocated - (shift_src - state->data);
+	if (shift_size) {
+		memmove(shift_dest, shift_src, shift_size);
+	}
+	state->allocated -= free_size;
+}
+
+/**
+ * trusty_shmem_obj_lookup - Lookup struct trusty_shmem_obj by handle.
+ * @state:      Global state.
+ * @handle:     Unique handle of object to return.
+ *
+ * Return: struct trusty_shmem_obj object with handle matching @handle.
+ *         %NULL, if no object in @state->data has a matching handle.
+ */
+static struct trusty_shmem_obj *
+trusty_shmem_obj_lookup(struct trusty_shmem_obj_state *state, uint64_t handle)
+{
+	uint8_t *curr = state->data;
+	while (curr - state->data < state->allocated) {
+		struct trusty_shmem_obj *obj = (struct trusty_shmem_obj *)curr;
+		if (obj->desc.handle == handle) {
+			return obj;
+		}
+		curr += trusty_shmem_obj_size(obj->desc_size);
+	}
+	return NULL;
+}
+
+static struct ffa_comp_mrd *
+trusty_shmem_obj_get_comp_mrd(struct trusty_shmem_obj *obj)
+{
+	return (struct ffa_comp_mrd *)
+		((uint8_t *)(&obj->desc) + obj->desc.emad[0].comp_mrd_offset);
+}
+
+/**
+ * trusty_shmem_obj_ffa_constituent_size - Calculate variable size part of obj.
+ * @obj:    Object containing ffa_memory_region_descriptor.
+ *
+ * Return: Size of ffa_constituent_memory_region_descriptors in @obj.
+ */
+static size_t
+trusty_shmem_obj_ffa_constituent_size(struct trusty_shmem_obj *obj)
+{
+	return trusty_shmem_obj_get_comp_mrd(obj)->address_range_count *
+		sizeof(struct ffa_cons_mrd);
+}
+
+/**
+ * trusty_shmem_check_obj - Check that counts in descriptor match overall size.
+ * @obj:    Object containing ffa_memory_region_descriptor.
+ *
+ * Return: 0 if object is valid, -EINVAL if memory region attributes count is
+ * not 1, -EINVAL if constituent_memory_region_descriptor offset or count is
+ * invalid.
+ */
+static int trusty_shmem_check_obj(struct trusty_shmem_obj *obj)
+{
+	if (obj->desc.emad_count != 1) {
+		NOTICE("%s: unsupported attribute desc count %u != 1\n",
+		       __func__, obj->desc.emad_count);
+		return -EINVAL;
+	}
+
+	uint32_t offset = obj->desc.emad[0].comp_mrd_offset;
+	size_t header_emad_size = sizeof(obj->desc) +
+		obj->desc.emad_count * sizeof(obj->desc.emad[0]);
+
+	if (offset < header_emad_size) {
+		NOTICE("%s: invalid object, offset %u < header + emad %zu\n",
+		       __func__, offset, header_emad_size);
+		return -EINVAL;
+	}
+
+	size_t size = obj->desc_size;
+	if (offset > size) {
+		NOTICE("%s: invalid object, offset %u > total size %zu\n",
+		       __func__, offset, obj->desc_size);
+		return -EINVAL;
+	}
+	size -= offset;
+
+	if (size < sizeof(struct ffa_comp_mrd)) {
+		NOTICE("%s: invalid object, offset %u, total size %zu, no space for header\n",
+		       __func__, offset, obj->desc_size);
+		return -EINVAL;
+	}
+	size -= sizeof(struct ffa_comp_mrd);
+
+	size_t count = size / sizeof(struct ffa_cons_mrd);
+
+	struct ffa_comp_mrd *comp = trusty_shmem_obj_get_comp_mrd(obj);
+
+	if (comp->address_range_count != count) {
+		NOTICE("%s: invalid object, desc count %u != %zu\n",
+		       __func__, comp->address_range_count, count);
+		return -EINVAL;
+	}
+
+	size_t expected_size = offset + sizeof(*comp) +
+	                       trusty_shmem_obj_ffa_constituent_size(obj);
+	if (expected_size != obj->desc_size) {
+		NOTICE("%s: invalid object, computed size %zu != size %zu\n",
+		       __func__, expected_size, obj->desc_size);
+		return -EINVAL;
+	}
+
+	if (obj->desc_filled < obj->desc_size) {
+		/*
+		 * The whole descriptor has not yet been received. Skip final
+		 * checks.
+		 */
+		return 0;
+	}
+
+	size_t total_page_count = 0;
+	for (size_t i = 0; i < count; i++) {
+		total_page_count +=
+			comp->address_range_array[i].page_count;
+	}
+	if (comp->total_page_count != total_page_count) {
+		NOTICE("%s: invalid object, desc total_page_count %u != %zu\n",
+		       __func__, comp->total_page_count,
+		       total_page_count);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static long trusty_ffa_fill_desc(struct trusty_shmem_client_state *client,
+				 struct trusty_shmem_obj *obj,
+				 uint32_t fragment_length,
+				 void *smc_handle)
+{
+	int ret;
+
+	if (!client->buf_size) {
+		NOTICE("%s: buffer pair not registered\n", __func__);
+		ret = -EINVAL;
+		goto err_arg;
+	}
+
+	if (fragment_length > client->buf_size) {
+		NOTICE("%s: bad fragment size %u > %zu buffer size\n", __func__,
+		       fragment_length, client->buf_size);
+		ret = -EINVAL;
+		goto err_arg;
+	}
+
+	if (fragment_length > obj->desc_size - obj->desc_filled) {
+		NOTICE("%s: bad fragment size %u > %zu remaining\n", __func__,
+		       fragment_length, obj->desc_size - obj->desc_filled);
+		ret = -EINVAL;
+		goto err_arg;
+	}
+
+	memcpy((uint8_t *)&obj->desc + obj->desc_filled, client->tx_buf,
+	       fragment_length);
+
+	if (!obj->desc_filled) {
+		/* First fragment, descriptor header has been copied */
+		obj->desc.handle = trusty_shmem_obj_state.next_handle++;
+		obj->desc.flags = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
+	}
+
+	obj->desc_filled += fragment_length;
+
+	ret = trusty_shmem_check_obj(obj);
+	if (ret) {
+		goto err_bad_desc;
+	}
+
+	uint32_t handle_low = (uint32_t)obj->desc.handle;
+	uint32_t handle_high = obj->desc.handle >> 32;
+	if (obj->desc_filled != obj->desc_size) {
+		SMC_RET8(smc_handle, SMC_FC_FFA_MEM_FRAG_RX, handle_low,
+			 handle_high, obj->desc_filled,
+			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
+	}
+
+	SMC_RET8(smc_handle, SMC_FC_FFA_SUCCESS, 0, handle_low, handle_high, 0,
+		 0, 0, 0);
+
+err_bad_desc:
+err_arg:
+	trusty_shmem_obj_free(&trusty_shmem_obj_state, obj);
+	return ret;
+}
+
+/**
+ * trusty_ffa_mem_share - FFA_MEM_SHARE implementation.
+ * @client:             Client state.
+ * @total_length:       Total length of shared memory descriptor.
+ * @fragment_length:    Length of fragment of shared memory descriptor passed in
+ *                      this call.
+ * @address:            Not supported, must be 0.
+ * @page_count:         Not supported, must be 0.
+ * @smc_handle:         Handle passed to smc call. Used to return
+ *                      SMC_FC_FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
+ *
+ * Implements a subset of the FF-A FFA_MEM_SHARE call needed to share memory
+ * from non-secure os to secure os (with no stream endpoints).
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static long trusty_ffa_mem_share(struct trusty_shmem_client_state *client,
+				 uint32_t total_length,
+				 uint32_t fragment_length,
+				 uint64_t address,
+				 uint32_t page_count,
+				 void *smc_handle)
+{
+	struct trusty_shmem_obj *obj;
+
+	if (address || page_count) {
+		NOTICE("%s: custom memory region for message not supported\n",
+		       __func__);
+		return -EINVAL;
+	}
+
+	if (client->receiver) {
+		NOTICE("%s: unsupported share direction\n", __func__);
+		return -EINVAL;
+	}
+
+	if (fragment_length < sizeof(obj->desc)) {
+		NOTICE("%s: bad first fragment size %u < %zu\n",
+		       __func__, fragment_length, sizeof(obj->desc));
+		return -EINVAL;
+	}
+	obj = trusty_shmem_obj_alloc(&trusty_shmem_obj_state, total_length);
+	if (!obj) {
+		return -ENOMEM;
+	}
+
+	return trusty_ffa_fill_desc(client, obj, fragment_length, smc_handle);
+}
+
+/**
+ * trusty_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
+ * @client:             Client state.
+ * @handle_low:         Handle_low value returned from SMC_FC_FFA_MEM_FRAG_RX.
+ * @handle_high:        Handle_high value returned from SMC_FC_FFA_MEM_FRAG_RX.
+ * @fragment_length:    Length of fragments transmitted.
+ * @sender_id:          Vmid of sender in bits [31:16]
+ * @smc_handle:         Handle passed to smc call. Used to return
+ *                      SMC_FC_FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
+ *
+ * Return: @smc_handle on success, error code on failure.
+ */
+static long trusty_ffa_mem_frag_tx(struct trusty_shmem_client_state *client,
+				   uint32_t handle_low,
+				   uint32_t handle_high,
+				   uint32_t fragment_length,
+				   uint32_t sender_id,
+				   void *smc_handle)
+{
+	struct trusty_shmem_obj *obj;
+	uint64_t handle = handle_low | (((uint64_t)handle_high) << 32);
+
+	if (client->receiver) {
+		NOTICE("%s: unsupported share direction\n", __func__);
+		return -EINVAL;
+	}
+
+	obj = trusty_shmem_obj_lookup(&trusty_shmem_obj_state, handle);
+	if (!obj) {
+		NOTICE("%s: invalid handle, 0x%llx, not a valid handle\n",
+		       __func__, handle);
+		return -ENOENT;
+	}
+
+	if (sender_id != (uint32_t)obj->desc.sender_id << 16) {
+		NOTICE("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
+		       sender_id, (uint32_t)obj->desc.sender_id << 16);
+		return -ENOENT;
+	}
+
+	if (obj->desc_filled == obj->desc_size) {
+		NOTICE("%s: object desc already filled, %zu\n", __func__,
+		       obj->desc_filled);
+		return -EINVAL;
+	}
+
+	return trusty_ffa_fill_desc(client, obj, fragment_length, smc_handle);
+}
+
+/**
+ * trusty_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
+ * @client:             Client state.
+ * @total_length:       Total length of retrieve request descriptor if this is
+ *                      the first call. Otherwise (unsupported) must be 0.
+ * @fragment_length:    Length of fragment of retrieve request descriptor passed
+ *                      in this call. Only @fragment_length == @length is
+ *                      supported by this implementation.
+ * @address:            Not supported, must be 0.
+ * @page_count:         Not supported, must be 0.
+ * @smc_handle:         Handle passed to smc call. Used to return
+ *                      SMC_FC_FFA_MEM_RETRIEVE_RESP.
+ *
+ * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
+ * Used by secure os to retrieve memory already shared by non-secure os.
+ * If the data does not fit in a single SMC_FC_FFA_MEM_RETRIEVE_RESP message,
+ * the client must call FFA_MEM_FRAG_RX until the full response has been
+ * received.
+ *
+ * Return: @smc_handle on success, error code on failure.
+ */
+static long
+trusty_ffa_mem_retrieve_req(struct trusty_shmem_client_state *client,
+			    uint32_t total_length,
+			    uint32_t fragment_length,
+			    uint64_t address,
+			    uint32_t page_count,
+			    void *smc_handle)
+{
+	struct trusty_shmem_obj *obj = NULL;
+	const struct ffa_mtd *req = client->tx_buf;
+	struct ffa_mtd *resp = client->rx_buf;
+
+	if (!client->buf_size) {
+		NOTICE("%s: buffer pair not registered\n", __func__);
+		return -EINVAL;
+	}
+
+	if (address || page_count) {
+		NOTICE("%s: custom memory region not supported\n", __func__);
+		return -EINVAL;
+	}
+
+	if (fragment_length != total_length) {
+		NOTICE("%s: fragmented retrieve request not supported\n",
+		       __func__);
+		return -EINVAL;
+	}
+
+	/* req->emad_count is not set for retrieve by hypervisor */
+	if (client->receiver && req->emad_count != 1) {
+		NOTICE("%s: unsupported retrieve descriptor count: %u\n",
+		       __func__, req->emad_count);
+		return -EINVAL;
+	}
+
+	if (total_length < sizeof(*req)) {
+		NOTICE("%s: invalid length %u < %zu\n", __func__, total_length,
+		       sizeof(*req));
+		return -EINVAL;
+	}
+
+	obj = trusty_shmem_obj_lookup(&trusty_shmem_obj_state, req->handle);
+	if (!obj) {
+		return -ENOENT;
+	}
+
+	if (obj->desc_filled != obj->desc_size) {
+		NOTICE("%s: incomplete object desc filled %zu < size %zu\n",
+		       __func__, obj->desc_filled, obj->desc_size);
+		return -EINVAL;
+	}
+
+	if (req->emad_count && req->sender_id != obj->desc.sender_id) {
+		NOTICE("%s: wrong sender id 0x%x != 0x%x\n",
+		       __func__, req->sender_id, obj->desc.sender_id);
+		return -EINVAL;
+	}
+
+	if (req->emad_count && req->tag != obj->desc.tag) {
+		NOTICE("%s: wrong tag 0x%llx != 0x%llx\n",
+		       __func__, req->tag, obj->desc.tag);
+		return -EINVAL;
+	}
+
+	if (req->flags != 0 && req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY) {
+		/*
+		 * Current implementation does not support lend or donate, and
+		 * it supports no other flags.
+		 */
+		NOTICE("%s: invalid flags 0x%x\n", __func__, req->flags);
+		return -EINVAL;
+	}
+
+	/* TODO: support more than one endpoint ids */
+	if (req->emad_count &&
+	    req->emad[0].mapd.endpoint_id !=
+	    obj->desc.emad[0].mapd.endpoint_id) {
+		NOTICE("%s: wrong receiver id 0x%x != 0x%x\n",
+		       __func__, req->emad[0].mapd.endpoint_id,
+		       obj->desc.emad[0].mapd.endpoint_id);
+		return -EINVAL;
+	}
+
+	if (req->emad_count) {
+		obj->in_use++;
+	}
+
+	size_t copy_size = MIN(obj->desc_size, client->buf_size);
+
+	memcpy(resp, &obj->desc, copy_size);
+
+	SMC_RET8(smc_handle, SMC_FC_FFA_MEM_RETRIEVE_RESP, obj->desc_size,
+		 copy_size, 0, 0, 0, 0, 0);
+}
+
+/**
+ * trusty_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
+ * @client:             Client state.
+ * @handle_low:         Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
+ * @handle_high:        Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
+ * @fragment_offset:    Byte offset in descriptor to resume at.
+ * @sender_id:          Bit[31:16]: Endpoint id of sender if client is a
+ *                      hypervisor. 0 otherwise.
+ * @smc_handle:         Handle passed to smc call. Used to return
+ *                      SMC_FC_FFA_MEM_FRAG_TX.
+ *
+ * Return: @smc_handle on success, error code on failure.
+ */
+static long trusty_ffa_mem_frag_rx(struct trusty_shmem_client_state *client,
+				   uint32_t handle_low,
+				   uint32_t handle_high,
+				   uint32_t fragment_offset,
+				   uint32_t sender_id,
+				   void *smc_handle)
+{
+	struct trusty_shmem_obj *obj;
+	uint64_t handle = handle_low | (((uint64_t)handle_high) << 32);
+
+	if (!client->buf_size) {
+		NOTICE("%s: buffer pair not registered\n", __func__);
+		return -EINVAL;
+	}
+
+	if (client->secure && sender_id) {
+		NOTICE("%s: invalid sender_id 0x%x != 0\n",
+		       __func__, sender_id);
+		return -EINVAL;
+	}
+
+	obj = trusty_shmem_obj_lookup(&trusty_shmem_obj_state, handle);
+	if (!obj) {
+		NOTICE("%s: invalid handle, 0x%llx, not a valid handle\n",
+		       __func__, handle);
+		return -ENOENT;
+	}
+
+	if (!client->secure && sender_id &&
+	    sender_id != (uint32_t)obj->desc.sender_id << 16) {
+		NOTICE("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
+		       sender_id, (uint32_t)obj->desc.sender_id << 16);
+		return -ENOENT;
+	}
+
+	if (fragment_offset >= obj->desc_size) {
+		NOTICE("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
+		       __func__, fragment_offset, obj->desc_size);
+		return -EINVAL;
+	}
+
+	size_t full_copy_size = obj->desc_size - fragment_offset;
+	size_t copy_size = MIN(full_copy_size, client->buf_size);
+
+	void *src = &obj->desc;
+
+	memcpy(client->rx_buf, src + fragment_offset, copy_size);
+
+	SMC_RET8(smc_handle, SMC_FC_FFA_MEM_FRAG_TX, handle_low, handle_high,
+		 copy_size, sender_id, 0, 0, 0);
+}
+
+/**
+ * trusty_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
+ * @client:             Client state.
+ *
+ * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
+ * Used by secure os to release previously shared memory to non-secure os.
+ *
+ * The handle to release must be in the client's (secure os's) transmit buffer.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int trusty_ffa_mem_relinquish(struct trusty_shmem_client_state *client)
+{
+	struct trusty_shmem_obj *obj;
+	const struct ffa_mem_relinquish_descriptor *req = client->tx_buf;
+
+	if (!client->buf_size) {
+		NOTICE("%s: buffer pair not registered\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!client->receiver) {
+		NOTICE("%s: unsupported share direction\n", __func__);
+		return -EINVAL;
+	}
+
+	if (req->flags) {
+		NOTICE("%s: unsupported flags 0x%x\n", __func__, req->flags);
+		return -EINVAL;
+	}
+
+	obj = trusty_shmem_obj_lookup(&trusty_shmem_obj_state, req->handle);
+	if (!obj) {
+		return -ENOENT;
+	}
+
+	if (obj->desc.emad_count != req->endpoint_count) {
+		return -EINVAL;
+	}
+	for (size_t i = 0; i < req->endpoint_count; i++) {
+		if (req->endpoint_array[i] !=
+		    obj->desc.emad[i].mapd.endpoint_id) {
+			return -EINVAL;
+		}
+	}
+	if (!obj->in_use) {
+		return -EACCES;
+	}
+	obj->in_use--;
+	return 0;
+}
+
+/**
+ * trusty_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
+ * @client:         Client state.
+ * @handle_low:     Unique handle of shared memory object to reclaim. Bit[31:0].
+ * @handle_high:    Unique handle of shared memory object to reclaim. Bit[63:32].
+ * @flags:          Unsupported, ignored.
+ *
+ * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
+ * Used by non-secure os to reclaim memory previously shared with secure os.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int trusty_ffa_mem_reclaim(struct trusty_shmem_client_state *client,
+				  uint32_t handle_low, uint32_t handle_high,
+				  uint32_t flags)
+{
+	struct trusty_shmem_obj *obj;
+	uint64_t handle = handle_low | (((uint64_t)handle_high) << 32);
+
+	if (client->receiver) {
+		NOTICE("%s: unsupported share direction\n", __func__);
+		return -EINVAL;
+	}
+
+	if (flags) {
+		NOTICE("%s: unsupported flags 0x%x\n", __func__, flags);
+		return -EINVAL;
+	}
+
+	obj = trusty_shmem_obj_lookup(&trusty_shmem_obj_state, handle);
+	if (!obj) {
+		return -ENOENT;
+	}
+	if (obj->in_use) {
+		return -EACCES;
+	}
+	trusty_shmem_obj_free(&trusty_shmem_obj_state, obj);
+	return 0;
+}
+
+/**
+ * trusty_ffa_rxtx_map - FFA_RXTX_MAP implementation.
+ * @client:     Client state.
+ * @tx_address: Address of client's transmit buffer.
+ * @rx_address: Address of client's receive buffer.
+ * @page_count: Number of (contiguous) 4K pages per buffer.
+ *
+ * Implements the FF-A FFA_RXTX_MAP call.
+ * Used by non-secure os and secure os to register their RX/TX buffer pairs.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static long trusty_ffa_rxtx_map(struct trusty_shmem_client_state *client,
+				u_register_t tx_address,
+				u_register_t rx_address,
+				uint32_t page_count)
+{
+	int ret;
+	uintptr_t tx_va;
+	uintptr_t rx_va;
+	size_t buf_size = page_count * FFA_PAGE_SIZE;
+
+	if (!buf_size) {
+		NOTICE("%s: invalid page_count %u\n", __func__, page_count);
+		return -EINVAL;
+	}
+
+	if (client->buf_size) {
+		NOTICE("%s: buffer pair already registered\n", __func__);
+		return -EACCES;
+	}
+
+	if (client->identity_mapped) {
+		tx_va = tx_address;
+		rx_va = rx_address;
+	} else {
+		unsigned int attr = client->secure ? MT_SECURE : MT_NS;
+		ret = mmap_add_dynamic_region_alloc_va(tx_address, &tx_va,
+						       buf_size,
+						       attr | MT_RO_DATA);
+		if (ret) {
+			NOTICE("%s: failed to map tx buffer @ 0x%lx, size 0x%zx\n",
+			       __func__, tx_address, buf_size);
+			goto err_map_tx;
+		}
+		ret = mmap_add_dynamic_region_alloc_va(rx_address, &rx_va,
+						       buf_size,
+						       attr | MT_RW_DATA);
+		if (ret) {
+			NOTICE("%s: failed to map rx buffer @ 0x%lx, size 0x%zx\n",
+			       __func__, rx_address, buf_size);
+			goto err_map_rx;
+		}
+	}
+
+	client->buf_size = buf_size;
+	client->tx_buf = (const void *)tx_va;
+	client->rx_buf = (void *)rx_va;
+
+	return 0;
+
+err_map_rx:
+	mmap_remove_dynamic_region(tx_va, buf_size);
+err_map_tx:
+	return ret;
+}
+
+/**
+ * trusty_ffa_rxtx_unmap - FFA_RXTX_UNMAP implementation.
+ * @client:     Client state.
+ * @id:         Unsupported, ignored.
+ *
+ * Implements the FF-A FFA_RXTX_UNMAP call.
+ * Used by non-secure os and secure os to release their RX/TX buffer pairs.
+ *
+ * Unmap failures are logged but deliberately not propagated: the
+ * registration is always cleared so the client can register a fresh
+ * buffer pair afterwards.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static long trusty_ffa_rxtx_unmap(struct trusty_shmem_client_state *client,
+				  uint32_t id)
+{
+	int ret;
+
+	if (!client->buf_size) {
+		NOTICE("%s: buffer pair not registered\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Buffers dynamically mapped at rxtx_map time need a matching unmap. */
+	if (!client->identity_mapped) {
+		ret = mmap_remove_dynamic_region((uintptr_t)client->tx_buf,
+						 client->buf_size);
+		if (ret) {
+			NOTICE("%s: failed to unmap tx buffer @ %p, size 0x%zx\n",
+			       __func__, client->tx_buf, client->buf_size);
+		}
+		ret = mmap_remove_dynamic_region((uintptr_t)client->rx_buf,
+						 client->buf_size);
+		if (ret) {
+			NOTICE("%s: failed to unmap rx buffer @ %p, size 0x%zx\n",
+			       __func__, client->rx_buf, client->buf_size);
+		}
+	}
+	/*
+	 * Warn if shared memory objects are still allocated; they may still
+	 * reference the buffers being released.
+	 */
+	if (trusty_shmem_obj_state.allocated) {
+		WARN("%s: shared memory regions are still active\n", __func__);
+	}
+
+	client->buf_size = 0;
+	client->tx_buf = NULL;
+	client->rx_buf = NULL;
+	return 0;
+}
+
+/**
+ * trusty_ffa_id_get - FFA_ID_GET implementation.
+ * @client:     Client state.
+ * @idp:        Output pointer that receives the caller's endpoint ID.
+ *
+ * Report the caller's endpoint ID: 0 for the non-secure client, as required
+ * by FF-A, and 0x8000 for the secure side, the ID Hafnium expects the secure
+ * OS to use.
+ *
+ * Note that the sender_id check in trusty_ffa_mem_frag_tx and
+ * trusty_ffa_mem_frag_rx only works when there is no hypervisor because we
+ * use id 0. The spec says the sender_id field must be 0 in that case.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int trusty_ffa_id_get(struct trusty_shmem_client_state *client,
+			     u_register_t *idp)
+{
+	if (client->secure) {
+		*idp = 0x8000;
+	} else {
+		*idp = 0;
+	}
+	return 0;
+}
+
+/**
+ * trusty_ffa_version - FFA_VERSION implementation.
+ * @client:     Client state.
+ * @version_in: Version supported by client.
+ * @smc_handle: Handle passed to smc call. Used to return version or error code
+ *              directly as this call does not use the FFA_SUCCESS and FFA_ERROR
+ *              opcodes that the other calls use.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static long trusty_ffa_version(struct trusty_shmem_client_state *client,
+			       uint32_t version_in, void *smc_handle)
+{
+	/* Bit 31 of the version argument must be 0 per the FF-A spec. */
+	if (version_in & (1U << 31)) {
+		goto err_not_supported;
+	}
+
+	/*
+	 * We only implement one version. If the client specified the same or
+	 * a newer major version than ours, return the version we support.
+	 * Otherwise return not-supported.
+	 */
+	if (FFA_VERSION_TO_MAJOR(version_in) >= FFA_CURRENT_VERSION_MAJOR) {
+		SMC_RET8(smc_handle, FFA_CURRENT_VERSION, 0, 0, 0, 0, 0, 0, 0);
+	}
+
+err_not_supported:
+	SMC_RET1(smc_handle, (uint32_t)FFA_ERROR_NOT_SUPPORTED);
+}
+
+/**
+ * trusty_ffa_features - FFA_FEATURES implementation.
+ * @client:     Client state.
+ * @func:       Api to check.
+ * @ret2:       Pointer to return value2 on success.
+ * @ret3:       Pointer to return value3 on success.
+ *
+ * Reports whether @func is implemented, and fills in the opcode-specific
+ * feature words where the spec defines them.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int trusty_ffa_features(struct trusty_shmem_client_state *client,
+			       uint32_t func, u_register_t *ret2,
+			       u_register_t *ret3)
+{
+	/* Only fast-calls in the shared-memory entity range are FF-A ids. */
+	if (SMC_ENTITY(func) != SMC_ENTITY_SHARED_MEMORY ||
+	    !SMC_IS_FASTCALL(func)) {
+		return -EINVAL;
+	}
+	switch (func) {
+	/* Supported opcodes with no extra feature bits to report. */
+	case SMC_FC_FFA_ERROR:
+	case SMC_FC_FFA_SUCCESS:
+	case SMC_FC_FFA_VERSION:
+	case SMC_FC_FFA_FEATURES:
+	case SMC_FC_FFA_RXTX_UNMAP:
+	case SMC_FC_FFA_ID_GET:
+	case SMC_FC_FFA_MEM_RETRIEVE_RESP:
+	case SMC_FC_FFA_MEM_FRAG_RX:
+	case SMC_FC_FFA_MEM_FRAG_TX:
+		return 0;
+
+	case SMC_FC_FFA_RXTX_MAP:
+	case SMC_FC64_FFA_RXTX_MAP:
+		/* Minimum RX/TX buffer size and alignment is 4K. */
+		*ret2 = FFA_FEATURES2_RXTX_MAP_BUF_SIZE_4K;
+		return 0;
+
+	case SMC_FC_FFA_MEM_RETRIEVE_REQ:
+	case SMC_FC64_FFA_MEM_RETRIEVE_REQ:
+		/*
+		 * Indicate that object can be retrieved up to 2^64 - 1 times
+		 * (on a 64 bit build). We track the number of times an object
+		 * had been retrieved in a variable of type size_t.
+		 */
+		*ret3 = sizeof(size_t) * 8 - 1;
+		/* Intentional: RETRIEVE_REQ also reports *ret2 = 0 below. */
+		__attribute__((fallthrough));
+
+	case SMC_FC_FFA_MEM_SHARE:
+	case SMC_FC64_FFA_MEM_SHARE:
+	case SMC_FC_FFA_MEM_RELINQUISH:
+	case SMC_FC_FFA_MEM_RECLAIM:
+		*ret2 = 0;
+		return 0;
+
+	default:
+		return -ENOTSUP;
+	}
+}
+
+/**
+ * to_spi_err - Convert from local error code to FF-A error code.
+ * @ret:    Local (negative errno-style) error code.
+ *
+ * Codes without an explicit mapping fall back to
+ * %FFA_ERROR_INVALID_PARAMETERS.
+ *
+ * Return: FF-A defined error code.
+ */
+static int to_spi_err(long ret)
+{
+	if (ret == -ENOMEM) {
+		return FFA_ERROR_NO_MEMORY;
+	}
+	if (ret == -EACCES) {
+		return FFA_ERROR_DENIED;
+	}
+	if (ret == -ENOTSUP) {
+		return FFA_ERROR_NOT_SUPPORTED;
+	}
+	/* -EINVAL, -ENOENT and any unrecognized code. */
+	return FFA_ERROR_INVALID_PARAMETERS;
+}
+
+/*
+ * spm_mm_smc_handler - SMC call handler for FF-A shared memory calls.
+ *
+ * Decodes the FF-A function id, dispatches to the trusty_ffa_* helpers
+ * under the shared-memory object lock, then encodes the result registers.
+ */
+uintptr_t spm_mm_smc_handler(uint32_t smc_fid,
+			     u_register_t x1,
+			     u_register_t x2,
+			     u_register_t x3,
+			     u_register_t x4,
+			     void *cookie,
+			     void *handle,
+			     u_register_t flags)
+{
+	long ret = -1;
+	/*
+	 * Some arguments to FF-A functions are specified to come from 32 bit
+	 * (w) registers. Create 32 bit copies of the 64 bit arguments that can
+	 * be passed to these functions.
+	 */
+	uint32_t w1 = (uint32_t)x1;
+	uint32_t w2 = (uint32_t)x2;
+	uint32_t w3 = (uint32_t)x3;
+	uint32_t w4 = (uint32_t)x4;
+	u_register_t ret_reg2 = 0;
+	u_register_t ret_reg3 = 0;
+	/* Per-world client state: index 0 = non-secure, 1 = secure caller. */
+	struct trusty_shmem_client_state *client = &trusty_shmem_client_state[
+		is_caller_secure(flags)];
+
+	/* Reject opcodes outside both the 32 and 64 bit FF-A id ranges. */
+	if (((smc_fid < SMC_FC32_FFA_MIN) || (smc_fid > SMC_FC32_FFA_MAX)) &&
+	    ((smc_fid < SMC_FC64_FFA_MIN) || (smc_fid > SMC_FC64_FFA_MAX))) {
+		NOTICE("%s(0x%x) unknown smc\n", __func__, smc_fid);
+		SMC_RET1(handle, SMC_UNK);
+	}
+
+	spin_lock(&trusty_shmem_obj_state.lock);
+
+	switch (smc_fid) {
+	case SMC_FC_FFA_VERSION:
+		ret = trusty_ffa_version(client, w1, handle);
+		break;
+
+	case SMC_FC_FFA_FEATURES:
+		ret = trusty_ffa_features(client, w1, &ret_reg2, &ret_reg3);
+		break;
+
+	case SMC_FC_FFA_RXTX_MAP:
+		ret = trusty_ffa_rxtx_map(client, w1, w2, w3);
+		break;
+
+	case SMC_FC64_FFA_RXTX_MAP:
+		/* 64 bit variant: addresses come from full x registers. */
+		ret = trusty_ffa_rxtx_map(client, x1, x2, w3);
+		break;
+
+	case SMC_FC_FFA_RXTX_UNMAP:
+		ret = trusty_ffa_rxtx_unmap(client, w1);
+		break;
+
+	case SMC_FC_FFA_ID_GET:
+		ret = trusty_ffa_id_get(client, &ret_reg2);
+		break;
+
+	case SMC_FC_FFA_MEM_SHARE:
+		ret = trusty_ffa_mem_share(client, w1, w2, w3, w4, handle);
+		break;
+
+	case SMC_FC64_FFA_MEM_SHARE:
+		/* 64 bit variant: buffer address comes from x3. */
+		ret = trusty_ffa_mem_share(client, w1, w2, x3, w4, handle);
+		break;
+
+	case SMC_FC_FFA_MEM_RETRIEVE_REQ:
+		ret = trusty_ffa_mem_retrieve_req(client, w1, w2, w3, w4,
+						  handle);
+		break;
+
+	case SMC_FC64_FFA_MEM_RETRIEVE_REQ:
+		/* 64 bit variant: buffer address comes from x3. */
+		ret = trusty_ffa_mem_retrieve_req(client, w1, w2, x3, w4,
+						  handle);
+		break;
+
+	case SMC_FC_FFA_MEM_RELINQUISH:
+		ret = trusty_ffa_mem_relinquish(client);
+		break;
+
+	case SMC_FC_FFA_MEM_RECLAIM:
+		ret = trusty_ffa_mem_reclaim(client, w1, w2, w3);
+		break;
+
+	case SMC_FC_FFA_MEM_FRAG_RX:
+		ret = trusty_ffa_mem_frag_rx(client, w1, w2, w3, w4, handle);
+		break;
+
+	case SMC_FC_FFA_MEM_FRAG_TX:
+		ret = trusty_ffa_mem_frag_tx(client, w1, w2, w3, w4, handle);
+		break;
+
+	default:
+		NOTICE("%s(0x%x, 0x%lx) unsupported ffa smc\n", __func__,
+		       smc_fid, x1);
+		ret = -ENOTSUP;
+		break;
+	}
+	spin_unlock(&trusty_shmem_obj_state.lock);
+
+	if (ret) {
+		if (ret == (int64_t)handle) {
+			/*
+			 * Helpers that encode their own result registers (via
+			 * SMC_RET*) return the handle value itself; pass it
+			 * through unchanged rather than wrapping in FFA_ERROR.
+			 */
+			return ret;
+		}
+		NOTICE("%s(0x%x) failed %ld\n", __func__, smc_fid, ret);
+		SMC_RET8(handle, SMC_FC_FFA_ERROR, 0, to_spi_err(ret), 0, 0, 0,
+			 0, 0);
+	} else {
+		SMC_RET8(handle, SMC_FC_FFA_SUCCESS, 0, ret_reg2, ret_reg3, 0,
+			 0, 0, 0);
+	}
+}
diff --git a/services/spd/trusty/shared-mem-smcall.h b/services/spd/trusty/shared-mem-smcall.h
new file mode 100644
index 0000000..3ddf030
--- /dev/null
+++ b/services/spd/trusty/shared-mem-smcall.h
@@ -0,0 +1,614 @@
+/*
+ * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#pragma once
+
+/*
+ * Subset of Arm PSA Firmware Framework for Arm v8-A 1.0 EAC
+ * (https://developer.arm.com/docs/den0077/a) needed for shared memory.
+ */
+
+#include "smcall.h"
+
+#ifndef STATIC_ASSERT
+#define STATIC_ASSERT(e) _Static_assert(e, #e)
+#endif
+
+#define FFA_CURRENT_VERSION_MAJOR (1U)
+#define FFA_CURRENT_VERSION_MINOR (0U)
+
+#define FFA_VERSION_TO_MAJOR(version) ((version) >> 16)
+#define FFA_VERSION_TO_MINOR(version) ((version) & (0xffff))
+#define FFA_VERSION(major, minor) (((major) << 16) | (minor))
+#define FFA_CURRENT_VERSION \
+    FFA_VERSION(FFA_CURRENT_VERSION_MAJOR, FFA_CURRENT_VERSION_MINOR)
+
+#define SMC_ENTITY_SHARED_MEMORY 4
+
+#define SMC_FASTCALL_NR_SHARED_MEMORY(nr) \
+    SMC_FASTCALL_NR(SMC_ENTITY_SHARED_MEMORY, nr)
+#define SMC_FASTCALL64_NR_SHARED_MEMORY(nr) \
+    SMC_FASTCALL64_NR(SMC_ENTITY_SHARED_MEMORY, nr)
+
+#define FFA_PAGE_SIZE (4096)
+
+/**
+ * typedef ffa_endpoint_id16_t - Endpoint ID
+ *
+ * Current implementation only supports VMIDs. FFA spec also support stream
+ * endpoint ids.
+ */
+typedef uint16_t ffa_endpoint_id16_t;
+
+/**
+ * struct ffa_cons_mrd - Constituent memory region descriptor
+ * @address:
+ *         Start address of contiguous memory region. Must be 4K page aligned.
+ * @page_count:
+ *         Number of 4K pages in region.
+ * @reserved_12_15:
+ *         Reserve bytes 12-15 to pad struct size to 16 bytes.
+ */
+struct ffa_cons_mrd {
+    uint64_t address;
+    uint32_t page_count;
+    uint32_t reserved_12_15;
+};
+STATIC_ASSERT(sizeof(struct ffa_cons_mrd) == 16);
+
+/**
+ * struct ffa_comp_mrd - Composite memory region descriptor
+ * @total_page_count:
+ *         Number of 4k pages in memory region. Must match sum of
+ *         @address_range_array[].page_count.
+ * @address_range_count:
+ *         Number of entries in @address_range_array.
+ * @reserved_8_15:
+ *         Reserve bytes 8-15 to pad struct size to 16 byte alignment and
+ *         make @address_range_array 16 byte aligned.
+ * @address_range_array:
+ *         Array of &struct ffa_cons_mrd entries.
+ */
+struct ffa_comp_mrd {
+    uint32_t total_page_count;
+    uint32_t address_range_count;
+    uint64_t reserved_8_15;
+    struct ffa_cons_mrd address_range_array[];
+};
+STATIC_ASSERT(sizeof(struct ffa_comp_mrd) == 16);
+
+/**
+ * typedef ffa_mem_attr8_t - Memory region attributes
+ *
+ * * @FFA_MEM_ATTR_DEVICE_NGNRNE:
+ *     Device-nGnRnE.
+ * * @FFA_MEM_ATTR_DEVICE_NGNRE:
+ *     Device-nGnRE.
+ * * @FFA_MEM_ATTR_DEVICE_NGRE:
+ *     Device-nGRE.
+ * * @FFA_MEM_ATTR_DEVICE_GRE:
+ *     Device-GRE.
+ * * @FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED
+ *     Normal memory. Non-cacheable.
+ * * @FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB
+ *     Normal memory. Write-back cached.
+ * * @FFA_MEM_ATTR_NON_SHAREABLE
+ *     Non-shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
+ * * @FFA_MEM_ATTR_OUTER_SHAREABLE
+ *     Outer Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
+ * * @FFA_MEM_ATTR_INNER_SHAREABLE
+ *     Inner Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
+ */
+typedef uint8_t ffa_mem_attr8_t;
+#define FFA_MEM_ATTR_DEVICE_NGNRNE ((1U << 4) | (0x0U << 2))
+#define FFA_MEM_ATTR_DEVICE_NGNRE ((1U << 4) | (0x1U << 2))
+#define FFA_MEM_ATTR_DEVICE_NGRE ((1U << 4) | (0x2U << 2))
+#define FFA_MEM_ATTR_DEVICE_GRE ((1U << 4) | (0x3U << 2))
+#define FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED ((2U << 4) | (0x1U << 2))
+#define FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB ((2U << 4) | (0x3U << 2))
+#define FFA_MEM_ATTR_NON_SHAREABLE (0x0U << 0)
+#define FFA_MEM_ATTR_OUTER_SHAREABLE (0x2U << 0)
+#define FFA_MEM_ATTR_INNER_SHAREABLE (0x3U << 0)
+
+/**
+ * typedef ffa_mem_perm8_t - Memory access permissions
+ *
+ * * @FFA_MEM_ATTR_RO
+ *     Request or specify read-only mapping.
+ * * @FFA_MEM_ATTR_RW
+ *     Request or allow read-write mapping.
+ * * @FFA_MEM_PERM_NX
+ *     Deny executable mapping.
+ * * @FFA_MEM_PERM_X
+ *     Request executable mapping.
+ */
+typedef uint8_t ffa_mem_perm8_t;
+#define FFA_MEM_PERM_RO (1U << 0)
+#define FFA_MEM_PERM_RW (1U << 1)
+#define FFA_MEM_PERM_NX (1U << 2)
+#define FFA_MEM_PERM_X (1U << 3)
+
+/**
+ * typedef ffa_mem_flag8_t - Endpoint memory flags
+ *
+ * * @FFA_MEM_FLAG_NON_RETRIEVAL_BORROWER
+ *     Non-retrieval Borrower. Memory region must not be or was not retrieved on
+ *     behalf of this endpoint.
+ */
+typedef uint8_t ffa_mem_flag8_t;
+#define FFA_MEM_FLAG_NON_RETRIEVAL_BORROWER (1U << 0)
+
+/**
+ * typedef ffa_mtd_flag32_t - Memory transaction descriptor flags
+ *
+ * * @FFA_MTD_FLAG_ZERO_MEMORY
+ *     Zero memory after unmapping from sender (must be 0 for share).
+ * * @FFA_MTD_FLAG_TIME_SLICING
+ *     Not supported by this implementation.
+ * * @FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH
+ *     Zero memory after unmapping from borrowers (must be 0 for share).
+ * * @FFA_MTD_FLAG_TYPE_MASK
+ *     Bit-mask to extract memory management transaction type from flags.
+ * * @FFA_MTD_FLAG_TYPE_SHARE_MEMORY
+ *     Share memory transaction flag.
+ *     Used by @SMC_FC_FFA_MEM_RETRIEVE_RESP to indicate that memory came from
+ *     @SMC_FC_FFA_MEM_SHARE and by @SMC_FC_FFA_MEM_RETRIEVE_REQ to require
+ *     that the memory was shared with @SMC_FC_FFA_MEM_SHARE.
+ * * @FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK
+ *     Not supported by this implementation.
+ */
+typedef uint32_t ffa_mtd_flag32_t;
+#define FFA_MTD_FLAG_ZERO_MEMORY (1U << 0)
+#define FFA_MTD_FLAG_TIME_SLICING (1U << 1)
+#define FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH (1U << 2)
+#define FFA_MTD_FLAG_TYPE_MASK (3U << 3)
+#define FFA_MTD_FLAG_TYPE_SHARE_MEMORY (1U << 3)
+#define FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK (0x1FU << 5)
+
+/**
+ * struct ffa_mapd - Memory access permissions descriptor
+ * @endpoint_id:
+ *         Endpoint id that @memory_access_permissions and @flags apply to.
+ *         (&typedef ffa_endpoint_id16_t).
+ * @memory_access_permissions:
+ *         FFA_MEM_PERM_* values or'ed together (&typedef ffa_mem_perm8_t).
+ * @flags:
+ *         FFA_MEM_FLAG_* values or'ed together (&typedef ffa_mem_flag8_t).
+ */
+struct ffa_mapd {
+    ffa_endpoint_id16_t endpoint_id;
+    ffa_mem_perm8_t memory_access_permissions;
+    ffa_mem_flag8_t flags;
+};
+STATIC_ASSERT(sizeof(struct ffa_mapd) == 4);
+
+/**
+ * struct ffa_emad - Endpoint memory access descriptor.
+ * @mapd:  &struct ffa_mapd.
+ * @comp_mrd_offset:
+ *         Offset of &struct ffa_comp_mrd form start of &struct ffa_mtd.
+ * @reserved_8_15:
+ *         Reserved bytes 8-15. Must be 0.
+ */
+struct ffa_emad {
+    struct ffa_mapd mapd;
+    uint32_t comp_mrd_offset;
+    uint64_t reserved_8_15;
+};
+STATIC_ASSERT(sizeof(struct ffa_emad) == 16);
+
+/**
+ * struct ffa_mtd - Memory transaction descriptor.
+ * @sender_id:
+ *         Sender endpoint id.
+ * @memory_region_attributes:
+ *         FFA_MEM_ATTR_* values or'ed together (&typedef ffa_mem_attr8_t).
+ * @reserved_3:
+ *         Reserved bytes 3. Must be 0.
+ * @flags:
+ *         FFA_MTD_FLAG_* values or'ed together (&typedef ffa_mtd_flag32_t).
+ * @handle:
+ *         Id of shared memory object. Must be 0 for MEM_SHARE.
+ * @tag:   Client allocated tag. Must match original value.
+ * @reserved_24_27:
+ *         Reserved bytes 24-27. Must be 0.
+ * @emad_count:
+ *         Number of entries in @emad. Must be 1 in current implementation.
+ *         FFA spec allows more entries.
+ * @emad:
+ *         Endpoint memory access descriptor array (see @struct ffa_emad).
+ */
+struct ffa_mtd {
+    ffa_endpoint_id16_t sender_id;
+    ffa_mem_attr8_t memory_region_attributes;
+    uint8_t reserved_3;
+    ffa_mtd_flag32_t flags;
+    uint64_t handle;
+    uint64_t tag;
+    uint32_t reserved_24_27;
+    uint32_t emad_count;
+    struct ffa_emad emad[];
+};
+STATIC_ASSERT(sizeof(struct ffa_mtd) == 32);
+
+/**
+ * struct ffa_mem_relinquish_descriptor - Relinquish request descriptor.
+ * @handle:
+ *         Id of shared memory object to relinquish.
+ * @flags:
+ *         If bit 0 is set clear memory after unmapping from borrower. Must be 0
+ *         for share. Bit[1]: Time slicing. Not supported, must be 0. All other
+ *         bits are reserved 0.
+ * @endpoint_count:
+ *         Number of entries in @endpoint_array.
+ * @endpoint_array:
+ *         Array of endpoint ids.
+ */
+struct ffa_mem_relinquish_descriptor {
+    uint64_t handle;
+    uint32_t flags;
+    uint32_t endpoint_count;
+    ffa_endpoint_id16_t endpoint_array[];
+};
+STATIC_ASSERT(sizeof(struct ffa_mem_relinquish_descriptor) == 16);
+
+/**
+ * typedef ffa_features2_t - FFA_FEATURES values returned in w2
+ *
+ * * @FFA_FEATURES2_RXTX_MAP_BUF_SIZE_MASK
+ *     For RXTX_MAP: min buffer size and alignment boundary mask.
+ * * @FFA_FEATURES2_RXTX_MAP_BUF_SIZE_4K
+ *     For RXTX_MAP: min buffer size and alignment boundary is 4K.
+ * * @FFA_FEATURES2_RXTX_MAP_BUF_SIZE_64K
+ *     For RXTX_MAP: min buffer size and alignment boundary is 64K.
+ * * @FFA_FEATURES2_RXTX_MAP_BUF_SIZE_16K
+ *     For RXTX_MAP: min buffer size and alignment boundary is 16K.
+ * * @FFA_FEATURES2_MEM_DYNAMIC_BUFFER
+ *     Supports custom buffers for memory transactions.
+ *
+ * For all other bits and commands: must be 0.
+ */
+typedef uint32_t ffa_features2_t;
+#define FFA_FEATURES2_RXTX_MAP_BUF_SIZE_MASK 0x3U
+#define FFA_FEATURES2_RXTX_MAP_BUF_SIZE_4K 0x0U
+#define FFA_FEATURES2_RXTX_MAP_BUF_SIZE_64K 0x1U
+#define FFA_FEATURES2_RXTX_MAP_BUF_SIZE_16K 0x2U
+#define FFA_FEATURES2_MEM_DYNAMIC_BUFFER 0x1U
+
+/**
+ * typedef ffa_features3_t - FFA_FEATURES values returned in w3
+ *
+ * * @FFA_FEATURES3_MEM_RETRIEVE_REQ_REFCOUNT_MASK
+ *     For FFA_MEM_RETRIEVE_REQ, bit[7-0]: Number of times receiver can
+ *     retrieve each memory region before relinquishing it specified as
+ *     ((1U << (value + 1)) - 1 (or value = bits in reference count - 1).
+ *
+ * For all other bits and commands: must be 0.
+ */
+typedef uint32_t ffa_features3_t;
+#define FFA_FEATURES3_MEM_RETRIEVE_REQ_REFCOUNT_MASK 0xffU
+
+/**
+ * enum ffa_error - FF-A error code
+ * @FFA_ERROR_NOT_SUPPORTED:
+ *         Operation is not supported by the current implementation.
+ * @FFA_ERROR_INVALID_PARAMETERS:
+ *         Invalid parameters. Conditions function specific.
+ * @FFA_ERROR_NO_MEMORY:
+ *         Not enough memory.
+ * @FFA_ERROR_DENIED:
+ *         Operation not allowed. Conditions function specific.
+ *
+ * FF-A 1.0 EAC defines other error codes as well but the current implementation
+ * does not use them.
+ */
+enum ffa_error {
+    FFA_ERROR_NOT_SUPPORTED = -1,
+    FFA_ERROR_INVALID_PARAMETERS = -2,
+    FFA_ERROR_NO_MEMORY = -3,
+    FFA_ERROR_DENIED = -6,
+};
+
+/**
+ * SMC_FC32_FFA_MIN - First 32 bit SMC opcode reserved for FFA
+ */
+#define SMC_FC32_FFA_MIN SMC_FASTCALL_NR_SHARED_MEMORY(0x60)
+
+/**
+ * SMC_FC32_FFA_MAX - Last 32 bit SMC opcode reserved for FFA
+ */
+#define SMC_FC32_FFA_MAX SMC_FASTCALL_NR_SHARED_MEMORY(0x7F)
+
+/**
+ * SMC_FC64_FFA_MIN - First 64 bit SMC opcode reserved for FFA
+ */
+#define SMC_FC64_FFA_MIN SMC_FASTCALL64_NR_SHARED_MEMORY(0x60)
+
+/**
+ * SMC_FC64_FFA_MAX - Last 64 bit SMC opcode reserved for FFA
+ */
+#define SMC_FC64_FFA_MAX SMC_FASTCALL64_NR_SHARED_MEMORY(0x7F)
+
+/**
+ * SMC_FC_FFA_ERROR - SMC error return opcode
+ *
+ * Register arguments:
+ *
+ * * w1:     VMID in [31:16], vCPU in [15:0]
+ * * w2:     Error code (&enum ffa_error)
+ */
+#define SMC_FC_FFA_ERROR SMC_FASTCALL_NR_SHARED_MEMORY(0x60)
+
+/**
+ * SMC_FC_FFA_SUCCESS - 32 bit SMC success return opcode
+ *
+ * Register arguments:
+ *
+ * * w1:     VMID in [31:16], vCPU in [15:0]
+ * * w2-w7:  Function specific
+ */
+#define SMC_FC_FFA_SUCCESS SMC_FASTCALL_NR_SHARED_MEMORY(0x61)
+
+/**
+ * SMC_FC64_FFA_SUCCESS - 64 bit SMC success return opcode
+ *
+ * Register arguments:
+ *
+ * * w1:             VMID in [31:16], vCPU in [15:0]
+ * * w2/x2-w7/x7:    Function specific
+ */
+#define SMC_FC64_FFA_SUCCESS SMC_FASTCALL64_NR_SHARED_MEMORY(0x61)
+
+/**
+ * SMC_FC_FFA_VERSION - SMC opcode to return supported FF-A version
+ *
+ * Register arguments:
+ *
+ * * w1:     Major version bit[30:16] and minor version in bit[15:0] supported
+ *           by caller. Bit[31] must be 0.
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ * * w2:     Major version bit[30:16], minor version in bit[15:0], bit[31] must
+ *           be 0.
+ *
+ * or
+ *
+ * * w0:     &SMC_FC_FFA_ERROR
+ * * w2:     %FFA_ERROR_NOT_SUPPORTED if major version passed in is less than
+ *           the minimum major version supported.
+ */
+#define SMC_FC_FFA_VERSION SMC_FASTCALL_NR_SHARED_MEMORY(0x63)
+
+/**
+ * SMC_FC_FFA_FEATURES - SMC opcode to check optional feature support
+ *
+ * Register arguments:
+ *
+ * * w1:     FF-A function ID
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ * * w2:     &typedef ffa_features2_t
+ * * w3:     &typedef ffa_features3_t
+ *
+ * or
+ *
+ * * w0:     &SMC_FC_FFA_ERROR
+ * * w2:     %FFA_ERROR_NOT_SUPPORTED if function is not implemented, or
+ *           %FFA_ERROR_INVALID_PARAMETERS if function id is not valid.
+ */
+#define SMC_FC_FFA_FEATURES SMC_FASTCALL_NR_SHARED_MEMORY(0x64)
+
+/**
+ * SMC_FC_FFA_RXTX_MAP - 32 bit SMC opcode to map message buffers
+ *
+ * Register arguments:
+ *
+ * * w1:     TX address
+ * * w2:     RX address
+ * * w3:     RX/TX page count in bit[5:0]
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ */
+#define SMC_FC_FFA_RXTX_MAP SMC_FASTCALL_NR_SHARED_MEMORY(0x66)
+
+/**
+ * SMC_FC64_FFA_RXTX_MAP - 64 bit SMC opcode to map message buffers
+ *
+ * Register arguments:
+ *
+ * * x1:     TX address
+ * * x2:     RX address
+ * * x3:     RX/TX page count in bit[5:0]
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ */
+#define SMC_FC64_FFA_RXTX_MAP SMC_FASTCALL64_NR_SHARED_MEMORY(0x66)
+
+/**
+ * SMC_FC_FFA_RXTX_UNMAP - SMC opcode to unmap message buffers
+ *
+ * Register arguments:
+ *
+ * * w1:     ID in [31:16]
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ */
+#define SMC_FC_FFA_RXTX_UNMAP SMC_FASTCALL_NR_SHARED_MEMORY(0x67)
+
+/**
+ * SMC_FC_FFA_ID_GET - SMC opcode to get endpoint id of caller
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ * * w2:     ID in bit[15:0], bit[31:16] must be 0.
+ */
+#define SMC_FC_FFA_ID_GET SMC_FASTCALL_NR_SHARED_MEMORY(0x69)
+
+/**
+ * SMC_FC_FFA_MEM_DONATE - 32 bit SMC opcode to donate memory
+ *
+ * Not supported.
+ */
+#define SMC_FC_FFA_MEM_DONATE SMC_FASTCALL_NR_SHARED_MEMORY(0x71)
+
+/**
+ * SMC_FC_FFA_MEM_LEND - 32 bit SMC opcode to lend memory
+ *
+ * Not currently supported.
+ */
+#define SMC_FC_FFA_MEM_LEND SMC_FASTCALL_NR_SHARED_MEMORY(0x72)
+
+/**
+ * SMC_FC_FFA_MEM_SHARE - 32 bit SMC opcode to share memory
+ *
+ * Register arguments:
+ *
+ * * w1:     Total length
+ * * w2:     Fragment length
+ * * w3:     Address
+ * * w4:     Page count
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ * * w2/w3:  Handle
+ *
+ * or
+ *
+ * * w0:     &SMC_FC_FFA_MEM_FRAG_RX
+ * * w1-:    See &SMC_FC_FFA_MEM_FRAG_RX
+ *
+ * or
+ *
+ * * w0:     &SMC_FC_FFA_ERROR
+ * * w2:     Error code (&enum ffa_error)
+ */
+#define SMC_FC_FFA_MEM_SHARE SMC_FASTCALL_NR_SHARED_MEMORY(0x73)
+
+/**
+ * SMC_FC64_FFA_MEM_SHARE - 64 bit SMC opcode to share memory
+ *
+ * Register arguments:
+ *
+ * * w1:     Total length
+ * * w2:     Fragment length
+ * * x3:     Address
+ * * w4:     Page count
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ * * w2/w3:  Handle
+ *
+ * or
+ *
+ * * w0:     &SMC_FC_FFA_MEM_FRAG_RX
+ * * w1-:    See &SMC_FC_FFA_MEM_FRAG_RX
+ *
+ * or
+ *
+ * * w0:     &SMC_FC_FFA_ERROR
+ * * w2:     Error code (&enum ffa_error)
+ */
+#define SMC_FC64_FFA_MEM_SHARE SMC_FASTCALL64_NR_SHARED_MEMORY(0x73)
+
+/**
+ * SMC_FC_FFA_MEM_RETRIEVE_REQ - 32 bit SMC opcode to retrieve shared memory
+ *
+ * Register arguments:
+ *
+ * * w1:     Total length
+ * * w2:     Fragment length
+ * * w3:     Address
+ * * w4:     Page count
+ *
+ * Return:
+ * * w0:             &SMC_FC_FFA_MEM_RETRIEVE_RESP
+ * * w1/x1-w5/x5:    See &SMC_FC_FFA_MEM_RETRIEVE_RESP
+ */
+#define SMC_FC_FFA_MEM_RETRIEVE_REQ SMC_FASTCALL_NR_SHARED_MEMORY(0x74)
+
+/**
+ * SMC_FC64_FFA_MEM_RETRIEVE_REQ - 64 bit SMC opcode to retrieve shared memory
+ *
+ * Register arguments:
+ *
+ * * w1:     Total length
+ * * w2:     Fragment length
+ * * x3:     Address
+ * * w4:     Page count
+ *
+ * Return:
+ * * w0:             &SMC_FC_FFA_MEM_RETRIEVE_RESP
+ * * w1/x1-w5/x5:    See &SMC_FC_FFA_MEM_RETRIEVE_RESP
+ */
+#define SMC_FC64_FFA_MEM_RETRIEVE_REQ SMC_FASTCALL64_NR_SHARED_MEMORY(0x74)
+
+/**
+ * SMC_FC_FFA_MEM_RETRIEVE_RESP - Retrieve 32 bit SMC return opcode
+ *
+ * Register arguments:
+ *
+ * * w1:     Total length
+ * * w2:     Fragment length
+ */
+#define SMC_FC_FFA_MEM_RETRIEVE_RESP SMC_FASTCALL_NR_SHARED_MEMORY(0x75)
+
+/**
+ * SMC_FC_FFA_MEM_RELINQUISH - SMC opcode to relinquish shared memory
+ *
+ * Input in &struct ffa_mem_relinquish_descriptor format in message buffer.
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ */
+#define SMC_FC_FFA_MEM_RELINQUISH SMC_FASTCALL_NR_SHARED_MEMORY(0x76)
+
+/**
+ * SMC_FC_FFA_MEM_RECLAIM - SMC opcode to reclaim shared memory
+ *
+ * Register arguments:
+ *
+ * * w1/w2:  Handle
+ * * w3:     Flags
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ */
+#define SMC_FC_FFA_MEM_RECLAIM SMC_FASTCALL_NR_SHARED_MEMORY(0x77)
+
+/**
+ * SMC_FC_FFA_MEM_FRAG_RX - SMC opcode to request next fragment.
+ *
+ * Register arguments:
+ *
+ * * w1/w2:  Handle
+ * * w3:     Fragment offset.
+ * * w4:     Endpoint ID in bits [31:16], if the client is a hypervisor.
+ *
+ * Return:
+ * * w0:             &SMC_FC_FFA_MEM_FRAG_TX
+ * * w1/x1-w5/x5:    See &SMC_FC_FFA_MEM_FRAG_TX
+ */
+#define SMC_FC_FFA_MEM_FRAG_RX SMC_FASTCALL_NR_SHARED_MEMORY(0x7A)
+
+/**
+ * SMC_FC_FFA_MEM_FRAG_TX - SMC opcode to transmit next fragment
+ *
+ * Register arguments:
+ *
+ * * w1/w2:  Handle
+ * * w3:     Fragment length.
+ * * w4:     Sender endpoint ID in bits [31:16], if the client is a hypervisor.
+ *
+ * Return:
+ * * w0:             &SMC_FC_FFA_MEM_FRAG_RX or &SMC_FC_FFA_SUCCESS.
+ * * w1/x1-w5/x5:    See opcode in w0.
+ */
+#define SMC_FC_FFA_MEM_FRAG_TX SMC_FASTCALL_NR_SHARED_MEMORY(0x7B)
diff --git a/services/spd/trusty/trusty.c b/services/spd/trusty/trusty.c
index 7daebcd..e0f1ba2 100644
--- a/services/spd/trusty/trusty.c
+++ b/services/spd/trusty/trusty.c
@@ -438,12 +438,14 @@
 		return -1;
 	}
 
+#ifdef LATE_MAPPED_BL32
 	/* memmap first page of trusty's code memory before peeking */
 	ret = mmap_add_dynamic_region(ep_info->pc, /* PA */
 			ep_info->pc, /* VA */
 			PAGE_SIZE, /* size */
 			MT_SECURE | MT_RW_DATA); /* attrs */
 	assert(ret == 0);
+#endif
 
 	/* peek into trusty's code to see if we have a 32-bit or 64-bit image */
 	instr = *(uint32_t *)ep_info->pc;
@@ -458,8 +460,10 @@
 		return -1;
 	}
 
+#ifdef LATE_MAPPED_BL32
 	/* unmap trusty's memory page */
 	(void)mmap_remove_dynamic_region(ep_info->pc, PAGE_SIZE);
+#endif
 
 	SET_PARAM_HEAD(ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
 	if (!aarch32)
@@ -523,12 +527,22 @@
 	trusty_fast,
 
 	OEN_TOS_START,
-	OEN_TOS_END,
+	SMC_ENTITY_SECURE_MONITOR,
 	SMC_TYPE_FAST,
 	trusty_setup,
 	trusty_smc_handler
 );
 
+DECLARE_RT_SVC(
+	trusty_fast_uuid,
+
+	OEN_TOS_END,
+	OEN_TOS_END,
+	SMC_TYPE_FAST,
+	NULL,
+	trusty_smc_handler
+);
+
 /* Define a SPD runtime service descriptor for yielding SMC calls */
 DECLARE_RT_SVC(
 	trusty_std,
diff --git a/services/spd/trusty/trusty.mk b/services/spd/trusty/trusty.mk
index 43b80bb..9dcdc1d 100644
--- a/services/spd/trusty/trusty.mk
+++ b/services/spd/trusty/trusty.mk
@@ -4,6 +4,8 @@
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
+TRUSTY_SPD_WITH_SHARED_MEM ?= 1
+
 SPD_INCLUDES		:=
 
 SPD_SOURCES		:=	services/spd/trusty/trusty.c		\
@@ -13,6 +15,19 @@
 SPD_SOURCES		+=	services/spd/trusty/generic-arm64-smcall.c
 endif
 
+ifeq (${TRUSTY_SPD_WITH_SHARED_MEM},1)
+BL31_CFLAGS		+=	-DPLAT_XLAT_TABLES_DYNAMIC=1 \
+				-DTRUSTY_SPM=1
+SPD_SOURCES		+=	services/spd/trusty/shared-mem-smcall.c
+endif
+
+# On Tegra, BL2 does not map us into memory before our SPD is initialized.
+# Setting LATE_MAPPED_BL32 indicates that we need to dynamically map in
+# our first code page to detect whether the image is 32-bit or 64-bit.
+ifeq (${PLAT},tegra)
+CFLAGS                  +=      -DLATE_MAPPED_BL32
+endif
+
 NEED_BL32		:=	yes
 
 CTX_INCLUDE_FPREGS	:=	1
diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c
index 39db429..e6baafd 100644
--- a/services/std_svc/std_svc_setup.c
+++ b/services/std_svc/std_svc_setup.c
@@ -125,7 +125,7 @@
 		SMC_RET1(handle, ret);
 	}
 
-#if SPM_MM
+#if SPM_MM || TRUSTY_SPM
 	/*
 	 * Dispatch SPM calls to SPM SMC handler and return its return
 	 * value