WIP: Remove Trusty virtio hack and implement VM destruction notifications

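Replace the Trusty-specific SMC_SC_VIRTIO_STOP teardown hack with FF-A
VM availability messages. Once the host maps its RX/TX buffers, the
hypervisor queries FFA_PARTITION_INFO_GET for secure partitions that
subscribe to VM events, queues a MSG_VM_DESTROYED event per subscriber
when a protected VM starts teardown, and delivers the queued events as
FFA_MSG_SEND_DIRECT_REQ framework messages before reclaiming the
guest's memory.
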
Change-Id: Id0d8764979abc9d9849867a4d82793c0ef9a24d1
diff --git a/arch/arm64/kvm/hyp/include/nvhe/ffa.h b/arch/arm64/kvm/hyp/include/nvhe/ffa.h
index 614d4a0..cba1f63 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/ffa.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/ffa.h
@@ -20,7 +20,19 @@
  */
 #define HOST_FFA_ID	0
 
+/*
+ * The maximum number of secure partitions tracked for VM availability
+ * messaging.
+ */
+#define FFA_MAX_SPS 8
+
+/*
+ * FF-A framework message flags: bit 31 marks a framework message and
+ * bits [7:0] carry the message ID (4 = VM created, 6 = VM destroyed).
+ */
+#define FFA_MSG_FLAGS_VM_CREATED 0x80000004
+#define FFA_MSG_FLAGS_VM_DESTROYED 0x80000006
+
+/* Partition property bit: the SP subscribes to VM availability messages. */
+#define FFA_PROP_VM_EVENT_SUB (1 << 6)
+
+enum kvm_ffa_vm_event_type {
+	MSG_VM_CREATED,
+	MSG_VM_DESTROYED,
+};
+
 int hyp_ffa_init(void *pages);
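+/* Discover VM-event subscriber SPs once the RX/TX buffers are mapped. */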
+void hyp_ffa_post_rxtx_init(void);
 bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt);
 int kvm_guest_ffa_handler(struct pkvm_hyp_vcpu *hyp_vcpu, u64 *exit_code);
 
@@ -40,6 +52,7 @@ static inline bool is_ffa_error(struct kvm_vcpu *vcpu)
 }
 
 bool hyp_ffa_release_buffers(struct pkvm_hyp_vcpu *vcpu, int vmid, void *addr);
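+/* Queue a VM availability event for every subscribed SP. */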
+int queue_vm_messages(u16 vmid, enum kvm_ffa_vm_event_type event);
 int guest_ffa_reclaim_memory(struct pkvm_hyp_vm *vm);
 
 #endif /* __KVM_HYP_FFA_H */
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 1594f0c..c3bcea3 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -10,8 +10,10 @@
 # will explode instantly (Words of Marc Zyngier). So introduce a generic flag
 # __DISABLE_TRACE_MMIO__ to disable MMIO tracing for nVHE KVM.
 ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS -D__DISABLE_TRACE_MMIO__
+# TODO(dmitriyf): remove __NO_FORTIFY after fixing ld.lld: error: undefined symbol: __kvm_nvhe_fortify_panic
 ccflags-y += -fno-stack-protector	\
 	     -DDISABLE_BRANCH_PROFILING	\
+	     -D__NO_FORTIFY \
 	     $(DISABLE_STACKLEAK_PLUGIN)
 
 hostprogs := gen-hyprel
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
index 356f884..f274d35 100644
--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -29,6 +29,7 @@
 #include <linux/arm-smccc.h>
 #include <linux/arm_ffa.h>
+#include <linux/circ_buf.h>
 #include <linux/list.h>
 #include <asm/kvm_pkvm.h>
 
 #include <kvm/arm_hypercalls.h>
@@ -51,12 +52,6 @@
-#define SMC_SC_VIRTIO_STOP      SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 22)
 
 /*
- * "ID value 0 must be returned at the Non-secure physical FF-A instance"
- * We share this ID with the host.
- */
-#define HOST_FFA_ID	0
-
-/*
  * A buffer to hold the maximum descriptor size we can see from the host,
  * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
  * when resolving the handle on the reclaim path.
@@ -87,6 +82,19 @@ struct kvm_ffa_buffers {
 	struct list_head transfers;
 };
 
+/* Per-SP ring of pending VM availability events; must be a power of two. */
+#define FFA_VM_MSG_QUEUE_SIZE 8
+
+struct kvm_ffa_vm_msg {
+	u16 vm_id;
+	enum kvm_ffa_vm_event_type msg;
+};
+
+struct kvm_ffa_subscription {
+	u16 sp_id;
+	bool is_active;
+	struct kvm_ffa_vm_msg pending_msgs[FFA_VM_MSG_QUEUE_SIZE];
+	u8 pending_msg_head;
+	u8 pending_msg_tail;
+};
+
 /*
  * Note that we don't currently lock these buffers explicitly, instead
  * relying on the locking of the hyp FFA buffers.
@@ -95,6 +103,7 @@ static struct kvm_ffa_buffers hyp_buffers;
 static struct kvm_ffa_buffers non_secure_el1_buffers[KVM_MAX_PVMS];
 static u8 hyp_buffer_refcnt;
 static bool ffa_available;
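+/* Per-SP subscriptions, discovered in hyp_ffa_post_rxtx_init(). */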
+static struct kvm_ffa_subscription ffa_vm_events_subscriptions[FFA_MAX_SPS];
 
 static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
 {
@@ -206,11 +215,48 @@ static void spmd_retrieve_req(struct arm_smccc_res *res, u32 len)
 			  res);
 }
 
-static void trusty_stop_virtio(struct arm_smccc_res *res, u32 client_id)
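+/*
+ * Flush queued VM availability events to every subscribed SP as FF-A
+ * direct request framework messages. Returns false if an SP could not
+ * accept an event, so the caller can retry later.
+ */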
+static bool notify_sp_vm_events(u16 vmid)
 {
-	arm_smccc_1_1_smc(SMC_SC_VIRTIO_STOP,
-			  0, 0, 0, 0, 0, 0, client_id,
-			  res);
+	int i;
+	u32 ffa_id;
+	u32 msg_flags;
+	struct kvm_ffa_subscription *sub;
+	struct arm_smccc_res res;
+
+	for (i = 0; i < FFA_MAX_SPS; i++) {
+		sub = &ffa_vm_events_subscriptions[i];
+		if (!sub->is_active)
+			continue;
+		while (CIRC_CNT(sub->pending_msg_head, sub->pending_msg_tail,
+				FFA_VM_MSG_QUEUE_SIZE)) {
+			struct kvm_ffa_vm_msg *pending_msg =
+				&sub->pending_msgs[sub->pending_msg_tail];
+
+			/*
+			 * The ring is FIFO: an event for another VM stays
+			 * queued until that VM's own flush.
+			 */
+			if (pending_msg->vm_id != vmid)
+				break;
+			switch (pending_msg->msg) {
+			case MSG_VM_CREATED:
+				msg_flags = FFA_MSG_FLAGS_VM_CREATED;
+				break;
+			case MSG_VM_DESTROYED:
+				msg_flags = FFA_MSG_FLAGS_VM_DESTROYED;
+				break;
+			default:
+				/* Unknown event: drop it rather than spin. */
+				msg_flags = 0;
+				break;
+			}
+			if (msg_flags) {
+				/* Sender ID in bits [31:16], receiver SP in [15:0]. */
+				ffa_id = ((u32)HOST_FFA_ID << 16) | sub->sp_id;
+				arm_smccc_1_1_smc(FFA_FN64_MSG_SEND_DIRECT_REQ,
+						  ffa_id, msg_flags, 0, 0,
+						  vmid, 0, 0, &res);
+				if (res.a2) {
+					/* TODO(dmitriyf): retry if the SP is busy? */
+					return false;
+				}
+			}
+
+			sub->pending_msg_tail++;
+			sub->pending_msg_tail &= FFA_VM_MSG_QUEUE_SIZE - 1;
+		}
+	}
+	return true;
 }
 
 static int host_share_hyp_buffers(struct kvm_cpu_context *ctxt)
@@ -885,6 +931,7 @@ static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
 	return;
 }
 
+/* __always_inline so func_id is a compile-time constant at each call site. */
 static __always_inline int do_ffa_mem_xfer(const u64 func_id,
 					   struct arm_smccc_res *res,
 					   struct kvm_cpu_context *ctxt,
@@ -1236,6 +1283,8 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
 	/* Memory management */
 	case FFA_FN64_RXTX_MAP:
 		do_ffa_rxtx_map(&res, host_ctxt, 0);
+		/* Discover VM-event subscribers; a no-op after first success. */
+		hyp_ffa_post_rxtx_init();
 		goto out_handled;
 	case FFA_RXTX_UNMAP:
 		do_ffa_rxtx_unmap(&res, host_ctxt, 0);
@@ -1367,6 +1416,25 @@ bool hyp_ffa_release_buffers(struct pkvm_hyp_vcpu *vcpu, int vmid, void *addr)
 	return !!found;
 }
 
+int queue_vm_messages(u16 vmid, enum kvm_ffa_vm_event_type event)
+{
+	int i;
+
+	for (i = 0; i < FFA_MAX_SPS; i++) {
+		struct kvm_ffa_subscription *sub =
+			&ffa_vm_events_subscriptions[i];
+
+		/* Best effort: if an SP's ring is full, the event is dropped. */
+		if (sub->is_active && CIRC_SPACE(sub->pending_msg_head,
+						 sub->pending_msg_tail,
+						 FFA_VM_MSG_QUEUE_SIZE)) {
+			struct kvm_ffa_vm_msg *pending_msg =
+				&sub->pending_msgs[sub->pending_msg_head];
+
+			pending_msg->msg = event;
+			pending_msg->vm_id = vmid;
+			sub->pending_msg_head++;
+			sub->pending_msg_head &= FFA_VM_MSG_QUEUE_SIZE - 1;
+		}
+	}
+	return 0;
+}
+
 int guest_ffa_reclaim_memory(struct pkvm_hyp_vm *vm)
 {
 	struct pkvm_hyp_vcpu *hyp_vcpu = vm->vcpus[0];
@@ -1395,14 +1463,13 @@ int guest_ffa_reclaim_memory(struct pkvm_hyp_vm *vm)
 	guest_ctxt = &non_secure_el1_buffers[vmid];
 	req = hyp_buffers.tx;
 
-	if (!ffa_available || list_empty(&guest_ctxt->transfers)) {
-		ret= 0;
+	if (!ffa_available) {
+		ret = 0;
 		goto unlock;
 	}
 
-	trusty_stop_virtio(&res, vmid & U32_MAX);
-	if (res.a0 == -5) {
-		ret = res.a0;
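+	/* Let subscribed SPs drop per-VM state before memory is reclaimed. */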
+	if (!notify_sp_vm_events(vmid)) {
+		ret = -EAGAIN;
 		goto unlock;
 	}
 
@@ -1413,11 +1480,6 @@ int guest_ffa_reclaim_memory(struct pkvm_hyp_vm *vm)
 			.handle         = transfer_ctxt->ffa_handle,
 		};
 
-		/* TODO: Remove the hack to relinquish the FF-A global memory
-		 * handlers on the Secure OS side once FF-A destroy
-		 * message is implemented.
-		 */
-
 		handle_lo = HANDLE_LOW(transfer_ctxt->ffa_handle);
 		handle_hi = HANDLE_HIGH(transfer_ctxt->ffa_handle);
 
@@ -1549,3 +1611,39 @@ int hyp_ffa_init(void *pages)
 
 	return 0;
 }
+
+void hyp_ffa_post_rxtx_init(void)
+{
+	static bool initialized;
+	struct arm_smccc_res res;
+	struct ffa_partition_info buffer[FFA_MAX_SPS];
+	size_t count;
+	size_t size;
+	size_t total_size;
+	size_t i;
+
+	/* Runs once; a failed discovery is retried on the next RXTX_MAP. */
+	if (initialized)
+		return;
+
+	arm_smccc_1_1_smc(FFA_PARTITION_INFO_GET, 0, 0, 0, 0, 0, 0, 0, &res);
+	if (res.a0 != FFA_SUCCESS)
+		return;
+
+	count = res.a2;
+	size = res.a3;
+
+	/* buffer[i] indexing assumes the SPMC's descriptor layout is ours. */
+	if (size != sizeof(struct ffa_partition_info))
+		return;
+	if (unlikely(check_mul_overflow(count, size, &total_size)))
+		return;
+	if (total_size > sizeof(buffer))
+		return;
+
+	memcpy(buffer, hyp_buffers.rx, total_size);
+	arm_smccc_1_1_smc(FFA_RX_RELEASE, 0, 0, 0, 0, 0, 0, 0, &res);
+
+	for (i = 0; i < FFA_MAX_SPS; i++) {
+		struct kvm_ffa_subscription *sub =
+			&ffa_vm_events_subscriptions[i];
+
+		/* Entries past 'count' are uninitialized stack: skip them. */
+		sub->is_active = i < count &&
+			(buffer[i].properties & FFA_PROP_VM_EVENT_SUB);
+		if (sub->is_active) {
+			sub->sp_id = buffer[i].id;
+			sub->pending_msg_head = sub->pending_msg_tail = 0;
+		}
+	}
+	initialized = true;
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index fc88438..72e368d 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -943,6 +943,10 @@ int __pkvm_start_teardown_vm(pkvm_handle_t handle)
 		goto unlock;
 	}
 
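+	/* Queue the destroyed event once, even if teardown is retried. */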
+	if (!hyp_vm->is_dying) {
+		queue_vm_messages(atomic64_read(&hyp_vm->kvm.arch.mmu.vmid.id),
+				  MSG_VM_DESTROYED);
+	}
 	hyp_vm->is_dying = true;
 	ret = guest_ffa_reclaim_memory(hyp_vm);
 unlock:
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 41e7a07..9a228fd 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -4,6 +4,7 @@
  * Author: Quentin Perret <qperret@google.com>
  */
 
+#include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/kmemleak.h>
@@ -266,9 +267,10 @@ void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
 		ret = kvm_call_hyp_nvhe(__pkvm_start_teardown_vm,
 					host_kvm->arch.pkvm.handle);
 		if (ret != 0) {
-			WARN_ON(ret);
 			pr_warn("start teardown returned: %d\n", ret);
-			cond_resched();
+			/* Back off; the SP may still be consuming the event. */
+			msleep(100);
 		} else {
 			break;
 		}
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 7aa2eb7..4048d10 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -42,6 +42,7 @@
 #define VIRTIO_ID_RPROC_SERIAL		11 /* virtio remoteproc serial link */
 #define VIRTIO_ID_CAIF			12 /* Virtio caif */
 #define VIRTIO_ID_MEMORY_BALLOON	13 /* virtio memory balloon */
+#define VIRTIO_ID_TRUSTY_IPC		14 /* virtio trusty ipc */
 #define VIRTIO_ID_GPU			16 /* virtio GPU */
 #define VIRTIO_ID_CLOCK			17 /* virtio clock/timer */
 #define VIRTIO_ID_INPUT			18 /* virtio input */