[DEBUG] All my (@thibautp) debug code for the hyp-proxy setup
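
Throwaway PL011 tracing for the pKVM hyp-proxy bring-up: debug prints in
the nVHE vcpu-run and guest-mapping paths (hyp-main.c), VM/vCPU
initialization (pkvm.c) and the guest entry/exit loop (switch.c), plus a
pr_warn on the host side when a vcpu is caught in a forbidden AArch32
mode. The ISR-pending early exit in __guest_enter is tagged with a magic
exit code (42) so switch.c can log it before restoring
ARM_EXCEPTION_IRQ, and the illegal-exception-return vector in
hyp-entry.S prints "IL". The debug-pl011.h helpers are adjusted to build
(alternative_cb now takes ARM64_ALWAYS_SYSTEM) and no longer append a
newline after hex output.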

Change-Id: I4b44a5a15950040b0fac1a4516c6ea3d1d5517f0
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 86e0ecf..6659f19 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1135,6 +1135,7 @@
 		 * a fatal error.
 		 */
 		if (vcpu_mode_is_bad_32bit(vcpu)) {
+			pr_warn("Host decided to return IL\n");
 			/*
 			 * As we have caught the guest red-handed, decide that
 			 * it isn't fit for purpose anymore by making the vcpu
diff --git a/arch/arm64/kvm/hyp/debug-pl011.h b/arch/arm64/kvm/hyp/debug-pl011.h
index 4c35bc5..74233d2 100644
--- a/arch/arm64/kvm/hyp/debug-pl011.h
+++ b/arch/arm64/kvm/hyp/debug-pl011.h
@@ -30,7 +30,7 @@
 	tbz		\tmp, #0, 9990f
 	isb
 
-alternative_cb kvm_hyp_debug_uart_set_basep
+alternative_cb ARM64_ALWAYS_SYSTEM, kvm_hyp_debug_uart_set_basep
 	movz		\tmp, #0
 	movk		\tmp, #0, lsl #16
 	movk		\tmp, #0, lsl #32
@@ -183,8 +183,6 @@
 
 	while (i--)
 		__hyp_putx4(x >> (4 * i));
-
-	hyp_putc('\n');
 }
 
 static inline void hyp_putx32(unsigned int x)
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index f3aa773..9b9ad86 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -44,7 +44,8 @@
 alternative_else_nop_endif
 	mrs	x1, isr_el1
 	cbz	x1,  1f
-	mov	x0, #ARM_EXCEPTION_IRQ
+	// mov	x0, #ARM_EXCEPTION_IRQ
+	mov	x0, #42		// debug marker; switch.c logs it and restores ARM_EXCEPTION_IRQ
 	ret
 
 1:
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 8f3f93f..69acc4a 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -14,6 +14,10 @@
 #include <asm/kvm_asm.h>
 #include <asm/mmu.h>
 #include <asm/spectre.h>
+#ifdef __KVM_NVHE_HYPERVISOR__
+#include <asm/kvm_mmu.h>
+#include "debug-pl011.h"
+#endif
 
 .macro save_caller_saved_regs_vect
 	/* x0 and x1 were saved in the vector entry */
@@ -104,6 +108,15 @@
 	eret
 
 1:
+#ifdef __KVM_NVHE_HYPERVISOR__
+	// Can't use hyp_puts here; only two registers are usable at this point.
+	mov w0, 'I'
+	hyp_putc w0, 1
+	mov w0, 'L'
+	hyp_putc w0, 1
+	mov w0, '\n'
+	hyp_putc w0, 1
+#endif
 	/* Let's attempt a recovery from the illegal exception return */
 	get_vcpu_ptr	x1, x0
 	mov	x0, #ARM_EXCEPTION_IL
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index ea4489e..08d3718 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -746,6 +746,7 @@
 	if (unlikely(hyp_vcpu)) {
 		flush_hyp_vcpu(hyp_vcpu);
 
+		hyp_puts("Running __kvm_vcpu_run\n");
 		ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);
 
 		sync_hyp_vcpu(hyp_vcpu, ret);
@@ -794,11 +795,15 @@
 	if (!hyp_vcpu)
 		goto out;
 
+	hyp_puts("Map guest: before memcache\n");
+
 	/* Top-up our per-vcpu memcache from the host's */
 	ret = pkvm_refill_memcache(hyp_vcpu);
 	if (ret)
 		goto out;
 
+	hyp_puts("Map guest: after memcache top-up\n");
+
 	if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
 		ret = __pkvm_host_donate_guest(pfn, gfn, hyp_vcpu);
 	else
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 2eafde2..fd07665 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -4,6 +4,7 @@
  * Author: Fuad Tabba <tabba@google.com>
  */
 
+#include <asm/memory.h>
 #include <linux/kvm_host.h>
 #include <linux/mm.h>
 
@@ -16,6 +17,7 @@
 #include <nvhe/memory.h>
 #include <nvhe/pkvm.h>
 #include <nvhe/trap_handler.h>
+#include "../debug-pl011.h"
 
 /* Used by icache_is_vpipt(). */
 unsigned long __icache_flags;
@@ -652,11 +654,20 @@
 	void *pgd = NULL;
 	int ret;
 
+	hyp_puts("Entered __pkvm_init_vm\n");
+
 	ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
 	if (ret)
 		return ret;
 
 	nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
+	hyp_puts("Got ");
+	hyp_putx32(nr_vcpus);
+	hyp_puts(" vcpus at ");
+	hyp_putx64(hyp_virt_to_phys(host_kvm));
+	hyp_puts(", offset ");
+	hyp_putx64((void *)&host_kvm->created_vcpus - (void *)host_kvm);
+	hyp_putc('\n');
 	if (nr_vcpus < 1) {
 		ret = -EINVAL;
 		goto err_unpin_kvm;
@@ -680,18 +691,27 @@
 	if (!pgd)
 		goto err_remove_mappings;
 
+	hyp_puts("Memory donated\n");
+
 	init_pkvm_hyp_vm(host_kvm, hyp_vm, last_ran, nr_vcpus);
 
+	hyp_puts("VM initialized\n");
+
 	hyp_spin_lock(&vm_table_lock);
 	ret = insert_vm_table_entry(host_kvm, hyp_vm);
 	if (ret < 0)
 		goto err_unlock;
 
+	hyp_puts("VM inserted\n");
+
 	ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
 	if (ret)
 		goto err_remove_vm_table_entry;
 	hyp_spin_unlock(&vm_table_lock);
 
+	hyp_puts("VM page table set up\n");
+	hyp_puts("VM initialization complete\n");
+
 	return hyp_vm->kvm.arch.pkvm.handle;
 
 err_remove_vm_table_entry:
@@ -726,6 +746,8 @@
 	unsigned int idx;
 	int ret;
 
+	hyp_puts("Entered __pkvm_init_vcpu\n");
+
 	hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
 	if (!hyp_vcpu)
 		return -ENOMEM;
@@ -744,6 +766,8 @@
 		goto unlock;
 	}
 
+	hyp_puts("Got VM lock\n");
+
 	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
 	if (ret)
 		goto unlock;
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 221f34d..a3daf689b 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -29,6 +29,8 @@
 #include <nvhe/mem_protect.h>
 #include <nvhe/pkvm.h>
 
+#include "../debug-pl011.h"
+
 /* Non-VHE specific context */
 DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
 DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
@@ -234,6 +236,7 @@
 		vcpu->arch.target = -1;
 		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
 		*exit_code |= ARM_EXCEPTION_IL;
+		hyp_puts("32-bit protected guest: forbidden\n");
 	}
 }
 
@@ -306,10 +309,20 @@
 
 	__debug_switch_to_guest(vcpu);
 
+	hyp_puts("Jumping in the fire with SPSR: ");
+	hyp_putx64(read_sysreg(spsr_el2));
+	hyp_puts(" and HCR: ");
+	hyp_putx64(read_sysreg(hcr_el2));
+	hyp_putc('\n');
 	do {
 		/* Jump in the fire! */
 		exit_code = __guest_enter(vcpu);
 
+		if (exit_code == 42) {	/* debug marker set in __guest_enter (entry.S) */
+			hyp_puts("Early exit\n");
+			exit_code = ARM_EXCEPTION_IRQ;
+		}
+
 		/* And we're baaack! */
 	} while (fixup_guest_exit(vcpu, &exit_code));
 
@@ -349,6 +362,10 @@
 
 	host_ctxt->__hyp_running_vcpu = NULL;
 
+	hyp_puts("Finished running with code ");
+	hyp_putx32(exit_code);
+	hyp_putc('\n');
+
 	return exit_code;
 }