[Debug] More printing for kcov large buffers

Add temporary hyp_puts()/hyp_putx64() diagnostics around the share
checks in check_share(), the pin in __hyp_pin_shared_mem_locked() and
the non-contiguous kcov buffer mapping path, and comment out the
per-hypercall and per-trace prints, to help track down share/pin
failures when initialising large kcov buffers.

Change-Id: Icef11b55701e0eabdb18954010c09f025116f110
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 952a121..ea550a4 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -19,6 +19,8 @@
 #include <nvhe/mem_protect.h>
 #include <nvhe/mm.h>
 
+#include "../debug-pl011.h"
+
 #define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)
 
 struct host_mmu host_mmu;
@@ -1125,6 +1127,11 @@ static int check_share(struct pkvm_mem_share *share)
 	switch (tx->initiator.id) {
 	case PKVM_ID_HOST:
 		ret = host_request_owned_transition(&completer_addr, tx);
+		if (ret < 0) {
+			hyp_puts("host share request denied, phys: ");
+			hyp_putx64(tx->initiator.addr);
+			hyp_putc('\n');
+		}
 		break;
 	case PKVM_ID_GUEST:
 		ret = guest_request_share(&completer_addr, tx);
@@ -1142,6 +1149,11 @@ static int check_share(struct pkvm_mem_share *share)
 		break;
 	case PKVM_ID_HYP:
 		ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
+		if (ret < 0) {
+			hyp_puts("hyp share ack denied, phys: ");
+			hyp_putx64(tx->initiator.addr);
+			hyp_putc('\n');
+		}
 		break;
 	case PKVM_ID_GUEST:
 		ret = guest_ack_share(completer_addr, tx, share->completer_prot);
@@ -1613,13 +1625,21 @@ int __hyp_pin_shared_mem_locked(void *from, void *to)
 
 	ret = __host_check_page_state_range(__hyp_pa(start), size,
 					    PKVM_PAGE_SHARED_OWNED);
-	if (ret)
+	if (ret) {
+		hyp_puts("hyp pin, not shared by host, addr: ");
+		hyp_putx64((u64)from);
+		hyp_putc('\n');
 		return ret;
+	}
 
 	ret = __hyp_check_page_state_range(start, size,
 					   PKVM_PAGE_SHARED_BORROWED);
-	if (ret)
+	if (ret) {
+		hyp_puts("hyp pin, not shared to hyp, addr: ");
+		hyp_putx64((u64)from);
+		hyp_putc('\n');
 		return ret;
+	}
 
 	for (cur = start; cur < end; cur += PAGE_SIZE)
 		hyp_page_ref_inc(hyp_virt_to_page(cur));
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm_kcov.c b/arch/arm64/kvm/hyp/nvhe/pkvm_kcov.c
index 97b152b..b0b1cee 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm_kcov.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm_kcov.c
@@ -5,6 +5,7 @@
  * Author: Thibaut Perami <thibautp@google.com>
  */
 
+#include <linux/math.h>
 #include <linux/kvm_host.h>
 #include <linux/mm.h>
 
@@ -63,22 +64,22 @@ void pkvm_kcov_enter_from_host(void)
 	 */
 	u64 ucurrent = (u64)current;
 
-	hyp_puts("Entering EL2 from ");
-	hyp_putx64(ucurrent);
-	hyp_putc('\n');
+	/* hyp_puts("Entering EL2 from "); */
+	/* hyp_putx64(ucurrent); */
+	/* hyp_putc('\n'); */
 
 	for (int i = 0; i < CONFIG_KVM_ARM_HYP_KCOV_NUM_BUFFERS; i++) {
 		if (kcov_buffers[i].current_id == ucurrent) {
-			hyp_puts("Enabling tracing on buffer ");
-			hyp_putx32(i);
-			hyp_puts(" at address ");
-			hyp_putx64(&kcov_buffers[i]);
-			hyp_putc('\n');
+			/* hyp_puts("Enabling tracing on buffer "); */
+			/* hyp_putx32(i); */
+			/* hyp_puts(" at address "); */
+			/* hyp_putx64(&kcov_buffers[i]); */
+			/* hyp_putc('\n'); */
 			if (WARN_ON(kcov_buffers[i].area == NULL))
 				continue;
 			*SHIFT_PERCPU_PTR(&kcov_active_buffer, cpu_offset) =
 				&kcov_buffers[i];
-			hyp_puts("Enabled tracing! running the rest of hypercall\n");
+			/* hyp_puts("Enabled tracing! running the rest of hypercall\n"); */
 			return;
 		}
 	}
@@ -88,7 +89,7 @@ void pkvm_kcov_exit_to_host(void)
 {
 	u64 cpu_offset = read_sysreg(tpidr_el2);
 	*SHIFT_PERCPU_PTR(&kcov_active_buffer, cpu_offset) = NULL;
-	hyp_puts("Exiting EL2, disabling tracing\n");
+	/* hyp_puts("Exiting EL2, disabling tracing\n"); */
 }
 
 /*
@@ -124,9 +125,9 @@ void __sanitizer_cov_trace_pc(void)
 		hyp_puts("Tracing broken!!\n");
 		return;
 	}
-	hyp_puts("Tracing on buffer ");
-	hyp_putx64(buf);
-	hyp_puts("\n");
+	/* hyp_puts("Tracing on buffer "); */
+	/* hyp_putx64(buf); */
+	/* hyp_puts("\n"); */
 
 	/* There is no concurrent access possible, because only one CPU can have
 	 * current active at a time and hypervisor code is not interruptible
@@ -139,7 +140,7 @@ void __sanitizer_cov_trace_pc(void)
 	}
 	buf->area[0] = pos;
 	buf->area[pos] = canonicalize_ip(_RET_IP_);
-	hyp_puts("Tracing Done!\n");
+	/* hyp_puts("Tracing Done!\n"); */
 }
 
 static int find_free_buffer_slot(void)
@@ -172,8 +173,16 @@ u64 __pkvm_kcov_init_buffer(phys_addr_t pfns_list, uint size)
 	int index;
 	int ret;
 
-	hyp_puts("Received pfn list at ");
+	hyp_puts("Received pfn list phys: ");
 	hyp_putx64(pfns_list);
+	hyp_puts(" virt: ");
+	hyp_putx64((u64)pfns);
+	hyp_puts(" size: ");
+	hyp_putx32(size);
+	hyp_puts(" pfn_size: ");
+	hyp_putx32(pfns_size);
+	hyp_puts(" pfn_nr_pages: ");
+	hyp_putx32(pfns_nr_pages);
 	hyp_putc('\n');
 
 
@@ -194,8 +203,10 @@ u64 __pkvm_kcov_init_buffer(phys_addr_t pfns_list, uint size)
 
 	// Step 1: Donate pfns_array to hyp temporarily
 	ret = __pkvm_host_donate_hyp(hyp_phys_to_pfn(pfns_list), pfns_nr_pages);
-	if (ret)
+	if (ret) {
+		hyp_puts("Denied donation of pfn list\n");
 		return ret;
+	}
 
 	// Step 2: Allocate the private VA range (irreversible)
 	ret = pkvm_alloc_private_va_range(size * sizeof(u64), &vaddr);
@@ -205,12 +216,24 @@ u64 __pkvm_kcov_init_buffer(phys_addr_t pfns_list, uint size)
 	// Step 3: Share and map the buffer.
 	hyp_spin_lock(&host_mmu.lock);
 	hyp_spin_lock(&pkvm_pgd_lock);
+
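+	/* Debug: dump the host stage-2 leaf PTE of the first page before mapping */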
+	kvm_pte_t pte;
+	kvm_pgtable_get_leaf(&host_mmu.pgt, hyp_pfn_to_phys(pfns[0]), &pte, NULL);
+
+	hyp_puts("pte of ");
+	hyp_putx64(pfns[0]);
+	hyp_puts(" is ");
+	hyp_putx64(pte);
+	hyp_putc('\n');
+
 	ret = kvm_pgtable_hyp_map_share_noncontig(&pkvm_pgtable, vaddr,
 						  size * sizeof(u64), pfns,
 						  &pfn_processed,
 						  PAGE_HYP);
-	if (ret)
+	if (ret) {
+		hyp_puts("Denied map share\n");
 		goto unmap_buffer;
+	}
 	hyp_spin_unlock(&pkvm_pgd_lock);
 	hyp_spin_unlock(&host_mmu.lock);
 
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 59096a7..4daaf19 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -9,6 +9,7 @@
 
 #ifdef __KVM_NVHE_HYPERVISOR__
 #include <nvhe/mem_protect.h>
+#include "debug-pl011.h"
 #endif
 #include <linux/bitfield.h>
 #include <asm/kvm_pgtable.h>
@@ -558,6 +559,7 @@ static int hyp_map_share_noncontig_walker(const struct kvm_pgtable_visit_ctx *ct
 		childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
 		if (!childp)
 			return -ENOMEM;
+		hyp_puts("Creating table\n");
 
 		new = kvm_init_table_pte(childp, mm_ops);
 		mm_ops->get_page(ctx->ptep);
@@ -570,18 +572,37 @@ static int hyp_map_share_noncontig_walker(const struct kvm_pgtable_visit_ctx *ct
 
 	pfn = data->pfns[data->current_pfn];
 	page_lm = hyp_pfn_to_virt(pfn);
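+	/* Debug: trace each page as it is shared from the host, pinned and mapped */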
+	hyp_puts("NC walker phys: ");
+	hyp_putx64(hyp_pfn_to_phys(pfn));
+	hyp_puts(" pfn: ");
+	hyp_putx64(pfn);
+	hyp_puts(" virt: ");
+	hyp_putx64((u64)page_lm);
+	hyp_putc('\n');
 	ret = __pkvm_host_share_hyp_locked(pfn);
-	if (ret)
+	if (ret) {
+		hyp_puts("Denied share\n");
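+		/* Debug: dump the host stage-2 leaf PTE of the page whose share was refused */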
+		kvm_pte_t pte;
+		kvm_pgtable_get_leaf(&host_mmu.pgt, hyp_pfn_to_phys(pfn),
+				     &pte, NULL);
+
+		hyp_puts("host pte is ");
+		hyp_putx64(pte);
+		hyp_putc('\n');
 		return ret;
+	}
 
 	ret = __hyp_pin_shared_mem_locked(page_lm, page_lm + PAGE_SIZE);
 	if (ret) {
+		hyp_puts("Denied pin\n");
 		WARN_ON(__pkvm_host_unshare_hyp_locked(pfn));
 		return ret;
 	}
 
 	data->map_data.phys = hyp_pfn_to_phys(pfn);
 	if(!hyp_map_walker_try_leaf(ctx, &data->map_data)){
+		hyp_puts("Denied leaf map\n");
 		__hyp_unpin_shared_mem_locked(page_lm, page_lm + PAGE_SIZE);
 		WARN_ON(__pkvm_host_unshare_hyp_locked(pfn));
 		return -EINVAL;
@@ -606,9 +627,21 @@ int kvm_pgtable_hyp_map_share_noncontig(struct kvm_pgtable *pgt, u64 addr,
 		.arg = &map_data,
 	};
 
+	hyp_puts("NC Map addr: ");
+	hyp_putx64(addr);
+	hyp_puts(" size: ");
+	hyp_putx64(size);
+	hyp_puts(" pfns: ");
+	hyp_putx64((u64)pfns);
+	hyp_puts(" pfns_size: ");
+	hyp_putx64(size / PAGE_SIZE);
+	hyp_putc('\n');
+
 	ret = hyp_set_prot_attr(prot, &map_data.map_data.attr);
-	if (ret)
+	if (ret) {
+		hyp_puts("Denied prot\n");
 		return ret;
+	}
 
 	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
 	dsb(ishst);