[DONOTMERGE] KVM: arm64: Add a debugfs dump of the hyp allocator

Add a __pkvm_dump_hyp_allocator hypercall and a debugfs file,
/sys/kernel/debug/dump_hyp_allocator, for inspecting the state of the
EL2 hyp_alloc allocator from the host.

On open, the host donates a page to the hypervisor, which fills it with
one fixed-size record per allocator chunk (chunk address, allocation
start and size, mapped and unmapped sizes, hash) before donating the
page back. The kernel then renders the records as an ASCII-art map of
each chunk, from the highest VA down to the chunk header.
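
Illustrative output (addresses and region sizes below are made up):

  +==============================+ 0x00049000
  |                              |
  |           unmapped           |
  |                              |
  +------------------------------+ 0x00046000
  |            mapped            |
  +------------------------------+ 0x00045800
  |             alloc            |
  +------------------------------+ 0x00044040
  |         chunk header         |
  +==============================+ 0x00044000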

Change-Id: Ica69559e8b1751fd9d6b7468beef197d9c358009
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 73ade8f..db354a0 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -115,6 +115,7 @@ enum __kvm_host_smccc_func {
 	__KVM_HOST_SMCCC_FUNC___pkvm_init_module,
 	__KVM_HOST_SMCCC_FUNC___pkvm_register_hcall,
 	__KVM_HOST_SMCCC_FUNC___pkvm_iommu_init,
+	__KVM_HOST_SMCCC_FUNC___pkvm_dump_hyp_allocator,
 
 	/*
 	 * Start of the dynamically registered hypercalls. Start a bit
diff --git a/arch/arm64/kvm/hyp/include/nvhe/alloc.h b/arch/arm64/kvm/hyp/include/nvhe/alloc.h
index af60e3c..59ec55f 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/alloc.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/alloc.h
@@ -66,6 +66,7 @@ int hyp_alloc_refill(struct kvm_hyp_memcache *host_mc);
 int hyp_alloc_reclaimable(void);
 void hyp_alloc_reclaim(struct kvm_hyp_memcache *host_mc, int target);
 u8 hyp_alloc_missing_donations(void);
+void dump_hyp_allocator(unsigned long host_va);
 
 extern struct hyp_mgt_allocator_ops hyp_alloc_ops;
 #endif
diff --git a/arch/arm64/kvm/hyp/nvhe/alloc.c b/arch/arm64/kvm/hyp/nvhe/alloc.c
index 10eb60f..5c520ae 100644
--- a/arch/arm64/kvm/hyp/nvhe/alloc.c
+++ b/arch/arm64/kvm/hyp/nvhe/alloc.c
@@ -846,3 +846,50 @@ struct hyp_mgt_allocator_ops hyp_alloc_ops = {
 	.reclaim = hyp_alloc_reclaim,
 	.reclaimable = hyp_alloc_reclaimable,
 };
+
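+/* Mirrors the struct decoded by the debugfs reader in arch/arm64/kvm/pkvm.c */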
+struct hyp_allocator_chunk_dump {
+	unsigned long	addr;
+	unsigned long   alloc_start;
+	size_t		alloc_size;
+	size_t		unmapped_size;
+	size_t		mapped_size;
+	u32		hash;
+};
+
+void dump_hyp_allocator(unsigned long host_va)
+{
+	struct hyp_allocator *allocator = &hyp_allocator;
+	struct hyp_allocator_chunk_dump *chunk_dump;
+	void *share = (void *)kern_hyp_va(host_va);
+	struct chunk_hdr *chunk;
+
+	if (WARN_ON(__pkvm_host_donate_hyp(hyp_virt_to_pfn(share), 1)))
+		return;
+
+	chunk_dump = (struct hyp_allocator_chunk_dump *)share;
+
+	hyp_spin_lock(&allocator->lock);
+
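+	/* Emit one fixed-size record per chunk into the shared page */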
+	list_for_each_entry(chunk, &allocator->chunks, node) {
+		/* Leave room for the terminating record at the end of the page */
+		if ((void *)(chunk_dump + 2) > share + PAGE_SIZE)
+			break;
+
+		chunk_dump->addr = (unsigned long)chunk;
+		chunk_dump->alloc_start = (unsigned long)chunk_data(chunk);
+		chunk_dump->alloc_size = chunk->alloc_size;
+		chunk_dump->unmapped_size = chunk_unmapped_size(chunk, allocator);
+		chunk_dump->mapped_size = chunk->mapped_size;
+		chunk_dump->hash = chunk->hash;
+		chunk_dump++;
+	}
+
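+	/* A record with a zero addr terminates the array */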
+	chunk_dump->addr = 0;
+
+	hyp_spin_unlock(&allocator->lock);
+
+	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(share), 1));
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index ba43bea..4284a8a 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -1620,6 +1620,15 @@ static void handle___pkvm_dump_stage2(struct kvm_cpu_context *host_ctxt)
 		__pkvm_dump_host_stage2();
 }
 
+static void handle___pkvm_dump_hyp_allocator(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(u64, hva, host_ctxt, 1);
+
+	dump_hyp_allocator(hva);
+
+	cpu_reg(host_ctxt, 1) = 0;
+}
+
 typedef void (*hcall_t)(struct kvm_cpu_context *);
 
 #define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -1686,6 +1695,7 @@ static const hcall_t host_hcall[] = {
 	HANDLE_FUNC(__pkvm_init_module),
 	HANDLE_FUNC(__pkvm_register_hcall),
 	HANDLE_FUNC(__pkvm_iommu_init),
+	HANDLE_FUNC(__pkvm_dump_hyp_allocator),
 };
 
 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index f80f2b0..b3f2b9d 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -1102,3 +1102,163 @@ static int __init pkvm_host_stage2_dump(void)
 	return 0;
 }
 late_initcall(pkvm_host_stage2_dump);
+
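+/* Mirrors the struct filled in at EL2 in arch/arm64/kvm/hyp/nvhe/alloc.c */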
+struct hyp_allocator_chunk_dump {
+	unsigned long	addr;
+	unsigned long	alloc_start;
+	size_t		alloc_size;
+	size_t		unmapped_size;
+	size_t		mapped_size;
+	u32		hash;
+};
+
+/* Each output line stands for BYTES_PER_LINE of VA space, clamped to 8 lines */
+#define BYTES_PER_LINE PAGE_SIZE
+#define LINE_WIDTH 33
+
+/* Toggle between the ASCII-art map and the one-line-per-chunk dump below */
+#define DUMP_HYP_ALLOCATOR_ASCII_ART 1
+#if DUMP_HYP_ALLOCATOR_ASCII_ART
+static void __dump_region(struct seq_file *m, const char *name, size_t size, unsigned long va,
+			  bool end_chunk)
+{
+	int i, j, nr_lines = size / BYTES_PER_LINE;
+
+	/* TODO: check for a non-decreasing va */
+
+	if (!size)
+		return;
+
+	if (!nr_lines)
+		nr_lines = 1;
+
+	if (nr_lines > 8)
+		nr_lines = 8;
+
+	for (i = 0; i < nr_lines; i++) {
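+		/* Centre the region name on the middle line of the box */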
+		if (i == nr_lines / 2) {
+			int name_len = strlen(name);
+			int start = (LINE_WIDTH - 2 - name_len) / 2;
+
+			seq_putc(m, '|');
+			for (j = 0; j < start; j++)
+				seq_putc(m, ' ');
+
+			seq_puts(m, name);
+			for (j = 0; j < (name_len % 2 ? start - 1 : start); j++)
+				seq_putc(m, ' ');
+			seq_puts(m, "|\n");
+		} else
+			seq_puts(m, "|                              |\n");
+	}
+
+	if (end_chunk)
+		seq_printf(m, "+==============================+ 0x%08lx\n", va);
+	else
+		seq_printf(m, "+------------------------------+ 0x%08lx\n", va);
+}
+
+static int dump_hyp_allocator_show(struct seq_file *m, void *v)
+{
+	struct hyp_allocator_chunk_dump *first_chunk, *chunk;
+	void *page = m->private;
+
+	/* Decode the page */
+	first_chunk = chunk = (struct hyp_allocator_chunk_dump *)page;
+	if (!chunk->addr)
+		return 0;
+
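+	/* Find the last record: the map is rendered from the highest VA down */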
+	while ((chunk + 1)->addr)
+		chunk++;
+
+	seq_printf(m, "+==============================+ 0x%08lx\n",
+		   chunk->addr + chunk->mapped_size + chunk->unmapped_size);
+	while ((unsigned long)chunk >= (unsigned long)first_chunk) {
+		size_t header_size = chunk->alloc_start - chunk->addr;
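+		/* Mapped space not consumed by the chunk header or allocations */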
+		size_t mapped_display_size = chunk->mapped_size - header_size - chunk->alloc_size;
+
+		__dump_region(m, "unmapped", chunk->unmapped_size,
+			      chunk->addr + chunk->mapped_size, false);
+		__dump_region(m, "mapped", mapped_display_size,
+			      chunk->alloc_start + chunk->alloc_size, false);
+		__dump_region(m, "alloc", chunk->alloc_size,
+			      chunk->alloc_start, false);
+		__dump_region(m, "chunk header", header_size, chunk->addr, true);
+		chunk--;
+	}
+
+	return 0;
+}
+#else
+static int dump_hyp_allocator_show(struct seq_file *m, void *v)
+{
+	struct hyp_allocator_chunk_dump *chunk;
+	void *page = m->private;
+
+	chunk = (struct hyp_allocator_chunk_dump *)page;
+
+	while (chunk->addr) {
+		seq_printf(m, "0x%lx: alloc=%zu mapped=%zu unmapped=%zu hash=%x\n",
+			   chunk->addr, chunk->alloc_size, chunk->mapped_size,
+			   chunk->unmapped_size, chunk->hash);
+		chunk++;
+	}
+
+	return 0;
+}
+#endif
+static int dump_hyp_allocator_open(struct inode *inode, struct file *file)
+{
+	struct page *p;
+	void *page;
+	int ret;
+
+	p = alloc_page(GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+	page = page_address(p);
+
+	ret = kvm_call_hyp_nvhe(__pkvm_dump_hyp_allocator, page);
+	if (ret)
+		goto err_free;
+
+	ret = single_open(file, dump_hyp_allocator_show, page);
+	if (ret)
+		goto err_free;
+
+	return 0;
+
+err_free:
+	free_page((unsigned long)page);
+	return ret;
+}
+
+static int dump_hyp_allocator_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *m = file->private_data;
+	void *page = m->private;
+
+	free_page((unsigned long)page);
+
+	return single_release(inode, file);
+}
+
+static const struct file_operations dump_hyp_allocator_debugfs_fops = {
+	.open = dump_hyp_allocator_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = dump_hyp_allocator_release,
+};
+
+static int __init hyp_allocator_debugfs_init(void)
+{
+	debugfs_create_file("dump_hyp_allocator", 0400, NULL, NULL,
+			    &dump_hyp_allocator_debugfs_fops);
+
+	return 0;
+}
+late_initcall(hyp_allocator_debugfs_init);