wip: add allocator
diff --git a/arch/arm64/kvm/hyp/include/nvhe/alloc.h b/arch/arm64/kvm/hyp/include/nvhe/alloc.h
new file mode 100644
index 0000000..2877a37
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/alloc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __KVM_HYP_ALLOC_H
+#define __KVM_HYP_ALLOC_H
+
+#include <linux/list.h>
+
+#include <nvhe/buddy.h>
+#include <nvhe/memory.h>
+#include <nvhe/spinlock.h>
+
+#define HYP_ALLOC_VA_SIZE SZ_128M /* VA span reserved for the hyp_alloc() heap */
+
+void *hyp_alloc(size_t sz); /* allocate at least @sz bytes, NULL on failure */
+void hyp_free(void *); /* return a hyp_alloc() pointer to the pool */
+int hyp_alloc_init(void);
+
+#endif /* __KVM_HYP_ALLOC_H */
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 72bdf5f..ce18327 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -26,7 +26,7 @@
hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
hyp-main.o hyp-smp.o psci-relay.o early_alloc.o page_alloc.o buddy.o \
- cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o ffa.o
+ cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o ffa.o alloc.o
hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
hyp-obj-$(CONFIG_LIST_HARDENED) += list_debug.o
diff --git a/arch/arm64/kvm/hyp/nvhe/alloc.c b/arch/arm64/kvm/hyp/nvhe/alloc.c
new file mode 100644
index 0000000..c39812e
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/alloc.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Google LLC
+ * Author: Quentin Perret <qperret@google.com>
+ */
+
+#include <asm/kvm_hyp.h>
+#include <linux/bitops.h>
+
+#include <nvhe/alloc.h>
+#include <nvhe/gfp.h>
+
+/* Heap state: one buddy pool, serialized by a single spinlock. */
+static struct hyp_alloc_pool {
+	struct hyp_pool pool;
+	hyp_spinlock_t lock;
+} hyp_alloc_pool;
+
+/* One bit per page of heap VA; presumably set once the page backs the heap
+ * (see hyp_alloc_blk_is_busy()) — confirm against the mapping path. */
+#define PAGE_BM_BITS (HYP_ALLOC_VA_SIZE / PAGE_SIZE)
+#define PAGE_BM_IDX(va) (((unsigned long)(va) - hyp_alloc_va_start) / PAGE_SIZE)
+
+/* Minimum allocation granule; block capacity is HYP_ALLOC_GRAN << order. */
+#define HYP_ALLOC_GRAN 64
+
+static unsigned long page_bm[BITS_TO_LONGS(PAGE_BM_BITS)];
+static unsigned long hyp_alloc_va_start;
+
+struct hyp_alloc_blk {
+	union {
+		struct {
+			bool is_busy;	/* allocated (true) vs sitting on a free list */
+			struct hyp_blk blk;	/* buddy bookkeeping (order, etc.) */
+		};
+		u64 _pad;	/* NOTE(review): rounds the header to 8 bytes — confirm {is_busy, blk} actually fits in a u64 */
+	};
+	union {
+		struct list_head node; /* free blk */
+		u8 data[]; /* used blk */
+	};
+};
+
+#define HYP_BLK_HDR_SZ offsetof(struct hyp_alloc_blk, data) /* bytes of capacity consumed by the header */
+
+/*
+ * Buddy callback: report whether @blk may be merged/reused by the pool.
+ * NOTE(review): a cleared page_bm bit is treated as "busy" — presumably that
+ * means the page does not (yet) back the heap, so the buddy code must not
+ * touch it; confirm the bit polarity against wherever page_bm is populated.
+ */
+static bool hyp_alloc_blk_is_busy(struct hyp_blk *blk)
+{
+	if (!test_bit(PAGE_BM_IDX(blk), page_bm))
+		return true;
+	return container_of(blk, struct hyp_alloc_blk, blk)->is_busy;
+}
+
+/* Buddy callback: map a payload pointer back to its blk descriptor. */
+static struct hyp_blk *hyp_alloc_blk_from_va(void *addr)
+{
+	struct hyp_alloc_blk *ablk = container_of(addr, struct hyp_alloc_blk, data);
+
+	return &ablk->blk;
+}
+
+/* Buddy callback: map a blk descriptor to the payload handed to callers. */
+static void *hyp_alloc_blk_to_va(struct hyp_blk *blk)
+{
+	struct hyp_alloc_blk *ablk = container_of(blk, struct hyp_alloc_blk, blk);
+
+	return &ablk->data;
+}
+
+/* Buddy callback: free-list node embedded in a (free) blk. */
+static struct list_head *hyp_alloc_blk_to_list(struct hyp_blk *blk)
+{
+	struct hyp_alloc_blk *ablk = container_of(blk, struct hyp_alloc_blk, blk);
+
+	return &ablk->node;
+}
+
+/* Buddy callback: recover the blk descriptor from its free-list node. */
+static struct hyp_blk *hyp_alloc_blk_from_list(struct list_head *node)
+{
+	/* Fix: original was missing the terminating semicolon. */
+	return &container_of(node, struct hyp_alloc_blk, node)->blk;
+}
+
+/*
+ * hyp_alloc() - allocate at least @sz bytes from the hyp heap.
+ *
+ * Returns a pointer to the payload, or NULL if no block of a sufficient
+ * order is available. Takes @sz as size_t to match the alloc.h prototype.
+ */
+void *hyp_alloc(size_t sz)
+{
+	/* Smallest order whose capacity (GRAN << order) covers sz + header. */
+	u8 order = fls64((sz + HYP_BLK_HDR_SZ - 1) / HYP_ALLOC_GRAN);
+	struct hyp_alloc_blk *hyp_blk;
+
+	hyp_spin_lock(&hyp_alloc_pool.lock);
+	hyp_blk = __hyp_detach_blk(&hyp_alloc_pool.pool, order);
+	if (hyp_blk)
+		hyp_blk->is_busy = true;	/* field is is_busy, not busy */
+	hyp_spin_unlock(&hyp_alloc_pool.lock);
+
+	/* Don't compute &hyp_blk->data on NULL: that returned a bogus
+	 * non-NULL pointer on allocation failure. */
+	return hyp_blk ? (void *)hyp_blk->data : NULL;
+}
+
+/*
+ * hyp_free() - return a pointer obtained from hyp_alloc() to the pool.
+ * @va: payload pointer previously returned by hyp_alloc(); must not be NULL.
+ */
+void hyp_free(void *va)
+{
+	struct hyp_alloc_blk *hyp_blk = container_of(va, struct hyp_alloc_blk, data);
+
+	/* Scrub the payload: block capacity minus the header. The original
+	 * shifted by (order - HYP_BLK_HDR_SZ), which mis-sizes the memset
+	 * (and is UB for order < HYP_BLK_HDR_SZ). */
+	memset(va, 0, (HYP_ALLOC_GRAN << hyp_blk->blk.order) - HYP_BLK_HDR_SZ);
+	hyp_spin_lock(&hyp_alloc_pool.lock);
+	/* Mark free (not busy!) before attaching so the buddy logic may
+	 * coalesce this block with its buddy. */
+	hyp_blk->is_busy = false;
+	__hyp_attach_blk(&hyp_alloc_pool.pool, hyp_blk);
+	hyp_spin_unlock(&hyp_alloc_pool.lock);
+}
+
+/* Wire the generic buddy pool to this allocator's blk callbacks. */
+static void init_buddy_pool(struct hyp_pool *pool)
+{
+	pool->blk_ops.to_va = hyp_alloc_blk_to_va;
+	pool->blk_ops.from_va = hyp_alloc_blk_from_va;
+	pool->blk_ops.to_list = hyp_alloc_blk_to_list;
+	pool->blk_ops.from_list = hyp_alloc_blk_from_list;
+	pool->blk_ops.is_busy = hyp_alloc_blk_is_busy;
+	pool->alloc_gran = HYP_ALLOC_GRAN;
+	/* NOTE(review): MAX_PAGE_ORDER is the page allocator's cap — confirm
+	 * it is the intended max order for a 64-byte-granule pool. */
+	pool->max_order = MAX_PAGE_ORDER;
+	/* NOTE(review): hyp_alloc_va_start is still 0 at this point (nothing
+	 * in this patch assigns it) — verify the intended init ordering. */
+	pool->va_start = hyp_alloc_va_start;
+	pool->va_end = hyp_alloc_va_start + HYP_ALLOC_VA_SIZE;
+	for (int i = 0; i <= pool->max_order; i++)
+		INIT_LIST_HEAD(&pool->free_area[i]);
+}
+
+/* One-time setup of the hyp heap; returns 0 (no failure paths yet). */
+int hyp_alloc_init(void)
+{
+	/* The block header must fit inside the minimum granule. */
+	BUILD_BUG_ON(sizeof(struct hyp_alloc_blk) > HYP_ALLOC_GRAN);
+
+	/* Fix: the lock was never initialized before first use. */
+	hyp_spin_lock_init(&hyp_alloc_pool.lock);
+	init_buddy_pool(&hyp_alloc_pool.pool);
+
+	/* TODO(wip): hyp_alloc_va_start is never assigned and no pages are
+	 * mapped into the heap VA range yet. */
+	return 0;
+}