| // SPDX-License-Identifier: GPL-2.0-only |
| /* |
| * Copyright (C) 2020 Google LLC |
| * Author: Quentin Perret <qperret@google.com> |
| */ |
| |
| #include <asm/kvm_hyp.h> |
| #include <linux/bitops.h> |
| |
| #include <nvhe/gfp.h> |
| |
/*
 * Backing buddy pool for the hypervisor allocator.
 * A single statically-allocated instance; all accesses to @pool are
 * serialized by @lock (see hyp_alloc()/hyp_free()).
 */
static struct hyp_alloc_pool {
	struct hyp_pool pool;
	hyp_spinlock_t lock;
} hyp_alloc_pool;
| |
| #define PAGE_BM_BITS (HYP_ALLOC_VA_SIZE / PAGE_SIZE) |
| #define PAGE_BM_IDX(va) (((unsigned long)(va) - alloc_va_start) / PAGE_SIZE) |
| |
| #define HYP_ALLOC_GRAN 64 |
| |
| static unsigned long page_bm[BITS_TO_LONGS(PAGE_BM_BITS)]; |
| static unsigned long hyp_alloc_va_start; |
| |
/*
 * Header placed at the start of every buddy block managed by this
 * allocator. The payload returned to callers starts at @data.
 */
struct hyp_alloc_blk {
	union {
		struct {
			bool is_busy;		/* true while allocated to a caller */
			struct hyp_blk blk;	/* buddy-allocator bookkeeping */
		};
		u64 _pad;	/* NOTE(review): forces at least u64 size/alignment;
				 * only fully pads the header if the anonymous
				 * struct fits in 8 bytes — confirm sizeof. */
	};
	union {
		struct list_head node;	/* free blk: linkage on a free list */
		u8 data[];		/* used blk: caller-visible payload */
	};
};

/* Byte size of the block header == offset of the payload. */
#define HYP_BLK_HDR_SZ offsetof(struct hyp_alloc_blk, data)
| |
| static bool hyp_alloc_blk_is_busy(struct hyp_blk *blk) |
| { |
| if (!test_bit(PAGE_BM_IDX(blk), page_bm)) |
| return true; |
| return container_of(blk, struct hyp_alloc_blk, blk)->is_busy; |
| } |
| |
| static struct hyp_blk *hyp_alloc_blk_from_va(void *addr) |
| { |
| return &container_of(addr, struct hyp_alloc_blk, data)->blk; |
| } |
| |
| static void *hyp_alloc_blk_to_va(struct hyp_blk *blk) |
| { |
| return &container_of(blk, struct hyp_alloc_blk, blk)->data; |
| } |
| |
| static struct list_head *hyp_alloc_blk_to_list(struct hyp_blk *blk) |
| { |
| return &container_of(blk, struct hyp_alloc_blk, blk)->node; |
| } |
| |
| static struct hyp_blk *hyp_alloc_blk_from_list(struct list_head *node) |
| { |
| return &container_of(node, struct hyp_alloc_blk, node)->blk |
| } |
| |
| void *hyp_alloc(unsigned long sz) |
| { |
| u8 order = fls64((sz + HYP_BLK_HDR_SZ - 1) / HYP_ALLOC_GRAN); |
| struct hyp_alloc_blk *hyp_blk; |
| |
| hyp_spin_lock(&hyp_alloc_pool.lock); |
| hyp_blk = __hyp_detach_blk(&hyp_alloc_pool.pool, order); |
| if (hyp_blk) |
| hyp_blk->busy = true; |
| hyp_spin_unlock(&hyp_alloc_pool.lock); |
| |
| return &hyp_blk->data; |
| } |
| |
| void hyp_free(void *va) |
| { |
| struct hyp_alloc_blk *hyp_blk = container_of(va, struct hyp_alloc_blk, data); |
| |
| memset(va, 0, HYP_ALLOC_GRAN << (hyp_blk->blk.order - HYP_BLK_HDR_SZ)); |
| hyp_spin_lock(&hyp_alloc_pool.lock); |
| hyp_blk->is_busy = true; |
| __hyp_attach_blk(&hyp_alloc_pool.pool, hyp_blk); |
| hyp_spin_unlock(&hyp_alloc_pool.lock); |
| } |
| |
| static void init_buddy_pool(struct hyp_pool *pool) |
| { |
| pool->blk_ops.to_va = hyp_alloc_blk_to_va; |
| pool->blk_ops.from_va = hyp_alloc_blk_from_va; |
| pool->blk_ops.to_list = hyp_alloc_blk_to_list; |
| pool->blk_ops.from_list = hyp_alloc_blk_from_list; |
| pool->blk_ops.is_busy = hyp_alloc_blk_is_busy; |
| pool->alloc_gran = HYP_ALLOC_GRAN; |
| pool->max_order = MAX_PAGE_ORDER; |
| pool->va_start = hyp_alloc_va_start; |
| pool->va_end = hyp_alloc_va_start + HYP_ALLOC_VA_SIZE; |
| for (int i = 0; i <= pool->max_order; i++) |
| INIT_LIST_HEAD(&pool->free_area[i]); |
| } |
| |
| int hyp_alloc_init(void) |
| { |
| BUILD_BUG_ON(sizeof(struct hyp_alloc_blk) > HYP_ALLOC_GRAN); |
| |
| init_buddy_pool(&hyp_alloc_pool.pool); |
| |
| return 0; |
| } |