Thomas Gleixner | 7a33847 | 2019-06-04 10:11:15 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 2 | /* |
| 3 | * tools/testing/selftests/kvm/include/kvm_util.h |
| 4 | * |
| 5 | * Copyright (C) 2018, Google LLC. |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 6 | */ |
| 7 | #ifndef SELFTEST_KVM_UTIL_H |
Andrew Jones | cc68765 | 2018-09-18 19:54:26 +0200 | [diff] [blame] | 8 | #define SELFTEST_KVM_UTIL_H |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 9 | |
| 10 | #include "test_util.h" |
| 11 | |
| 12 | #include "asm/kvm.h" |
| 13 | #include "linux/kvm.h" |
| 14 | #include <sys/ioctl.h> |
| 15 | |
| 16 | #include "sparsebit.h" |
| 17 | |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 18 | |
Andrew Jones | 4259362 | 2020-03-10 10:15:54 +0100 | [diff] [blame] | 19 | /* |
| 20 | * Callers of kvm_util only have an incomplete/opaque description of the |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 21 | * structure kvm_util is using to maintain the state of a VM. |
| 22 | */ |
| 23 | struct kvm_vm; |
| 24 | |
| 25 | typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */ |
| 26 | typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */ |
| 27 | |
| 28 | /* Minimum allocated guest virtual and physical addresses */ |
Andrew Jones | eabe788 | 2018-09-18 19:54:28 +0200 | [diff] [blame] | 29 | #define KVM_UTIL_MIN_VADDR 0x2000 |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 30 | |
| 31 | #define DEFAULT_GUEST_PHY_PAGES 512 |
| 32 | #define DEFAULT_GUEST_STACK_VADDR_MIN 0xab6000 |
Andrew Jones | eabe788 | 2018-09-18 19:54:28 +0200 | [diff] [blame] | 33 | #define DEFAULT_STACK_PGS 5 |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 34 | |
/*
 * Supported guest address-space configurations.  The names encode the
 * configuration as P<physical-address-bits>V<virtual-address-bits>_<page size>,
 * e.g. VM_MODE_P52V48_4K is a 52-bit PA, 48-bit VA guest using 4K pages.
 */
enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXV48_4K,	/* For 48bits VA but ANY bits PA */
	NUM_VM_MODES,		/* Sentinel: count of modes above, not a real mode */
};
| 45 | |
/*
 * Default guest mode for the architecture the tests are built for:
 * arm64 uses a 40-bit PA, x86_64 uses PXXV48 (PA width not fixed at
 * build time), and all other architectures fall back to P52V48_4K.
 */
#if defined(__aarch64__)
#define VM_MODE_DEFAULT VM_MODE_P40V48_4K
#elif defined(__x86_64__)
#define VM_MODE_DEFAULT VM_MODE_PXXV48_4K
#else
#define VM_MODE_DEFAULT VM_MODE_P52V48_4K
#endif
| 53 | |
/*
 * Map an enum vm_guest_mode value @m to its printable name via a
 * lookup table (one entry per mode, defined by the kvm_util library).
 *
 * The argument is parenthesized in the expansion so that any valid
 * expression (e.g. a ternary) can be passed without precedence
 * surprises — standard function-like-macro hygiene.
 */
#define vm_guest_mode_string(m) vm_guest_mode_string[(m)]
extern const char * const vm_guest_mode_string[];
| 56 | |
/* Host backing-store choices for guest physical memory regions. */
enum vm_mem_backing_src_type {
	VM_MEM_SRC_ANONYMOUS,		/* Plain anonymous memory */
	VM_MEM_SRC_ANONYMOUS_THP,	/* Anonymous, transparent hugepages */
	VM_MEM_SRC_ANONYMOUS_HUGETLB,	/* Anonymous, hugetlb-backed */
};
| 62 | |
| 63 | int kvm_check_cap(long cap); |
Drew Schmitt | 8b56ee9 | 2018-08-20 10:32:16 -0700 | [diff] [blame] | 64 | int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap); |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 65 | |
| 66 | struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm); |
Peter Xu | 12c386b | 2019-08-30 09:36:16 +0800 | [diff] [blame] | 67 | struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm); |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 68 | void kvm_vm_free(struct kvm_vm *vmp); |
Paolo Bonzini | fa3899a | 2018-07-26 13:19:23 +0200 | [diff] [blame] | 69 | void kvm_vm_restart(struct kvm_vm *vmp, int perm); |
| 70 | void kvm_vm_release(struct kvm_vm *vmp); |
Peter Xu | 3b4cd0f | 2018-08-22 15:20:00 +0800 | [diff] [blame] | 71 | void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log); |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 72 | void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, |
| 73 | uint64_t first_page, uint32_t num_pages); |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 74 | |
Andrew Jones | eabe788 | 2018-09-18 19:54:28 +0200 | [diff] [blame] | 75 | int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva, |
| 76 | size_t len); |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 77 | |
Paolo Bonzini | 6089ae0 | 2018-03-28 09:45:34 +0200 | [diff] [blame] | 78 | void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename, |
Andrew Jones | eabe788 | 2018-09-18 19:54:28 +0200 | [diff] [blame] | 79 | uint32_t data_memslot, uint32_t pgd_memslot); |
Paolo Bonzini | 6089ae0 | 2018-03-28 09:45:34 +0200 | [diff] [blame] | 80 | |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 81 | void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent); |
Andrew Jones | 4259362 | 2020-03-10 10:15:54 +0100 | [diff] [blame] | 82 | |
| 83 | /* |
| 84 | * VM VCPU Dump |
| 85 | * |
| 86 | * Input Args: |
| 87 | * stream - Output FILE stream |
| 88 | * vm - Virtual Machine |
| 89 | * vcpuid - VCPU ID |
| 90 | * indent - Left margin indent amount |
| 91 | * |
| 92 | * Output Args: None |
| 93 | * |
| 94 | * Return: None |
| 95 | * |
| 96 | * Dumps the current state of the VCPU specified by @vcpuid, within the VM |
| 97 | * given by @vm, to the FILE stream given by @stream. |
| 98 | */ |
Andrew Jones | eabe788 | 2018-09-18 19:54:28 +0200 | [diff] [blame] | 99 | void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, |
| 100 | uint8_t indent); |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 101 | |
| 102 | void vm_create_irqchip(struct kvm_vm *vm); |
| 103 | |
| 104 | void vm_userspace_mem_region_add(struct kvm_vm *vm, |
| 105 | enum vm_mem_backing_src_type src_type, |
| 106 | uint64_t guest_paddr, uint32_t slot, uint64_t npages, |
| 107 | uint32_t flags); |
| 108 | |
Andrew Jones | eabe788 | 2018-09-18 19:54:28 +0200 | [diff] [blame] | 109 | void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl, |
| 110 | void *arg); |
Vitaly Kuznetsov | 7e50c42 | 2018-12-10 18:21:58 +0100 | [diff] [blame] | 111 | int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl, |
| 112 | void *arg); |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 113 | void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg); |
| 114 | void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags); |
Sean Christopherson | 13e48aa | 2020-02-18 13:07:33 -0800 | [diff] [blame] | 115 | void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa); |
Paolo Bonzini | 837ec79 | 2019-06-04 19:13:46 +0200 | [diff] [blame] | 116 | void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid); |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 117 | vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, |
Andrew Jones | eabe788 | 2018-09-18 19:54:28 +0200 | [diff] [blame] | 118 | uint32_t data_memslot, uint32_t pgd_memslot); |
Peter Xu | 3b4cd0f | 2018-08-22 15:20:00 +0800 | [diff] [blame] | 119 | void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, |
Andrew Jones | beca547 | 2020-03-13 16:56:43 +0100 | [diff] [blame] | 120 | unsigned int npages, uint32_t pgd_memslot); |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 121 | void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa); |
| 122 | void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva); |
| 123 | vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva); |
Andrew Jones | 4259362 | 2020-03-10 10:15:54 +0100 | [diff] [blame] | 124 | |
| 125 | /* |
| 126 | * Address Guest Virtual to Guest Physical |
| 127 | * |
| 128 | * Input Args: |
| 129 | * vm - Virtual Machine |
| 130 | * gva - VM virtual address |
| 131 | * |
| 132 | * Output Args: None |
| 133 | * |
| 134 | * Return: |
| 135 | * Equivalent VM physical address |
| 136 | * |
| 137 | * Returns the VM physical address of the translated VM virtual |
| 138 | * address given by @gva. |
| 139 | */ |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 140 | vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva); |
| 141 | |
| 142 | struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid); |
| 143 | void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); |
| 144 | int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); |
Sean Christopherson | 0f73bbc | 2019-03-13 16:49:31 -0700 | [diff] [blame] | 145 | void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid); |
Peter Xu | 449aa90 | 2020-05-05 16:50:00 -0400 | [diff] [blame] | 146 | void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid, |
| 147 | struct kvm_guest_debug *debug); |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 148 | void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, |
Andrew Jones | eabe788 | 2018-09-18 19:54:28 +0200 | [diff] [blame] | 149 | struct kvm_mp_state *mp_state); |
| 150 | void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs); |
| 151 | void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs); |
Andrew Jones | 4259362 | 2020-03-10 10:15:54 +0100 | [diff] [blame] | 152 | |
| 153 | /* |
| 154 | * VM VCPU Args Set |
| 155 | * |
| 156 | * Input Args: |
| 157 | * vm - Virtual Machine |
| 158 | * vcpuid - VCPU ID |
| 159 | * num - number of arguments |
| 160 | * ... - arguments, each of type uint64_t |
| 161 | * |
| 162 | * Output Args: None |
| 163 | * |
| 164 | * Return: None |
| 165 | * |
| 166 | * Sets the first @num function input registers of the VCPU with @vcpuid, |
| 167 | * per the C calling convention of the architecture, to the values given |
| 168 | * as variable args. Each of the variable args is expected to be of type |
| 169 | * uint64_t. The maximum @num can be is specific to the architecture. |
| 170 | */ |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 171 | void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...); |
Andrew Jones | 4259362 | 2020-03-10 10:15:54 +0100 | [diff] [blame] | 172 | |
Andrew Jones | eabe788 | 2018-09-18 19:54:28 +0200 | [diff] [blame] | 173 | void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, |
| 174 | struct kvm_sregs *sregs); |
| 175 | void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, |
| 176 | struct kvm_sregs *sregs); |
| 177 | int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, |
| 178 | struct kvm_sregs *sregs); |
Janosch Frank | ada0a50 | 2020-01-31 05:02:03 -0500 | [diff] [blame] | 179 | void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid, |
| 180 | struct kvm_fpu *fpu); |
| 181 | void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid, |
| 182 | struct kvm_fpu *fpu); |
| 183 | void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg); |
| 184 | void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg); |
Thomas Huth | a9c788f | 2019-05-23 18:43:02 +0200 | [diff] [blame] | 185 | #ifdef __KVM_HAVE_VCPU_EVENTS |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 186 | void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid, |
Andrew Jones | eabe788 | 2018-09-18 19:54:28 +0200 | [diff] [blame] | 187 | struct kvm_vcpu_events *events); |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 188 | void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, |
Andrew Jones | eabe788 | 2018-09-18 19:54:28 +0200 | [diff] [blame] | 189 | struct kvm_vcpu_events *events); |
Thomas Huth | a9c788f | 2019-05-23 18:43:02 +0200 | [diff] [blame] | 190 | #endif |
Thomas Huth | c795720 | 2019-05-23 11:31:14 +0200 | [diff] [blame] | 191 | #ifdef __x86_64__ |
Aaron Lewis | da1e307 | 2019-05-02 11:31:41 -0700 | [diff] [blame] | 192 | void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid, |
| 193 | struct kvm_nested_state *state); |
| 194 | int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid, |
| 195 | struct kvm_nested_state *state, bool ignore_error); |
Thomas Huth | c795720 | 2019-05-23 11:31:14 +0200 | [diff] [blame] | 196 | #endif |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 197 | |
| 198 | const char *exit_reason_str(unsigned int exit_reason); |
| 199 | |
| 200 | void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot); |
Andrew Jones | 4259362 | 2020-03-10 10:15:54 +0100 | [diff] [blame] | 201 | |
| 202 | /* |
| 203 | * VM Virtual Page Map |
| 204 | * |
| 205 | * Input Args: |
| 206 | * vm - Virtual Machine |
| 207 | * vaddr - VM Virtual Address |
| 208 | * paddr - VM Physical Address |
| 209 | * memslot - Memory region slot for new virtual translation tables |
| 210 | * |
| 211 | * Output Args: None |
| 212 | * |
| 213 | * Return: None |
| 214 | * |
| 215 | * Within @vm, creates a virtual translation for the page starting |
| 216 | * at @vaddr to the page starting at @paddr. |
| 217 | */ |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 218 | void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, |
Andrew Jones | 4259362 | 2020-03-10 10:15:54 +0100 | [diff] [blame] | 219 | uint32_t memslot); |
| 220 | |
Andrew Jones | eabe788 | 2018-09-18 19:54:28 +0200 | [diff] [blame] | 221 | vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, |
| 222 | uint32_t memslot); |
Andrew Jones | d510653 | 2018-09-18 19:54:29 +0200 | [diff] [blame] | 223 | vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, |
| 224 | vm_paddr_t paddr_min, uint32_t memslot); |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 225 | |
Andrew Jones | 4259362 | 2020-03-10 10:15:54 +0100 | [diff] [blame] | 226 | /* |
| 227 | * Create a VM with reasonable defaults |
| 228 | * |
| 229 | * Input Args: |
| 230 | * vcpuid - The id of the single VCPU to add to the VM. |
Andrew Jones | beca547 | 2020-03-13 16:56:43 +0100 | [diff] [blame] | 231 | * extra_mem_pages - The number of extra pages to add (this will |
Andrew Jones | 4259362 | 2020-03-10 10:15:54 +0100 | [diff] [blame] | 232 | * decide how much extra space we will need to |
| 233 | * setup the page tables using memslot 0) |
| 234 | * guest_code - The vCPU's entry point |
| 235 | * |
| 236 | * Output Args: None |
| 237 | * |
| 238 | * Return: |
| 239 | * Pointer to opaque structure that describes the created VM. |
| 240 | */ |
Andrew Jones | beca547 | 2020-03-13 16:56:43 +0100 | [diff] [blame] | 241 | struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages, |
Peter Xu | aee41be | 2018-08-22 15:19:59 +0800 | [diff] [blame] | 242 | void *guest_code); |
Andrew Jones | 4259362 | 2020-03-10 10:15:54 +0100 | [diff] [blame] | 243 | |
| 244 | /* |
| 245 | * Adds a vCPU with reasonable defaults (e.g. a stack) |
| 246 | * |
| 247 | * Input Args: |
| 248 | * vm - Virtual Machine |
| 249 | * vcpuid - The id of the VCPU to add to the VM. |
| 250 | * guest_code - The vCPU's entry point |
| 251 | */ |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 252 | void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code); |
| 253 | |
Aaron Lewis | 9dba988 | 2019-05-31 14:14:52 +0000 | [diff] [blame] | 254 | bool vm_is_unrestricted_guest(struct kvm_vm *vm); |
| 255 | |
Peter Xu | 52200d0 | 2019-08-30 09:36:19 +0800 | [diff] [blame] | 256 | unsigned int vm_get_page_size(struct kvm_vm *vm); |
| 257 | unsigned int vm_get_page_shift(struct kvm_vm *vm); |
| 258 | unsigned int vm_get_max_gfn(struct kvm_vm *vm); |
| 259 | |
Andrew Jones | 94c4b76b | 2020-03-13 16:56:44 +0100 | [diff] [blame] | 260 | unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size); |
Andrew Jones | 87a802d | 2020-02-14 15:59:20 +0100 | [diff] [blame] | 261 | unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages); |
| 262 | unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages); |
| 263 | static inline unsigned int |
| 264 | vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages) |
| 265 | { |
Andrew Jones | 331b4de | 2020-03-12 11:40:55 +0100 | [diff] [blame] | 266 | unsigned int n; |
| 267 | n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages)); |
| 268 | #ifdef __s390x__ |
| 269 | /* s390 requires 1M aligned guest sizes */ |
| 270 | n = (n + 255) & ~255; |
| 271 | #endif |
| 272 | return n; |
Andrew Jones | 87a802d | 2020-02-14 15:59:20 +0100 | [diff] [blame] | 273 | } |
| 274 | |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 275 | struct kvm_userspace_memory_region * |
| 276 | kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, |
| 277 | uint64_t end); |
| 278 | |
| 279 | struct kvm_dirty_log * |
| 280 | allocate_kvm_dirty_log(struct kvm_userspace_memory_region *region); |
| 281 | |
| 282 | int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd); |
| 283 | |
/*
 * Copy the host's current value of global variable @g into the guest's
 * copy of the same global in VM @vm: translate @g's address as a guest
 * virtual address to a host virtual address, then memcpy host -> guest.
 */
#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})
Peter Xu | 4e18bccc | 2018-08-22 15:19:57 +0800 | [diff] [blame] | 288 | |
/*
 * Copy the guest's current value of global variable @g in VM @vm back
 * into the host's copy: translate @g's address as a guest virtual
 * address to a host virtual address, then memcpy guest -> host.
 */
#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(&(g), _p, sizeof(g));				\
})
Peter Xu | 4e18bccc | 2018-08-22 15:19:57 +0800 | [diff] [blame] | 293 | |
/*
 * Common ucalls: commands a guest sends to the host via ucall() (see
 * the GUEST_SYNC/GUEST_ABORT/GUEST_DONE wrappers below).
 */
enum {
	UCALL_NONE,	/* No ucall pending */
	UCALL_SYNC,	/* Synchronization point with the host */
	UCALL_ABORT,	/* A guest assertion failed */
	UCALL_DONE,	/* Guest has finished */
};
Peter Xu | 4e18bccc | 2018-08-22 15:19:57 +0800 | [diff] [blame] | 301 | |
/* Maximum number of uint64_t arguments a single ucall can carry. */
#define UCALL_MAX_ARGS 6

/* A guest-to-host call: a UCALL_* command plus its arguments. */
struct ucall {
	uint64_t cmd;			/* One of the UCALL_* values above */
	uint64_t args[UCALL_MAX_ARGS];	/* Command-specific arguments */
};
Peter Xu | 4e18bccc | 2018-08-22 15:19:57 +0800 | [diff] [blame] | 308 | |
Thomas Huth | 2040f41 | 2019-07-31 17:15:23 +0200 | [diff] [blame] | 309 | void ucall_init(struct kvm_vm *vm, void *arg); |
Andrew Jones | 14c47b7 | 2018-09-18 19:54:25 +0200 | [diff] [blame] | 310 | void ucall_uninit(struct kvm_vm *vm); |
| 311 | void ucall(uint64_t cmd, int nargs, ...); |
| 312 | uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc); |
Peter Xu | 4e18bccc | 2018-08-22 15:19:57 +0800 | [diff] [blame] | 313 | |
/* Report a sync point to the host; @stage identifies the test stage. */
#define GUEST_SYNC(stage)	ucall(UCALL_SYNC, 2, "hello", stage)
/* Tell the host the guest has finished. */
#define GUEST_DONE()		ucall(UCALL_DONE, 0)
/*
 * Assert @_condition from guest code.  On failure, reports the
 * stringified condition and the source line number to the host via
 * UCALL_ABORT rather than aborting inside the guest.
 */
#define GUEST_ASSERT(_condition) do {			\
	if (!(_condition))				\
		ucall(UCALL_ABORT, 2,			\
			"Failed guest assert: "		\
			#_condition, __LINE__);		\
} while (0)
Peter Xu | 4e18bccc | 2018-08-22 15:19:57 +0800 | [diff] [blame] | 322 | |
Paolo Bonzini | 783e9e5 | 2018-03-27 11:49:19 +0200 | [diff] [blame] | 323 | #endif /* SELFTEST_KVM_UTIL_H */ |