#include "kvm/kvm.h"
#include "kvm/read-write.h"
#include "kvm/util.h"
#include "kvm/strbuf.h"
#include "kvm/mutex.h"
#include "kvm/kvm-cpu.h"
#include "kvm/kvm-ipc.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include <linux/list.h>
#include <linux/err.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <limits.h>
#include <signal.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <time.h>
#include <sys/eventfd.h>
#include <asm/unistd.h>
#include <dirent.h>
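/*
 * Build a table mapping each KVM exit code to its name using C99
 * designated initializers: every entry lands at the index named by the
 * exit reason, e.g. kvm_exit_reasons[KVM_EXIT_IO] == "KVM_EXIT_IO".
 */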
#define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason
const char *kvm_exit_reasons[] = {
DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
#ifdef CONFIG_PPC64
DEFINE_KVM_EXIT_REASON(KVM_EXIT_PAPR_HCALL),
#endif
};
static int pause_event;
static DEFINE_MUTEX(pause_lock);
extern struct kvm_ext kvm_req_ext[];
static char kvm_dir[PATH_MAX];
extern __thread struct kvm_cpu *current_kvm_cpu;
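/*
 * Create the directory (best effort; it may already exist) and
 * canonicalize it into kvm_dir, keeping a trailing '/' so callers can
 * append file names directly.
 */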
static int set_dir(const char *fmt, va_list args)
{
char tmp[PATH_MAX];
vsnprintf(tmp, sizeof(tmp), fmt, args);
mkdir(tmp, 0777);
if (!realpath(tmp, kvm_dir))
return -errno;
strcat(kvm_dir, "/");
return 0;
}
void kvm__set_dir(const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
set_dir(fmt, args);
va_end(args);
}
const char *kvm__get_dir(void)
{
return kvm_dir;
}
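/*
 * Check an extension on the VM fd rather than the system fd. The static
 * supports_vm_ext_check caches whether the host offers
 * KVM_CAP_CHECK_EXTENSION_VM at all: 0 means not probed yet, 1 means
 * VM-level checks are available, -1 means they are not.
 */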
bool kvm__supports_vm_extension(struct kvm *kvm, unsigned int extension)
{
static int supports_vm_ext_check = 0;
int ret;
switch (supports_vm_ext_check) {
case 0:
ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION,
KVM_CAP_CHECK_EXTENSION_VM);
if (ret <= 0) {
supports_vm_ext_check = -1;
return false;
}
supports_vm_ext_check = 1;
/* fall through */
case 1:
break;
case -1:
return false;
}
ret = ioctl(kvm->vm_fd, KVM_CHECK_EXTENSION, extension);
if (ret < 0)
return false;
return ret;
}
bool kvm__supports_extension(struct kvm *kvm, unsigned int extension)
{
int ret;
ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, extension);
if (ret < 0)
return false;
return ret;
}
static int kvm__check_extensions(struct kvm *kvm)
{
int i;
for (i = 0; ; i++) {
if (!kvm_req_ext[i].name)
break;
if (!kvm__supports_extension(kvm, kvm_req_ext[i].code)) {
pr_err("Unsupported KVM extension detected: %s",
kvm_req_ext[i].name);
return -i;
}
}
return 0;
}
struct kvm *kvm__new(void)
{
struct kvm *kvm = calloc(1, sizeof(*kvm));
if (!kvm)
return ERR_PTR(-ENOMEM);
mutex_init(&kvm->mem_banks_lock);
kvm->sys_fd = -1;
kvm->vm_fd = -1;
kvm->ram_fd = -1;
#ifdef KVM_BRLOCK_DEBUG
kvm->brlock_sem = (pthread_rwlock_t) PTHREAD_RWLOCK_INITIALIZER;
#endif
return kvm;
}
static void kvm__delete_ram(struct kvm *kvm)
{
munmap(kvm->ram_start, kvm->ram_size);
if (kvm->ram_fd >= 0)
close(kvm->ram_fd);
}
int kvm__exit(struct kvm *kvm)
{
struct kvm_mem_bank *bank, *tmp;
kvm__delete_ram(kvm);
list_for_each_entry_safe(bank, tmp, &kvm->mem_banks, list) {
list_del(&bank->list);
free(bank);
}
free(kvm);
return 0;
}
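/*
 * core_exit() registers kvm__exit() to run during kvmtool's teardown
 * sequence; kvm__init() below is registered with core_init() the same way.
 */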
core_exit(kvm__exit);
static int set_user_memory_region(int vm_fd, u32 slot, u32 flags,
u64 guest_phys, u64 size,
u64 userspace_addr)
{
int ret = 0;
struct kvm_userspace_memory_region mem = {
.slot = slot,
.flags = flags,
.guest_phys_addr = guest_phys,
.memory_size = size,
.userspace_addr = (unsigned long)userspace_addr,
};
ret = ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
if (ret < 0)
ret = -errno;
return ret;
}
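/*
 * Variant of set_user_memory_region() for restricted (private) guest
 * memory: the slot is backed by a restricted memfd instead of a host
 * mapping (the userspace_addr argument is unused here and the kernel
 * field is left at zero), and the GPA range is then marked private via
 * KVM_SET_MEMORY_ATTRIBUTES so KVM knows the guest owns it.
 */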
static int set_user_memory_region_ext(int vm_fd, u32 slot, u32 flags,
u64 guest_phys, u64 size,
u64 userspace_addr, u32 fd, u64 offset)
{
int ret = 0;
struct kvm_userspace_memory_region_ext mem = {
.region = {
.slot = slot,
.flags = flags,
.guest_phys_addr = guest_phys,
.memory_size = size,
.userspace_addr = 0,
},
.restricted_fd = fd,
.restricted_offset = offset,
};
struct kvm_memory_attributes attr = {
.address = guest_phys,
.size = size,
.attributes = KVM_MEMORY_ATTRIBUTE_PRIVATE,
.flags = 0,
};
ret = ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
if (ret < 0) {
ret = -errno;
goto out;
}
/* Inform KVM that the region is protected. */
	ret = ioctl(vm_fd, KVM_SET_MEMORY_ATTRIBUTES, &attr);
	if (ret < 0)
		ret = -errno;
	else if (attr.size != 0)
		/* Success return, but the whole range was not converted. */
		ret = -EINVAL;
out:
return ret;
}
int kvm__destroy_mem(struct kvm *kvm, u64 guest_phys, u64 size,
void *userspace_addr)
{
struct kvm_mem_bank *bank;
int ret;
mutex_lock(&kvm->mem_banks_lock);
list_for_each_entry(bank, &kvm->mem_banks, list)
if (bank->guest_phys_addr == guest_phys &&
bank->size == size && bank->host_addr == userspace_addr)
break;
if (&bank->list == &kvm->mem_banks) {
pr_err("Region [%llx-%llx] not found", guest_phys,
guest_phys + size - 1);
ret = -EINVAL;
goto out;
}
if (bank->type == KVM_MEM_TYPE_RESERVED) {
pr_err("Cannot delete reserved region [%llx-%llx]",
guest_phys, guest_phys + size - 1);
ret = -EINVAL;
goto out;
}
if (kvm->cfg.restricted_mem)
ret = set_user_memory_region_ext(kvm->vm_fd, bank->slot,
KVM_MEM_PRIVATE, guest_phys, 0, (u64) userspace_addr,
0, 0);
else
ret = set_user_memory_region(kvm->vm_fd, bank->slot, 0,
guest_phys, 0, (u64) userspace_addr);
if (ret < 0)
goto out;
list_del(&bank->list);
free(bank);
kvm->mem_slots--;
ret = 0;
out:
mutex_unlock(&kvm->mem_banks_lock);
return ret;
}
int kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size,
void *userspace_addr, int memfd, u64 offset,
enum kvm_mem_type type)
{
struct kvm_mem_bank *merged = NULL;
struct kvm_mem_bank *bank;
struct list_head *prev_entry;
u32 slot;
u32 flags = 0;
int ret;
mutex_lock(&kvm->mem_banks_lock);
/* Check for overlap and find first empty slot. */
slot = 0;
prev_entry = &kvm->mem_banks;
list_for_each_entry(bank, &kvm->mem_banks, list) {
u64 bank_end = bank->guest_phys_addr + bank->size - 1;
u64 end = guest_phys + size - 1;
if (guest_phys > bank_end || end < bank->guest_phys_addr) {
/*
* Keep the banks sorted ascending by slot, so it's
* easier for us to find a free slot.
*/
if (bank->slot == slot) {
slot++;
prev_entry = &bank->list;
}
continue;
}
/* Merge overlapping reserved regions */
if (bank->type == KVM_MEM_TYPE_RESERVED &&
type == KVM_MEM_TYPE_RESERVED) {
bank->guest_phys_addr = min(bank->guest_phys_addr, guest_phys);
bank->size = max(bank_end, end) - bank->guest_phys_addr + 1;
if (merged) {
/*
* This is at least the second merge, remove
* previous result.
*/
list_del(&merged->list);
free(merged);
}
guest_phys = bank->guest_phys_addr;
size = bank->size;
merged = bank;
/* Keep checking that we don't overlap another region */
continue;
}
pr_err("%s region [%llx-%llx] would overlap %s region [%llx-%llx]",
kvm_mem_type_to_string(type), guest_phys, guest_phys + size - 1,
kvm_mem_type_to_string(bank->type), bank->guest_phys_addr,
bank->guest_phys_addr + bank->size - 1);
ret = -EINVAL;
goto out;
}
if (merged) {
ret = 0;
goto out;
}
bank = malloc(sizeof(*bank));
if (!bank) {
ret = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(&bank->list);
bank->guest_phys_addr = guest_phys;
bank->host_addr = userspace_addr;
bank->size = size;
bank->type = type;
bank->slot = slot;
bank->memfd = memfd;
bank->memfd_offset = offset;
if (type & KVM_MEM_TYPE_READONLY)
flags |= KVM_MEM_READONLY;
if (type != KVM_MEM_TYPE_RESERVED) {
if (kvm->cfg.restricted_mem)
ret = set_user_memory_region_ext(kvm->vm_fd, slot,
flags | KVM_MEM_PRIVATE, guest_phys, size,
(u64) userspace_addr, memfd, offset);
else
ret = set_user_memory_region(kvm->vm_fd, slot, flags,
guest_phys, size, (u64) userspace_addr);
if (ret < 0)
goto out;
}
list_add(&bank->list, prev_entry);
kvm->mem_slots++;
ret = 0;
out:
mutex_unlock(&kvm->mem_banks_lock);
return ret;
}
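/*
 * Usage sketch (hypothetical values): register a 1MiB RAM bank at guest
 * physical address 1MiB, backed by anonymous host memory and no memfd:
 *
 *	void *host = mmap(NULL, 0x100000, PROT_READ | PROT_WRITE,
 *			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (host != MAP_FAILED)
 *		kvm__register_mem(kvm, 0x100000, 0x100000, host, -1, 0,
 *				  KVM_MEM_TYPE_RAM);
 */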
void *guest_flat_to_host(struct kvm *kvm, u64 offset)
{
struct kvm_mem_bank *bank;
list_for_each_entry(bank, &kvm->mem_banks, list) {
u64 bank_start = bank->guest_phys_addr;
u64 bank_end = bank_start + bank->size;
if (offset >= bank_start && offset < bank_end)
return bank->host_addr + (offset - bank_start);
}
pr_warning("unable to translate guest address 0x%llx to host",
(unsigned long long)offset);
return NULL;
}
u64 host_to_guest_flat(struct kvm *kvm, void *ptr)
{
struct kvm_mem_bank *bank;
list_for_each_entry(bank, &kvm->mem_banks, list) {
void *bank_start = bank->host_addr;
void *bank_end = bank_start + bank->size;
if (ptr >= bank_start && ptr < bank_end)
return bank->guest_phys_addr + (ptr - bank_start);
}
pr_warning("unable to translate host address %p to guest", ptr);
return 0;
}
/*
* Iterate over each registered memory bank. Call @fun for each bank with @data
* as argument. @type is a bitmask that allows to filter banks according to
* their type.
*
* If one call to @fun returns a non-zero value, stop iterating and return the
* value. If none of the bank types match, return -ENODEV. Otherwise, return
* zero.
*/
int kvm__for_each_mem_bank(struct kvm *kvm, enum kvm_mem_type type,
int (*fun)(struct kvm *kvm, struct kvm_mem_bank *bank, void *data),
void *data)
{
struct kvm_mem_bank *bank;
int ret = -ENODEV;
list_for_each_entry(bank, &kvm->mem_banks, list) {
if (type != KVM_MEM_TYPE_ALL && !(bank->type & type))
continue;
ret = fun(kvm, bank, data);
if (ret)
break;
}
return ret;
}
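/*
 * Usage sketch (hypothetical callback): sum the sizes of all RAM banks.
 *
 *	static int add_bank_size(struct kvm *kvm, struct kvm_mem_bank *bank,
 *				 void *data)
 *	{
 *		*(u64 *)data += bank->size;
 *		return 0;	// keep iterating
 *	}
 *
 *	u64 total = 0;
 *	kvm__for_each_mem_bank(kvm, KVM_MEM_TYPE_RAM, add_bank_size, &total);
 */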
static void *_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t offset)
{
return mmap(addr, len, prot, flags | MAP_FIXED, fd, offset);
}
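/*
 * "Unmap" by overmapping the range with an inaccessible anonymous
 * mapping rather than calling munmap(): the virtual address range stays
 * reserved, so a later _mmap(MAP_FIXED) can safely reinstate it without
 * racing against unrelated allocations.
 */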
static int _munmap(void *addr, size_t len)
{
if (mmap(addr, len, PROT_NONE, MAP_SHARED | MAP_FIXED | MAP_ANON, -1, 0) != MAP_FAILED)
return 0;
return -EFAULT;
}
struct bank_range {
u64 gpa;
u64 size;
};
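/*
 * A (gpa, size) range matches a bank only if it starts inside the bank
 * and also ends within it; a range straddling a bank boundary is
 * rejected with a warning.
 */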
static bool is_bank_range(struct kvm_mem_bank *bank, struct bank_range *range)
{
u64 bank_start = bank->guest_phys_addr;
u64 bank_end = bank_start + bank->size;
u64 gpa_end = range->gpa + range->size;
if (range->gpa < bank_start || range->gpa >= bank_end)
return false;
if (gpa_end > bank_end || gpa_end <= bank_start) {
pr_warning("%s invalid guest range", __func__);
return false;
}
return true;
}
static int map_bank_range(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
{
struct bank_range *range = data;
u64 gpa_offset;
u64 map_offset;
u64 hva;
void *mapping;
if (!is_bank_range(bank, range))
return 0;
gpa_offset = range->gpa - bank->guest_phys_addr;
map_offset = bank->memfd_offset + gpa_offset;
hva = (u64) bank->host_addr + gpa_offset;
BUG_ON(map_offset > bank->memfd_offset + bank->size);
BUG_ON(map_offset < bank->memfd_offset);
BUG_ON(hva < (u64)bank->host_addr);
BUG_ON(!bank->memfd);
mapping = _mmap((void *)hva, range->size, PROT_RW, MAP_SHARED, bank->memfd, map_offset);
if (mapping == MAP_FAILED || mapping != (void *)hva)
pr_warning("%s gpa 0x%llx (size: %llu) at hva 0x%llx failed with mapping 0x%llx",
__func__,
(unsigned long long)range->gpa,
(unsigned long long)range->size,
(unsigned long long)hva,
(unsigned long long)mapping);
/* Do not proceed to trying to map the other banks. */
return 1;
}
static int unmap_bank_range(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
{
struct bank_range *range = data;
u64 gpa_offset;
u64 hva;
int ret;
if (!is_bank_range(bank, range))
return 0;
gpa_offset = range->gpa - bank->guest_phys_addr;
hva = (u64)bank->host_addr + gpa_offset;
BUG_ON(hva < (u64)bank->host_addr);
BUG_ON(!bank->memfd);
ret = _munmap((void *)hva, range->size);
if (ret)
pr_warning("%s gpa 0x%llx (size: %llu) at hva 0x%llx failed with error %d",
__func__,
(unsigned long long)range->gpa,
(unsigned long long)range->size,
(unsigned long long)hva,
ret);
/* Do not proceed to trying to unmap the other banks. */
return 1;
}
static int map_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
{
void *mapping;
BUG_ON(!bank->memfd);
pr_debug("%s hva 0x%llx (size: %llu) of memfd %d (offset %llu)",
__func__,
(unsigned long long)bank->host_addr,
(unsigned long long)bank->size,
bank->memfd,
(unsigned long long)bank->memfd_offset);
mapping = _mmap(bank->host_addr, bank->size, PROT_RW, MAP_SHARED, bank->memfd, bank->memfd_offset);
	if (mapping == MAP_FAILED || mapping != bank->host_addr)
pr_warning("%s hva 0x%llx (size: %llu) failed with return 0x%llx",
__func__,
(unsigned long long)bank->host_addr,
(unsigned long long)bank->size,
(unsigned long long)mapping);
return 0;
}
static int unmap_bank(struct kvm *kvm, struct kvm_mem_bank *bank, void *data)
{
int ret;
pr_debug("%s hva 0x%llx (size: %llu)",
__func__,
(unsigned long long)bank->host_addr,
(unsigned long long)bank->size);
ret = _munmap(bank->host_addr, bank->size);
if (ret)
pr_warning("%s hva 0x%llx (size: %llu) failed with error %d",
__func__,
(unsigned long long)bank->host_addr,
(unsigned long long)bank->size,
ret);
return 0;
}
void map_guest_range(struct kvm *kvm, u64 gpa, u64 size)
{
struct bank_range range = { .gpa = gpa, .size = size };
int ret;
ret = kvm__for_each_mem_bank(kvm, KVM_MEM_TYPE_RAM|KVM_MEM_TYPE_DEVICE,
map_bank_range, &range);
	/* ret == 1 means a bank was handled; 0 or -ENODEV means no match. */
	if (ret <= 0)
pr_warning("%s gpa 0x%llx (size: %llu) found no matches",
__func__,
(unsigned long long)gpa,
(unsigned long long)size);
}
void unmap_guest_range(struct kvm *kvm, u64 gpa, u64 size)
{
struct bank_range range = { .gpa = gpa, .size = size };
int ret;
ret = kvm__for_each_mem_bank(kvm, KVM_MEM_TYPE_RAM|KVM_MEM_TYPE_DEVICE,
unmap_bank_range, &range);
	/* ret == 1 means a bank was handled; 0 or -ENODEV means no match. */
	if (ret <= 0)
		pr_warning("%s gpa 0x%llx (size: %llu) found no matches",
			   __func__,
			   (unsigned long long)gpa,
			   (unsigned long long)size);
}
void map_guest(struct kvm *kvm)
{
kvm__for_each_mem_bank(kvm, KVM_MEM_TYPE_RAM|KVM_MEM_TYPE_DEVICE,
map_bank, NULL);
}
void unmap_guest(struct kvm *kvm)
{
kvm__for_each_mem_bank(kvm, KVM_MEM_TYPE_RAM|KVM_MEM_TYPE_DEVICE,
unmap_bank, NULL);
}
int kvm__recommended_cpus(struct kvm *kvm)
{
int ret;
ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
if (ret <= 0)
/*
* api.txt states that if KVM_CAP_NR_VCPUS does not exist,
* assume 4.
*/
return 4;
return ret;
}
int kvm__max_cpus(struct kvm *kvm)
{
int ret;
ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
if (ret <= 0)
ret = kvm__recommended_cpus(kvm);
return ret;
}
int __attribute__((weak)) kvm__get_vm_type(struct kvm *kvm)
{
return KVM_VM_TYPE;
}
int kvm__init(struct kvm *kvm)
{
int ret;
if (!kvm__arch_cpu_supports_vm()) {
pr_err("Your CPU does not support hardware virtualization");
ret = -ENOSYS;
goto err;
}
kvm->sys_fd = open(kvm->cfg.dev, O_RDWR);
if (kvm->sys_fd < 0) {
if (errno == ENOENT)
pr_err("'%s' not found. Please make sure your kernel has CONFIG_KVM "
"enabled and that the KVM modules are loaded.", kvm->cfg.dev);
else if (errno == ENODEV)
pr_err("'%s' KVM driver not available.\n # (If the KVM "
"module is loaded then 'dmesg' may offer further clues "
"about the failure.)", kvm->cfg.dev);
else
pr_err("Could not open %s: ", kvm->cfg.dev);
ret = -errno;
goto err_free;
}
	ret = ioctl(kvm->sys_fd, KVM_GET_API_VERSION, 0);
	if (ret != KVM_API_VERSION) {
		pr_err("KVM_GET_API_VERSION ioctl");
		ret = ret < 0 ? -errno : -ENOSYS;
		goto err_sys_fd;
	}
kvm->vm_fd = ioctl(kvm->sys_fd, KVM_CREATE_VM, kvm__get_vm_type(kvm));
if (kvm->vm_fd < 0) {
pr_err("KVM_CREATE_VM ioctl");
		ret = -errno;
goto err_sys_fd;
}
if (kvm__check_extensions(kvm)) {
pr_err("A required KVM extension is not supported by OS");
ret = -ENOSYS;
goto err_vm_fd;
}
kvm__arch_init(kvm);
INIT_LIST_HEAD(&kvm->mem_banks);
kvm__init_ram(kvm);
if (!kvm->cfg.firmware_filename) {
if (!kvm__load_kernel(kvm, kvm->cfg.kernel_filename,
kvm->cfg.initrd_filename, kvm->cfg.real_cmdline))
die("unable to load kernel %s", kvm->cfg.kernel_filename);
}
if (kvm->cfg.firmware_filename) {
if (!kvm__load_firmware(kvm, kvm->cfg.firmware_filename))
die("unable to load firmware image %s: %s", kvm->cfg.firmware_filename, strerror(errno));
} else {
ret = kvm__arch_setup_firmware(kvm);
if (ret < 0)
die("kvm__arch_setup_firmware() failed with error %d\n", ret);
}
return 0;
err_vm_fd:
close(kvm->vm_fd);
err_sys_fd:
close(kvm->sys_fd);
err_free:
free(kvm);
err:
return ret;
}
core_init(kvm__init);
bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
const char *initrd_filename, const char *kernel_cmdline)
{
bool ret;
int fd_kernel = -1, fd_initrd = -1;
fd_kernel = open(kernel_filename, O_RDONLY);
if (fd_kernel < 0)
die("Unable to open kernel %s", kernel_filename);
if (initrd_filename) {
fd_initrd = open(initrd_filename, O_RDONLY);
if (fd_initrd < 0)
die("Unable to open initrd %s", initrd_filename);
}
ret = kvm__arch_load_kernel_image(kvm, fd_kernel, fd_initrd,
kernel_cmdline);
if (initrd_filename)
close(fd_initrd);
close(fd_kernel);
if (!ret)
die("%s is not a valid kernel image", kernel_filename);
return ret;
}
void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size, int debug_fd)
{
unsigned char *p;
unsigned long n;
	size &= ~7;	/* round down to a multiple of 8 bytes */
if (!size)
return;
p = guest_flat_to_host(kvm, addr);
for (n = 0; n < size; n += 8) {
if (!host_ptr_in_ram(kvm, p + n)) {
dprintf(debug_fd, " 0x%08lx: <unknown>\n", addr + n);
continue;
}
dprintf(debug_fd, " 0x%08lx: %02x %02x %02x %02x %02x %02x %02x %02x\n",
addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3],
p[n + 4], p[n + 5], p[n + 6], p[n + 7]);
}
}
void kvm__reboot(struct kvm *kvm)
{
/* Check if the guest is running */
	if (!kvm->cpus || !kvm->cpus[0] || kvm->cpus[0]->thread == 0)
return;
pthread_kill(kvm->cpus[0]->thread, SIGKVMEXIT);
}
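/*
 * Pause/resume protocol: kvm__pause() takes pause_lock, then signals
 * every running vcpu thread with SIGKVMPAUSE and waits on an eventfd
 * until each one has acknowledged via kvm__notify_paused(). A paused
 * vcpu then blocks on pause_lock inside kvm__notify_paused(), so the
 * guest stays stopped until kvm__continue() drops the lock.
 */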
void kvm__continue(struct kvm *kvm)
{
mutex_unlock(&pause_lock);
}
void kvm__pause(struct kvm *kvm)
{
int i, paused_vcpus = 0;
mutex_lock(&pause_lock);
/* Check if the guest is running */
if (!kvm->cpus || !kvm->cpus[0] || kvm->cpus[0]->thread == 0)
return;
pause_event = eventfd(0, 0);
if (pause_event < 0)
die("Failed creating pause notification event");
for (i = 0; i < kvm->nrcpus; i++) {
if (kvm->cpus[i]->is_running && kvm->cpus[i]->paused == 0)
pthread_kill(kvm->cpus[i]->thread, SIGKVMPAUSE);
else
paused_vcpus++;
}
while (paused_vcpus < kvm->nrcpus) {
u64 cur_read;
if (read(pause_event, &cur_read, sizeof(cur_read)) < 0)
die("Failed reading pause event");
paused_vcpus += cur_read;
}
close(pause_event);
}
void kvm__notify_paused(void)
{
u64 p = 1;
if (write(pause_event, &p, sizeof(p)) < 0)
die("Failed notifying of paused VCPU.");
mutex_lock(&pause_lock);
current_kvm_cpu->paused = 0;
mutex_unlock(&pause_lock);
}