blob: 72fceabb0ef6d647111f1c274be31f9b8d28ae93 [file] [log] [blame]
/*
* XXX - Credit, licence, copyright
*/
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "helpers.h"
#define GUEST_PHYS_ADDR (1UL << 30)
#define GUEST_MMIO_ADDR (1UL << 38)
#define GUEST_MEM_SIZE (PMD_SIZE*4)
/*
 * Guest payload, copied byte-for-byte into guest memory at GUEST_PHYS_ADDR
 * by guest_prepare_memory().  It is two independent 2-instruction sequences,
 * each ending in a brk that hands control back to the host (KVM_EXIT_DEBUG):
 *
 *   offset 0 (entered by guest_write_memory):  x0[0] = x1;
 *   offset 2*ARM64_INST_SIZE (guest_read_memory):  x1 = x0[0];
 *
 * The instruction layout is load-bearing: the host sets PC to fixed offsets
 * into this code, and GUEST_NR_INST below must match the instruction count.
 */
void guest_code()
{
asm(
" str x1, [x0] \n"
" brk #0 \n"
" ldr x1, [x0] \n"
" brk #0 \n"
::);
}
#define GUEST_NR_INST 4
#define GUEST_CODE_SIZE() (GUEST_NR_INST * ARM64_INST_SIZE)
/*
 * Minimal one-instruction guest: traps immediately with a software
 * breakpoint (brk #0), returning control to the host.
 */
void guest_brk()
{
asm("brk #0 \n" ::);
}
/*
 * Make the guest execute the store sequence of guest_code():
 * writes the value 1 to guest physical address @addr and expects the
 * following brk to exit back to us with KVM_EXIT_DEBUG.
 */
static void guest_write_memory(int vcpufd, struct kvm_run *run, uint64_t addr)
{
	int exit_reason;

	set_one_reg(vcpufd, REG_X(0), addr);	/* x0 = target address */
	set_one_reg(vcpufd, REG_X(1), 1);	/* x1 = value to store */
	/* "str x1, [x0]" sits at offset 0 of guest_code */
	set_one_reg(vcpufd, REG_PC, GUEST_PHYS_ADDR);
	/*
	 * Run the vCPU outside assert(): with -DNDEBUG the assert expression
	 * is discarded and the guest would never execute.
	 */
	exit_reason = vcpu_run(vcpufd, run);
	assert(exit_reason == KVM_EXIT_DEBUG);
}
/*
 * Make the guest execute the load sequence of guest_code():
 * reads from guest physical address @addr (into x1) and expects the
 * following brk to exit back to us with KVM_EXIT_DEBUG.
 */
static void guest_read_memory(int vcpufd, struct kvm_run *run, uint64_t addr)
{
	int exit_reason;

	set_one_reg(vcpufd, REG_X(0), addr);	/* x0 = source address */
	/* "ldr x1, [x0]" is the third instruction of guest_code */
	set_one_reg(vcpufd, REG_PC, GUEST_PHYS_ADDR + 2 * ARM64_INST_SIZE);
	/*
	 * Run the vCPU outside assert(): with -DNDEBUG the assert expression
	 * is discarded and the guest would never execute.
	 */
	exit_reason = vcpu_run(vcpufd, run);
	assert(exit_reason == KVM_EXIT_DEBUG);
}
/*
 * Back a memslot at GUEST_PHYS_ADDR with PMD hugetlb pages, copy
 * guest_code() into it, and register the MMIO page the guest may touch.
 *
 * Returns the memory region descriptor so the caller can re-issue
 * KVM_SET_USER_MEMORY_REGION with different flags (e.g. toggling
 * KVM_MEM_LOG_DIRTY_PAGES) later on.
 */
struct kvm_userspace_memory_region guest_prepare_memory(int vmfd, int vcpufd)
{
	uint8_t *mem_code;

	/* MAP_HUGETLB fails unless PMD hugetlb pages are reserved */
	mem_code = mmap(NULL, GUEST_MEM_SIZE, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (mem_code == MAP_FAILED)
		/*
		 * err()'s first argument is the process exit status; the old
		 * -ENOMEM was negative (truncated to 244 by exit()).  err()
		 * already appends strerror(errno) to the message.
		 */
		err(EXIT_FAILURE,
		    "Are there PMD hugetlb pages available on the system?");
	memcpy(mem_code, guest_code, GUEST_CODE_SIZE());

	/* Expose the hugetlb buffer to the guest at GUEST_PHYS_ADDR */
	struct kvm_userspace_memory_region region = {
		.slot = 0,
		.guest_phys_addr = GUEST_PHYS_ADDR,
		.memory_size = GUEST_MEM_SIZE,
		.userspace_addr = (uint64_t)mem_code,
	};
	KVM_IOCTL(vmfd, KVM_SET_USER_MEMORY_REGION, &region);

	vm_add_mmio_page(vmfd, 1, GUEST_MMIO_ADDR);
	/* NOTE(review): x3 is set here but guest_code only uses x0/x1 —
	 * presumably left over from an MMIO variant of the test; confirm. */
	set_one_reg(vcpufd, REG_X(3), GUEST_MMIO_ADDR);
	return region;
}
/*
 * Reproducer flow: create a hugetlb-backed guest, enable dirty logging so
 * stage-2 is mapped with PTEs, fault in an RX mapping via a guest read,
 * then disable dirty logging and write to the same page to trigger the
 * suspected write-fault bug.
 */
int main(void)
{
	struct kvm_userspace_memory_region region;
	struct kvm_run *run = NULL;
	uint64_t addr, offset;
	int kvm, vmfd, vcpufd;

	/* Prepare the guest */
	kvm = get_kvm();
	vmfd = create_vm(kvm);
	vcpufd = create_vcpu(kvm, vmfd, &run, true);

	/* Create a memslot backed by hugetlb */
	region = guest_prepare_memory(vmfd, vcpufd);

	/* Enable dirty logging, to force PTEs despite having huge pages */
	region.flags = KVM_MEM_LOG_DIRTY_PAGES;
	KVM_IOCTL(vmfd, KVM_SET_USER_MEMORY_REGION, &region);

	/*
	 * The address from which we read and write is in the same page as
	 * the code, so the first read / exec faults should get us the RX
	 * mappings we want, and then the write should trigger the bug Will
	 * found out.
	 */
	offset = GUEST_CODE_SIZE();
	addr = GUEST_PHYS_ADDR + offset;

	/* Force user_mem_abort() to create the RX mapping, with PTEs */
	guest_read_memory(vcpufd, run, addr);

	/* Disable dirty logging, to now get block mappings */
	region.flags = 0;
	KVM_IOCTL(vmfd, KVM_SET_USER_MEMORY_REGION, &region);

	/* Write to the same page now, to trigger the write fault */
	guest_write_memory(vcpufd, run, addr);

	printf("Passing.\n");
	return 0;
}