| #include <stdio.h> |
| #include <err.h> |
| #include <string.h> |
| #include <stddef.h> |
| #include <stdint.h> |
| #include <stdlib.h> |
| #include <sys/types.h> |
| #include <sys/stat.h> |
| #include <sys/ioctl.h> |
| #include <sys/mman.h> |
| #include <unistd.h> |
| #include <fcntl.h> |
| #include <linux/types.h> |
| #include <linux/kvm.h> |
| #include <linux/kcov.h> |
| #include <sys/sysinfo.h> |
| |
| /* #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long) */ |
| #define KCOV_INIT_HYP_TRACE _IOR('c', 2, unsigned long) |
| /* #define KCOV_ENABLE _IO('c', 100) */ |
| /* #define KCOV_DISABLE _IO('c', 101) */ |
| |
| #define COVER_SIZE (512*1024) |
| |
| /* #define KCOV_TRACE_PC 0 */ |
| /* #define KCOV_TRACE_CMP 1 */ |
| #define KCOV_ENABLE_HYP (0x1ull << 48) |
| #define KCOV_ENABLE_HYP_ONLY (0x1ull << 49) |
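/* KCOV_INIT_HYP_TRACE and the KCOV_ENABLE_HYP* flags are assumed to come from
 * an out-of-tree pKVM coverage patch set; the commented-out definitions are
 * the stock ones shipped in <linux/kcov.h>. KCOV_ENABLE_HYP presumably turns
 * on EL2/hypervisor coverage, and KCOV_ENABLE_HYP_ONLY suppresses host
 * coverage so that only hyp PCs land in the buffer. */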
| |
/* Shared trace buffer, mirroring the classic kcov layout: the first word
 * counts the recorded PCs, which follow in entry[0..num_entries-1]. */
struct kcov_meta_buffer {
	unsigned long num_entries;
	unsigned long entry[];
};
| |
/* Helper for the commented-out stress test in main(): open a kcov fd and
 * initialize a hyp trace buffer. Note that the fd is leaked. */
void create_buffer(void){
| int kcov_fd; |
| kcov_fd = open("/sys/kernel/debug/kcov", O_RDWR); |
| if (kcov_fd == -1) |
| perror("open"), exit(1); |
| |
| printf("Opened fd %d\n", kcov_fd); |
| |
| /* Setup trace mode and trace size. */ |
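	/* As with the stock KCOV_INIT_TRACE, the argument is assumed to be a
	 * size in unsigned-long entries, not bytes. */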
| if (ioctl(kcov_fd, KCOV_INIT_HYP_TRACE, COVER_SIZE)) |
| perror("ioctl"), exit(1); |
| printf("Initialized buffer\n"); |
| } |
| |
| #define VM_MACHINE_TYPE 0x00000000UL |
| #define GUEST_CODE_IPA 0x1000ULL |
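/* Type 0 asks for the default VM flavour (on arm64, the default IPA size). */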
| |
int run_vm(void){
| int kvm, vmfd, vcpufd, ret; |
| |
| /* Add x0 to itself and output the result to MMIO at address in x2. */ |
| const uint8_t code[] = { |
| 0x01, 0x00, 0x00, 0x8b, /* add x1, x0, x0 */ |
| 0x41, 0x00, 0x00, 0xf9, /* str x1, [x2] */ |
| }; |
| struct kvm_run *run = NULL; |
| uint8_t *mem_code = NULL; |
| size_t mmap_size; |
| |
| kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC); |
| if (kvm < 0) |
| err(1, "/dev/kvm"); |
| |
| vmfd = ioctl(kvm, KVM_CREATE_VM, VM_MACHINE_TYPE); |
| if (vmfd < 0) |
| err(1, "KVM_CREATE_VM"); |
| |
| /* Allocate one aligned page of guest memory to hold the code. */ |
	mem_code = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (mem_code == MAP_FAILED)
		err(1, "allocating guest memory");
| memcpy(mem_code, code, sizeof(code)); |
| |
| /* Map code memory to the second page frame. */ |
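	/* Fields not named below (.slot, .flags) are zero-initialized. */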
| struct kvm_userspace_memory_region region = { |
| .guest_phys_addr = GUEST_CODE_IPA, |
| .memory_size = 0x1000, |
| .userspace_addr = (uint64_t)mem_code, |
| }; |
	ret = ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &region);
| if (ret < 0) |
| err(1, "KVM_SET_USER_MEMORY_REGION"); |
| |
| /* Create one CPU to run in the VM. */ |
| vcpufd = ioctl(vmfd, KVM_CREATE_VCPU, (unsigned long)0); |
| if (vcpufd < 0) |
| err(1, "KVM_CREATE_VCPU"); |
| |
| /* Map the shared kvm_run structure and following data. */ |
| ret = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, NULL); |
| if (ret < 0) |
| err(1, "KVM_GET_VCPU_MMAP_SIZE"); |
| mmap_size = ret; |
| if (mmap_size < sizeof(*run)) |
| errx(1, "KVM_GET_VCPU_MMAP_SIZE unexpectedly small"); |
	run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpufd, 0);
	if (run == MAP_FAILED)
		err(1, "mmap vcpu");
| |
| /* Query KVM for preferred CPU target type that can be emulated. */ |
| struct kvm_vcpu_init vcpu_init; |
| ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, &vcpu_init); |
| if (ret < 0) |
		err(1, "KVM_ARM_PREFERRED_TARGET");
| |
| /* Initialize VCPU with the preferred type obtained above. */ |
| ret = ioctl(vcpufd, KVM_ARM_VCPU_INIT, &vcpu_init); |
| if (ret < 0) |
| err(1, "KVM_ARM_VCPU_INIT"); |
| |
| /* Prepare the kvm_one_reg structure to use for populating registers. */ |
| uint64_t reg_data; |
| struct kvm_one_reg reg; |
	reg.addr = (__u64) &reg_data;
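	/* The magic register IDs below encode KVM_REG_ARM64 | KVM_REG_SIZE_U64
	 * | KVM_REG_ARM_CORE | (offsetof(struct kvm_regs, <reg>) / 4):
	 * index 0x00 is x0 and index 0x40 is pc. */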
| |
| // Initialize input registers (x0) to 33. |
| reg_data = 33; |
| reg.id = 0x6030000000100000; // x0 id |
	ret = ioctl(vcpufd, KVM_SET_ONE_REG, &reg);
| if (ret != 0) |
| err(1, "KVM_SET_ONE_REG"); |
| |
| // Initialize the PC to point to the start of the code. |
| reg.id = 0x6030000000100040; // pc id |
| reg_data = GUEST_CODE_IPA; |
	ret = ioctl(vcpufd, KVM_SET_ONE_REG, &reg);
| if (ret != 0) |
| err(1, "KVM_SET_ONE_REG"); |
| |
	/* Run the vCPU and handle VM exits; the guest's MMIO store ends the loop. */
| for (;;) { |
| ret = ioctl(vcpufd, KVM_RUN, NULL); |
| if (ret < 0) |
| err(1, "KVM_RUN"); |
| switch (run->exit_reason) { |
| case KVM_EXIT_MMIO: |
| { |
			/* The guest's 8-byte store arrives as an MMIO write. */
			uint64_t payload = 0;
			memcpy(&payload, run->mmio.data, sizeof(payload));
			printf("KVM_EXIT_MMIO: addr = 0x%llx, len = %u, is_write = %u, data = 0x%08llx\n",
			       run->mmio.phys_addr, run->mmio.len, run->mmio.is_write,
			       (unsigned long long)payload);
| // dump_code_page((void *)mem_code); |
| return 0; |
| } |
| default: |
| errx(1, "exit_reason = 0x%x", run->exit_reason); |
| } |
| } |
| } |
| |
| int main(int argc, char **argv) |
| { |
	int kcov_fd, kvm_fd;
	unsigned long n, i;
	unsigned int j, size; /* kvm_fd, n, j and size serve only the commented-out code below */
| struct kcov_meta_buffer *cover; |
| |
	/* Stress test: repeatedly create hyp coverage buffers. */
	/* for (int k = 0; k < 11; k++) */
	/*	create_buffer(); */
| |
	/* A single kcov file descriptor allows coverage collection on a
	 * single thread.
	 */
| kcov_fd = open("/sys/kernel/debug/kcov", O_RDWR); |
| if (kcov_fd == -1) |
| perror("open"), exit(1); |
| |
| printf("Opened fd %d\n", kcov_fd); |
| |
| /* Setup trace mode and trace size. */ |
| if (ioctl(kcov_fd, KCOV_INIT_HYP_TRACE, COVER_SIZE)) |
| perror("ioctl"), exit(1); |
| printf("Initialized buffer\n"); |
| |
| /* Mmap buffer shared between kernel- and user-space. */ |
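	/* The mapping must span COVER_SIZE unsigned-long entries, matching the
	 * size passed to KCOV_INIT_HYP_TRACE above. */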
| cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long), |
| PROT_READ | PROT_WRITE, MAP_SHARED, kcov_fd, 0); |
| if ((void*)cover == MAP_FAILED) |
| perror("mmap"), exit(1); |
| printf("Mmaped buffer\n"); |
| |
	/* Enable PC tracing for hyp-only coverage. To trace host coverage
	 * instead, drop the HYP flags:
	 * if (ioctl(kcov_fd, KCOV_ENABLE, KCOV_TRACE_PC))
	 */
	if (ioctl(kcov_fd, KCOV_ENABLE, KCOV_TRACE_PC | KCOV_ENABLE_HYP | KCOV_ENABLE_HYP_ONLY))
		perror("ioctl"), exit(1);
| printf("KCOV enabled\n"); |
| |
| /* kvm_fd = open("/dev/kvm", O_RDWR); */ |
| |
| /* hope for a stage 2 fault */ |
| /* void* dummy = malloc(100000000); */ |
| /* memset(dummy, 'a', 100000000); */ |
| |
| run_vm(); |
| |
| /* n = get_nprocs(); */ |
| /* size = COVER_SIZE / n; */ |
| /* printf("Number CPUS: %d\n", n); */ |
| |
| /* while (1) { */ |
| /* for (i = 0; i < n; i++) { */ |
| /* cover = (struct kcov_meta_buffer *)(ptr + size); */ |
| /* printf("CPU %d got 0x%lx entries\n", i, cover->num_entries); */ |
| |
| /* for (j = 0; j < cover->num_entries; j++) { */ |
| /* printf("0x%lx\n", cover->entry[j]); */ |
| /* } */ |
| |
| /* /\* Reset coverage from the tail of the ioctl() call. *\/ */ |
| /* __atomic_store_n(&cover->num_entries, 0, __ATOMIC_RELAXED); */ |
| /* } */ |
| /* } */ |
| |
| /* Disable coverage collection for the current thread. After this call |
| * coverage can be enabled for a different thread. |
| */ |
| if (ioctl(kcov_fd, KCOV_DISABLE, 0)) |
| perror("ioctl"), exit(1); |
| printf("KCOV disabled\n"); |
| |
| printf("Got %lld entries\n",cover->num_entries); |
| for(int i = 0; i < cover->num_entries; ++i){ |
| printf("%llx\n", cover->entry[i]); |
| } |
| /*TODO print buffer */ |
| |
	/* Free resources. */
	if (munmap(cover, COVER_SIZE * sizeof(unsigned long)))
		perror("munmap"), exit(1);

	if (close(kcov_fd))
		perror("close"), exit(1);
	printf("Closed buffer\n");
| return 0; |
| } |
| |