#include <stdio.h>
#include <string.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/types.h>
#include <sys/sysinfo.h>

#define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
#define KCOV_INIT_HYP_TRACE _IOR('c', 2, unsigned long)
#define KCOV_ENABLE _IO('c', 100)
#define KCOV_DISABLE _IO('c', 101)
#define COVER_SIZE (512)

#define KCOV_TRACE_PC 0
#define KCOV_TRACE_CMP 1
#define KCOV_ENABLE_HYP (0x1ull << 48)
#define KCOV_ENABLE_HYP_ONLY (0x1ull << 49)
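
/*
 * Note: the *_HYP ioctl and flag bits above are not part of mainline kcov.
 * They are assumed here to come from a patched kernel where
 * KCOV_INIT_HYP_TRACE sets up a hypervisor coverage buffer and
 * KCOV_ENABLE_HYP / KCOV_ENABLE_HYP_ONLY select (or restrict collection to)
 * hypervisor PCs; treat those semantics as assumptions based on the names,
 * not as a documented interface.
 *
 * The struct below describes the mmaped area shared with the kernel, as this
 * program reads it: num_entries is the number of PCs recorded so far,
 * followed by the PC values themselves.
 */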

struct kcov_meta_buffer {
        unsigned long num_entries;
        unsigned long entry[];
};

/*
 * Helper that opens a fresh kcov fd and sets up a hypervisor trace buffer on
 * it; the fd is left open (neither closed nor returned).  Only referenced by
 * the commented-out calls in main().
 */
void create_buffer(void)
{
        int kcov_fd;

        kcov_fd = open("/sys/kernel/debug/kcov", O_RDWR);
        if (kcov_fd == -1)
                perror("open"), exit(1);

        printf("Opened fd %d\n", kcov_fd);

        /* Setup trace mode and trace size. */
        if (ioctl(kcov_fd, KCOV_INIT_HYP_TRACE, COVER_SIZE))
                perror("ioctl"), exit(1);
        printf("Initialized buffer\n");
}

int main(int argc, char **argv)
{
        int kcov_fd, kvm_fd;
        unsigned long n, i;
        unsigned int j, size;
        struct kcov_meta_buffer *cover;

        /* Experiment: allocate several hyp trace buffers back to back. */
        /* for (i = 0; i < 11; i++) */
        /*         create_buffer(); */

        /* A single fd descriptor allows coverage collection on a single
         * thread.
         */
        kcov_fd = open("/sys/kernel/debug/kcov", O_RDWR);
        if (kcov_fd == -1)
                perror("open"), exit(1);

        printf("Opened fd %d\n", kcov_fd);

        /* Setup trace mode and trace size. */
        if (ioctl(kcov_fd, KCOV_INIT_HYP_TRACE, COVER_SIZE))
                perror("ioctl"), exit(1);
        printf("Initialized buffer\n");

        /* Mmap buffer shared between kernel- and user-space. */
        cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                     PROT_READ | PROT_WRITE, MAP_SHARED, kcov_fd, 0);
        if ((void *)cover == MAP_FAILED)
                perror("mmap"), exit(1);
        printf("Mmaped buffer\n");

        /* if (ioctl(kcov_fd, KCOV_ENABLE, KCOV_TRACE_PC)) */
        if (ioctl(kcov_fd, KCOV_ENABLE,
                  KCOV_TRACE_PC | KCOV_ENABLE_HYP | KCOV_ENABLE_HYP_ONLY))
                perror("ioctl"), exit(1);
        /* printf("KCOV enabled\n"); */

        /* kvm_fd = open("/dev/kvm", O_RDWR); */

        /* Hope for a stage 2 fault. */
        /* void *dummy = malloc(100000000); */
        /* memset(dummy, 'a', 100000000); */

        /* n = get_nprocs(); */
        /* size = COVER_SIZE / n; */
        /* printf("Number CPUS: %d\n", n); */

        /* while (1) { */
        /*         for (i = 0; i < n; i++) { */
        /*                 cover = (struct kcov_meta_buffer *)(ptr + size); */
        /*                 printf("CPU %d got 0x%lx entries\n", i, cover->num_entries); */

        /*                 for (j = 0; j < cover->num_entries; j++) { */
        /*                         printf("0x%lx\n", cover->entry[j]); */
        /*                 } */

        /*                 /\* Reset coverage from the tail of the ioctl() call. *\/ */
        /*                 __atomic_store_n(&cover->num_entries, 0, __ATOMIC_RELAXED); */
        /*         } */
        /* } */
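
        /*
         * A hedged sketch of how the loop above might look if re-enabled.
         * It keeps the same assumption the original makes (the mmaped area
         * is split evenly across CPUs, one kcov_meta_buffer per slot) but
         * fixes the undefined "ptr" and the per-CPU offset.  The layout is
         * an assumption, not a confirmed interface, so it stays commented
         * out.
         */
        /* unsigned char *base = (unsigned char *)cover; */
        /* n = get_nprocs(); */
        /* size = (COVER_SIZE / n) * sizeof(unsigned long); */
        /* for (i = 0; i < n; i++) { */
        /*         struct kcov_meta_buffer *cpu = */
        /*                 (struct kcov_meta_buffer *)(base + i * size); */
        /*         printf("CPU %lu got 0x%lx entries\n", i, cpu->num_entries); */
        /*         for (j = 0; j < cpu->num_entries; j++) */
        /*                 printf("0x%lx\n", cpu->entry[j]); */
        /*         /\* Reset so the next pass starts from an empty buffer. *\/ */
        /*         __atomic_store_n(&cpu->num_entries, 0, __ATOMIC_RELAXED); */
        /* } */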

        /* Disable coverage collection for the current thread. After this call
         * coverage can be enabled for a different thread.
         */
        if (ioctl(kcov_fd, KCOV_DISABLE, 0))
                perror("ioctl"), exit(1);
        printf("KCOV disabled\n");
| printf("Got %lld entries\n",cover->num_entries); |
| for(int i = 0; i < cover->num_entries; ++i){ |
| printf("%llx\n", cover->entry[i]); |
| } |
| /*TODO print buffer */ |
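
        /*
         * If coverage were to be collected again on this fd, the recorded
         * count would normally be reset first, using the same idiom as the
         * commented-out loop above:
         * __atomic_store_n(&cover->num_entries, 0, __ATOMIC_RELAXED);
         */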

        /* Free resources. */
        /* if (munmap(cover, COVER_SIZE * sizeof(unsigned long))) */
        /*         perror("munmap"), exit(1); */

        /* if (close(kcov_fd)) */
        /*         perror("close"), exit(1); */
        /* printf("Closed buffer\n"); */

        return 0;
}