#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/types.h>
#include <sys/sysinfo.h>
#define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
#define KCOV_INIT_HYP_TRACE _IOR('c', 2, unsigned long)
#define KCOV_ENABLE _IO('c', 100)
#define KCOV_DISABLE _IO('c', 101)
#define COVER_SIZE (4096)
#define KCOV_TRACE_PC 0
#define KCOV_TRACE_CMP 1
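/* Note: KCOV_INIT_TRACE, KCOV_ENABLE and KCOV_DISABLE match the upstream
 * kcov ioctl set; KCOV_INIT_HYP_TRACE ('c', 2) is assumed to be a local
 * extension for hypervisor tracing, not part of the mainline ABI.
 */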
/* Header at the start of each per-CPU chunk of the shared buffer:
 * an entry count followed by the recorded PCs.
 */
struct kcov_meta_buffer {
	unsigned long num_entries;
	unsigned long entry[];
};
int main(int argc, char **argv)
{
	int fd;
	unsigned long n, i, j, n_ent;
	unsigned int size;
	struct kcov_meta_buffer *cover;
	void *ptr;
	/* A single file descriptor allows coverage collection on a single
	 * thread.
	 */
	fd = open("/sys/kernel/debug/kcov", O_RDWR);
	if (fd == -1)
		perror("open"), exit(1);
	/* Set up the trace mode and trace size. */
	if (ioctl(fd, KCOV_INIT_HYP_TRACE, COVER_SIZE))
		perror("ioctl"), exit(1);
	/* Mmap buffer shared between kernel- and user-space. */
	ptr = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		   PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ptr == MAP_FAILED)
		perror("mmap"), exit(1);
	n = get_nprocs();
	size = COVER_SIZE / n;
	printf("Number of CPUs: %lu\n", n);
	while (1) {
		for (i = 0; i < n; i++) {
			cover = (struct kcov_meta_buffer *)
				((unsigned long *)ptr + i * size);
			/* Read the count atomically, as in the standard
			 * kcov example, since the kernel updates it
			 * concurrently.
			 */
			n_ent = __atomic_load_n(&cover->num_entries,
						__ATOMIC_RELAXED);
			printf("CPU %lu got 0x%lx entries\n", i, n_ent);
			for (j = 0; j < n_ent; j++)
				printf("0x%lx\n", cover->entry[j]);
			/* Reset the entry count so this CPU's chunk can be
			 * refilled.
			 */
			__atomic_store_n(&cover->num_entries, 0,
					 __ATOMIC_RELAXED);
		}
	}
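	/* Not reached: the polling loop above never exits. The teardown
	 * below is kept to show the full sequence.
	 */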
	/* Disable coverage collection for the current thread. After this
	 * call coverage can be enabled for a different thread.
	 */
	if (ioctl(fd, KCOV_DISABLE, 0))
		perror("ioctl"), exit(1);
	/* Free resources. Unmap the original mapping, not 'cover', which
	 * points into the middle of the buffer.
	 */
	if (munmap(ptr, COVER_SIZE * sizeof(unsigned long)))
		perror("munmap"), exit(1);
	if (close(fd))
		perror("close"), exit(1);
	return 0;
}