Jiri Olsa | bd90517b | 2012-11-10 01:46:43 +0100 | [diff] [blame] | 1 | #include "evsel.h" |
| 2 | #include "tests.h" |
| 3 | #include "thread_map.h" |
| 4 | #include "cpumap.h" |
| 5 | #include "debug.h" |
| 6 | |
/*
 * Test that the "syscalls:sys_enter_openat" tracepoint event counts
 * openat(2) calls correctly on every online CPU: pin the current thread
 * to each CPU in turn, issue a known (per-CPU-distinct) number of
 * openat() calls there, then read back the per-cpu counter values and
 * compare them against the expected totals.
 *
 * Returns 0 on success, -1 on any setup failure or count mismatch.
 */
int test__openat_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	/*
	 * Base call count; each CPU performs nr_openat_calls + cpu calls,
	 * so adjacent CPUs' totals differ and a misattributed count shows up.
	 */
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	/* Monitor only the current process (pid = getpid()). */
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	/* NULL argument -> map of all online CPUs. */
	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (evsel == NULL) {
		/* Most likely cause: tracepoint filesystem not mounted. */
		if (tracefs_configured())
			pr_debug("is tracefs mounted on /sys/kernel/tracing?\n");
		else if (debugfs_configured())
			pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		else
			pr_debug("Neither tracefs or debugfs is enabled in this kernel\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		/* Pin this thread to exactly one CPU so the calls land there. */
		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 strerror_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
		/*
		 * Trigger the tracepoint ncalls times; the fd value is
		 * irrelevant (even a failed openat still enters the syscall).
		 */
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		/*
		 * Clear the bit again so the next iteration's mask contains
		 * only its own CPU.
		 */
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	/* Read each CPU's counter and verify it matches what we drove. */
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		/* Skip the same CPUs the driving loop skipped above. */
		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (evsel->counts->cpu[cpu].val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
			err = -1;
		}
	}

	perf_evsel__free_counts(evsel);
out_close_fd:
	/*
	 * NOTE(review): ncpus is passed as 1 here even though the event was
	 * opened on cpus->nr CPUs — looks like it may leave per-cpu fds
	 * unclosed; confirm against perf_evsel__close_fd()'s fd layout.
	 */
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}