/*
 * Performance events core code:
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/namei.h>
#include <linux/parser.h>
#include <linux/sched/clock.h>
#include <linux/sched/mm.h>
#include <linux/proc_ns.h>
#include <linux/mount.h>

#include "internal.h"

#include <asm/irq_regs.h>

typedef int (*remote_function_f)(void *);

struct remote_function_call {
	struct task_struct	*p;
	remote_function_f	func;
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		/* -EAGAIN */
		if (task_cpu(p) != smp_processor_id())
			return;

		/*
		 * Now that we're on right CPU with IRQs disabled, we can test
		 * if we hit the right task without races.
		 */

		tfc->ret = -ESRCH; /* No such (running) process */
		if (p != current)
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -EAGAIN,
	};
	int ret;

	do {
		ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
		if (!ret)
			ret = data.ret;
	} while (ret == -EAGAIN);

	return ret;
}

/**
 * cpu_function_call - call a function on the cpu
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

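/*
 * Lock a CPU context and, if given, a task context nested inside it.  The
 * CPU context lock is always taken first; perf_ctx_unlock() releases the
 * two locks in the reverse order.
 */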
static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

#define TASK_TOMBSTONE ((void *)-1L)

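/*
 * Events created through the in-kernel API have their ->owner set to
 * TASK_TOMBSTONE, which is what distinguishes them from user-space events.
 */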
static bool is_kernel_event(struct perf_event *event)
{
	return READ_ONCE(event->owner) == TASK_TOMBSTONE;
}

/*
 * On task ctx scheduling...
 *
 * When !ctx->nr_events a task context will not be scheduled. This means
 * we can disable the scheduler hooks (for performance) without leaving
 * pending task ctx state.
 *
 * This however results in two special cases:
 *
 *  - removing the last event from a task ctx; this is relatively straight
 *    forward and is done in __perf_remove_from_context.
 *
 *  - adding the first event to a task ctx; this is tricky because we cannot
 *    rely on ctx->is_active and therefore cannot use event_function_call().
 *    See perf_install_in_context().
 *
 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
 */

typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
			struct perf_event_context *, void *);

struct event_function_struct {
	struct perf_event *event;
	event_f func;
	void *data;
};

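/*
 * IPI target for event_function_call(): re-validate that we are still on
 * the CPU (and task) the event belongs to, then invoke @efs->func with the
 * cpuctx and task context locks held.
 */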
static int event_function(void *info)
{
	struct event_function_struct *efs = info;
	struct perf_event *event = efs->event;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	int ret = 0;

	WARN_ON_ONCE(!irqs_disabled());

	perf_ctx_lock(cpuctx, task_ctx);
	/*
	 * Since we do the IPI call without holding ctx->lock things can have
	 * changed, double check we hit the task we set out to hit.
	 */
	if (ctx->task) {
		if (ctx->task != current) {
			ret = -ESRCH;
			goto unlock;
		}

		/*
		 * We only use event_function_call() on established contexts,
		 * and event_function() is only ever called when active (or
		 * rather, we'll have bailed in task_function_call() or the
		 * above ctx->task != current test), therefore we must have
		 * ctx->is_active here.
		 */
		WARN_ON_ONCE(!ctx->is_active);
		/*
		 * And since we have ctx->is_active, cpuctx->task_ctx must
		 * match.
		 */
		WARN_ON_ONCE(task_ctx != ctx);
	} else {
		WARN_ON_ONCE(&cpuctx->ctx != ctx);
	}

	efs->func(event, cpuctx, ctx, efs->data);
unlock:
	perf_ctx_unlock(cpuctx, task_ctx);

	return ret;
}

static void event_function_call(struct perf_event *event, event_f func, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
	struct event_function_struct efs = {
		.event = event,
		.func = func,
		.data = data,
	};

	if (!event->parent) {
		/*
		 * If this is a !child event, we must hold ctx::mutex to
		 * stabilize the event->ctx relation. See
		 * perf_event_ctx_lock().
		 */
		lockdep_assert_held(&ctx->mutex);
	}

	if (!task) {
		cpu_function_call(event->cpu, event_function, &efs);
		return;
	}

	if (task == TASK_TOMBSTONE)
		return;

again:
	if (!task_function_call(task, event_function, &efs))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * Reload the task pointer, it might have been changed by
	 * a concurrent perf_event_context_sched_out().
	 */
	task = ctx->task;
	if (task == TASK_TOMBSTONE) {
		raw_spin_unlock_irq(&ctx->lock);
		return;
	}
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto again;
	}
	func(event, NULL, ctx, data);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Similar to event_function_call() + event_function(), but hard assumes IRQs
 * are already disabled and we're on the right CPU.
 */
static void event_function_local(struct perf_event *event, event_f func, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct task_struct *task = READ_ONCE(ctx->task);
	struct perf_event_context *task_ctx = NULL;

	WARN_ON_ONCE(!irqs_disabled());

	if (task) {
		if (task == TASK_TOMBSTONE)
			return;

		task_ctx = ctx;
	}

	perf_ctx_lock(cpuctx, task_ctx);

	task = ctx->task;
	if (task == TASK_TOMBSTONE)
		goto unlock;

	if (task) {
		/*
		 * We must be either inactive or active and the right task,
		 * otherwise we're screwed, since we cannot IPI to somewhere
		 * else.
		 */
		if (ctx->is_active) {
			if (WARN_ON_ONCE(task != current))
				goto unlock;

			if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
				goto unlock;
		}
	} else {
		WARN_ON_ONCE(&cpuctx->ctx != ctx);
	}

	func(event, cpuctx, ctx, data);
unlock:
	perf_ctx_unlock(cpuctx, task_ctx);
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP |\
		       PERF_FLAG_FD_CLOEXEC)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_TIME = 0x4,
	/* see ctx_resched() for details */
	EVENT_CPU = 0x8,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */

static void perf_sched_delayed(struct work_struct *work);
DEFINE_STATIC_KEY_FALSE(perf_sched_events);
static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
static DEFINE_MUTEX(perf_sched_mutex);
static atomic_t perf_sched_count;

static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_namespaces_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;
static cpumask_var_t perf_online_mask;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 2;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;

static int perf_sample_allowed_ns __read_mostly =
	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;

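/*
 * Recompute the per-sample time budget: a sample may consume at most
 * sysctl_perf_cpu_time_max_percent percent of the current sample period.
 */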
static void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	tmp = div_u64(tmp, 100);
	if (!tmp)
		tmp = 1;

	WRITE_ONCE(perf_sample_allowed_ns, tmp);
}

static int perf_rotate_context(struct perf_cpu_context *cpuctx);

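/*
 * sysctl handler for kernel.perf_event_max_sample_rate: derive the per-tick
 * sample budget and the sample period from the newly written rate.
 */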
int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	/*
	 * If throttling is disabled don't allow the write:
	 */
	if (sysctl_perf_cpu_time_max_percent == 100 ||
	    sysctl_perf_cpu_time_max_percent == 0)
		return -EINVAL;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}

int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	if (sysctl_perf_cpu_time_max_percent == 100 ||
	    sysctl_perf_cpu_time_max_percent == 0) {
		printk(KERN_WARNING
		       "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
		WRITE_ONCE(perf_sample_allowed_ns, 0);
	} else {
		update_perf_cpu_limits();
	}

	return 0;
}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done. This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static u64 __report_avg;
static u64 __report_allowed;

static void perf_duration_warn(struct irq_work *w)
{
	printk_ratelimited(KERN_INFO
		"perf: interrupt took too long (%lld > %lld), lowering "
		"kernel.perf_event_max_sample_rate to %d\n",
		__report_avg, __report_allowed,
		sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 max_len = READ_ONCE(perf_sample_allowed_ns);
	u64 running_len;
	u64 avg_len;
	u32 max;

	if (max_len == 0)
		return;

	/* Decay the counter by 1 average sample. */
	running_len = __this_cpu_read(running_sample_length);
	running_len -= running_len/NR_ACCUMULATED_SAMPLES;
	running_len += sample_len_ns;
	__this_cpu_write(running_sample_length, running_len);

	/*
	 * Note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
	 * from having to maintain a count.
	 */
	avg_len = running_len/NR_ACCUMULATED_SAMPLES;
	if (avg_len <= max_len)
		return;

	__report_avg = avg_len;
	__report_allowed = max_len;

	/*
	 * Compute a throttle threshold 25% below the current duration.
	 */
	avg_len += avg_len / 4;
	max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
	if (avg_len < max)
		max /= (u32)avg_len;
	else
		max = 1;

	WRITE_ONCE(perf_sample_allowed_ns, avg_len);
	WRITE_ONCE(max_samples_per_tick, max);

	sysctl_perf_event_sample_rate = max * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	if (!irq_work_queue(&perf_duration_work)) {
		early_printk("perf: interrupt took too long (%lld > %lld), lowering "
			     "kernel.perf_event_max_sample_rate to %d\n",
			     __report_avg, __report_allowed,
			     sysctl_perf_event_sample_rate);
	}
}

static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void) { }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline u64 perf_event_clock(struct perf_event *event)
{
	return event->clock();
}

#ifdef CONFIG_CGROUP_PERF

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/* @event doesn't care about cgroup */
	if (!event->cgrp)
		return true;

	/* wants specific cgroup scope but @cpuctx isn't associated with any */
	if (!cpuctx->cgrp)
		return false;

	/*
	 * Cgroup scoping is recursive.  An event enabled for a cgroup is
	 * also enabled for all its descendant cgroups.  If @cpuctx's
	 * cgroup is a descendant of @event's (the test covers identity
	 * case), it's a match.
	 */
	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
				    event->cgrp->css.cgroup);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

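/*
 * Fold the time elapsed since the last timestamp into the cgroup's
 * accumulated time and advance the timestamp to 'now'.
 */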
static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current, event->ctx);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task, ctx);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
static void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct list_head *list;
	unsigned long flags;

	/*
	 * Disable interrupts and preemption to keep this CPU's
	 * cgrp_cpuctx_entry from changing under us.
	 */
	local_irq_save(flags);

	list = this_cpu_ptr(&cgrp_cpuctx_list);
	list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
		WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);

		perf_ctx_lock(cpuctx, cpuctx->task_ctx);
		perf_pmu_disable(cpuctx->ctx.pmu);

		if (mode & PERF_CGROUP_SWOUT) {
			cpu_ctx_sched_out(cpuctx, EVENT_ALL);
			/*
			 * must not be done before ctxswout due
			 * to event_filter_match() in event_sched_out()
			 */
			cpuctx->cgrp = NULL;
		}

		if (mode & PERF_CGROUP_SWIN) {
			WARN_ON_ONCE(cpuctx->cgrp);
			/*
			 * set cgrp before ctxsw in to allow
			 * event_filter_match() to not have to pass
			 * task around
			 * we pass the cpuctx->ctx to perf_cgroup_from_task()
			 * because cgroup events are only per-cpu
			 */
			cpuctx->cgrp = perf_cgroup_from_task(task,
							     &cpuctx->ctx);
			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
		}
		perf_pmu_enable(cpuctx->ctx.pmu);
		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
	}

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	rcu_read_lock();
	/*
	 * we come here when we know perf_cgroup_events > 0
	 * we do not need to pass the ctx here because we know
	 * we are holding the rcu lock
	 */
	cgrp1 = perf_cgroup_from_task(task, NULL);
	cgrp2 = perf_cgroup_from_task(next, NULL);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);

	rcu_read_unlock();
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	rcu_read_lock();
	/*
	 * we come here when we know perf_cgroup_events > 0
	 * we do not need to pass the ctx here because we know
	 * we are holding the rcu lock
	 */
	cgrp1 = perf_cgroup_from_task(task, NULL);
	cgrp2 = perf_cgroup_from_task(prev, NULL);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * cgroup during ctxsw. Cgroup events were not scheduled
	 * out of ctxsw out if that was not the case.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);

	rcu_read_unlock();
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct fd f = fdget(fd);
	int ret = 0;

	if (!f.file)
		return -EBADF;

	css = css_tryget_online_from_dir(f.file->f_path.dentry,
					 &perf_event_cgrp_subsys);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fdput(f);
	return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_mark_enable() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}

/*
 * Update cpuctx->cgrp so that it is set when first cgroup event is added and
 * cleared when last cgroup event is removed.
 */
static inline void
list_update_cgroup_event(struct perf_event *event,
			 struct perf_event_context *ctx, bool add)
{
	struct perf_cpu_context *cpuctx;
	struct list_head *cpuctx_entry;

	if (!is_cgroup_event(event))
		return;

	if (add && ctx->nr_cgroups++)
		return;
	else if (!add && --ctx->nr_cgroups)
		return;
	/*
	 * Because cgroup events are always per-cpu events,
	 * this will always be called from the right CPU.
	 */
	cpuctx = __get_cpu_context(ctx);
	cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
	/* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU. */
	if (add) {
		list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
		if (perf_cgroup_from_task(current, ctx) == event->cgrp)
			cpuctx->cgrp = event->cgrp;
	} else {
		list_del(cpuctx_entry);
		cpuctx->cgrp = NULL;
	}
}

#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}

static inline void
list_update_cgroup_event(struct perf_event *event,
			 struct perf_event_context *ctx, bool add)
{
}

#endif

/*
 * set default to be dependent on timer tick just
 * like original code
 */
#define PERF_CPU_HRTIMER (1000 / HZ)
/*
 * function must be called with interrupts disabled
 */
static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
{
	struct perf_cpu_context *cpuctx;
	int rotations = 0;

	WARN_ON(!irqs_disabled());

	cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
	rotations = perf_rotate_context(cpuctx);

	raw_spin_lock(&cpuctx->hrtimer_lock);
	if (rotations)
		hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
	else
		cpuctx->hrtimer_active = 0;
	raw_spin_unlock(&cpuctx->hrtimer_lock);

	return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
}

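/*
 * Set up the per-CPU multiplexing hrtimer used to rotate events; the
 * rotation interval defaults to one tick unless the PMU supplies its own
 * hrtimer_interval_ms.  Software PMUs never multiplex and are skipped.
 */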
static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	u64 interval;

	/* no multiplexing needed for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return;

	/*
	 * check default is sane, if not set then force to
	 * default interval (1/tick)
	 */
	interval = pmu->hrtimer_interval_ms;
	if (interval < 1)
		interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;

	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);

	raw_spin_lock_init(&cpuctx->hrtimer_lock);
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	timer->function = perf_mux_hrtimer_handler;
}

static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	unsigned long flags;

	/* not for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return 0;

	raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
	if (!cpuctx->hrtimer_active) {
		cpuctx->hrtimer_active = 1;
		hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
	}
	raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);

	return 0;
}

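/*
 * PMU disable calls nest: the PMU is disabled on the first
 * perf_pmu_disable() and only re-enabled when the matching last
 * perf_pmu_enable() drops the per-CPU count back to zero.
 */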
void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

static DEFINE_PER_CPU(struct list_head, active_ctx_list);

/*
 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
 * perf_event_task_tick() are fully serialized because they're strictly cpu
 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
 * disabled, while perf_event_task_tick is called from IRQ context.
 */
static void perf_event_ctx_activate(struct perf_event_context *ctx)
{
	struct list_head *head = this_cpu_ptr(&active_ctx_list);

	WARN_ON(!irqs_disabled());

	WARN_ON(!list_empty(&ctx->active_ctx_list));

	list_add(&ctx->active_ctx_list, head);
}

static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
{
	WARN_ON(!irqs_disabled());

	WARN_ON(list_empty(&ctx->active_ctx_list));

	list_del_init(&ctx->active_ctx_list);
}

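/*
 * Context reference counting: get_ctx() must only be called on a context
 * that is already known to have a non-zero refcount (hence the WARN if
 * atomic_inc_not_zero() fails); put_ctx() frees the context via RCU once
 * the last reference is dropped.
 */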
static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx->task_ctx_data);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task && ctx->task != TASK_TOMBSTONE)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

/*
 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
 * perf_pmu_migrate_context() we need some magic.
 *
 * Those places that change perf_event::ctx will hold both
 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
 *
 * Lock ordering is by mutex address. There are two other sites where
 * perf_event_context::mutex nests and those are:
 *
 *  - perf_event_exit_task_context()	[ child , 0 ]
 *      perf_event_exit_event()
 *        put_event()			[ parent, 1 ]
 *
 *  - perf_event_init_context()		[ parent, 0 ]
 *      inherit_task_group()
 *        inherit_group()
 *          inherit_event()
 *            perf_event_alloc()
 *              perf_init_event()
 *                perf_try_init_event()	[ child , 1 ]
 *
 * While it appears there is an obvious deadlock here -- the parent and child
 * nesting levels are inverted between the two. This is in fact safe because
 * life-time rules separate them. That is an exiting task cannot fork, and a
 * spawning task cannot (yet) exit.
 *
 * But remember that these are parent<->child context relations, and
 * migration does not affect children, therefore these two orderings should not
 * interact.
 *
 * The change in perf_event::ctx does not affect children (as claimed above)
 * because the sys_perf_event_open() case will install a new event and break
 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
 * concerned with cpuctx and that doesn't have children.
 *
 * The places that change perf_event::ctx will issue:
 *
 *   perf_remove_from_context();
 *   synchronize_rcu();
 *   perf_install_in_context();
 *
 * to affect the change. The remove_from_context() + synchronize_rcu() should
 * quiesce the event, after which we can install it in the new location. This
 * means that only external vectors (perf_fops, prctl) can perturb the event
 * while in transit. Therefore all such accessors should also acquire
 * perf_event_context::mutex to serialize against this.
 *
 * However; because event->ctx can change while we're waiting to acquire
 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
 * function.
 *
 * Lock order:
 *    cred_guard_mutex
 *	task_struct::perf_event_mutex
 *	  perf_event_context::mutex
 *	    perf_event::child_mutex;
 *	      perf_event_context::lock
 *	    perf_event::mmap_mutex
 *	    mmap_sem
 */
static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
{
	struct perf_event_context *ctx;

again:
	rcu_read_lock();
	ctx = ACCESS_ONCE(event->ctx);
	if (!atomic_inc_not_zero(&ctx->refcount)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	mutex_lock_nested(&ctx->mutex, nesting);
	if (event->ctx != ctx) {
		mutex_unlock(&ctx->mutex);
		put_ctx(ctx);
		goto again;
	}

	return ctx;
}

static inline struct perf_event_context *
perf_event_ctx_lock(struct perf_event *event)
{
	return perf_event_ctx_lock_nested(event, 0);
}

static void perf_event_ctx_unlock(struct perf_event *event,
				  struct perf_event_context *ctx)
{
	mutex_unlock(&ctx->mutex);
	put_ctx(ctx);
}

/*
 * This must be done under the ctx->lock, such as to serialize against
 * context_equiv(), therefore we cannot call put_ctx() since that might end up
 * calling scheduler related locks and ctx->lock nests inside those.
 */
static __must_check struct perf_event_context *
unclone_ctx(struct perf_event_context *ctx)
{
	struct perf_event_context *parent_ctx = ctx->parent_ctx;

	lockdep_assert_held(&ctx->lock);

	if (parent_ctx)
		ctx->parent_ctx = NULL;
	ctx->generation++;

	return parent_ctx;
}

static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p,
			       enum pid_type type)
{
	u32 nr;
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	nr = __task_pid_nr_ns(p, type, event->ns);
	/* avoid -1 if it is idle thread or runs in another ns */
	if (!nr && !pid_alive(p))
		nr = -1;
	return nr;
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	return perf_event_pid_type(event, p, __PIDTYPE_TGID);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	return perf_event_pid_type(event, p, PIDTYPE_PID);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 *
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

retry:
	/*
	 * One of the few rules of preemptible RCU is that one cannot do
	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
	 * part of the read side critical section was irqs-enabled -- see
	 * rcu_read_unlock_special().
	 *
	 * Since ctx->lock nests under rq->lock we must ensure the entire read
	 * side critical section has interrupts disabled.
	 */
	local_irq_save(*flags);
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock(&ctx->lock);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock(&ctx->lock);
			rcu_read_unlock();
			local_irq_restore(*flags);
			goto retry;
		}

		if (ctx->task == TASK_TOMBSTONE ||
		    !atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock(&ctx->lock);
			ctx = NULL;
		} else {
			WARN_ON_ONCE(ctx->task != task);
		}
	}
	rcu_read_unlock();
	if (!ctx)
		local_irq_restore(*flags);
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

1369static void perf_unpin_context(struct perf_event_context *ctx)
1370{
1371 unsigned long flags;
1372
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001373 raw_spin_lock_irqsave(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001374 --ctx->pin_count;
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001375 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001376}
1377
Peter Zijlstraf67218c2009-11-23 11:37:27 +01001378/*
1379 * Update the record of the current time in a context.
1380 */
1381static void update_context_time(struct perf_event_context *ctx)
1382{
1383 u64 now = perf_clock();
1384
1385 ctx->time += now - ctx->timestamp;
1386 ctx->timestamp = now;
1387}
1388
Stephane Eranian41587552011-01-03 18:20:01 +02001389static u64 perf_event_time(struct perf_event *event)
1390{
1391 struct perf_event_context *ctx = event->ctx;
Stephane Eraniane5d13672011-02-14 11:20:01 +02001392
1393 if (is_cgroup_event(event))
1394 return perf_cgroup_event_time(event);
1395
Stephane Eranian41587552011-01-03 18:20:01 +02001396 return ctx ? ctx->time : 0;
1397}
1398
Peter Zijlstraf67218c2009-11-23 11:37:27 +01001399/*
 1400 * Update the total_time_enabled and total_time_running fields for an event.
1401 */
1402static void update_event_times(struct perf_event *event)
1403{
1404 struct perf_event_context *ctx = event->ctx;
1405 u64 run_end;
1406
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01001407 lockdep_assert_held(&ctx->lock);
1408
Peter Zijlstraf67218c2009-11-23 11:37:27 +01001409 if (event->state < PERF_EVENT_STATE_INACTIVE ||
1410 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
1411 return;
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01001412
Stephane Eraniane5d13672011-02-14 11:20:01 +02001413 /*
1414 * in cgroup mode, time_enabled represents
1415 * the time the event was enabled AND active
1416 * tasks were in the monitored cgroup. This is
1417 * independent of the activity of the context as
1418 * there may be a mix of cgroup and non-cgroup events.
1419 *
1420 * That is why we treat cgroup events differently
1421 * here.
1422 */
1423 if (is_cgroup_event(event))
Namhyung Kim46cd6a7f2012-01-20 10:12:46 +09001424 run_end = perf_cgroup_event_time(event);
Stephane Eraniane5d13672011-02-14 11:20:01 +02001425 else if (ctx->is_active)
1426 run_end = ctx->time;
Peter Zijlstraacd1d7c2009-11-23 15:00:36 +01001427 else
1428 run_end = event->tstamp_stopped;
1429
1430 event->total_time_enabled = run_end - event->tstamp_enabled;
Peter Zijlstraf67218c2009-11-23 11:37:27 +01001431
1432 if (event->state == PERF_EVENT_STATE_INACTIVE)
1433 run_end = event->tstamp_stopped;
1434 else
Stephane Eranian41587552011-01-03 18:20:01 +02001435 run_end = perf_event_time(event);
Peter Zijlstraf67218c2009-11-23 11:37:27 +01001436
1437 event->total_time_running = run_end - event->tstamp_running;
Stephane Eraniane5d13672011-02-14 11:20:01 +02001438
Peter Zijlstraf67218c2009-11-23 11:37:27 +01001439}
1440
Peter Zijlstra96c21a42010-05-11 16:19:10 +02001441/*
1442 * Update total_time_enabled and total_time_running for all events in a group.
1443 */
1444static void update_group_times(struct perf_event *leader)
1445{
1446 struct perf_event *event;
1447
1448 update_event_times(leader);
1449 list_for_each_entry(event, &leader->sibling_list, group_entry)
1450 update_event_times(event);
1451}
1452
Alexander Shishkin487f05e2017-01-19 18:43:30 +02001453static enum event_type_t get_event_type(struct perf_event *event)
1454{
1455 struct perf_event_context *ctx = event->ctx;
1456 enum event_type_t event_type;
1457
1458 lockdep_assert_held(&ctx->lock);
1459
Alexander Shishkin3bda69c2017-07-18 14:08:34 +03001460 /*
1461 * It's 'group type', really, because if our group leader is
1462 * pinned, so are we.
1463 */
1464 if (event->group_leader != event)
1465 event = event->group_leader;
1466
Alexander Shishkin487f05e2017-01-19 18:43:30 +02001467 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
1468 if (!ctx->task)
1469 event_type |= EVENT_CPU;
1470
1471 return event_type;
1472}
1473
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001474static struct list_head *
1475ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1476{
1477 if (event->attr.pinned)
1478 return &ctx->pinned_groups;
1479 else
1480 return &ctx->flexible_groups;
1481}
1482
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001483/*
 1484 * Add an event to the lists for its context.
1485 * Must be called with ctx->mutex and ctx->lock held.
1486 */
1487static void
1488list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1489{
Peter Zijlstrac994d612016-01-08 09:20:23 +01001490 lockdep_assert_held(&ctx->lock);
1491
Peter Zijlstra8a495422010-05-27 15:47:49 +02001492 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1493 event->attach_state |= PERF_ATTACH_CONTEXT;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001494
1495 /*
Peter Zijlstra8a495422010-05-27 15:47:49 +02001496 * If we're a standalone event or group leader, we go to the context
 1497 * list; group events are kept attached to the group so that
1498 * perf_group_detach can, at all times, locate all siblings.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001499 */
Peter Zijlstra8a495422010-05-27 15:47:49 +02001500 if (event->group_leader == event) {
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001501 struct list_head *list;
1502
David Carrillo-Cisneros4ff6a8d2016-08-17 13:55:05 -07001503 event->group_caps = event->event_caps;
Frederic Weisbeckerd6f962b2010-01-10 01:25:51 +01001504
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001505 list = ctx_group_list(event, ctx);
1506 list_add_tail(&event->group_entry, list);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001507 }
1508
David Carrillo-Cisnerosdb4a8352016-08-02 00:48:12 -07001509 list_update_cgroup_event(event, ctx, true);
Stephane Eraniane5d13672011-02-14 11:20:01 +02001510
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001511 list_add_rcu(&event->event_entry, &ctx->event_list);
1512 ctx->nr_events++;
1513 if (event->attr.inherit_stat)
1514 ctx->nr_stat++;
Peter Zijlstra5a3126d2013-10-07 17:12:48 +02001515
1516 ctx->generation++;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001517}
1518
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001519/*
Jiri Olsa0231bb52013-02-01 11:23:45 +01001520 * Initialize event state based on the perf_event_attr::disabled.
1521 */
1522static inline void perf_event__state_init(struct perf_event *event)
1523{
1524 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1525 PERF_EVENT_STATE_INACTIVE;
1526}
1527
Peter Zijlstraa7239682015-09-09 19:06:33 +02001528static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001529{
1530 int entry = sizeof(u64); /* value */
1531 int size = 0;
1532 int nr = 1;
1533
1534 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1535 size += sizeof(u64);
1536
1537 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1538 size += sizeof(u64);
1539
1540 if (event->attr.read_format & PERF_FORMAT_ID)
1541 entry += sizeof(u64);
1542
1543 if (event->attr.read_format & PERF_FORMAT_GROUP) {
Peter Zijlstraa7239682015-09-09 19:06:33 +02001544 nr += nr_siblings;
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001545 size += sizeof(u64);
1546 }
1547
1548 size += entry * nr;
1549 event->read_size = size;
1550}
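/*
 * Worked example (hypothetical numbers, added for illustration): with
 * read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID and two siblings, the
 * arithmetic above gives entry = 16 (value + id), nr = 3, plus one u64 for
 * the leading counter count, i.e. read_size = 8 + 3 * 16 = 56 bytes. That
 * matches the buffer layout userspace reads back:
 *
 *	{ u64 nr; { u64 value; u64 id; } cnt[nr]; }
 */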
1551
Peter Zijlstraa7239682015-09-09 19:06:33 +02001552static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001553{
1554 struct perf_sample_data *data;
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001555 u16 size = 0;
1556
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001557 if (sample_type & PERF_SAMPLE_IP)
1558 size += sizeof(data->ip);
1559
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02001560 if (sample_type & PERF_SAMPLE_ADDR)
1561 size += sizeof(data->addr);
1562
1563 if (sample_type & PERF_SAMPLE_PERIOD)
1564 size += sizeof(data->period);
1565
Andi Kleenc3feedf2013-01-24 16:10:28 +01001566 if (sample_type & PERF_SAMPLE_WEIGHT)
1567 size += sizeof(data->weight);
1568
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02001569 if (sample_type & PERF_SAMPLE_READ)
1570 size += event->read_size;
1571
Stephane Eraniand6be9ad2013-01-24 16:10:31 +01001572 if (sample_type & PERF_SAMPLE_DATA_SRC)
1573 size += sizeof(data->data_src.val);
1574
Andi Kleenfdfbbd02013-09-20 07:40:39 -07001575 if (sample_type & PERF_SAMPLE_TRANSACTION)
1576 size += sizeof(data->txn);
1577
Kan Liangfc7ce9c2017-08-28 20:52:49 -04001578 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1579 size += sizeof(data->phys_addr);
1580
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02001581 event->header_size = size;
1582}
1583
Peter Zijlstraa7239682015-09-09 19:06:33 +02001584/*
1585 * Called at perf_event creation and when events are attached/detached from a
1586 * group.
1587 */
1588static void perf_event__header_size(struct perf_event *event)
1589{
1590 __perf_event_read_size(event,
1591 event->group_leader->nr_siblings);
1592 __perf_event_header_size(event, event->attr.sample_type);
1593}
1594
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02001595static void perf_event__id_header_size(struct perf_event *event)
1596{
1597 struct perf_sample_data *data;
1598 u64 sample_type = event->attr.sample_type;
1599 u16 size = 0;
1600
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001601 if (sample_type & PERF_SAMPLE_TID)
1602 size += sizeof(data->tid_entry);
1603
1604 if (sample_type & PERF_SAMPLE_TIME)
1605 size += sizeof(data->time);
1606
Adrian Hunterff3d5272013-08-27 11:23:07 +03001607 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1608 size += sizeof(data->id);
1609
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001610 if (sample_type & PERF_SAMPLE_ID)
1611 size += sizeof(data->id);
1612
1613 if (sample_type & PERF_SAMPLE_STREAM_ID)
1614 size += sizeof(data->stream_id);
1615
1616 if (sample_type & PERF_SAMPLE_CPU)
1617 size += sizeof(data->cpu_entry);
1618
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02001619 event->id_header_size = size;
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001620}
1621
Peter Zijlstraa7239682015-09-09 19:06:33 +02001622static bool perf_event_validate_size(struct perf_event *event)
1623{
1624 /*
1625 * The values computed here will be over-written when we actually
1626 * attach the event.
1627 */
1628 __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
1629 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
1630 perf_event__id_header_size(event);
1631
1632 /*
1633 * Sum the lot; should not exceed the 64k limit we have on records.
1634 * Conservative limit to allow for callchains and other variable fields.
1635 */
1636 if (event->read_size + event->header_size +
1637 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
1638 return false;
1639
1640 return true;
1641}
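/*
 * Illustrative note (rough numbers, not from the original source): the
 * "+ 1" sibling above accounts for this event itself, which is not yet
 * attached to its prospective group. With PERF_FORMAT_GROUP |
 * PERF_FORMAT_ID each member contributes 16 bytes, so somewhere around a
 * thousand members the read_size alone approaches the 16k cut-off and the
 * event is rejected.
 */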
1642
Peter Zijlstra8a495422010-05-27 15:47:49 +02001643static void perf_group_attach(struct perf_event *event)
1644{
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001645 struct perf_event *group_leader = event->group_leader, *pos;
Peter Zijlstra8a495422010-05-27 15:47:49 +02001646
Peter Zijlstraa76a82a2017-01-26 16:39:55 +01001647 lockdep_assert_held(&event->ctx->lock);
1648
Peter Zijlstra74c33372010-10-15 11:40:29 +02001649 /*
1650 * We can have double attach due to group movement in perf_event_open.
1651 */
1652 if (event->attach_state & PERF_ATTACH_GROUP)
1653 return;
1654
Peter Zijlstra8a495422010-05-27 15:47:49 +02001655 event->attach_state |= PERF_ATTACH_GROUP;
1656
1657 if (group_leader == event)
1658 return;
1659
Peter Zijlstra652884f2015-01-23 11:20:10 +01001660 WARN_ON_ONCE(group_leader->ctx != event->ctx);
1661
David Carrillo-Cisneros4ff6a8d2016-08-17 13:55:05 -07001662 group_leader->group_caps &= event->event_caps;
Peter Zijlstra8a495422010-05-27 15:47:49 +02001663
1664 list_add_tail(&event->group_entry, &group_leader->sibling_list);
1665 group_leader->nr_siblings++;
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001666
1667 perf_event__header_size(group_leader);
1668
1669 list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1670 perf_event__header_size(pos);
Peter Zijlstra8a495422010-05-27 15:47:49 +02001671}
1672
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001673/*
 1674 * Remove an event from the lists for its context.
1675 * Must be called with ctx->mutex and ctx->lock held.
1676 */
1677static void
1678list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1679{
Peter Zijlstra652884f2015-01-23 11:20:10 +01001680 WARN_ON_ONCE(event->ctx != ctx);
1681 lockdep_assert_held(&ctx->lock);
1682
Peter Zijlstra8a495422010-05-27 15:47:49 +02001683 /*
1684 * We can have double detach due to exit/hot-unplug + close.
1685 */
1686 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001687 return;
Peter Zijlstra8a495422010-05-27 15:47:49 +02001688
1689 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1690
David Carrillo-Cisnerosdb4a8352016-08-02 00:48:12 -07001691 list_update_cgroup_event(event, ctx, false);
Stephane Eraniane5d13672011-02-14 11:20:01 +02001692
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001693 ctx->nr_events--;
1694 if (event->attr.inherit_stat)
1695 ctx->nr_stat--;
1696
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001697 list_del_rcu(&event->event_entry);
1698
Peter Zijlstra8a495422010-05-27 15:47:49 +02001699 if (event->group_leader == event)
1700 list_del_init(&event->group_entry);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001701
Peter Zijlstra96c21a42010-05-11 16:19:10 +02001702 update_group_times(event);
Stephane Eranianb2e74a22009-11-26 09:24:30 -08001703
1704 /*
1705 * If event was in error state, then keep it
1706 * that way, otherwise bogus counts will be
1707 * returned on read(). The only way to get out
1708 * of error state is by explicit re-enabling
1709 * of the event
1710 */
1711 if (event->state > PERF_EVENT_STATE_OFF)
1712 event->state = PERF_EVENT_STATE_OFF;
Peter Zijlstra5a3126d2013-10-07 17:12:48 +02001713
1714 ctx->generation++;
Peter Zijlstra050735b2010-05-11 11:51:53 +02001715}
1716
Peter Zijlstra8a495422010-05-27 15:47:49 +02001717static void perf_group_detach(struct perf_event *event)
Peter Zijlstra050735b2010-05-11 11:51:53 +02001718{
1719 struct perf_event *sibling, *tmp;
Peter Zijlstra8a495422010-05-27 15:47:49 +02001720 struct list_head *list = NULL;
1721
Peter Zijlstraa76a82a2017-01-26 16:39:55 +01001722 lockdep_assert_held(&event->ctx->lock);
1723
Peter Zijlstra8a495422010-05-27 15:47:49 +02001724 /*
1725 * We can have double detach due to exit/hot-unplug + close.
1726 */
1727 if (!(event->attach_state & PERF_ATTACH_GROUP))
1728 return;
1729
1730 event->attach_state &= ~PERF_ATTACH_GROUP;
1731
1732 /*
1733 * If this is a sibling, remove it from its group.
1734 */
1735 if (event->group_leader != event) {
1736 list_del_init(&event->group_entry);
1737 event->group_leader->nr_siblings--;
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001738 goto out;
Peter Zijlstra8a495422010-05-27 15:47:49 +02001739 }
1740
1741 if (!list_empty(&event->group_entry))
1742 list = &event->group_entry;
Peter Zijlstra2e2af502009-11-23 11:37:25 +01001743
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001744 /*
1745 * If this was a group event with sibling events then
1746 * upgrade the siblings to singleton events by adding them
Peter Zijlstra8a495422010-05-27 15:47:49 +02001747 * to whatever list we are on.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001748 */
1749 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
Peter Zijlstra8a495422010-05-27 15:47:49 +02001750 if (list)
1751 list_move_tail(&sibling->group_entry, list);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001752 sibling->group_leader = sibling;
Frederic Weisbeckerd6f962b2010-01-10 01:25:51 +01001753
1754 /* Inherit group flags from the previous leader */
David Carrillo-Cisneros4ff6a8d2016-08-17 13:55:05 -07001755 sibling->group_caps = event->group_caps;
Peter Zijlstra652884f2015-01-23 11:20:10 +01001756
1757 WARN_ON_ONCE(sibling->ctx != event->ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001758 }
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001759
1760out:
1761 perf_event__header_size(event->group_leader);
1762
1763 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1764 perf_event__header_size(tmp);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001765}
1766
Jiri Olsafadfe7b2014-08-01 14:33:02 +02001767static bool is_orphaned_event(struct perf_event *event)
1768{
Peter Zijlstraa69b0ca2016-02-24 18:45:44 +01001769 return event->state == PERF_EVENT_STATE_DEAD;
Jiri Olsafadfe7b2014-08-01 14:33:02 +02001770}
1771
Mark Rutland2c81a642016-06-14 16:10:41 +01001772static inline int __pmu_filter_match(struct perf_event *event)
Mark Rutland66eb5792015-05-13 17:12:23 +01001773{
1774 struct pmu *pmu = event->pmu;
1775 return pmu->filter_match ? pmu->filter_match(event) : 1;
1776}
1777
Mark Rutland2c81a642016-06-14 16:10:41 +01001778/*
1779 * Check whether we should attempt to schedule an event group based on
1780 * PMU-specific filtering. An event group can consist of HW and SW events,
1781 * potentially with a SW leader, so we must check all the filters, to
1782 * determine whether a group is schedulable:
1783 */
1784static inline int pmu_filter_match(struct perf_event *event)
1785{
1786 struct perf_event *child;
1787
1788 if (!__pmu_filter_match(event))
1789 return 0;
1790
1791 list_for_each_entry(child, &event->sibling_list, group_entry) {
1792 if (!__pmu_filter_match(child))
1793 return 0;
1794 }
1795
1796 return 1;
1797}
1798
Stephane Eranianfa66f072010-08-26 16:40:01 +02001799static inline int
1800event_filter_match(struct perf_event *event)
1801{
Peter Zijlstra0b8f1e22016-08-04 14:37:24 +02001802 return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
1803 perf_cgroup_match(event) && pmu_filter_match(event);
Stephane Eranianfa66f072010-08-26 16:40:01 +02001804}
1805
Stephane Eranian9ffcfa62010-10-20 15:25:01 +02001806static void
1807event_sched_out(struct perf_event *event,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001808 struct perf_cpu_context *cpuctx,
1809 struct perf_event_context *ctx)
1810{
Stephane Eranian41587552011-01-03 18:20:01 +02001811 u64 tstamp = perf_event_time(event);
Stephane Eranianfa66f072010-08-26 16:40:01 +02001812 u64 delta;
Peter Zijlstra652884f2015-01-23 11:20:10 +01001813
1814 WARN_ON_ONCE(event->ctx != ctx);
1815 lockdep_assert_held(&ctx->lock);
1816
Stephane Eranianfa66f072010-08-26 16:40:01 +02001817 /*
1818 * An event which could not be activated because of
1819 * filter mismatch still needs to have its timings
1820 * maintained, otherwise bogus information is return
1821 * via read() for time_enabled, time_running:
1822 */
Peter Zijlstra0b8f1e22016-08-04 14:37:24 +02001823 if (event->state == PERF_EVENT_STATE_INACTIVE &&
1824 !event_filter_match(event)) {
Stephane Eraniane5d13672011-02-14 11:20:01 +02001825 delta = tstamp - event->tstamp_stopped;
Stephane Eranianfa66f072010-08-26 16:40:01 +02001826 event->tstamp_running += delta;
Stephane Eranian41587552011-01-03 18:20:01 +02001827 event->tstamp_stopped = tstamp;
Stephane Eranianfa66f072010-08-26 16:40:01 +02001828 }
1829
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001830 if (event->state != PERF_EVENT_STATE_ACTIVE)
Stephane Eranian9ffcfa62010-10-20 15:25:01 +02001831 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001832
Alexander Shishkin44377272013-12-16 14:17:36 +02001833 perf_pmu_disable(event->pmu);
1834
Peter Zijlstra28a967c2016-02-24 18:45:46 +01001835 event->tstamp_stopped = tstamp;
1836 event->pmu->del(event, 0);
1837 event->oncpu = -1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001838 event->state = PERF_EVENT_STATE_INACTIVE;
1839 if (event->pending_disable) {
1840 event->pending_disable = 0;
1841 event->state = PERF_EVENT_STATE_OFF;
1842 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001843
1844 if (!is_software_event(event))
1845 cpuctx->active_oncpu--;
Mark Rutland2fde4f92015-01-07 15:01:54 +00001846 if (!--ctx->nr_active)
1847 perf_event_ctx_deactivate(ctx);
Peter Zijlstra0f5a2602011-11-16 14:38:16 +01001848 if (event->attr.freq && event->attr.sample_freq)
1849 ctx->nr_freq--;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001850 if (event->attr.exclusive || !cpuctx->active_oncpu)
1851 cpuctx->exclusive = 0;
Alexander Shishkin44377272013-12-16 14:17:36 +02001852
1853 perf_pmu_enable(event->pmu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001854}
1855
1856static void
1857group_sched_out(struct perf_event *group_event,
1858 struct perf_cpu_context *cpuctx,
1859 struct perf_event_context *ctx)
1860{
1861 struct perf_event *event;
Stephane Eranianfa66f072010-08-26 16:40:01 +02001862 int state = group_event->state;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001863
Mark Rutland3f005e72016-07-26 18:12:21 +01001864 perf_pmu_disable(ctx->pmu);
1865
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001866 event_sched_out(group_event, cpuctx, ctx);
1867
1868 /*
1869 * Schedule out siblings (if any):
1870 */
1871 list_for_each_entry(event, &group_event->sibling_list, group_entry)
1872 event_sched_out(event, cpuctx, ctx);
1873
Mark Rutland3f005e72016-07-26 18:12:21 +01001874 perf_pmu_enable(ctx->pmu);
1875
Stephane Eranianfa66f072010-08-26 16:40:01 +02001876 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001877 cpuctx->exclusive = 0;
1878}
1879
Peter Zijlstra45a0e072016-01-26 13:09:48 +01001880#define DETACH_GROUP 0x01UL
Peter Zijlstra00179602015-11-30 16:26:35 +01001881
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001882/*
1883 * Cross CPU call to remove a performance event
1884 *
1885 * We disable the event on the hardware level first. After that we
1886 * remove it from the context list.
1887 */
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01001888static void
1889__perf_remove_from_context(struct perf_event *event,
1890 struct perf_cpu_context *cpuctx,
1891 struct perf_event_context *ctx,
1892 void *info)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001893{
Peter Zijlstra45a0e072016-01-26 13:09:48 +01001894 unsigned long flags = (unsigned long)info;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001895
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001896 event_sched_out(event, cpuctx, ctx);
Peter Zijlstra45a0e072016-01-26 13:09:48 +01001897 if (flags & DETACH_GROUP)
Peter Zijlstra46ce0fe2014-05-02 16:56:01 +02001898 perf_group_detach(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001899 list_del_event(event, ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001900
Peter Zijlstra39a43642016-01-11 12:46:35 +01001901 if (!ctx->nr_events && ctx->is_active) {
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001902 ctx->is_active = 0;
Peter Zijlstra39a43642016-01-11 12:46:35 +01001903 if (ctx->task) {
1904 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
1905 cpuctx->task_ctx = NULL;
1906 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001907 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001908}
1909
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001910/*
1911 * Remove the event from a task's (or a CPU's) list of events.
1912 *
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001913 * If event->ctx is a cloned context, callers must make sure that
1914 * every task struct that event->ctx->task could possibly point to
1915 * remains valid. This is OK when called from perf_release since
1916 * that only calls us on the top-level context, which can't be a clone.
1917 * When called from perf_event_exit_task, it's OK because the
1918 * context has been detached from its task.
1919 */
Peter Zijlstra45a0e072016-01-26 13:09:48 +01001920static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001921{
Peter Zijlstraa76a82a2017-01-26 16:39:55 +01001922 struct perf_event_context *ctx = event->ctx;
1923
1924 lockdep_assert_held(&ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001925
Peter Zijlstra45a0e072016-01-26 13:09:48 +01001926 event_function_call(event, __perf_remove_from_context, (void *)flags);
Peter Zijlstraa76a82a2017-01-26 16:39:55 +01001927
1928 /*
1929 * The above event_function_call() can NO-OP when it hits
1930 * TASK_TOMBSTONE. In that case we must already have been detached
1931 * from the context (by perf_event_exit_event()) but the grouping
 1932 * might still be intact.
1933 */
1934 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1935 if ((flags & DETACH_GROUP) &&
1936 (event->attach_state & PERF_ATTACH_GROUP)) {
1937 /*
1938 * Since in that case we cannot possibly be scheduled, simply
1939 * detach now.
1940 */
1941 raw_spin_lock_irq(&ctx->lock);
1942 perf_group_detach(event);
1943 raw_spin_unlock_irq(&ctx->lock);
1944 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001945}
1946
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001947/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001948 * Cross CPU call to disable a performance event
1949 */
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01001950static void __perf_event_disable(struct perf_event *event,
1951 struct perf_cpu_context *cpuctx,
1952 struct perf_event_context *ctx,
1953 void *info)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001954{
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01001955 if (event->state < PERF_EVENT_STATE_INACTIVE)
1956 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001957
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01001958 update_context_time(ctx);
1959 update_cgrp_time_from_event(event);
1960 update_group_times(event);
1961 if (event == event->group_leader)
1962 group_sched_out(event, cpuctx, ctx);
1963 else
1964 event_sched_out(event, cpuctx, ctx);
1965 event->state = PERF_EVENT_STATE_OFF;
Peter Zijlstra7b648012015-12-03 18:35:21 +01001966}
1967
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001968/*
 1969 * Disable an event.
1970 *
1971 * If event->ctx is a cloned context, callers must make sure that
1972 * every task struct that event->ctx->task could possibly point to
 1973 * remains valid. This condition is satisfied when called through
1974 * perf_event_for_each_child or perf_event_for_each because they
1975 * hold the top-level event's child_mutex, so any descendant that
Peter Zijlstra8ba289b2016-01-26 13:06:56 +01001976 * goes to exit will block in perf_event_exit_event().
1977 *
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001978 * When called from perf_pending_event it's OK because event->ctx
1979 * is the current context on this CPU and preemption is disabled,
1980 * hence we can't get into perf_event_task_sched_out for this context.
1981 */
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01001982static void _perf_event_disable(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001983{
1984 struct perf_event_context *ctx = event->ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001985
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001986 raw_spin_lock_irq(&ctx->lock);
Peter Zijlstra7b648012015-12-03 18:35:21 +01001987 if (event->state <= PERF_EVENT_STATE_OFF) {
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001988 raw_spin_unlock_irq(&ctx->lock);
Peter Zijlstra7b648012015-12-03 18:35:21 +01001989 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001990 }
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001991 raw_spin_unlock_irq(&ctx->lock);
Peter Zijlstra7b648012015-12-03 18:35:21 +01001992
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01001993 event_function_call(event, __perf_event_disable, NULL);
1994}
1995
1996void perf_event_disable_local(struct perf_event *event)
1997{
1998 event_function_local(event, __perf_event_disable, NULL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001999}
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01002000
2001/*
2002 * Strictly speaking kernel users cannot create groups and therefore this
2003 * interface does not need the perf_event_ctx_lock() magic.
2004 */
2005void perf_event_disable(struct perf_event *event)
2006{
2007 struct perf_event_context *ctx;
2008
2009 ctx = perf_event_ctx_lock(event);
2010 _perf_event_disable(event);
2011 perf_event_ctx_unlock(event, ctx);
2012}
Robert Richterdcfce4a2011-10-11 17:11:08 +02002013EXPORT_SYMBOL_GPL(perf_event_disable);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002014
Jiri Olsa5aab90c2016-10-26 11:48:24 +02002015void perf_event_disable_inatomic(struct perf_event *event)
2016{
2017 event->pending_disable = 1;
2018 irq_work_queue(&event->pending);
2019}
2020
Stephane Eraniane5d13672011-02-14 11:20:01 +02002021static void perf_set_shadow_time(struct perf_event *event,
2022 struct perf_event_context *ctx,
2023 u64 tstamp)
2024{
2025 /*
2026 * use the correct time source for the time snapshot
2027 *
2028 * We could get by without this by leveraging the
2029 * fact that to get to this function, the caller
2030 * has most likely already called update_context_time()
2031 * and update_cgrp_time_xx() and thus both timestamp
2032 * are identical (or very close). Given that tstamp is,
2033 * already adjusted for cgroup, we could say that:
2034 * tstamp - ctx->timestamp
2035 * is equivalent to
2036 * tstamp - cgrp->timestamp.
2037 *
2038 * Then, in perf_output_read(), the calculation would
2039 * work with no changes because:
2040 * - event is guaranteed scheduled in
2041 * - no scheduled out in between
2042 * - thus the timestamp would be the same
2043 *
2044 * But this is a bit hairy.
2045 *
2046 * So instead, we have an explicit cgroup call to remain
 2047 * within the time source all along. We believe it
2048 * is cleaner and simpler to understand.
2049 */
2050 if (is_cgroup_event(event))
2051 perf_cgroup_set_shadow_time(event, tstamp);
2052 else
2053 event->shadow_ctx_time = tstamp - ctx->timestamp;
2054}
2055
Peter Zijlstra4fe757d2011-02-15 22:26:07 +01002056#define MAX_INTERRUPTS (~0ULL)
2057
2058static void perf_log_throttle(struct perf_event *event, int enable);
Alexander Shishkinec0d7722015-01-14 14:18:23 +02002059static void perf_log_itrace_start(struct perf_event *event);
Peter Zijlstra4fe757d2011-02-15 22:26:07 +01002060
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002061static int
Stephane Eranian9ffcfa62010-10-20 15:25:01 +02002062event_sched_in(struct perf_event *event,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002063 struct perf_cpu_context *cpuctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01002064 struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002065{
Stephane Eranian41587552011-01-03 18:20:01 +02002066 u64 tstamp = perf_event_time(event);
Alexander Shishkin44377272013-12-16 14:17:36 +02002067 int ret = 0;
Stephane Eranian41587552011-01-03 18:20:01 +02002068
Peter Zijlstra63342412014-05-05 11:49:16 +02002069 lockdep_assert_held(&ctx->lock);
2070
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002071 if (event->state <= PERF_EVENT_STATE_OFF)
2072 return 0;
2073
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02002074 WRITE_ONCE(event->oncpu, smp_processor_id());
2075 /*
2076 * Order event::oncpu write to happen before the ACTIVE state
2077 * is visible.
2078 */
2079 smp_wmb();
2080 WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE);
Peter Zijlstra4fe757d2011-02-15 22:26:07 +01002081
2082 /*
 2083 * Unthrottle events: since we were just scheduled in, we might have
 2084 * missed several ticks already; also, for a heavily scheduling task
 2085 * there is little guarantee it'll get a tick in a timely manner.
2086 */
2087 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
2088 perf_log_throttle(event, 1);
2089 event->hw.interrupts = 0;
2090 }
2091
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002092 /*
2093 * The new state must be visible before we turn it on in the hardware:
2094 */
2095 smp_wmb();
2096
Alexander Shishkin44377272013-12-16 14:17:36 +02002097 perf_pmu_disable(event->pmu);
2098
Shaohua Li72f669c2015-02-05 15:55:31 -08002099 perf_set_shadow_time(event, ctx, tstamp);
2100
Alexander Shishkinec0d7722015-01-14 14:18:23 +02002101 perf_log_itrace_start(event);
2102
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02002103 if (event->pmu->add(event, PERF_EF_START)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002104 event->state = PERF_EVENT_STATE_INACTIVE;
2105 event->oncpu = -1;
Alexander Shishkin44377272013-12-16 14:17:36 +02002106 ret = -EAGAIN;
2107 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002108 }
2109
Peter Zijlstra00a29162015-07-27 10:35:07 +02002110 event->tstamp_running += tstamp - event->tstamp_stopped;
2111
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002112 if (!is_software_event(event))
2113 cpuctx->active_oncpu++;
Mark Rutland2fde4f92015-01-07 15:01:54 +00002114 if (!ctx->nr_active++)
2115 perf_event_ctx_activate(ctx);
Peter Zijlstra0f5a2602011-11-16 14:38:16 +01002116 if (event->attr.freq && event->attr.sample_freq)
2117 ctx->nr_freq++;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002118
2119 if (event->attr.exclusive)
2120 cpuctx->exclusive = 1;
2121
Alexander Shishkin44377272013-12-16 14:17:36 +02002122out:
2123 perf_pmu_enable(event->pmu);
2124
2125 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002126}
2127
2128static int
2129group_sched_in(struct perf_event *group_event,
2130 struct perf_cpu_context *cpuctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01002131 struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002132{
Lin Ming6bde9b62010-04-23 13:56:00 +08002133 struct perf_event *event, *partial_group = NULL;
Peter Zijlstra4a234592014-02-24 12:43:31 +01002134 struct pmu *pmu = ctx->pmu;
Stephane Eraniand7842da2010-10-20 15:25:01 +02002135 u64 now = ctx->time;
2136 bool simulate = false;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002137
2138 if (group_event->state == PERF_EVENT_STATE_OFF)
2139 return 0;
2140
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07002141 pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
Lin Ming6bde9b62010-04-23 13:56:00 +08002142
Stephane Eranian9ffcfa62010-10-20 15:25:01 +02002143 if (event_sched_in(group_event, cpuctx, ctx)) {
Peter Zijlstraad5133b2010-06-15 12:22:39 +02002144 pmu->cancel_txn(pmu);
Peter Zijlstra272325c2015-04-15 11:41:58 +02002145 perf_mux_hrtimer_restart(cpuctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002146 return -EAGAIN;
Stephane Eranian90151c352010-05-25 16:23:10 +02002147 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002148
2149 /*
2150 * Schedule in siblings as one group (if any):
2151 */
2152 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
Stephane Eranian9ffcfa62010-10-20 15:25:01 +02002153 if (event_sched_in(event, cpuctx, ctx)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002154 partial_group = event;
2155 goto group_error;
2156 }
2157 }
2158
Stephane Eranian9ffcfa62010-10-20 15:25:01 +02002159 if (!pmu->commit_txn(pmu))
Paul Mackerras6e851582010-05-08 20:58:00 +10002160 return 0;
Stephane Eranian9ffcfa62010-10-20 15:25:01 +02002161
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002162group_error:
2163 /*
2164 * Groups can be scheduled in as one unit only, so undo any
2165 * partial group before returning:
Stephane Eraniand7842da2010-10-20 15:25:01 +02002166 * The events up to the failed event are scheduled out normally,
2167 * tstamp_stopped will be updated.
2168 *
2169 * The failed events and the remaining siblings need to have
 2170 * their timings updated as if they had gone through event_sched_in()
2171 * and event_sched_out(). This is required to get consistent timings
2172 * across the group. This also takes care of the case where the group
2173 * could never be scheduled by ensuring tstamp_stopped is set to mark
2174 * the time the event was actually stopped, such that time delta
2175 * calculation in update_event_times() is correct.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002176 */
2177 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
2178 if (event == partial_group)
Stephane Eraniand7842da2010-10-20 15:25:01 +02002179 simulate = true;
2180
2181 if (simulate) {
2182 event->tstamp_running += now - event->tstamp_stopped;
2183 event->tstamp_stopped = now;
2184 } else {
2185 event_sched_out(event, cpuctx, ctx);
2186 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002187 }
Stephane Eranian9ffcfa62010-10-20 15:25:01 +02002188 event_sched_out(group_event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002189
Peter Zijlstraad5133b2010-06-15 12:22:39 +02002190 pmu->cancel_txn(pmu);
Stephane Eranian90151c352010-05-25 16:23:10 +02002191
Peter Zijlstra272325c2015-04-15 11:41:58 +02002192 perf_mux_hrtimer_restart(cpuctx);
Stephane Eranian9e630202013-04-03 14:21:33 +02002193
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002194 return -EAGAIN;
2195}
2196
2197/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002198 * Work out whether we can put this event group on the CPU now.
2199 */
2200static int group_can_go_on(struct perf_event *event,
2201 struct perf_cpu_context *cpuctx,
2202 int can_add_hw)
2203{
2204 /*
2205 * Groups consisting entirely of software events can always go on.
2206 */
David Carrillo-Cisneros4ff6a8d2016-08-17 13:55:05 -07002207 if (event->group_caps & PERF_EV_CAP_SOFTWARE)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002208 return 1;
2209 /*
2210 * If an exclusive group is already on, no other hardware
2211 * events can go on.
2212 */
2213 if (cpuctx->exclusive)
2214 return 0;
2215 /*
2216 * If this group is exclusive and there are already
2217 * events on the CPU, it can't go on.
2218 */
2219 if (event->attr.exclusive && cpuctx->active_oncpu)
2220 return 0;
2221 /*
2222 * Otherwise, try to add it if all previous groups were able
2223 * to go on.
2224 */
2225 return can_add_hw;
2226}
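/*
 * For illustration, restating the checks above: a group consisting only of
 * software events can always be added regardless of PMU occupancy; a
 * hardware group marked attr.exclusive is refused while any hardware event
 * is already active on this CPU; and once an exclusive group is on, nothing
 * else may join until it is scheduled out again.
 */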
2227
Peter Zijlstra9b231d92017-08-03 15:42:09 +02002228/*
2229 * Complement to update_event_times(). This computes the tstamp_* values to
2230 * continue 'enabled' state from @now, and effectively discards the time
2231 * between the prior tstamp_stopped and now (as we were in the OFF state, or
2232 * just switched (context) time base).
2233 *
2234 * This further assumes '@event->state == INACTIVE' (we just came from OFF) and
2235 * cannot have been scheduled in yet. And going into INACTIVE state means
2236 * '@event->tstamp_stopped = @now'.
2237 *
2238 * Thus given the rules of update_event_times():
2239 *
2240 * total_time_enabled = tstamp_stopped - tstamp_enabled
2241 * total_time_running = tstamp_stopped - tstamp_running
2242 *
2243 * We can insert 'tstamp_stopped == now' and reverse them to compute new
2244 * tstamp_* values.
2245 */
2246static void __perf_event_enable_time(struct perf_event *event, u64 now)
2247{
2248 WARN_ON_ONCE(event->state != PERF_EVENT_STATE_INACTIVE);
2249
2250 event->tstamp_stopped = now;
2251 event->tstamp_enabled = now - event->total_time_enabled;
2252 event->tstamp_running = now - event->total_time_running;
2253}
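/*
 * Worked example (hypothetical numbers): an event with accumulated
 * total_time_enabled = 100 and total_time_running = 70 re-enabled at
 * now = 1000 gets tstamp_enabled = 900, tstamp_running = 930 and
 * tstamp_stopped = 1000. A subsequent update_event_times() at t = 1000
 * recomputes 1000 - 900 = 100 and 1000 - 930 = 70, i.e. the totals carry
 * over and the time spent in the OFF state is discarded, as intended.
 */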
2254
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002255static void add_event_to_ctx(struct perf_event *event,
2256 struct perf_event_context *ctx)
2257{
Stephane Eranian41587552011-01-03 18:20:01 +02002258 u64 tstamp = perf_event_time(event);
2259
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002260 list_add_event(event, ctx);
Peter Zijlstra8a495422010-05-27 15:47:49 +02002261 perf_group_attach(event);
Peter Zijlstra9b231d92017-08-03 15:42:09 +02002262 /*
2263 * We can be called with event->state == STATE_OFF when we create with
2264 * .disabled = 1. In that case the IOC_ENABLE will call this function.
2265 */
2266 if (event->state == PERF_EVENT_STATE_INACTIVE)
2267 __perf_event_enable_time(event, tstamp);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002268}
2269
Peter Zijlstrabd2afa42016-02-24 18:45:49 +01002270static void ctx_sched_out(struct perf_event_context *ctx,
2271 struct perf_cpu_context *cpuctx,
2272 enum event_type_t event_type);
Peter Zijlstra2c29ef02011-04-09 21:17:44 +02002273static void
2274ctx_sched_in(struct perf_event_context *ctx,
2275 struct perf_cpu_context *cpuctx,
2276 enum event_type_t event_type,
2277 struct task_struct *task);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002278
Peter Zijlstrabd2afa42016-02-24 18:45:49 +01002279static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002280 struct perf_event_context *ctx,
2281 enum event_type_t event_type)
Peter Zijlstrabd2afa42016-02-24 18:45:49 +01002282{
2283 if (!cpuctx->task_ctx)
2284 return;
2285
2286 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2287 return;
2288
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002289 ctx_sched_out(ctx, cpuctx, event_type);
Peter Zijlstrabd2afa42016-02-24 18:45:49 +01002290}
2291
Peter Zijlstradce58552011-04-09 21:17:46 +02002292static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2293 struct perf_event_context *ctx,
2294 struct task_struct *task)
2295{
2296 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
2297 if (ctx)
2298 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
2299 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
2300 if (ctx)
2301 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
2302}
2303
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002304/*
2305 * We want to maintain the following priority of scheduling:
2306 * - CPU pinned (EVENT_CPU | EVENT_PINNED)
2307 * - task pinned (EVENT_PINNED)
2308 * - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE)
2309 * - task flexible (EVENT_FLEXIBLE).
2310 *
2311 * In order to avoid unscheduling and scheduling back in everything every
2312 * time an event is added, only do it for the groups of equal priority and
2313 * below.
2314 *
2315 * This can be called after a batch operation on task events, in which case
2316 * event_type is a bit mask of the types of events involved. For CPU events,
2317 * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE.
2318 */
Peter Zijlstra3e349502016-01-08 10:01:18 +01002319static void ctx_resched(struct perf_cpu_context *cpuctx,
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002320 struct perf_event_context *task_ctx,
2321 enum event_type_t event_type)
Peter Zijlstra00179602015-11-30 16:26:35 +01002322{
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002323 enum event_type_t ctx_event_type = event_type & EVENT_ALL;
2324 bool cpu_event = !!(event_type & EVENT_CPU);
2325
2326 /*
2327 * If pinned groups are involved, flexible groups also need to be
2328 * scheduled out.
2329 */
2330 if (event_type & EVENT_PINNED)
2331 event_type |= EVENT_FLEXIBLE;
2332
Peter Zijlstra3e349502016-01-08 10:01:18 +01002333 perf_pmu_disable(cpuctx->ctx.pmu);
2334 if (task_ctx)
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002335 task_ctx_sched_out(cpuctx, task_ctx, event_type);
2336
2337 /*
2338 * Decide which cpu ctx groups to schedule out based on the types
2339 * of events that caused rescheduling:
2340 * - EVENT_CPU: schedule out corresponding groups;
2341 * - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups;
2342 * - otherwise, do nothing more.
2343 */
2344 if (cpu_event)
2345 cpu_ctx_sched_out(cpuctx, ctx_event_type);
2346 else if (ctx_event_type & EVENT_PINNED)
2347 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2348
Peter Zijlstra3e349502016-01-08 10:01:18 +01002349 perf_event_sched_in(cpuctx, task_ctx, current);
2350 perf_pmu_enable(cpuctx->ctx.pmu);
Peter Zijlstra00179602015-11-30 16:26:35 +01002351}
2352
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002353/*
2354 * Cross CPU call to install and enable a performance event
2355 *
Peter Zijlstraa0963092016-02-24 18:45:50 +01002356 * Very similar to remote_function() + event_function() but cannot assume that
2357 * things like ctx->is_active and cpuctx->task_ctx are set.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002358 */
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002359static int __perf_install_in_context(void *info)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002360{
Peter Zijlstraa0963092016-02-24 18:45:50 +01002361 struct perf_event *event = info;
2362 struct perf_event_context *ctx = event->ctx;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002363 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
Peter Zijlstra2c29ef02011-04-09 21:17:44 +02002364 struct perf_event_context *task_ctx = cpuctx->task_ctx;
Peter Zijlstra63cae122016-12-09 14:59:00 +01002365 bool reprogram = true;
Peter Zijlstraa0963092016-02-24 18:45:50 +01002366 int ret = 0;
Peter Zijlstra2c29ef02011-04-09 21:17:44 +02002367
Peter Zijlstra63b6da32016-01-14 16:05:37 +01002368 raw_spin_lock(&cpuctx->ctx.lock);
Peter Zijlstra39a43642016-01-11 12:46:35 +01002369 if (ctx->task) {
Peter Zijlstrab58f6b02011-06-07 00:23:28 +02002370 raw_spin_lock(&ctx->lock);
2371 task_ctx = ctx;
Peter Zijlstraa0963092016-02-24 18:45:50 +01002372
Peter Zijlstra63cae122016-12-09 14:59:00 +01002373 reprogram = (ctx->task == current);
2374
2375 /*
2376 * If the task is running, it must be running on this CPU,
2377 * otherwise we cannot reprogram things.
2378 *
2379 * If its not running, we don't care, ctx->lock will
2380 * serialize against it becoming runnable.
2381 */
2382 if (task_curr(ctx->task) && !reprogram) {
Peter Zijlstraa0963092016-02-24 18:45:50 +01002383 ret = -ESRCH;
Peter Zijlstra63b6da32016-01-14 16:05:37 +01002384 goto unlock;
Peter Zijlstraa0963092016-02-24 18:45:50 +01002385 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002386
Peter Zijlstra63cae122016-12-09 14:59:00 +01002387 WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
Peter Zijlstra63b6da32016-01-14 16:05:37 +01002388 } else if (task_ctx) {
2389 raw_spin_lock(&task_ctx->lock);
Peter Zijlstrab58f6b02011-06-07 00:23:28 +02002390 }
2391
Peter Zijlstra63cae122016-12-09 14:59:00 +01002392 if (reprogram) {
Peter Zijlstraa0963092016-02-24 18:45:50 +01002393 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2394 add_event_to_ctx(event, ctx);
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002395 ctx_resched(cpuctx, task_ctx, get_event_type(event));
Peter Zijlstraa0963092016-02-24 18:45:50 +01002396 } else {
2397 add_event_to_ctx(event, ctx);
2398 }
2399
Peter Zijlstra63b6da32016-01-14 16:05:37 +01002400unlock:
Peter Zijlstra2c29ef02011-04-09 21:17:44 +02002401 perf_ctx_unlock(cpuctx, task_ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002402
Peter Zijlstraa0963092016-02-24 18:45:50 +01002403 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002404}
2405
2406/*
Peter Zijlstraa0963092016-02-24 18:45:50 +01002407 * Attach a performance event to a context.
2408 *
2409 * Very similar to event_function_call, see comment there.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002410 */
2411static void
2412perf_install_in_context(struct perf_event_context *ctx,
2413 struct perf_event *event,
2414 int cpu)
2415{
Peter Zijlstraa0963092016-02-24 18:45:50 +01002416 struct task_struct *task = READ_ONCE(ctx->task);
Peter Zijlstra39a43642016-01-11 12:46:35 +01002417
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002418 lockdep_assert_held(&ctx->mutex);
2419
Yan, Zheng0cda4c02012-06-15 14:31:33 +08002420 if (event->cpu != -1)
2421 event->cpu = cpu;
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02002422
Peter Zijlstra0b8f1e22016-08-04 14:37:24 +02002423 /*
2424 * Ensures that if we can observe event->ctx, both the event and ctx
2425 * will be 'complete'. See perf_iterate_sb_cpu().
2426 */
2427 smp_store_release(&event->ctx, ctx);
2428
Peter Zijlstraa0963092016-02-24 18:45:50 +01002429 if (!task) {
2430 cpu_function_call(cpu, __perf_install_in_context, event);
Peter Zijlstra63b6da32016-01-14 16:05:37 +01002431 return;
2432 }
Peter Zijlstra6f932e52016-02-24 18:45:43 +01002433
Peter Zijlstraa0963092016-02-24 18:45:50 +01002434 /*
2435 * Should not happen, we validate the ctx is still alive before calling.
2436 */
2437 if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
2438 return;
2439
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002440 /*
2441 * Installing events is tricky because we cannot rely on ctx->is_active
2442 * to be set in case this is the nr_events 0 -> 1 transition.
Peter Zijlstra63cae122016-12-09 14:59:00 +01002443 *
2444 * Instead we use task_curr(), which tells us if the task is running.
2445 * However, since we use task_curr() outside of rq::lock, we can race
2446 * against the actual state. This means the result can be wrong.
2447 *
2448 * If we get a false positive, we retry, this is harmless.
2449 *
2450 * If we get a false negative, things are complicated. If we are after
2451 * perf_event_context_sched_in() ctx::lock will serialize us, and the
2452 * value must be correct. If we're before, it doesn't matter since
2453 * perf_event_context_sched_in() will program the counter.
2454 *
2455 * However, this hinges on the remote context switch having observed
2456 * our task->perf_event_ctxp[] store, such that it will in fact take
2457 * ctx::lock in perf_event_context_sched_in().
2458 *
2459 * We do this by task_function_call(), if the IPI fails to hit the task
2460 * we know any future context switch of task must see the
 2461 * perf_event_ctxp[] store.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002462 */
Peter Zijlstra63cae122016-12-09 14:59:00 +01002463
Peter Zijlstraa0963092016-02-24 18:45:50 +01002464 /*
Peter Zijlstra63cae122016-12-09 14:59:00 +01002465 * This smp_mb() orders the task->perf_event_ctxp[] store with the
2466 * task_cpu() load, such that if the IPI then does not find the task
2467 * running, a future context switch of that task must observe the
2468 * store.
Peter Zijlstraa0963092016-02-24 18:45:50 +01002469 */
Peter Zijlstra63cae122016-12-09 14:59:00 +01002470 smp_mb();
2471again:
2472 if (!task_function_call(task, __perf_install_in_context, event))
Peter Zijlstraa0963092016-02-24 18:45:50 +01002473 return;
2474
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002475 raw_spin_lock_irq(&ctx->lock);
2476 task = ctx->task;
Peter Zijlstraa0963092016-02-24 18:45:50 +01002477 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
2478 /*
2479 * Cannot happen because we already checked above (which also
2480 * cannot happen), and we hold ctx->mutex, which serializes us
2481 * against perf_event_exit_task_context().
2482 */
Peter Zijlstra39a43642016-01-11 12:46:35 +01002483 raw_spin_unlock_irq(&ctx->lock);
2484 return;
2485 }
Peter Zijlstraa0963092016-02-24 18:45:50 +01002486 /*
Peter Zijlstra63cae122016-12-09 14:59:00 +01002487 * If the task is not running, ctx->lock will avoid it becoming so,
2488 * thus we can safely install the event.
Peter Zijlstraa0963092016-02-24 18:45:50 +01002489 */
Peter Zijlstra63cae122016-12-09 14:59:00 +01002490 if (task_curr(task)) {
2491 raw_spin_unlock_irq(&ctx->lock);
2492 goto again;
2493 }
2494 add_event_to_ctx(event, ctx);
2495 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002496}
2497
2498/*
 2499 * Put an event into inactive state and update time fields.
2500 * Enabling the leader of a group effectively enables all
2501 * the group members that aren't explicitly disabled, so we
2502 * have to update their ->tstamp_enabled also.
2503 * Note: this works for group members as well as group leaders
2504 * since the non-leader members' sibling_lists will be empty.
2505 */
Peter Zijlstra1d9b4822011-11-23 12:34:20 +01002506static void __perf_event_mark_enabled(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002507{
2508 struct perf_event *sub;
Stephane Eranian41587552011-01-03 18:20:01 +02002509 u64 tstamp = perf_event_time(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002510
2511 event->state = PERF_EVENT_STATE_INACTIVE;
Peter Zijlstra9b231d92017-08-03 15:42:09 +02002512 __perf_event_enable_time(event, tstamp);
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002513 list_for_each_entry(sub, &event->sibling_list, group_entry) {
Peter Zijlstra9b231d92017-08-03 15:42:09 +02002514 /* XXX should not be > INACTIVE if event isn't */
Stephane Eranian41587552011-01-03 18:20:01 +02002515 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra9b231d92017-08-03 15:42:09 +02002516 __perf_event_enable_time(sub, tstamp);
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002517 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002518}
2519
2520/*
2521 * Cross CPU call to enable a performance event
2522 */
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01002523static void __perf_event_enable(struct perf_event *event,
2524 struct perf_cpu_context *cpuctx,
2525 struct perf_event_context *ctx,
2526 void *info)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002527{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002528 struct perf_event *leader = event->group_leader;
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01002529 struct perf_event_context *task_ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002530
Peter Zijlstra6e801e012016-01-26 12:17:08 +01002531 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2532 event->state <= PERF_EVENT_STATE_ERROR)
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01002533 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002534
Peter Zijlstrabd2afa42016-02-24 18:45:49 +01002535 if (ctx->is_active)
2536 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2537
Peter Zijlstra1d9b4822011-11-23 12:34:20 +01002538 __perf_event_mark_enabled(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002539
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01002540 if (!ctx->is_active)
2541 return;
2542
Stephane Eraniane5d13672011-02-14 11:20:01 +02002543 if (!event_filter_match(event)) {
Peter Zijlstrabd2afa42016-02-24 18:45:49 +01002544 if (is_cgroup_event(event))
Stephane Eraniane5d13672011-02-14 11:20:01 +02002545 perf_cgroup_defer_enabled(event);
Peter Zijlstrabd2afa42016-02-24 18:45:49 +01002546 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01002547 return;
Stephane Eraniane5d13672011-02-14 11:20:01 +02002548 }
Peter Zijlstraf4c41762009-12-16 17:55:54 +01002549
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002550 /*
2551 * If the event is in a group and isn't the group leader,
2552 * then don't put it on unless the group is on.
2553 */
Peter Zijlstrabd2afa42016-02-24 18:45:49 +01002554 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
2555 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01002556 return;
Peter Zijlstrabd2afa42016-02-24 18:45:49 +01002557 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002558
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01002559 task_ctx = cpuctx->task_ctx;
2560 if (ctx->task)
2561 WARN_ON_ONCE(task_ctx != ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002562
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002563 ctx_resched(cpuctx, task_ctx, get_event_type(event));
Peter Zijlstra7b648012015-12-03 18:35:21 +01002564}
2565
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002566/*
 2567 * Enable an event.
2568 *
2569 * If event->ctx is a cloned context, callers must make sure that
2570 * every task struct that event->ctx->task could possibly point to
2571 * remains valid. This condition is satisfied when called through
2572 * perf_event_for_each_child or perf_event_for_each as described
2573 * for perf_event_disable.
2574 */
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01002575static void _perf_event_enable(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002576{
2577 struct perf_event_context *ctx = event->ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002578
Thomas Gleixnere625cce12009-11-17 18:02:06 +01002579 raw_spin_lock_irq(&ctx->lock);
Peter Zijlstra6e801e012016-01-26 12:17:08 +01002580 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2581 event->state < PERF_EVENT_STATE_ERROR) {
Peter Zijlstra7b648012015-12-03 18:35:21 +01002582 raw_spin_unlock_irq(&ctx->lock);
2583 return;
2584 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002585
2586 /*
2587 * If the event is in error state, clear that first.
Peter Zijlstra7b648012015-12-03 18:35:21 +01002588 *
2589 * That way, if we see the event in error state below, we know that it
2590 * has gone back into error state, as distinct from the task having
2591 * been scheduled away before the cross-call arrived.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002592 */
2593 if (event->state == PERF_EVENT_STATE_ERROR)
2594 event->state = PERF_EVENT_STATE_OFF;
Thomas Gleixnere625cce12009-11-17 18:02:06 +01002595 raw_spin_unlock_irq(&ctx->lock);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002596
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01002597 event_function_call(event, __perf_event_enable, NULL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002598}
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01002599
2600/*
2601 * See perf_event_disable();
2602 */
2603void perf_event_enable(struct perf_event *event)
2604{
2605 struct perf_event_context *ctx;
2606
2607 ctx = perf_event_ctx_lock(event);
2608 _perf_event_enable(event);
2609 perf_event_ctx_unlock(event, ctx);
2610}
Robert Richterdcfce4a2011-10-11 17:11:08 +02002611EXPORT_SYMBOL_GPL(perf_event_enable);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002612
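/*
 * Illustrative sketch, not part of the original file: an in-kernel user
 * that owns an event (for instance one obtained via
 * perf_event_create_kernel_counter()) can bracket a region of interest
 * with the exported helpers, roughly:
 *
 *	perf_event_disable(event);
 *	... reconfigure or read out the event ...
 *	perf_event_enable(event);
 *
 * Only perf_event_enable()/perf_event_disable() come from this file; the
 * surrounding usage is an assumed example.
 */
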
Alexander Shishkin375637b2016-04-27 18:44:46 +03002613struct stop_event_data {
2614 struct perf_event *event;
2615 unsigned int restart;
2616};
2617
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02002618static int __perf_event_stop(void *info)
2619{
Alexander Shishkin375637b2016-04-27 18:44:46 +03002620 struct stop_event_data *sd = info;
2621 struct perf_event *event = sd->event;
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02002622
Alexander Shishkin375637b2016-04-27 18:44:46 +03002623 /* if it's already INACTIVE, do nothing */
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02002624 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2625 return 0;
2626
2627 /* matches smp_wmb() in event_sched_in() */
2628 smp_rmb();
2629
2630 /*
2631 * There is a window with interrupts enabled before we get here,
2632 * so we need to check again lest we try to stop another CPU's event.
2633 */
2634 if (READ_ONCE(event->oncpu) != smp_processor_id())
2635 return -EAGAIN;
2636
2637 event->pmu->stop(event, PERF_EF_UPDATE);
2638
Alexander Shishkin375637b2016-04-27 18:44:46 +03002639 /*
2640 * May race with the actual stop (through perf_pmu_output_stop()),
2641 * but it is only used for events with an AUX ring buffer, and such
2642 * events will refuse to restart because of rb::aux_mmap_count==0,
2643 * see comments in perf_aux_output_begin().
2644 *
2645 * Since this is happening on an event-local CPU, no trace is lost
2646 * while restarting.
2647 */
2648 if (sd->restart)
Will Deaconc9bbdd42016-08-15 11:42:45 +01002649 event->pmu->start(event, 0);
Alexander Shishkin375637b2016-04-27 18:44:46 +03002650
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02002651 return 0;
2652}
2653
Alexander Shishkin767ae082016-09-06 16:23:49 +03002654static int perf_event_stop(struct perf_event *event, int restart)
Alexander Shishkin375637b2016-04-27 18:44:46 +03002655{
2656 struct stop_event_data sd = {
2657 .event = event,
Alexander Shishkin767ae082016-09-06 16:23:49 +03002658 .restart = restart,
Alexander Shishkin375637b2016-04-27 18:44:46 +03002659 };
2660 int ret = 0;
2661
2662 do {
2663 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2664 return 0;
2665
2666 /* matches smp_wmb() in event_sched_in() */
2667 smp_rmb();
2668
2669 /*
2670 * We only want to restart ACTIVE events, so if the event goes
2671 * inactive here (event->oncpu==-1), there's nothing more to do;
2672 * fall through with ret==-ENXIO.
2673 */
2674 ret = cpu_function_call(READ_ONCE(event->oncpu),
2675 __perf_event_stop, &sd);
2676 } while (ret == -EAGAIN);
2677
2678 return ret;
2679}
2680
2681/*
2682 * In order to contain the amount of racy and tricky code in the address
2683 * filter configuration management, it is a two-part process:
2684 *
2685 * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
2686 * we update the addresses of corresponding vmas in
2687 * event::addr_filters_offs array and bump the event::addr_filters_gen;
2688 * (p2) when an event is scheduled in (pmu::add), it calls
2689 * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
2690 * if the generation has changed since the previous call.
2691 *
2692 * If (p1) happens while the event is active, we restart it to force (p2).
2693 *
2694 * (1) perf_addr_filters_apply(): adjusting filters' offsets based on
2695 * pre-existing mappings, called once when new filters arrive via SET_FILTER
2696 * ioctl;
2697 * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
2698 * registered mapping, called for every new mmap(), with mm::mmap_sem down
2699 * for reading;
2700 * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
2701 * of exec.
2702 */
2703void perf_event_addr_filters_sync(struct perf_event *event)
2704{
2705 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
2706
2707 if (!has_addr_filter(event))
2708 return;
2709
2710 raw_spin_lock(&ifh->lock);
2711 if (event->addr_filters_gen != event->hw.addr_filters_gen) {
2712 event->pmu->addr_filters_sync(event);
2713 event->hw.addr_filters_gen = event->addr_filters_gen;
2714 }
2715 raw_spin_unlock(&ifh->lock);
2716}
2717EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
2718
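/*
 * Illustrative sketch, not part of the original file: step (p2) above is
 * typically driven from a (hypothetical) driver's pmu::add callback,
 * roughly:
 *
 *	static int example_pmu_add(struct perf_event *event, int flags)
 *	{
 *		perf_event_addr_filters_sync(event);
 *		... program and start the event on the hardware ...
 *		return 0;
 *	}
 *
 * example_pmu_add() is an assumption for illustration; only
 * perf_event_addr_filters_sync() comes from this file.
 */
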
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01002719static int _perf_event_refresh(struct perf_event *event, int refresh)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002720{
2721 /*
2722 * not supported on inherited events
2723 */
Franck Bui-Huu2e939d12010-11-23 16:21:44 +01002724 if (event->attr.inherit || !is_sampling_event(event))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002725 return -EINVAL;
2726
2727 atomic_add(refresh, &event->event_limit);
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01002728 _perf_event_enable(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002729
2730 return 0;
2731}
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01002732
2733/*
2734 * See perf_event_disable()
2735 */
2736int perf_event_refresh(struct perf_event *event, int refresh)
2737{
2738 struct perf_event_context *ctx;
2739 int ret;
2740
2741 ctx = perf_event_ctx_lock(event);
2742 ret = _perf_event_refresh(event, refresh);
2743 perf_event_ctx_unlock(event, ctx);
2744
2745 return ret;
2746}
Avi Kivity26ca5c12011-06-29 18:42:37 +03002747EXPORT_SYMBOL_GPL(perf_event_refresh);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002748
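/*
 * Illustrative sketch, not part of the original file: the refresh count
 * is added to event->event_limit, so a (hypothetical) caller that wants
 * the event enabled for exactly N more overflows before it auto-disables
 * would do:
 *
 *	err = perf_event_refresh(event, N);
 *	if (err)
 *		... event is inherited or not a sampling event ...
 *
 * Only perf_event_refresh() comes from this file; N and the error
 * handling are assumptions.
 */
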
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01002749static void ctx_sched_out(struct perf_event_context *ctx,
2750 struct perf_cpu_context *cpuctx,
2751 enum event_type_t event_type)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002752{
Peter Zijlstradb24d332011-04-09 21:17:45 +02002753 int is_active = ctx->is_active;
Peter Zijlstrac994d612016-01-08 09:20:23 +01002754 struct perf_event *event;
2755
2756 lockdep_assert_held(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002757
Peter Zijlstra39a43642016-01-11 12:46:35 +01002758 if (likely(!ctx->nr_events)) {
2759 /*
2760 * See __perf_remove_from_context().
2761 */
2762 WARN_ON_ONCE(ctx->is_active);
2763 if (ctx->task)
2764 WARN_ON_ONCE(cpuctx->task_ctx);
2765 return;
2766 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002767
Peter Zijlstradb24d332011-04-09 21:17:45 +02002768 ctx->is_active &= ~event_type;
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01002769 if (!(ctx->is_active & EVENT_ALL))
2770 ctx->is_active = 0;
2771
Peter Zijlstra63e30d32016-01-08 11:39:10 +01002772 if (ctx->task) {
2773 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2774 if (!ctx->is_active)
2775 cpuctx->task_ctx = NULL;
2776 }
Peter Zijlstrafacc4302011-04-09 21:17:42 +02002777
Peter Zijlstra8fdc6532016-03-29 09:26:44 +02002778 /*
2779 * Always update time if it was set; not only when it changes.
2780 * Otherwise we can 'forget' to update time for any but the last
2781 * context we sched out. For example:
2782 *
2783 * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
2784 * ctx_sched_out(.event_type = EVENT_PINNED)
2785 *
2786 * would only update time for the pinned events.
2787 */
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01002788 if (is_active & EVENT_TIME) {
2789 /* update (and stop) ctx time */
2790 update_context_time(ctx);
2791 update_cgrp_time_from_cpuctx(cpuctx);
2792 }
2793
Peter Zijlstra8fdc6532016-03-29 09:26:44 +02002794 is_active ^= ctx->is_active; /* changed bits */
2795
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01002796 if (!ctx->nr_active || !(is_active & EVENT_ALL))
Peter Zijlstrafacc4302011-04-09 21:17:42 +02002797 return;
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01002798
Peter Zijlstra075e0b02011-04-09 21:17:40 +02002799 perf_pmu_disable(ctx->pmu);
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01002800 if (is_active & EVENT_PINNED) {
Frederic Weisbecker889ff012010-01-09 20:04:47 +01002801 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2802 group_sched_out(event, cpuctx, ctx);
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002803 }
Frederic Weisbecker889ff012010-01-09 20:04:47 +01002804
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01002805 if (is_active & EVENT_FLEXIBLE) {
Frederic Weisbecker889ff012010-01-09 20:04:47 +01002806 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
Xiao Guangrong8c9ed8e2009-09-25 13:51:17 +08002807 group_sched_out(event, cpuctx, ctx);
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002808 }
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02002809 perf_pmu_enable(ctx->pmu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002810}
2811
2812/*
Peter Zijlstra5a3126d2013-10-07 17:12:48 +02002813 * Test whether two contexts are equivalent, i.e. whether they have both been
2814 * cloned from the same version of the same context.
2815 *
2816 * Equivalence is measured using a generation number in the context that is
2817 * incremented on each modification to it; see unclone_ctx(), list_add_event()
2818 * and list_del_event().
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002819 */
2820static int context_equiv(struct perf_event_context *ctx1,
2821 struct perf_event_context *ctx2)
2822{
Peter Zijlstra211de6e2014-09-30 19:23:08 +02002823 lockdep_assert_held(&ctx1->lock);
2824 lockdep_assert_held(&ctx2->lock);
2825
Peter Zijlstra5a3126d2013-10-07 17:12:48 +02002826 /* Pinning disables the swap optimization */
2827 if (ctx1->pin_count || ctx2->pin_count)
2828 return 0;
2829
2830 /* If ctx1 is the parent of ctx2 */
2831 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2832 return 1;
2833
2834 /* If ctx2 is the parent of ctx1 */
2835 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2836 return 1;
2837
2838 /*
2839 * If ctx1 and ctx2 have the same parent; we flatten the parent
2840 * hierarchy, see perf_event_init_context().
2841 */
2842 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2843 ctx1->parent_gen == ctx2->parent_gen)
2844 return 1;
2845
2846 /* Unmatched */
2847 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002848}
2849
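/*
 * Worked example, not part of the original file: a context C cloned from
 * parent P at fork time satisfies
 *
 *	C->parent_ctx == P
 *	C->parent_gen == P->generation
 *
 * so context_equiv(P, C) returns 1 via the "ctx1 is the parent of ctx2"
 * test and the context-switch swap optimization may be used. The first
 * list_add_event()/list_del_event() on P bumps P->generation, the
 * generations diverge, and the swap is skipped from then on.
 */
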
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002850static void __perf_event_sync_stat(struct perf_event *event,
2851 struct perf_event *next_event)
2852{
2853 u64 value;
2854
2855 if (!event->attr.inherit_stat)
2856 return;
2857
2858 /*
2859 * Update the event value; we cannot use perf_event_read()
2860 * because we're in the middle of a context switch and have IRQs
2861 * disabled, which upsets smp_call_function_single(). However,
2862 * we know the event must be on the current CPU, so we
2863 * don't need to use it.
2864 */
2865 switch (event->state) {
2866 case PERF_EVENT_STATE_ACTIVE:
Peter Zijlstra3dbebf12009-11-20 22:19:52 +01002867 event->pmu->read(event);
2868 /* fall-through */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002869
2870 case PERF_EVENT_STATE_INACTIVE:
2871 update_event_times(event);
2872 break;
2873
2874 default:
2875 break;
2876 }
2877
2878 /*
2879 * In order to keep per-task stats reliable we need to flip the event
2880 * values when we flip the contexts.
2881 */
Peter Zijlstrae7850592010-05-21 14:43:08 +02002882 value = local64_read(&next_event->count);
2883 value = local64_xchg(&event->count, value);
2884 local64_set(&next_event->count, value);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002885
2886 swap(event->total_time_enabled, next_event->total_time_enabled);
2887 swap(event->total_time_running, next_event->total_time_running);
2888
2889 /*
2890 * Since we swizzled the values, update the user visible data too.
2891 */
2892 perf_event_update_userpage(event);
2893 perf_event_update_userpage(next_event);
2894}
2895
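/*
 * Worked example, not part of the original file: if, at the moment the
 * two cloned contexts are swapped, the outgoing task's event has counted
 * 100 and the incoming task's sibling event has counted 40, the
 * local64_xchg() dance above leaves the outgoing event at 40 and the
 * incoming one at 100, so each value stays with the task it was
 * accumulated for even though the context structures changed owners.
 */
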
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002896static void perf_event_sync_stat(struct perf_event_context *ctx,
2897 struct perf_event_context *next_ctx)
2898{
2899 struct perf_event *event, *next_event;
2900
2901 if (!ctx->nr_stat)
2902 return;
2903
Peter Zijlstra02ffdbc2009-11-20 22:19:50 +01002904 update_context_time(ctx);
2905
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002906 event = list_first_entry(&ctx->event_list,
2907 struct perf_event, event_entry);
2908
2909 next_event = list_first_entry(&next_ctx->event_list,
2910 struct perf_event, event_entry);
2911
2912 while (&event->event_entry != &ctx->event_list &&
2913 &next_event->event_entry != &next_ctx->event_list) {
2914
2915 __perf_event_sync_stat(event, next_event);
2916
2917 event = list_next_entry(event, event_entry);
2918 next_event = list_next_entry(next_event, event_entry);
2919 }
2920}
2921
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002922static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2923 struct task_struct *next)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002924{
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02002925 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002926 struct perf_event_context *next_ctx;
Peter Zijlstra5a3126d2013-10-07 17:12:48 +02002927 struct perf_event_context *parent, *next_parent;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002928 struct perf_cpu_context *cpuctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002929 int do_switch = 1;
2930
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002931 if (likely(!ctx))
2932 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002933
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002934 cpuctx = __get_cpu_context(ctx);
2935 if (!cpuctx->task_ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002936 return;
2937
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002938 rcu_read_lock();
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02002939 next_ctx = next->perf_event_ctxp[ctxn];
Peter Zijlstra5a3126d2013-10-07 17:12:48 +02002940 if (!next_ctx)
2941 goto unlock;
2942
2943 parent = rcu_dereference(ctx->parent_ctx);
2944 next_parent = rcu_dereference(next_ctx->parent_ctx);
2945
2946 /* If neither context has a parent context, they cannot be clones. */
Jiri Olsa802c8a62014-09-12 13:18:28 +02002947 if (!parent && !next_parent)
Peter Zijlstra5a3126d2013-10-07 17:12:48 +02002948 goto unlock;
2949
2950 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002951 /*
2952 * Looks like the two contexts are clones, so we might be
2953 * able to optimize the context switch. We lock both
2954 * contexts and check that they are clones under the
2955 * lock (including re-checking that neither has been
2956 * uncloned in the meantime). It doesn't matter which
2957 * order we take the locks because no other cpu could
2958 * be trying to lock both of these tasks.
2959 */
Thomas Gleixnere625cce12009-11-17 18:02:06 +01002960 raw_spin_lock(&ctx->lock);
2961 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002962 if (context_equiv(ctx, next_ctx)) {
Peter Zijlstra63b6da32016-01-14 16:05:37 +01002963 WRITE_ONCE(ctx->task, next);
2964 WRITE_ONCE(next_ctx->task, task);
Yan, Zheng5a158c32014-11-04 21:56:02 -05002965
2966 swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
2967
Peter Zijlstra63b6da32016-01-14 16:05:37 +01002968 /*
2969 * RCU_INIT_POINTER here is safe because we've not
2970 * modified the ctx and the above modification of
2971 * ctx->task and ctx->task_ctx_data are immaterial
2972 * since those values are always verified under
2973 * ctx->lock which we're now holding.
2974 */
2975 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx);
2976 RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx);
2977
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002978 do_switch = 0;
2979
2980 perf_event_sync_stat(ctx, next_ctx);
2981 }
Thomas Gleixnere625cce12009-11-17 18:02:06 +01002982 raw_spin_unlock(&next_ctx->lock);
2983 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002984 }
Peter Zijlstra5a3126d2013-10-07 17:12:48 +02002985unlock:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002986 rcu_read_unlock();
2987
2988 if (do_switch) {
Peter Zijlstrafacc4302011-04-09 21:17:42 +02002989 raw_spin_lock(&ctx->lock);
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002990 task_ctx_sched_out(cpuctx, ctx, EVENT_ALL);
Peter Zijlstrafacc4302011-04-09 21:17:42 +02002991 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002992 }
2993}
2994
Peter Zijlstrae48c1782016-07-06 09:18:30 +02002995static DEFINE_PER_CPU(struct list_head, sched_cb_list);
2996
Yan, Zhengba532502014-11-04 21:55:58 -05002997void perf_sched_cb_dec(struct pmu *pmu)
2998{
Peter Zijlstrae48c1782016-07-06 09:18:30 +02002999 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
3000
Yan, Zhengba532502014-11-04 21:55:58 -05003001 this_cpu_dec(perf_sched_cb_usages);
Peter Zijlstrae48c1782016-07-06 09:18:30 +02003002
3003 if (!--cpuctx->sched_cb_usage)
3004 list_del(&cpuctx->sched_cb_entry);
Yan, Zhengba532502014-11-04 21:55:58 -05003005}
3006
Peter Zijlstrae48c1782016-07-06 09:18:30 +02003007
Yan, Zhengba532502014-11-04 21:55:58 -05003008void perf_sched_cb_inc(struct pmu *pmu)
3009{
Peter Zijlstrae48c1782016-07-06 09:18:30 +02003010 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
3011
3012 if (!cpuctx->sched_cb_usage++)
3013 list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
3014
Yan, Zhengba532502014-11-04 21:55:58 -05003015 this_cpu_inc(perf_sched_cb_usages);
3016}
3017
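/*
 * Illustrative sketch, not part of the original file: a PMU driver that
 * implements pmu::sched_task (x86 PEBS/LBR being the in-tree users)
 * brackets the periods during which it needs the callback, roughly:
 *
 *	perf_sched_cb_inc(event->ctx->pmu);	(from its pmu::add path)
 *	...
 *	perf_sched_cb_dec(event->ctx->pmu);	(from its pmu::del path)
 *
 * The exact call sites above are an assumption for illustration.
 */
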
3018/*
3019 * This function provides the context switch callback to the lower code
3020 * layer. It is invoked ONLY when the context switch callback is enabled.
Peter Zijlstra09e61b4f2016-07-06 18:02:43 +02003021 *
3022 * This callback is relevant even to per-cpu events; for example multi-event
3023 * PEBS requires this to provide PID/TID information. This requires we flush
3024 * all queued PEBS records before we context switch to a new task.
Yan, Zhengba532502014-11-04 21:55:58 -05003025 */
3026static void perf_pmu_sched_task(struct task_struct *prev,
3027 struct task_struct *next,
3028 bool sched_in)
3029{
3030 struct perf_cpu_context *cpuctx;
3031 struct pmu *pmu;
Yan, Zhengba532502014-11-04 21:55:58 -05003032
3033 if (prev == next)
3034 return;
3035
Peter Zijlstrae48c1782016-07-06 09:18:30 +02003036 list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
David Carrillo-Cisneros1fd7e412017-01-18 11:24:54 -08003037 pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */
Yan, Zhengba532502014-11-04 21:55:58 -05003038
Peter Zijlstrae48c1782016-07-06 09:18:30 +02003039 if (WARN_ON_ONCE(!pmu->sched_task))
3040 continue;
Yan, Zhengba532502014-11-04 21:55:58 -05003041
Peter Zijlstrae48c1782016-07-06 09:18:30 +02003042 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
3043 perf_pmu_disable(pmu);
Yan, Zhengba532502014-11-04 21:55:58 -05003044
Peter Zijlstrae48c1782016-07-06 09:18:30 +02003045 pmu->sched_task(cpuctx->task_ctx, sched_in);
Yan, Zhengba532502014-11-04 21:55:58 -05003046
Peter Zijlstrae48c1782016-07-06 09:18:30 +02003047 perf_pmu_enable(pmu);
3048 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
Yan, Zhengba532502014-11-04 21:55:58 -05003049 }
Yan, Zhengba532502014-11-04 21:55:58 -05003050}
3051
Adrian Hunter45ac1402015-07-21 12:44:02 +03003052static void perf_event_switch(struct task_struct *task,
3053 struct task_struct *next_prev, bool sched_in);
3054
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003055#define for_each_task_context_nr(ctxn) \
3056 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
3057
3058/*
3059 * Called from scheduler to remove the events of the current task,
3060 * with interrupts disabled.
3061 *
3062 * We stop each event and update the event value in event->count.
3063 *
3064 * This does not protect us against NMI, but disable()
3065 * sets the disabled bit in the control field of event _before_
3066 * accessing the event control register. If an NMI hits, then it will
3067 * not restart the event.
3068 */
Jiri Olsaab0cce52012-05-23 13:13:02 +02003069void __perf_event_task_sched_out(struct task_struct *task,
3070 struct task_struct *next)
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003071{
3072 int ctxn;
3073
Yan, Zhengba532502014-11-04 21:55:58 -05003074 if (__this_cpu_read(perf_sched_cb_usages))
3075 perf_pmu_sched_task(task, next, false);
3076
Adrian Hunter45ac1402015-07-21 12:44:02 +03003077 if (atomic_read(&nr_switch_events))
3078 perf_event_switch(task, next, false);
3079
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003080 for_each_task_context_nr(ctxn)
3081 perf_event_context_sched_out(task, ctxn, next);
Stephane Eraniane5d13672011-02-14 11:20:01 +02003082
3083 /*
3084 * if cgroup events exist on this CPU, then we need
3085 * to check if we have to switch out PMU state.
3086 * cgroup events are system-wide mode only
3087 */
Christoph Lameter4a32fea2014-08-17 12:30:27 -05003088 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
Stephane Eraniana8d757e2011-08-25 15:58:03 +02003089 perf_cgroup_sched_out(task, next);
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003090}
3091
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003092/*
3093 * Called with IRQs disabled
3094 */
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003095static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
3096 enum event_type_t event_type)
3097{
3098 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003099}
3100
3101static void
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003102ctx_pinned_sched_in(struct perf_event_context *ctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01003103 struct perf_cpu_context *cpuctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003104{
3105 struct perf_event *event;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003106
Frederic Weisbecker889ff012010-01-09 20:04:47 +01003107 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
3108 if (event->state <= PERF_EVENT_STATE_OFF)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003109 continue;
Stephane Eranian5632ab12011-01-03 18:20:01 +02003110 if (!event_filter_match(event))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003111 continue;
3112
Stephane Eraniane5d13672011-02-14 11:20:01 +02003113 /* may need to reset tstamp_enabled */
3114 if (is_cgroup_event(event))
3115 perf_cgroup_mark_enabled(event, ctx);
3116
Xiao Guangrong8c9ed8e2009-09-25 13:51:17 +08003117 if (group_can_go_on(event, cpuctx, 1))
Peter Zijlstra6e377382010-02-11 13:21:58 +01003118 group_sched_in(event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003119
3120 /*
3121 * If this pinned group hasn't been scheduled,
3122 * put it in error state.
3123 */
3124 if (event->state == PERF_EVENT_STATE_INACTIVE) {
3125 update_group_times(event);
3126 event->state = PERF_EVENT_STATE_ERROR;
3127 }
3128 }
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003129}
3130
3131static void
3132ctx_flexible_sched_in(struct perf_event_context *ctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01003133 struct perf_cpu_context *cpuctx)
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003134{
3135 struct perf_event *event;
3136 int can_add_hw = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003137
Frederic Weisbecker889ff012010-01-09 20:04:47 +01003138 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
3139 /* Ignore events in OFF or ERROR state */
3140 if (event->state <= PERF_EVENT_STATE_OFF)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003141 continue;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003142 /*
3143 * Listen to the 'cpu' scheduling filter constraint
3144 * of events:
3145 */
Stephane Eranian5632ab12011-01-03 18:20:01 +02003146 if (!event_filter_match(event))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003147 continue;
3148
Stephane Eraniane5d13672011-02-14 11:20:01 +02003149 /* may need to reset tstamp_enabled */
3150 if (is_cgroup_event(event))
3151 perf_cgroup_mark_enabled(event, ctx);
3152
Peter Zijlstra9ed60602010-06-11 17:36:35 +02003153 if (group_can_go_on(event, cpuctx, can_add_hw)) {
Peter Zijlstra6e377382010-02-11 13:21:58 +01003154 if (group_sched_in(event, cpuctx, ctx))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003155 can_add_hw = 0;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02003156 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003157 }
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003158}
3159
3160static void
3161ctx_sched_in(struct perf_event_context *ctx,
3162 struct perf_cpu_context *cpuctx,
Stephane Eraniane5d13672011-02-14 11:20:01 +02003163 enum event_type_t event_type,
3164 struct task_struct *task)
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003165{
Peter Zijlstradb24d332011-04-09 21:17:45 +02003166 int is_active = ctx->is_active;
Peter Zijlstrac994d612016-01-08 09:20:23 +01003167 u64 now;
Stephane Eraniane5d13672011-02-14 11:20:01 +02003168
Peter Zijlstrac994d612016-01-08 09:20:23 +01003169 lockdep_assert_held(&ctx->lock);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003170
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003171 if (likely(!ctx->nr_events))
Peter Zijlstrafacc4302011-04-09 21:17:42 +02003172 return;
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003173
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01003174 ctx->is_active |= (event_type | EVENT_TIME);
Peter Zijlstra63e30d32016-01-08 11:39:10 +01003175 if (ctx->task) {
3176 if (!is_active)
3177 cpuctx->task_ctx = ctx;
3178 else
3179 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3180 }
3181
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01003182 is_active ^= ctx->is_active; /* changed bits */
3183
3184 if (is_active & EVENT_TIME) {
3185 /* start ctx time */
3186 now = perf_clock();
3187 ctx->timestamp = now;
3188 perf_cgroup_set_timestamp(task, ctx);
3189 }
3190
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003191 /*
3192 * First go through the list and put on any pinned groups
3193 * in order to give them the best chance of going on.
3194 */
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01003195 if (is_active & EVENT_PINNED)
Peter Zijlstra6e377382010-02-11 13:21:58 +01003196 ctx_pinned_sched_in(ctx, cpuctx);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003197
3198 /* Then walk through the lower prio flexible groups */
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01003199 if (is_active & EVENT_FLEXIBLE)
Peter Zijlstra6e377382010-02-11 13:21:58 +01003200 ctx_flexible_sched_in(ctx, cpuctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003201}
3202
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01003203static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
Stephane Eraniane5d13672011-02-14 11:20:01 +02003204 enum event_type_t event_type,
3205 struct task_struct *task)
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01003206{
3207 struct perf_event_context *ctx = &cpuctx->ctx;
3208
Stephane Eraniane5d13672011-02-14 11:20:01 +02003209 ctx_sched_in(ctx, cpuctx, event_type, task);
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01003210}
3211
Stephane Eraniane5d13672011-02-14 11:20:01 +02003212static void perf_event_context_sched_in(struct perf_event_context *ctx,
3213 struct task_struct *task)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003214{
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003215 struct perf_cpu_context *cpuctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003216
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003217 cpuctx = __get_cpu_context(ctx);
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01003218 if (cpuctx->task_ctx == ctx)
3219 return;
3220
Peter Zijlstrafacc4302011-04-09 21:17:42 +02003221 perf_ctx_lock(cpuctx, ctx);
leilei.linfdccc3f2017-08-09 08:29:21 +08003222 /*
3223 * We must check ctx->nr_events while holding ctx->lock, such
3224 * that we serialize against perf_install_in_context().
3225 */
3226 if (!ctx->nr_events)
3227 goto unlock;
3228
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02003229 perf_pmu_disable(ctx->pmu);
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01003230 /*
3231 * We want to keep the following priority order:
3232 * cpu pinned (that don't need to move), task pinned,
3233 * cpu flexible, task flexible.
Alexander Shishkinfe45baf2017-01-19 18:43:29 +02003234 *
3235 * However, if task's ctx is not carrying any pinned
3236 * events, no need to flip the cpuctx's events around.
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01003237 */
Alexander Shishkinfe45baf2017-01-19 18:43:29 +02003238 if (!list_empty(&ctx->pinned_groups))
3239 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
Peter Zijlstra63e30d32016-01-08 11:39:10 +01003240 perf_event_sched_in(cpuctx, ctx, task);
Peter Zijlstrafacc4302011-04-09 21:17:42 +02003241 perf_pmu_enable(ctx->pmu);
leilei.linfdccc3f2017-08-09 08:29:21 +08003242
3243unlock:
Peter Zijlstrafacc4302011-04-09 21:17:42 +02003244 perf_ctx_unlock(cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003245}
3246
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003247/*
3248 * Called from scheduler to add the events of the current task
3249 * with interrupts disabled.
3250 *
3251 * We restore the event value and then enable it.
3252 *
3253 * This does not protect us against NMI, but enable()
3254 * sets the enabled bit in the control field of event _before_
3255 * accessing the event control register. If an NMI hits, then it will
3256 * keep the event running.
3257 */
Jiri Olsaab0cce52012-05-23 13:13:02 +02003258void __perf_event_task_sched_in(struct task_struct *prev,
3259 struct task_struct *task)
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003260{
3261 struct perf_event_context *ctx;
3262 int ctxn;
3263
Peter Zijlstra7e41d172016-01-08 09:21:40 +01003264 /*
3265 * If cgroup events exist on this CPU, then we need to check if we have
3266 * to switch in PMU state; cgroup events are system-wide mode only.
3267 *
3268 * Since cgroup events are CPU events, we must schedule these in before
3269 * we schedule in the task events.
3270 */
3271 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
3272 perf_cgroup_sched_in(prev, task);
3273
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003274 for_each_task_context_nr(ctxn) {
3275 ctx = task->perf_event_ctxp[ctxn];
3276 if (likely(!ctx))
3277 continue;
3278
Stephane Eraniane5d13672011-02-14 11:20:01 +02003279 perf_event_context_sched_in(ctx, task);
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003280 }
Stephane Eraniand010b332012-02-09 23:21:00 +01003281
Adrian Hunter45ac1402015-07-21 12:44:02 +03003282 if (atomic_read(&nr_switch_events))
3283 perf_event_switch(task, prev, true);
3284
Yan, Zhengba532502014-11-04 21:55:58 -05003285 if (__this_cpu_read(perf_sched_cb_usages))
3286 perf_pmu_sched_task(prev, task, true);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003287}
3288
Peter Zijlstraabd50712010-01-26 18:50:16 +01003289static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
3290{
3291 u64 frequency = event->attr.sample_freq;
3292 u64 sec = NSEC_PER_SEC;
3293 u64 divisor, dividend;
3294
3295 int count_fls, nsec_fls, frequency_fls, sec_fls;
3296
3297 count_fls = fls64(count);
3298 nsec_fls = fls64(nsec);
3299 frequency_fls = fls64(frequency);
3300 sec_fls = 30;
3301
3302 /*
3303 * We got @count in @nsec, with a target of sample_freq HZ
3304 * the target period becomes:
3305 *
3306 * @count * 10^9
3307 * period = -------------------
3308 * @nsec * sample_freq
3309 *
3310 */
3311
3312 /*
3313 * Reduce accuracy by one bit such that @a and @b converge
3314 * to a similar magnitude.
3315 */
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01003316#define REDUCE_FLS(a, b) \
Peter Zijlstraabd50712010-01-26 18:50:16 +01003317do { \
3318 if (a##_fls > b##_fls) { \
3319 a >>= 1; \
3320 a##_fls--; \
3321 } else { \
3322 b >>= 1; \
3323 b##_fls--; \
3324 } \
3325} while (0)
3326
3327 /*
3328 * Reduce accuracy until either term fits in a u64, then proceed with
3329 * the other, so that finally we can do a u64/u64 division.
3330 */
3331 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
3332 REDUCE_FLS(nsec, frequency);
3333 REDUCE_FLS(sec, count);
3334 }
3335
3336 if (count_fls + sec_fls > 64) {
3337 divisor = nsec * frequency;
3338
3339 while (count_fls + sec_fls > 64) {
3340 REDUCE_FLS(count, sec);
3341 divisor >>= 1;
3342 }
3343
3344 dividend = count * sec;
3345 } else {
3346 dividend = count * sec;
3347
3348 while (nsec_fls + frequency_fls > 64) {
3349 REDUCE_FLS(nsec, frequency);
3350 dividend >>= 1;
3351 }
3352
3353 divisor = nsec * frequency;
3354 }
3355
Peter Zijlstraf6ab91ad2010-06-04 15:18:01 +02003356 if (!divisor)
3357 return dividend;
3358
Peter Zijlstraabd50712010-01-26 18:50:16 +01003359 return div64_u64(dividend, divisor);
3360}
3361
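/*
 * Worked example, not part of the original file: with
 * attr.sample_freq = 1000 and an event that counted 2,000,000 during the
 * last 1,000,000 ns, the formula above gives
 *
 *	period = 2,000,000 * 10^9 / (1,000,000 * 1000) = 2,000,000
 *
 * i.e. one sample every two million events yields ~1000 samples/sec. The
 * REDUCE_FLS() steps only drop precision when the intermediate products
 * would overflow 64 bits; with numbers this small they do nothing.
 */
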
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003362static DEFINE_PER_CPU(int, perf_throttled_count);
3363static DEFINE_PER_CPU(u64, perf_throttled_seq);
3364
Stephane Eranianf39d47f2012-02-07 14:39:57 +01003365static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003366{
3367 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstraf6ab91ad2010-06-04 15:18:01 +02003368 s64 period, sample_period;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003369 s64 delta;
3370
Peter Zijlstraabd50712010-01-26 18:50:16 +01003371 period = perf_calculate_period(event, nsec, count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003372
3373 delta = (s64)(period - hwc->sample_period);
3374 delta = (delta + 7) / 8; /* low pass filter */
3375
3376 sample_period = hwc->sample_period + delta;
3377
3378 if (!sample_period)
3379 sample_period = 1;
3380
3381 hwc->sample_period = sample_period;
Peter Zijlstraabd50712010-01-26 18:50:16 +01003382
Peter Zijlstrae7850592010-05-21 14:43:08 +02003383 if (local64_read(&hwc->period_left) > 8*sample_period) {
Stephane Eranianf39d47f2012-02-07 14:39:57 +01003384 if (disable)
3385 event->pmu->stop(event, PERF_EF_UPDATE);
3386
Peter Zijlstrae7850592010-05-21 14:43:08 +02003387 local64_set(&hwc->period_left, 0);
Stephane Eranianf39d47f2012-02-07 14:39:57 +01003388
3389 if (disable)
3390 event->pmu->start(event, PERF_EF_RELOAD);
Peter Zijlstraabd50712010-01-26 18:50:16 +01003391 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003392}
3393
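/*
 * Worked example, not part of the original file: if hwc->sample_period
 * is currently 1,000,000 and perf_calculate_period() now asks for
 * 1,800,000, then
 *
 *	delta = (1,800,000 - 1,000,000 + 7) / 8 = 100,000
 *
 * and the new period is 1,100,000: each adjustment moves only ~1/8 of
 * the way toward the target, which is the low pass filter above
 * smoothing out noisy per-tick estimates.
 */
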
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003394/*
3395 * combine freq adjustment with unthrottling to avoid two passes over the
3396 * events. At the same time, make sure, having freq events does not change
3397 * the rate of unthrottling as that would introduce bias.
3398 */
3399static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
3400 int needs_unthr)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003401{
3402 struct perf_event *event;
3403 struct hw_perf_event *hwc;
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003404 u64 now, period = TICK_NSEC;
Peter Zijlstraabd50712010-01-26 18:50:16 +01003405 s64 delta;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003406
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003407 /*
3408 * only need to iterate over all events iff:
3409 * - the context has events in frequency mode (needs freq adjust)
3410 * - there are events to unthrottle on this cpu
3411 */
3412 if (!(ctx->nr_freq || needs_unthr))
Peter Zijlstra0f5a2602011-11-16 14:38:16 +01003413 return;
3414
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003415 raw_spin_lock(&ctx->lock);
Stephane Eranianf39d47f2012-02-07 14:39:57 +01003416 perf_pmu_disable(ctx->pmu);
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003417
Paul Mackerras03541f82009-10-14 16:58:03 +11003418 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003419 if (event->state != PERF_EVENT_STATE_ACTIVE)
3420 continue;
3421
Stephane Eranian5632ab12011-01-03 18:20:01 +02003422 if (!event_filter_match(event))
Peter Zijlstra5d27c232009-12-17 13:16:32 +01003423 continue;
3424
Alexander Shishkin44377272013-12-16 14:17:36 +02003425 perf_pmu_disable(event->pmu);
3426
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003427 hwc = &event->hw;
3428
Jiri Olsaae23bff2013-08-24 16:45:54 +02003429 if (hwc->interrupts == MAX_INTERRUPTS) {
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003430 hwc->interrupts = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003431 perf_log_throttle(event, 1);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02003432 event->pmu->start(event, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003433 }
3434
3435 if (!event->attr.freq || !event->attr.sample_freq)
Alexander Shishkin44377272013-12-16 14:17:36 +02003436 goto next;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003437
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003438 /*
3439 * stop the event and update event->count
3440 */
3441 event->pmu->stop(event, PERF_EF_UPDATE);
3442
Peter Zijlstrae7850592010-05-21 14:43:08 +02003443 now = local64_read(&event->count);
Peter Zijlstraabd50712010-01-26 18:50:16 +01003444 delta = now - hwc->freq_count_stamp;
3445 hwc->freq_count_stamp = now;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003446
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003447 /*
3448 * restart the event
3449 * reload only if value has changed
Stephane Eranianf39d47f2012-02-07 14:39:57 +01003450 * we have stopped the event so tell that
3451 * to perf_adjust_period() to avoid stopping it
3452 * twice.
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003453 */
Peter Zijlstraabd50712010-01-26 18:50:16 +01003454 if (delta > 0)
Stephane Eranianf39d47f2012-02-07 14:39:57 +01003455 perf_adjust_period(event, period, delta, false);
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003456
3457 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
Alexander Shishkin44377272013-12-16 14:17:36 +02003458 next:
3459 perf_pmu_enable(event->pmu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003460 }
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003461
Stephane Eranianf39d47f2012-02-07 14:39:57 +01003462 perf_pmu_enable(ctx->pmu);
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003463 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003464}
3465
3466/*
3467 * Round-robin a context's events:
3468 */
3469static void rotate_ctx(struct perf_event_context *ctx)
3470{
Thomas Gleixnerdddd3372010-11-24 10:05:55 +01003471 /*
3472 * Rotate the first entry of the non-pinned groups to the end. Rotation might be
3473 * disabled by the inheritance code.
3474 */
3475 if (!ctx->rotate_disable)
3476 list_rotate_left(&ctx->flexible_groups);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003477}
3478
Stephane Eranian9e630202013-04-03 14:21:33 +02003479static int perf_rotate_context(struct perf_cpu_context *cpuctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003480{
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003481 struct perf_event_context *ctx = NULL;
Mark Rutland2fde4f92015-01-07 15:01:54 +00003482 int rotate = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003483
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02003484 if (cpuctx->ctx.nr_events) {
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02003485 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
3486 rotate = 1;
3487 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003488
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003489 ctx = cpuctx->task_ctx;
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02003490 if (ctx && ctx->nr_events) {
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02003491 if (ctx->nr_events != ctx->nr_active)
3492 rotate = 1;
3493 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003494
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003495 if (!rotate)
Peter Zijlstra0f5a2602011-11-16 14:38:16 +01003496 goto done;
3497
Peter Zijlstrafacc4302011-04-09 21:17:42 +02003498 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02003499 perf_pmu_disable(cpuctx->ctx.pmu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003500
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003501 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
3502 if (ctx)
3503 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
Peter Zijlstrad4944a02010-03-08 13:51:20 +01003504
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003505 rotate_ctx(&cpuctx->ctx);
3506 if (ctx)
3507 rotate_ctx(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003508
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003509 perf_event_sched_in(cpuctx, ctx, current);
Peter Zijlstra0f5a2602011-11-16 14:38:16 +01003510
3511 perf_pmu_enable(cpuctx->ctx.pmu);
3512 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02003513done:
Stephane Eranian9e630202013-04-03 14:21:33 +02003514
3515 return rotate;
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02003516}
3517
3518void perf_event_task_tick(void)
3519{
Mark Rutland2fde4f92015-01-07 15:01:54 +00003520 struct list_head *head = this_cpu_ptr(&active_ctx_list);
3521 struct perf_event_context *ctx, *tmp;
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003522 int throttled;
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02003523
3524 WARN_ON(!irqs_disabled());
3525
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003526 __this_cpu_inc(perf_throttled_seq);
3527 throttled = __this_cpu_xchg(perf_throttled_count, 0);
Frederic Weisbecker555e0c12015-07-16 17:42:29 +02003528 tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003529
Mark Rutland2fde4f92015-01-07 15:01:54 +00003530 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003531 perf_adjust_freq_unthr_context(ctx, throttled);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003532}
3533
Frederic Weisbecker889ff012010-01-09 20:04:47 +01003534static int event_enable_on_exec(struct perf_event *event,
3535 struct perf_event_context *ctx)
3536{
3537 if (!event->attr.enable_on_exec)
3538 return 0;
3539
3540 event->attr.enable_on_exec = 0;
3541 if (event->state >= PERF_EVENT_STATE_INACTIVE)
3542 return 0;
3543
Peter Zijlstra1d9b4822011-11-23 12:34:20 +01003544 __perf_event_mark_enabled(event);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01003545
3546 return 1;
3547}
3548
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003549/*
3550 * Enable all of a task's events that have been marked enable-on-exec.
3551 * This expects task == current.
3552 */
Peter Zijlstrac1274492015-12-10 20:57:40 +01003553static void perf_event_enable_on_exec(int ctxn)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003554{
Peter Zijlstrac1274492015-12-10 20:57:40 +01003555 struct perf_event_context *ctx, *clone_ctx = NULL;
Alexander Shishkin487f05e2017-01-19 18:43:30 +02003556 enum event_type_t event_type = 0;
Peter Zijlstra3e349502016-01-08 10:01:18 +01003557 struct perf_cpu_context *cpuctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003558 struct perf_event *event;
3559 unsigned long flags;
3560 int enabled = 0;
3561
3562 local_irq_save(flags);
Peter Zijlstrac1274492015-12-10 20:57:40 +01003563 ctx = current->perf_event_ctxp[ctxn];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003564 if (!ctx || !ctx->nr_events)
3565 goto out;
3566
Peter Zijlstra3e349502016-01-08 10:01:18 +01003567 cpuctx = __get_cpu_context(ctx);
3568 perf_ctx_lock(cpuctx, ctx);
Peter Zijlstra7fce2502016-02-24 18:45:48 +01003569 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
Alexander Shishkin487f05e2017-01-19 18:43:30 +02003570 list_for_each_entry(event, &ctx->event_list, event_entry) {
Peter Zijlstra3e349502016-01-08 10:01:18 +01003571 enabled |= event_enable_on_exec(event, ctx);
Alexander Shishkin487f05e2017-01-19 18:43:30 +02003572 event_type |= get_event_type(event);
3573 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003574
3575 /*
Peter Zijlstra3e349502016-01-08 10:01:18 +01003576 * Unclone and reschedule this context if we enabled any event.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003577 */
Peter Zijlstra3e349502016-01-08 10:01:18 +01003578 if (enabled) {
Peter Zijlstra211de6e2014-09-30 19:23:08 +02003579 clone_ctx = unclone_ctx(ctx);
Alexander Shishkin487f05e2017-01-19 18:43:30 +02003580 ctx_resched(cpuctx, ctx, event_type);
Peter Zijlstra7bbba0e2017-02-15 16:12:20 +01003581 } else {
3582 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
Peter Zijlstra3e349502016-01-08 10:01:18 +01003583 }
3584 perf_ctx_unlock(cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003585
Peter Zijlstra9ed60602010-06-11 17:36:35 +02003586out:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003587 local_irq_restore(flags);
Peter Zijlstra211de6e2014-09-30 19:23:08 +02003588
3589 if (clone_ctx)
3590 put_ctx(clone_ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003591}
3592
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003593struct perf_read_data {
3594 struct perf_event *event;
3595 bool group;
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07003596 int ret;
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003597};
3598
Peter Zijlstra451d24d2017-01-31 11:27:10 +01003599static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
David Carrillo-Cisnerosd6a2f9032016-08-17 13:55:06 -07003600{
David Carrillo-Cisnerosd6a2f9032016-08-17 13:55:06 -07003601 u16 local_pkg, event_pkg;
3602
3603 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
Peter Zijlstra451d24d2017-01-31 11:27:10 +01003604 int local_cpu = smp_processor_id();
3605
3606 event_pkg = topology_physical_package_id(event_cpu);
3607 local_pkg = topology_physical_package_id(local_cpu);
David Carrillo-Cisnerosd6a2f9032016-08-17 13:55:06 -07003608
3609 if (event_pkg == local_pkg)
3610 return local_cpu;
3611 }
3612
3613 return event_cpu;
3614}
3615
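/*
 * Illustrative example, not part of the original file: for an event with
 * PERF_EV_CAP_READ_ACTIVE_PKG that is active on CPU 8 while the reader
 * runs on CPU 10 of the same physical package, the helper above returns
 * the local CPU (10), so the subsequent smp_call_function_single() in
 * perf_event_read() runs locally instead of sending an IPI. Across
 * packages it still returns the event's own CPU.
 */
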
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003616/*
3617 * Cross CPU call to read the hardware event
3618 */
3619static void __perf_event_read(void *info)
3620{
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003621 struct perf_read_data *data = info;
3622 struct perf_event *sub, *event = data->event;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003623 struct perf_event_context *ctx = event->ctx;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003624 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
Sukadev Bhattiprolu4a00c162015-09-03 20:07:51 -07003625 struct pmu *pmu = event->pmu;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003626
3627 /*
3628 * If this is a task context, we need to check whether it is
3629 * the current task context of this cpu. If not, it has been
3630 * scheduled out before the smp call arrived. In that case
3631 * event->count would have been updated to a recent sample
3632 * when the event was scheduled out.
3633 */
3634 if (ctx->task && cpuctx->task_ctx != ctx)
3635 return;
3636
Thomas Gleixnere625cce12009-11-17 18:02:06 +01003637 raw_spin_lock(&ctx->lock);
Stephane Eraniane5d13672011-02-14 11:20:01 +02003638 if (ctx->is_active) {
Peter Zijlstra542e72f2011-01-26 15:38:35 +01003639 update_context_time(ctx);
Stephane Eraniane5d13672011-02-14 11:20:01 +02003640 update_cgrp_time_from_event(event);
3641 }
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003642
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003643 update_event_times(event);
Sukadev Bhattiprolu4a00c162015-09-03 20:07:51 -07003644 if (event->state != PERF_EVENT_STATE_ACTIVE)
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003645 goto unlock;
3646
Sukadev Bhattiprolu4a00c162015-09-03 20:07:51 -07003647 if (!data->group) {
3648 pmu->read(event);
3649 data->ret = 0;
3650 goto unlock;
3651 }
3652
3653 pmu->start_txn(pmu, PERF_PMU_TXN_READ);
3654
3655 pmu->read(event);
3656
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003657 list_for_each_entry(sub, &event->sibling_list, group_entry) {
3658 update_event_times(sub);
Sukadev Bhattiprolu4a00c162015-09-03 20:07:51 -07003659 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
3660 /*
3661 * Use sibling's PMU rather than @event's since
3662 * sibling could be on different (eg: software) PMU.
3663 */
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003664 sub->pmu->read(sub);
Sukadev Bhattiprolu4a00c162015-09-03 20:07:51 -07003665 }
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003666 }
Sukadev Bhattiprolu4a00c162015-09-03 20:07:51 -07003667
3668 data->ret = pmu->commit_txn(pmu);
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003669
3670unlock:
Thomas Gleixnere625cce12009-11-17 18:02:06 +01003671 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003672}
3673
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003674static inline u64 perf_event_count(struct perf_event *event)
3675{
Vikas Shivappac39a0e22017-07-25 14:14:20 -07003676 return local64_read(&event->count) + atomic64_read(&event->child_count);
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003677}
3678
Kaixu Xiaffe86902015-08-06 07:02:32 +00003679/*
3680 * NMI-safe method to read a local event, that is an event that
3681 * is:
3682 * - either for the current task, or for this CPU
3683 * - does not have inherit set, because inherited task events
3684 * will not be local and we cannot read them atomically
3685 * - must not have a pmu::count method
3686 */
Alexei Starovoitovf91840a2017-06-02 21:03:52 -07003687int perf_event_read_local(struct perf_event *event, u64 *value)
Kaixu Xiaffe86902015-08-06 07:02:32 +00003688{
3689 unsigned long flags;
Alexei Starovoitovf91840a2017-06-02 21:03:52 -07003690 int ret = 0;
Kaixu Xiaffe86902015-08-06 07:02:32 +00003691
3692 /*
3693 * Disabling interrupts avoids all counter scheduling (context
3694 * switches, timer based rotation and IPIs).
3695 */
3696 local_irq_save(flags);
3697
Kaixu Xiaffe86902015-08-06 07:02:32 +00003698 /*
3699 * It must not be an event with inherit set, we cannot read
3700 * all child counters from atomic context.
3701 */
Alexei Starovoitovf91840a2017-06-02 21:03:52 -07003702 if (event->attr.inherit) {
3703 ret = -EOPNOTSUPP;
3704 goto out;
3705 }
Kaixu Xiaffe86902015-08-06 07:02:32 +00003706
Alexei Starovoitovf91840a2017-06-02 21:03:52 -07003707 /* If this is a per-task event, it must be for current */
3708 if ((event->attach_state & PERF_ATTACH_TASK) &&
3709 event->hw.target != current) {
3710 ret = -EINVAL;
3711 goto out;
3712 }
3713
3714 /* If this is a per-CPU event, it must be for this CPU */
3715 if (!(event->attach_state & PERF_ATTACH_TASK) &&
3716 event->cpu != smp_processor_id()) {
3717 ret = -EINVAL;
3718 goto out;
3719 }
Kaixu Xiaffe86902015-08-06 07:02:32 +00003720
3721 /*
3722 * If the event is currently on this CPU, its either a per-task event,
3723 * or local to this CPU. Furthermore it means its ACTIVE (otherwise
3724 * oncpu == -1).
3725 */
3726 if (event->oncpu == smp_processor_id())
3727 event->pmu->read(event);
3728
Alexei Starovoitovf91840a2017-06-02 21:03:52 -07003729 *value = local64_read(&event->count);
3730out:
Kaixu Xiaffe86902015-08-06 07:02:32 +00003731 local_irq_restore(flags);
3732
Alexei Starovoitovf91840a2017-06-02 21:03:52 -07003733 return ret;
Kaixu Xiaffe86902015-08-06 07:02:32 +00003734}
3735
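/*
 * Illustrative sketch, not part of the original file: an NMI-safe caller
 * (the BPF perf-event-read helper is the typical in-tree user) reads a
 * local event roughly as:
 *
 *	u64 value;
 *	int err = perf_event_read_local(event, &value);
 *
 *	if (err)
 *		... event is not local to this task/CPU or has inherit set ...
 *
 * The error handling above is only a sketch.
 */
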
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07003736static int perf_event_read(struct perf_event *event, bool group)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003737{
Peter Zijlstra451d24d2017-01-31 11:27:10 +01003738 int event_cpu, ret = 0;
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07003739
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003740 /*
3741 * If event is enabled and currently active on a CPU, update the
3742 * value in the event structure:
3743 */
3744 if (event->state == PERF_EVENT_STATE_ACTIVE) {
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003745 struct perf_read_data data = {
3746 .event = event,
3747 .group = group,
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07003748 .ret = 0,
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003749 };
David Carrillo-Cisnerosd6a2f9032016-08-17 13:55:06 -07003750
Peter Zijlstra451d24d2017-01-31 11:27:10 +01003751 event_cpu = READ_ONCE(event->oncpu);
3752 if ((unsigned)event_cpu >= nr_cpu_ids)
3753 return 0;
3754
3755 preempt_disable();
3756 event_cpu = __perf_event_read_cpu(event, event_cpu);
David Carrillo-Cisnerosd6a2f9032016-08-17 13:55:06 -07003757
Peter Zijlstra58763142016-08-30 10:15:03 +02003758 /*
3759 * Purposely ignore the smp_call_function_single() return
3760 * value.
3761 *
Peter Zijlstra451d24d2017-01-31 11:27:10 +01003762 * If event_cpu isn't a valid CPU it means the event got
Peter Zijlstra58763142016-08-30 10:15:03 +02003763 * scheduled out and that will have updated the event count.
3764 *
3765 * Therefore, either way, we'll have an up-to-date event count
3766 * after this.
3767 */
Peter Zijlstra451d24d2017-01-31 11:27:10 +01003768 (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
3769 preempt_enable();
Peter Zijlstra58763142016-08-30 10:15:03 +02003770 ret = data.ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003771 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
Peter Zijlstra2b8988c2009-11-20 22:19:54 +01003772 struct perf_event_context *ctx = event->ctx;
3773 unsigned long flags;
3774
Thomas Gleixnere625cce12009-11-17 18:02:06 +01003775 raw_spin_lock_irqsave(&ctx->lock, flags);
Stephane Eranianc530ccd2010-10-15 15:26:01 +02003776 /*
3777 * We may read while the context is not active
3778 * (e.g., the thread is blocked); in that case
3779 * we cannot update the context time.
3780 */
Stephane Eraniane5d13672011-02-14 11:20:01 +02003781 if (ctx->is_active) {
Stephane Eranianc530ccd2010-10-15 15:26:01 +02003782 update_context_time(ctx);
Stephane Eraniane5d13672011-02-14 11:20:01 +02003783 update_cgrp_time_from_event(event);
3784 }
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003785 if (group)
3786 update_group_times(event);
3787 else
3788 update_event_times(event);
Thomas Gleixnere625cce12009-11-17 18:02:06 +01003789 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003790 }
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07003791
3792 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003793}
3794
3795/*
3796 * Initialize the perf_event context in a task_struct:
3797 */
Peter Zijlstraeb184472010-09-07 15:55:13 +02003798static void __perf_event_init_context(struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003799{
Thomas Gleixnere625cce12009-11-17 18:02:06 +01003800 raw_spin_lock_init(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003801 mutex_init(&ctx->mutex);
Mark Rutland2fde4f92015-01-07 15:01:54 +00003802 INIT_LIST_HEAD(&ctx->active_ctx_list);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01003803 INIT_LIST_HEAD(&ctx->pinned_groups);
3804 INIT_LIST_HEAD(&ctx->flexible_groups);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003805 INIT_LIST_HEAD(&ctx->event_list);
3806 atomic_set(&ctx->refcount, 1);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003807}
3808
Peter Zijlstraeb184472010-09-07 15:55:13 +02003809static struct perf_event_context *
3810alloc_perf_context(struct pmu *pmu, struct task_struct *task)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003811{
3812 struct perf_event_context *ctx;
Peter Zijlstraeb184472010-09-07 15:55:13 +02003813
3814 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
3815 if (!ctx)
3816 return NULL;
3817
3818 __perf_event_init_context(ctx);
3819 if (task) {
3820 ctx->task = task;
3821 get_task_struct(task);
3822 }
3823 ctx->pmu = pmu;
3824
3825 return ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003826}
3827
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07003828static struct task_struct *
3829find_lively_task_by_vpid(pid_t vpid)
3830{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003831 struct task_struct *task;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003832
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003833 rcu_read_lock();
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07003834 if (!vpid)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003835 task = current;
3836 else
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07003837 task = find_task_by_vpid(vpid);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003838 if (task)
3839 get_task_struct(task);
3840 rcu_read_unlock();
3841
3842 if (!task)
3843 return ERR_PTR(-ESRCH);
3844
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07003845 return task;
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07003846}
3847
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01003848/*
3849 * Returns a matching context with refcount and pincount.
3850 */
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003851static struct perf_event_context *
Yan, Zheng4af57ef2014-11-04 21:56:01 -05003852find_get_context(struct pmu *pmu, struct task_struct *task,
3853 struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003854{
Peter Zijlstra211de6e2014-09-30 19:23:08 +02003855 struct perf_event_context *ctx, *clone_ctx = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003856 struct perf_cpu_context *cpuctx;
Yan, Zheng4af57ef2014-11-04 21:56:01 -05003857 void *task_ctx_data = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003858 unsigned long flags;
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003859 int ctxn, err;
Yan, Zheng4af57ef2014-11-04 21:56:01 -05003860 int cpu = event->cpu;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003861
Oleg Nesterov22a4ec72011-01-18 17:10:08 +01003862 if (!task) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003863 /* Must be root to operate on a CPU event: */
3864 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3865 return ERR_PTR(-EACCES);
3866
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003867 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003868 ctx = &cpuctx->ctx;
3869 get_ctx(ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01003870 ++ctx->pin_count;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003871
3872 return ctx;
3873 }
3874
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003875 err = -EINVAL;
3876 ctxn = pmu->task_ctx_nr;
3877 if (ctxn < 0)
3878 goto errout;
3879
Yan, Zheng4af57ef2014-11-04 21:56:01 -05003880 if (event->attach_state & PERF_ATTACH_TASK_DATA) {
3881 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
3882 if (!task_ctx_data) {
3883 err = -ENOMEM;
3884 goto errout;
3885 }
3886 }
3887
Peter Zijlstra9ed60602010-06-11 17:36:35 +02003888retry:
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003889 ctx = perf_lock_task_context(task, ctxn, &flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003890 if (ctx) {
Peter Zijlstra211de6e2014-09-30 19:23:08 +02003891 clone_ctx = unclone_ctx(ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01003892 ++ctx->pin_count;
Yan, Zheng4af57ef2014-11-04 21:56:01 -05003893
3894 if (task_ctx_data && !ctx->task_ctx_data) {
3895 ctx->task_ctx_data = task_ctx_data;
3896 task_ctx_data = NULL;
3897 }
Thomas Gleixnere625cce12009-11-17 18:02:06 +01003898 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Peter Zijlstra211de6e2014-09-30 19:23:08 +02003899
3900 if (clone_ctx)
3901 put_ctx(clone_ctx);
Peter Zijlstra9137fb22011-04-09 21:17:41 +02003902 } else {
Peter Zijlstraeb184472010-09-07 15:55:13 +02003903 ctx = alloc_perf_context(pmu, task);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003904 err = -ENOMEM;
3905 if (!ctx)
3906 goto errout;
Peter Zijlstraeb184472010-09-07 15:55:13 +02003907
Yan, Zheng4af57ef2014-11-04 21:56:01 -05003908 if (task_ctx_data) {
3909 ctx->task_ctx_data = task_ctx_data;
3910 task_ctx_data = NULL;
3911 }
3912
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01003913 err = 0;
3914 mutex_lock(&task->perf_event_mutex);
3915 /*
 3916	 * If it has already passed perf_event_exit_task(),
 3917	 * we must see PF_EXITING; it takes this mutex too.
3918 */
3919 if (task->flags & PF_EXITING)
3920 err = -ESRCH;
3921 else if (task->perf_event_ctxp[ctxn])
3922 err = -EAGAIN;
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01003923 else {
Peter Zijlstra9137fb22011-04-09 21:17:41 +02003924 get_ctx(ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01003925 ++ctx->pin_count;
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01003926 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01003927 }
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01003928 mutex_unlock(&task->perf_event_mutex);
3929
3930 if (unlikely(err)) {
Peter Zijlstra9137fb22011-04-09 21:17:41 +02003931 put_ctx(ctx);
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01003932
3933 if (err == -EAGAIN)
3934 goto retry;
3935 goto errout;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003936 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003937 }
3938
Yan, Zheng4af57ef2014-11-04 21:56:01 -05003939 kfree(task_ctx_data);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003940 return ctx;
3941
Peter Zijlstra9ed60602010-06-11 17:36:35 +02003942errout:
Yan, Zheng4af57ef2014-11-04 21:56:01 -05003943 kfree(task_ctx_data);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003944 return ERR_PTR(err);
3945}
3946
Li Zefan6fb29152009-10-15 11:21:42 +08003947static void perf_event_free_filter(struct perf_event *event);
Alexei Starovoitov25415172015-03-25 12:49:20 -07003948static void perf_event_free_bpf_prog(struct perf_event *event);
Li Zefan6fb29152009-10-15 11:21:42 +08003949
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003950static void free_event_rcu(struct rcu_head *head)
3951{
3952 struct perf_event *event;
3953
3954 event = container_of(head, struct perf_event, rcu_head);
3955 if (event->ns)
3956 put_pid_ns(event->ns);
Li Zefan6fb29152009-10-15 11:21:42 +08003957 perf_event_free_filter(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003958 kfree(event);
3959}
3960
Peter Zijlstrab69cf532014-03-14 10:50:33 +01003961static void ring_buffer_attach(struct perf_event *event,
3962 struct ring_buffer *rb);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003963
Kan Liangf2fb6be2016-03-23 11:24:37 -07003964static void detach_sb_event(struct perf_event *event)
3965{
3966 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
3967
3968 raw_spin_lock(&pel->lock);
3969 list_del_rcu(&event->sb_list);
3970 raw_spin_unlock(&pel->lock);
3971}
3972
David Carrillo-Cisnerosa4f144e2016-06-01 12:33:05 -07003973static bool is_sb_event(struct perf_event *event)
Kan Liangf2fb6be2016-03-23 11:24:37 -07003974{
David Carrillo-Cisnerosa4f144e2016-06-01 12:33:05 -07003975 struct perf_event_attr *attr = &event->attr;
3976
Kan Liangf2fb6be2016-03-23 11:24:37 -07003977 if (event->parent)
David Carrillo-Cisnerosa4f144e2016-06-01 12:33:05 -07003978 return false;
Kan Liangf2fb6be2016-03-23 11:24:37 -07003979
3980 if (event->attach_state & PERF_ATTACH_TASK)
David Carrillo-Cisnerosa4f144e2016-06-01 12:33:05 -07003981 return false;
Kan Liangf2fb6be2016-03-23 11:24:37 -07003982
David Carrillo-Cisnerosa4f144e2016-06-01 12:33:05 -07003983 if (attr->mmap || attr->mmap_data || attr->mmap2 ||
3984 attr->comm || attr->comm_exec ||
3985 attr->task ||
3986 attr->context_switch)
3987 return true;
3988 return false;
3989}
3990
3991static void unaccount_pmu_sb_event(struct perf_event *event)
3992{
3993 if (is_sb_event(event))
3994 detach_sb_event(event);
Kan Liangf2fb6be2016-03-23 11:24:37 -07003995}
3996
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02003997static void unaccount_event_cpu(struct perf_event *event, int cpu)
3998{
3999 if (event->parent)
4000 return;
4001
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02004002 if (is_cgroup_event(event))
4003 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
4004}
4005
Frederic Weisbecker555e0c12015-07-16 17:42:29 +02004006#ifdef CONFIG_NO_HZ_FULL
4007static DEFINE_SPINLOCK(nr_freq_lock);
4008#endif
4009
4010static void unaccount_freq_event_nohz(void)
4011{
4012#ifdef CONFIG_NO_HZ_FULL
4013 spin_lock(&nr_freq_lock);
4014 if (atomic_dec_and_test(&nr_freq_events))
4015 tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
4016 spin_unlock(&nr_freq_lock);
4017#endif
4018}
4019
4020static void unaccount_freq_event(void)
4021{
4022 if (tick_nohz_full_enabled())
4023 unaccount_freq_event_nohz();
4024 else
4025 atomic_dec(&nr_freq_events);
4026}
4027
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02004028static void unaccount_event(struct perf_event *event)
4029{
Peter Zijlstra25432ae2016-01-08 11:05:09 +01004030 bool dec = false;
4031
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02004032 if (event->parent)
4033 return;
4034
4035 if (event->attach_state & PERF_ATTACH_TASK)
Peter Zijlstra25432ae2016-01-08 11:05:09 +01004036 dec = true;
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02004037 if (event->attr.mmap || event->attr.mmap_data)
4038 atomic_dec(&nr_mmap_events);
4039 if (event->attr.comm)
4040 atomic_dec(&nr_comm_events);
Hari Bathinie4222672017-03-08 02:11:36 +05304041 if (event->attr.namespaces)
4042 atomic_dec(&nr_namespaces_events);
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02004043 if (event->attr.task)
4044 atomic_dec(&nr_task_events);
Frederic Weisbecker948b26b2013-08-02 18:29:55 +02004045 if (event->attr.freq)
Frederic Weisbecker555e0c12015-07-16 17:42:29 +02004046 unaccount_freq_event();
Adrian Hunter45ac1402015-07-21 12:44:02 +03004047 if (event->attr.context_switch) {
Peter Zijlstra25432ae2016-01-08 11:05:09 +01004048 dec = true;
Adrian Hunter45ac1402015-07-21 12:44:02 +03004049 atomic_dec(&nr_switch_events);
4050 }
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02004051 if (is_cgroup_event(event))
Peter Zijlstra25432ae2016-01-08 11:05:09 +01004052 dec = true;
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02004053 if (has_branch_stack(event))
Peter Zijlstra25432ae2016-01-08 11:05:09 +01004054 dec = true;
4055
Peter Zijlstra9107c892016-02-24 18:45:45 +01004056 if (dec) {
4057 if (!atomic_add_unless(&perf_sched_count, -1, 1))
4058 schedule_delayed_work(&perf_sched_work, HZ);
4059 }
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02004060
4061 unaccount_event_cpu(event, event->cpu);
Kan Liangf2fb6be2016-03-23 11:24:37 -07004062
4063 unaccount_pmu_sb_event(event);
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02004064}
4065
Peter Zijlstra9107c892016-02-24 18:45:45 +01004066static void perf_sched_delayed(struct work_struct *work)
4067{
4068 mutex_lock(&perf_sched_mutex);
4069 if (atomic_dec_and_test(&perf_sched_count))
4070 static_branch_disable(&perf_sched_events);
4071 mutex_unlock(&perf_sched_mutex);
4072}
4073
Alexander Shishkinbed5b252015-01-30 12:31:06 +02004074/*
4075 * The following implement mutual exclusion of events on "exclusive" pmus
4076 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
4077 * at a time, so we disallow creating events that might conflict, namely:
4078 *
4079 * 1) cpu-wide events in the presence of per-task events,
4080 * 2) per-task events in the presence of cpu-wide events,
4081 * 3) two matching events on the same context.
4082 *
4083 * The former two cases are handled in the allocation path (perf_event_alloc(),
Peter Zijlstraa0733e62016-01-26 12:14:40 +01004084 * _free_event()), the latter -- before the first perf_install_in_context().
Alexander Shishkinbed5b252015-01-30 12:31:06 +02004085 */
4086static int exclusive_event_init(struct perf_event *event)
4087{
4088 struct pmu *pmu = event->pmu;
4089
4090 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
4091 return 0;
4092
4093 /*
4094 * Prevent co-existence of per-task and cpu-wide events on the
4095 * same exclusive pmu.
4096 *
4097 * Negative pmu::exclusive_cnt means there are cpu-wide
4098 * events on this "exclusive" pmu, positive means there are
4099 * per-task events.
4100 *
4101 * Since this is called in perf_event_alloc() path, event::ctx
4102 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
4103 * to mean "per-task event", because unlike other attach states it
4104 * never gets cleared.
4105 */
4106 if (event->attach_state & PERF_ATTACH_TASK) {
4107 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
4108 return -EBUSY;
4109 } else {
4110 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
4111 return -EBUSY;
4112 }
4113
4114 return 0;
4115}
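/*
 * Illustrative sketch of the signed-counter scheme above, reduced to its
 * essentials ("cnt" plays the role of pmu::exclusive_cnt; a hypothetical
 * stand-alone example, not used anywhere in this file):
 *
 *	static atomic_t cnt = ATOMIC_INIT(0);
 *
 *	// per-task event: only succeeds while cnt >= 0, leaves it positive
 *	if (!atomic_inc_unless_negative(&cnt))
 *		return -EBUSY;
 *
 *	// cpu-wide event: only succeeds while cnt <= 0, leaves it negative
 *	if (!atomic_dec_unless_positive(&cnt))
 *		return -EBUSY;
 *
 * A positive count thus means "per-task events only", a negative count
 * "cpu-wide events only", so the two kinds never coexist on the pmu.
 */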
4116
4117static void exclusive_event_destroy(struct perf_event *event)
4118{
4119 struct pmu *pmu = event->pmu;
4120
4121 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
4122 return;
4123
4124 /* see comment in exclusive_event_init() */
4125 if (event->attach_state & PERF_ATTACH_TASK)
4126 atomic_dec(&pmu->exclusive_cnt);
4127 else
4128 atomic_inc(&pmu->exclusive_cnt);
4129}
4130
4131static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
4132{
Alexander Shishkin3bf62152016-09-20 18:48:11 +03004133 if ((e1->pmu == e2->pmu) &&
Alexander Shishkinbed5b252015-01-30 12:31:06 +02004134 (e1->cpu == e2->cpu ||
4135 e1->cpu == -1 ||
4136 e2->cpu == -1))
4137 return true;
4138 return false;
4139}
4140
4141/* Called under the same ctx::mutex as perf_install_in_context() */
4142static bool exclusive_event_installable(struct perf_event *event,
4143 struct perf_event_context *ctx)
4144{
4145 struct perf_event *iter_event;
4146 struct pmu *pmu = event->pmu;
4147
4148 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
4149 return true;
4150
4151 list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
4152 if (exclusive_event_match(iter_event, event))
4153 return false;
4154 }
4155
4156 return true;
4157}
4158
Alexander Shishkin375637b2016-04-27 18:44:46 +03004159static void perf_addr_filters_splice(struct perf_event *event,
4160 struct list_head *head);
4161
Peter Zijlstra683ede42014-05-05 12:11:24 +02004162static void _free_event(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004163{
Peter Zijlstrae360adb2010-10-14 14:01:34 +08004164 irq_work_sync(&event->pending);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004165
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02004166 unaccount_event(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004167
Frederic Weisbecker76369132011-05-19 19:55:04 +02004168 if (event->rb) {
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02004169 /*
 4170		 * This can happen when we close an event with redirected output.
4171 *
4172 * Since we have a 0 refcount, perf_mmap_close() will skip
4173 * over us; possibly making our ring_buffer_put() the last.
4174 */
4175 mutex_lock(&event->mmap_mutex);
Peter Zijlstrab69cf532014-03-14 10:50:33 +01004176 ring_buffer_attach(event, NULL);
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02004177 mutex_unlock(&event->mmap_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004178 }
4179
Stephane Eraniane5d13672011-02-14 11:20:01 +02004180 if (is_cgroup_event(event))
4181 perf_detach_cgroup(event);
4182
Peter Zijlstraa0733e62016-01-26 12:14:40 +01004183 if (!event->parent) {
4184 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
4185 put_callchain_buffers();
4186 }
4187
4188 perf_event_free_bpf_prog(event);
Alexander Shishkin375637b2016-04-27 18:44:46 +03004189 perf_addr_filters_splice(event, NULL);
4190 kfree(event->addr_filters_offs);
Peter Zijlstraa0733e62016-01-26 12:14:40 +01004191
4192 if (event->destroy)
4193 event->destroy(event);
4194
4195 if (event->ctx)
4196 put_ctx(event->ctx);
4197
Alexander Shishkin62a92c82016-06-07 15:44:15 +03004198 exclusive_event_destroy(event);
4199 module_put(event->pmu->module);
Peter Zijlstraa0733e62016-01-26 12:14:40 +01004200
4201 call_rcu(&event->rcu_head, free_event_rcu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004202}
4203
Peter Zijlstra683ede42014-05-05 12:11:24 +02004204/*
4205 * Used to free events which have a known refcount of 1, such as in error paths
 4206 * where the event isn't exposed yet, and for inherited events.
4207 */
4208static void free_event(struct perf_event *event)
Arjan van de Venfb0459d2009-09-25 12:25:56 +02004209{
Peter Zijlstra683ede42014-05-05 12:11:24 +02004210 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
4211 "unexpected event refcount: %ld; ptr=%p\n",
4212 atomic_long_read(&event->refcount), event)) {
4213 /* leak to avoid use-after-free */
4214 return;
4215 }
Arjan van de Venfb0459d2009-09-25 12:25:56 +02004216
Peter Zijlstra683ede42014-05-05 12:11:24 +02004217 _free_event(event);
Arjan van de Venfb0459d2009-09-25 12:25:56 +02004218}
Arjan van de Venfb0459d2009-09-25 12:25:56 +02004219
Peter Zijlstraa66a3052009-11-23 11:37:23 +01004220/*
Jiri Olsaf8697762014-08-01 14:33:01 +02004221 * Remove user event from the owner task.
Peter Zijlstraa66a3052009-11-23 11:37:23 +01004222 */
Jiri Olsaf8697762014-08-01 14:33:01 +02004223static void perf_remove_from_owner(struct perf_event *event)
Peter Zijlstraa66a3052009-11-23 11:37:23 +01004224{
Peter Zijlstra88821352010-11-09 19:01:43 +01004225 struct task_struct *owner;
Peter Zijlstraa66a3052009-11-23 11:37:23 +01004226
Peter Zijlstra88821352010-11-09 19:01:43 +01004227 rcu_read_lock();
Peter Zijlstra88821352010-11-09 19:01:43 +01004228 /*
Peter Zijlstraf47c02c2016-01-26 12:30:14 +01004229 * Matches the smp_store_release() in perf_event_exit_task(). If we
4230 * observe !owner it means the list deletion is complete and we can
4231 * indeed free this event, otherwise we need to serialize on
Peter Zijlstra88821352010-11-09 19:01:43 +01004232 * owner->perf_event_mutex.
4233 */
Peter Zijlstraf47c02c2016-01-26 12:30:14 +01004234 owner = lockless_dereference(event->owner);
Peter Zijlstra88821352010-11-09 19:01:43 +01004235 if (owner) {
4236 /*
4237 * Since delayed_put_task_struct() also drops the last
4238 * task reference we can safely take a new reference
4239 * while holding the rcu_read_lock().
4240 */
4241 get_task_struct(owner);
4242 }
4243 rcu_read_unlock();
4244
4245 if (owner) {
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004246 /*
4247 * If we're here through perf_event_exit_task() we're already
4248 * holding ctx->mutex which would be an inversion wrt. the
4249 * normal lock order.
4250 *
 4251		 * However we can safely take this lock because it's the child
4252 * ctx->mutex.
4253 */
4254 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
4255
Peter Zijlstra88821352010-11-09 19:01:43 +01004256 /*
4257 * We have to re-check the event->owner field, if it is cleared
4258 * we raced with perf_event_exit_task(), acquiring the mutex
4259 * ensured they're done, and we can proceed with freeing the
4260 * event.
4261 */
Peter Zijlstraf47c02c2016-01-26 12:30:14 +01004262 if (event->owner) {
Peter Zijlstra88821352010-11-09 19:01:43 +01004263 list_del_init(&event->owner_entry);
Peter Zijlstraf47c02c2016-01-26 12:30:14 +01004264 smp_store_release(&event->owner, NULL);
4265 }
Peter Zijlstra88821352010-11-09 19:01:43 +01004266 mutex_unlock(&owner->perf_event_mutex);
4267 put_task_struct(owner);
4268 }
Jiri Olsaf8697762014-08-01 14:33:01 +02004269}
4270
Jiri Olsaf8697762014-08-01 14:33:01 +02004271static void put_event(struct perf_event *event)
4272{
Jiri Olsaf8697762014-08-01 14:33:01 +02004273 if (!atomic_long_dec_and_test(&event->refcount))
4274 return;
4275
Peter Zijlstra683ede42014-05-05 12:11:24 +02004276 _free_event(event);
Al Viroa6fa9412012-08-20 14:59:25 +01004277}
4278
Peter Zijlstrac6e5b732016-01-15 16:07:41 +02004279/*
4280 * Kill an event dead; while event:refcount will preserve the event
4281 * object, it will not preserve its functionality. Once the last 'user'
4282 * gives up the object, we'll destroy the thing.
4283 */
Peter Zijlstra683ede42014-05-05 12:11:24 +02004284int perf_event_release_kernel(struct perf_event *event)
4285{
Peter Zijlstraa4f4bb62016-02-24 18:45:42 +01004286 struct perf_event_context *ctx = event->ctx;
Peter Zijlstrac6e5b732016-01-15 16:07:41 +02004287 struct perf_event *child, *tmp;
4288
Peter Zijlstraa4f4bb62016-02-24 18:45:42 +01004289 /*
4290 * If we got here through err_file: fput(event_file); we will not have
4291 * attached to a context yet.
4292 */
4293 if (!ctx) {
4294 WARN_ON_ONCE(event->attach_state &
4295 (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
4296 goto no_ctx;
4297 }
4298
Peter Zijlstra88821352010-11-09 19:01:43 +01004299 if (!is_kernel_event(event))
4300 perf_remove_from_owner(event);
4301
Peter Zijlstra5fa7c8e2016-01-26 15:25:15 +01004302 ctx = perf_event_ctx_lock(event);
Peter Zijlstra683ede42014-05-05 12:11:24 +02004303 WARN_ON_ONCE(ctx->parent_ctx);
Peter Zijlstraa69b0ca2016-02-24 18:45:44 +01004304 perf_remove_from_context(event, DETACH_GROUP);
Peter Zijlstra88821352010-11-09 19:01:43 +01004305
Peter Zijlstraa69b0ca2016-02-24 18:45:44 +01004306 raw_spin_lock_irq(&ctx->lock);
Peter Zijlstra60beda82016-01-26 14:55:02 +01004307 /*
Peter Zijlstrad8a8cfc2017-03-16 13:47:51 +01004308	 * Mark this event as STATE_DEAD; there is no external reference to it
Peter Zijlstraa69b0ca2016-02-24 18:45:44 +01004309 * anymore.
Peter Zijlstrac6e5b732016-01-15 16:07:41 +02004310 *
Peter Zijlstraa69b0ca2016-02-24 18:45:44 +01004311 * Anybody acquiring event->child_mutex after the below loop _must_
4312 * also see this, most importantly inherit_event() which will avoid
4313 * placing more children on the list.
Peter Zijlstrac6e5b732016-01-15 16:07:41 +02004314 *
4315 * Thus this guarantees that we will in fact observe and kill _ALL_
4316 * child events.
Peter Zijlstra60beda82016-01-26 14:55:02 +01004317 */
Peter Zijlstraa69b0ca2016-02-24 18:45:44 +01004318 event->state = PERF_EVENT_STATE_DEAD;
4319 raw_spin_unlock_irq(&ctx->lock);
4320
4321 perf_event_ctx_unlock(event, ctx);
Peter Zijlstra60beda82016-01-26 14:55:02 +01004322
Peter Zijlstrac6e5b732016-01-15 16:07:41 +02004323again:
4324 mutex_lock(&event->child_mutex);
4325 list_for_each_entry(child, &event->child_list, child_list) {
Al Viroa6fa9412012-08-20 14:59:25 +01004326
Peter Zijlstrac6e5b732016-01-15 16:07:41 +02004327 /*
4328 * Cannot change, child events are not migrated, see the
4329 * comment with perf_event_ctx_lock_nested().
4330 */
4331 ctx = lockless_dereference(child->ctx);
4332 /*
4333 * Since child_mutex nests inside ctx::mutex, we must jump
4334 * through hoops. We start by grabbing a reference on the ctx.
4335 *
4336 * Since the event cannot get freed while we hold the
4337 * child_mutex, the context must also exist and have a !0
4338 * reference count.
4339 */
4340 get_ctx(ctx);
4341
4342 /*
4343 * Now that we have a ctx ref, we can drop child_mutex, and
4344 * acquire ctx::mutex without fear of it going away. Then we
4345 * can re-acquire child_mutex.
4346 */
4347 mutex_unlock(&event->child_mutex);
4348 mutex_lock(&ctx->mutex);
4349 mutex_lock(&event->child_mutex);
4350
4351 /*
4352 * Now that we hold ctx::mutex and child_mutex, revalidate our
4353 * state, if child is still the first entry, it didn't get freed
4354 * and we can continue doing so.
4355 */
4356 tmp = list_first_entry_or_null(&event->child_list,
4357 struct perf_event, child_list);
4358 if (tmp == child) {
4359 perf_remove_from_context(child, DETACH_GROUP);
4360 list_del(&child->child_list);
4361 free_event(child);
4362 /*
4363 * This matches the refcount bump in inherit_event();
4364 * this can't be the last reference.
4365 */
4366 put_event(event);
4367 }
4368
4369 mutex_unlock(&event->child_mutex);
4370 mutex_unlock(&ctx->mutex);
4371 put_ctx(ctx);
4372 goto again;
4373 }
4374 mutex_unlock(&event->child_mutex);
4375
Peter Zijlstraa4f4bb62016-02-24 18:45:42 +01004376no_ctx:
4377 put_event(event); /* Must be the 'last' reference */
Peter Zijlstra683ede42014-05-05 12:11:24 +02004378 return 0;
4379}
4380EXPORT_SYMBOL_GPL(perf_event_release_kernel);
4381
Peter Zijlstra8b10c5e22015-05-01 16:08:46 +02004382/*
4383 * Called when the last reference to the file is gone.
4384 */
Al Viroa6fa9412012-08-20 14:59:25 +01004385static int perf_release(struct inode *inode, struct file *file)
4386{
Peter Zijlstrac6e5b732016-01-15 16:07:41 +02004387 perf_event_release_kernel(file->private_data);
Al Viroa6fa9412012-08-20 14:59:25 +01004388 return 0;
Peter Zijlstraa66a3052009-11-23 11:37:23 +01004389}
4390
Peter Zijlstra59ed4462009-11-20 22:19:55 +01004391u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004392{
4393 struct perf_event *child;
4394 u64 total = 0;
4395
Peter Zijlstra59ed4462009-11-20 22:19:55 +01004396 *enabled = 0;
4397 *running = 0;
4398
Peter Zijlstra6f105812009-11-20 22:19:56 +01004399 mutex_lock(&event->child_mutex);
Sukadev Bhattiprolu01add3e2015-09-03 20:07:46 -07004400
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004401 (void)perf_event_read(event, false);
Sukadev Bhattiprolu01add3e2015-09-03 20:07:46 -07004402 total += perf_event_count(event);
4403
Peter Zijlstra59ed4462009-11-20 22:19:55 +01004404 *enabled += event->total_time_enabled +
4405 atomic64_read(&event->child_total_time_enabled);
4406 *running += event->total_time_running +
4407 atomic64_read(&event->child_total_time_running);
4408
4409 list_for_each_entry(child, &event->child_list, child_list) {
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004410 (void)perf_event_read(child, false);
Sukadev Bhattiprolu01add3e2015-09-03 20:07:46 -07004411 total += perf_event_count(child);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01004412 *enabled += child->total_time_enabled;
4413 *running += child->total_time_running;
4414 }
Peter Zijlstra6f105812009-11-20 22:19:56 +01004415 mutex_unlock(&event->child_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004416
4417 return total;
4418}
Arjan van de Venfb0459d2009-09-25 12:25:56 +02004419EXPORT_SYMBOL_GPL(perf_event_read_value);
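/*
 * Illustrative sketch: a caller will typically scale the value returned
 * above by the enabled/running ratio to estimate the full-speed count of
 * a multiplexed event (div64_u64() keeps the division 32-bit safe):
 *
 *	u64 enabled, running;
 *	u64 count = perf_event_read_value(event, &enabled, &running);
 *
 *	if (running && running < enabled)
 *		count = div64_u64(count * enabled, running);
 */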
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004420
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004421static int __perf_read_group_add(struct perf_event *leader,
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004422 u64 read_format, u64 *values)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004423{
Jiri Olsa2aeb1882017-07-20 16:14:55 +02004424 struct perf_event_context *ctx = leader->ctx;
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004425 struct perf_event *sub;
Jiri Olsa2aeb1882017-07-20 16:14:55 +02004426 unsigned long flags;
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004427 int n = 1; /* skip @nr */
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004428 int ret;
Peter Zijlstraabf48682009-11-20 22:19:49 +01004429
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004430 ret = perf_event_read(leader, true);
4431 if (ret)
4432 return ret;
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004433
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004434 /*
4435 * Since we co-schedule groups, {enabled,running} times of siblings
4436 * will be identical to those of the leader, so we only publish one
4437 * set.
4438 */
4439 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
4440 values[n++] += leader->total_time_enabled +
4441 atomic64_read(&leader->child_total_time_enabled);
4442 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004443
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004444 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
4445 values[n++] += leader->total_time_running +
4446 atomic64_read(&leader->child_total_time_running);
4447 }
4448
4449 /*
4450 * Write {count,id} tuples for every sibling.
4451 */
4452 values[n++] += perf_event_count(leader);
Peter Zijlstraabf48682009-11-20 22:19:49 +01004453 if (read_format & PERF_FORMAT_ID)
4454 values[n++] = primary_event_id(leader);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004455
Jiri Olsa2aeb1882017-07-20 16:14:55 +02004456 raw_spin_lock_irqsave(&ctx->lock, flags);
4457
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004458 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004459 values[n++] += perf_event_count(sub);
Peter Zijlstraabf48682009-11-20 22:19:49 +01004460 if (read_format & PERF_FORMAT_ID)
4461 values[n++] = primary_event_id(sub);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004462 }
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004463
Jiri Olsa2aeb1882017-07-20 16:14:55 +02004464 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004465 return 0;
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004466}
4467
4468static int perf_read_group(struct perf_event *event,
4469 u64 read_format, char __user *buf)
4470{
4471 struct perf_event *leader = event->group_leader, *child;
4472 struct perf_event_context *ctx = leader->ctx;
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004473 int ret;
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004474 u64 *values;
4475
4476 lockdep_assert_held(&ctx->mutex);
4477
4478 values = kzalloc(event->read_size, GFP_KERNEL);
4479 if (!values)
4480 return -ENOMEM;
4481
4482 values[0] = 1 + leader->nr_siblings;
4483
4484 /*
4485 * By locking the child_mutex of the leader we effectively
 4486	 * lock the child list of all siblings. XXX: explain how.
4487 */
4488 mutex_lock(&leader->child_mutex);
4489
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004490 ret = __perf_read_group_add(leader, read_format, values);
4491 if (ret)
4492 goto unlock;
4493
4494 list_for_each_entry(child, &leader->child_list, child_list) {
4495 ret = __perf_read_group_add(child, read_format, values);
4496 if (ret)
4497 goto unlock;
4498 }
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004499
4500 mutex_unlock(&leader->child_mutex);
4501
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004502 ret = event->read_size;
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004503 if (copy_to_user(buf, values, event->read_size))
4504 ret = -EFAULT;
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004505 goto out;
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004506
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004507unlock:
4508 mutex_unlock(&leader->child_mutex);
4509out:
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004510 kfree(values);
Peter Zijlstraabf48682009-11-20 22:19:49 +01004511 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004512}
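/*
 * Illustrative sketch of the buffer layout filled in above for a group
 * read with both time fields and PERF_FORMAT_ID requested; userspace
 * commonly declares it as something like:
 *
 *	struct read_group_format {
 *		u64 nr;			// number of events in the group
 *		u64 time_enabled;	// PERF_FORMAT_TOTAL_TIME_ENABLED
 *		u64 time_running;	// PERF_FORMAT_TOTAL_TIME_RUNNING
 *		struct {
 *			u64 value;
 *			u64 id;		// PERF_FORMAT_ID
 *		} cnt[];		// leader first, then each sibling
 *	};
 *
 * The struct name is only for illustration; the layout follows the
 * values[] array built by __perf_read_group_add().
 */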
4513
Peter Zijlstra (Intel)b15f4952015-09-03 20:07:47 -07004514static int perf_read_one(struct perf_event *event,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004515 u64 read_format, char __user *buf)
4516{
Peter Zijlstra59ed4462009-11-20 22:19:55 +01004517 u64 enabled, running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004518 u64 values[4];
4519 int n = 0;
4520
Peter Zijlstra59ed4462009-11-20 22:19:55 +01004521 values[n++] = perf_event_read_value(event, &enabled, &running);
4522 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
4523 values[n++] = enabled;
4524 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
4525 values[n++] = running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004526 if (read_format & PERF_FORMAT_ID)
4527 values[n++] = primary_event_id(event);
4528
4529 if (copy_to_user(buf, values, n * sizeof(u64)))
4530 return -EFAULT;
4531
4532 return n * sizeof(u64);
4533}
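/*
 * Illustrative sketch of the corresponding single-event layout written by
 * perf_read_one(); only the fields whose format bits are set are present,
 * in this order:
 *
 *	u64 value;
 *	u64 time_enabled;	// if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	u64 time_running;	// if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	u64 id;			// if PERF_FORMAT_ID
 */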
4534
Jiri Olsadc633982014-09-12 13:18:26 +02004535static bool is_event_hup(struct perf_event *event)
4536{
4537 bool no_children;
4538
Peter Zijlstraa69b0ca2016-02-24 18:45:44 +01004539 if (event->state > PERF_EVENT_STATE_EXIT)
Jiri Olsadc633982014-09-12 13:18:26 +02004540 return false;
4541
4542 mutex_lock(&event->child_mutex);
4543 no_children = list_empty(&event->child_list);
4544 mutex_unlock(&event->child_mutex);
4545 return no_children;
4546}
4547
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004548/*
 4549 * Read the performance event - simple non-blocking version for now
4550 */
4551static ssize_t
Peter Zijlstra (Intel)b15f4952015-09-03 20:07:47 -07004552__perf_read(struct perf_event *event, char __user *buf, size_t count)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004553{
4554 u64 read_format = event->attr.read_format;
4555 int ret;
4556
4557 /*
 4558	 * Return end-of-file for a read on an event that is in
4559 * error state (i.e. because it was pinned but it couldn't be
4560 * scheduled on to the CPU at some point).
4561 */
4562 if (event->state == PERF_EVENT_STATE_ERROR)
4563 return 0;
4564
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02004565 if (count < event->read_size)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004566 return -ENOSPC;
4567
4568 WARN_ON_ONCE(event->ctx->parent_ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004569 if (read_format & PERF_FORMAT_GROUP)
Peter Zijlstra (Intel)b15f4952015-09-03 20:07:47 -07004570 ret = perf_read_group(event, read_format, buf);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004571 else
Peter Zijlstra (Intel)b15f4952015-09-03 20:07:47 -07004572 ret = perf_read_one(event, read_format, buf);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004573
4574 return ret;
4575}
4576
4577static ssize_t
4578perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
4579{
4580 struct perf_event *event = file->private_data;
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004581 struct perf_event_context *ctx;
4582 int ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004583
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004584 ctx = perf_event_ctx_lock(event);
Peter Zijlstra (Intel)b15f4952015-09-03 20:07:47 -07004585 ret = __perf_read(event, buf, count);
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004586 perf_event_ctx_unlock(event, ctx);
4587
4588 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004589}
4590
4591static unsigned int perf_poll(struct file *file, poll_table *wait)
4592{
4593 struct perf_event *event = file->private_data;
Frederic Weisbecker76369132011-05-19 19:55:04 +02004594 struct ring_buffer *rb;
Jiri Olsa61b67682014-08-13 19:39:56 +02004595 unsigned int events = POLLHUP;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004596
Sebastian Andrzej Siewiore708d7a2014-08-04 15:31:08 +02004597 poll_wait(file, &event->waitq, wait);
Jiri Olsa179033b2014-08-07 11:48:26 -04004598
Jiri Olsadc633982014-09-12 13:18:26 +02004599 if (is_event_hup(event))
Jiri Olsa179033b2014-08-07 11:48:26 -04004600 return events;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004601
Peter Zijlstra10c6db12011-11-26 02:47:31 +01004602 /*
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02004603 * Pin the event->rb by taking event->mmap_mutex; otherwise
4604 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
Peter Zijlstra10c6db12011-11-26 02:47:31 +01004605 */
4606 mutex_lock(&event->mmap_mutex);
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02004607 rb = event->rb;
4608 if (rb)
Frederic Weisbecker76369132011-05-19 19:55:04 +02004609 events = atomic_xchg(&rb->poll, 0);
Peter Zijlstra10c6db12011-11-26 02:47:31 +01004610 mutex_unlock(&event->mmap_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004611 return events;
4612}
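/*
 * Illustrative sketch: userspace normally waits for ring-buffer data with
 * poll()/epoll on the perf fd, consuming the bits published via rb->poll
 * above; POLLHUP is reported once the event is dead and childless:
 *
 *	struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };
 *	poll(&pfd, 1, timeout_ms);
 *
 * (perf_fd and timeout_ms stand for whatever the caller holds.)
 */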
4613
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004614static void _perf_event_reset(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004615{
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004616 (void)perf_event_read(event, false);
Peter Zijlstrae7850592010-05-21 14:43:08 +02004617 local64_set(&event->count, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004618 perf_event_update_userpage(event);
4619}
4620
4621/*
4622 * Holding the top-level event's child_mutex means that any
4623 * descendant process that has inherited this event will block
Peter Zijlstra8ba289b2016-01-26 13:06:56 +01004624 * in perf_event_exit_event() if it goes to exit, thus satisfying the
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004625 * task existence requirements of perf_event_enable/disable.
4626 */
4627static void perf_event_for_each_child(struct perf_event *event,
4628 void (*func)(struct perf_event *))
4629{
4630 struct perf_event *child;
4631
4632 WARN_ON_ONCE(event->ctx->parent_ctx);
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004633
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004634 mutex_lock(&event->child_mutex);
4635 func(event);
4636 list_for_each_entry(child, &event->child_list, child_list)
4637 func(child);
4638 mutex_unlock(&event->child_mutex);
4639}
4640
4641static void perf_event_for_each(struct perf_event *event,
4642 void (*func)(struct perf_event *))
4643{
4644 struct perf_event_context *ctx = event->ctx;
4645 struct perf_event *sibling;
4646
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004647 lockdep_assert_held(&ctx->mutex);
4648
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004649 event = event->group_leader;
4650
4651 perf_event_for_each_child(event, func);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004652 list_for_each_entry(sibling, &event->sibling_list, group_entry)
Michael Ellerman724b6da2012-04-11 11:54:13 +10004653 perf_event_for_each_child(sibling, func);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004654}
4655
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01004656static void __perf_event_period(struct perf_event *event,
4657 struct perf_cpu_context *cpuctx,
4658 struct perf_event_context *ctx,
4659 void *info)
Peter Zijlstra00179602015-11-30 16:26:35 +01004660{
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01004661 u64 value = *((u64 *)info);
Peter Zijlstrac7999c62015-08-04 19:22:49 +02004662 bool active;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004663
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004664 if (event->attr.freq) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004665 event->attr.sample_freq = value;
4666 } else {
4667 event->attr.sample_period = value;
4668 event->hw.sample_period = value;
4669 }
Peter Zijlstrabad71922013-11-27 13:54:38 +00004670
4671 active = (event->state == PERF_EVENT_STATE_ACTIVE);
4672 if (active) {
4673 perf_pmu_disable(ctx->pmu);
Peter Zijlstra1e02cd42016-03-10 15:39:24 +01004674 /*
4675 * We could be throttled; unthrottle now to avoid the tick
4676 * trying to unthrottle while we already re-started the event.
4677 */
4678 if (event->hw.interrupts == MAX_INTERRUPTS) {
4679 event->hw.interrupts = 0;
4680 perf_log_throttle(event, 1);
4681 }
Peter Zijlstrabad71922013-11-27 13:54:38 +00004682 event->pmu->stop(event, PERF_EF_UPDATE);
4683 }
4684
4685 local64_set(&event->hw.period_left, 0);
4686
4687 if (active) {
4688 event->pmu->start(event, PERF_EF_RELOAD);
4689 perf_pmu_enable(ctx->pmu);
4690 }
Peter Zijlstrac7999c62015-08-04 19:22:49 +02004691}
4692
4693static int perf_event_period(struct perf_event *event, u64 __user *arg)
4694{
Peter Zijlstrac7999c62015-08-04 19:22:49 +02004695 u64 value;
4696
4697 if (!is_sampling_event(event))
4698 return -EINVAL;
4699
4700 if (copy_from_user(&value, arg, sizeof(value)))
4701 return -EFAULT;
4702
4703 if (!value)
4704 return -EINVAL;
4705
4706 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4707 return -EINVAL;
4708
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01004709 event_function_call(event, __perf_event_period, &value);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004710
Peter Zijlstrac7999c62015-08-04 19:22:49 +02004711 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004712}
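/*
 * Illustrative sketch: the update above is driven from userspace through
 * the perf fd, passing a pointer to the new value; for freq-based events
 * the value is interpreted as sample_freq rather than sample_period:
 *
 *	u64 new_period = 100000;
 *	ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &new_period);
 */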
4713
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004714static const struct file_operations perf_fops;
4715
Al Viro2903ff02012-08-28 12:52:22 -04004716static inline int perf_fget_light(int fd, struct fd *p)
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004717{
Al Viro2903ff02012-08-28 12:52:22 -04004718 struct fd f = fdget(fd);
4719 if (!f.file)
4720 return -EBADF;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004721
Al Viro2903ff02012-08-28 12:52:22 -04004722 if (f.file->f_op != &perf_fops) {
4723 fdput(f);
4724 return -EBADF;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004725 }
Al Viro2903ff02012-08-28 12:52:22 -04004726 *p = f;
4727 return 0;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004728}
4729
4730static int perf_event_set_output(struct perf_event *event,
4731 struct perf_event *output_event);
Li Zefan6fb29152009-10-15 11:21:42 +08004732static int perf_event_set_filter(struct perf_event *event, void __user *arg);
Alexei Starovoitov25415172015-03-25 12:49:20 -07004733static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004734
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004735static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004736{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004737 void (*func)(struct perf_event *);
4738 u32 flags = arg;
4739
4740 switch (cmd) {
4741 case PERF_EVENT_IOC_ENABLE:
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004742 func = _perf_event_enable;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004743 break;
4744 case PERF_EVENT_IOC_DISABLE:
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004745 func = _perf_event_disable;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004746 break;
4747 case PERF_EVENT_IOC_RESET:
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004748 func = _perf_event_reset;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004749 break;
4750
4751 case PERF_EVENT_IOC_REFRESH:
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004752 return _perf_event_refresh(event, arg);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004753
4754 case PERF_EVENT_IOC_PERIOD:
4755 return perf_event_period(event, (u64 __user *)arg);
4756
Jiri Olsacf4957f2012-10-24 13:37:58 +02004757 case PERF_EVENT_IOC_ID:
4758 {
4759 u64 id = primary_event_id(event);
4760
4761 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
4762 return -EFAULT;
4763 return 0;
4764 }
4765
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004766 case PERF_EVENT_IOC_SET_OUTPUT:
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004767 {
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004768 int ret;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004769 if (arg != -1) {
Al Viro2903ff02012-08-28 12:52:22 -04004770 struct perf_event *output_event;
4771 struct fd output;
4772 ret = perf_fget_light(arg, &output);
4773 if (ret)
4774 return ret;
4775 output_event = output.file->private_data;
4776 ret = perf_event_set_output(event, output_event);
4777 fdput(output);
4778 } else {
4779 ret = perf_event_set_output(event, NULL);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004780 }
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004781 return ret;
4782 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004783
Li Zefan6fb29152009-10-15 11:21:42 +08004784 case PERF_EVENT_IOC_SET_FILTER:
4785 return perf_event_set_filter(event, (void __user *)arg);
4786
Alexei Starovoitov25415172015-03-25 12:49:20 -07004787 case PERF_EVENT_IOC_SET_BPF:
4788 return perf_event_set_bpf_prog(event, arg);
4789
Wang Nan86e79722016-03-28 06:41:29 +00004790 case PERF_EVENT_IOC_PAUSE_OUTPUT: {
4791 struct ring_buffer *rb;
4792
4793 rcu_read_lock();
4794 rb = rcu_dereference(event->rb);
4795 if (!rb || !rb->nr_pages) {
4796 rcu_read_unlock();
4797 return -EINVAL;
4798 }
4799 rb_toggle_paused(rb, !!arg);
4800 rcu_read_unlock();
4801 return 0;
4802 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004803 default:
4804 return -ENOTTY;
4805 }
4806
4807 if (flags & PERF_IOC_FLAG_GROUP)
4808 perf_event_for_each(event, func);
4809 else
4810 perf_event_for_each_child(event, func);
4811
4812 return 0;
4813}
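/*
 * Illustrative sketch: PERF_EVENT_IOC_SET_OUTPUT as handled above takes
 * another perf fd, or -1 to undo the redirection:
 *
 *	ioctl(fd_b, PERF_EVENT_IOC_SET_OUTPUT, fd_a);	// fd_b's samples land in fd_a's buffer
 *	ioctl(fd_b, PERF_EVENT_IOC_SET_OUTPUT, -1);	// fd_b uses its own buffer again
 */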
4814
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004815static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4816{
4817 struct perf_event *event = file->private_data;
4818 struct perf_event_context *ctx;
4819 long ret;
4820
4821 ctx = perf_event_ctx_lock(event);
4822 ret = _perf_ioctl(event, cmd, arg);
4823 perf_event_ctx_unlock(event, ctx);
4824
4825 return ret;
4826}
4827
Pawel Mollb3f20782014-06-13 16:03:32 +01004828#ifdef CONFIG_COMPAT
4829static long perf_compat_ioctl(struct file *file, unsigned int cmd,
4830 unsigned long arg)
4831{
4832 switch (_IOC_NR(cmd)) {
4833 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
4834 case _IOC_NR(PERF_EVENT_IOC_ID):
 4835		/* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
4836 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
4837 cmd &= ~IOCSIZE_MASK;
4838 cmd |= sizeof(void *) << IOCSIZE_SHIFT;
4839 }
4840 break;
4841 }
4842 return perf_ioctl(file, cmd, arg);
4843}
4844#else
4845# define perf_compat_ioctl NULL
4846#endif
4847
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004848int perf_event_task_enable(void)
4849{
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004850 struct perf_event_context *ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004851 struct perf_event *event;
4852
4853 mutex_lock(&current->perf_event_mutex);
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004854 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4855 ctx = perf_event_ctx_lock(event);
4856 perf_event_for_each_child(event, _perf_event_enable);
4857 perf_event_ctx_unlock(event, ctx);
4858 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004859 mutex_unlock(&current->perf_event_mutex);
4860
4861 return 0;
4862}
4863
4864int perf_event_task_disable(void)
4865{
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004866 struct perf_event_context *ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004867 struct perf_event *event;
4868
4869 mutex_lock(&current->perf_event_mutex);
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004870 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4871 ctx = perf_event_ctx_lock(event);
4872 perf_event_for_each_child(event, _perf_event_disable);
4873 perf_event_ctx_unlock(event, ctx);
4874 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004875 mutex_unlock(&current->perf_event_mutex);
4876
4877 return 0;
4878}
4879
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004880static int perf_event_index(struct perf_event *event)
4881{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004882 if (event->hw.state & PERF_HES_STOPPED)
4883 return 0;
4884
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004885 if (event->state != PERF_EVENT_STATE_ACTIVE)
4886 return 0;
4887
Peter Zijlstra35edc2a2011-11-20 20:36:02 +01004888 return event->pmu->event_idx(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004889}
4890
Eric B Munsonc4794292011-06-23 16:34:38 -04004891static void calc_timer_values(struct perf_event *event,
Peter Zijlstrae3f35412011-11-21 11:43:53 +01004892 u64 *now,
Eric B Munson7f310a52011-06-23 16:34:38 -04004893 u64 *enabled,
4894 u64 *running)
Eric B Munsonc4794292011-06-23 16:34:38 -04004895{
Peter Zijlstrae3f35412011-11-21 11:43:53 +01004896 u64 ctx_time;
Eric B Munsonc4794292011-06-23 16:34:38 -04004897
Peter Zijlstrae3f35412011-11-21 11:43:53 +01004898 *now = perf_clock();
4899 ctx_time = event->shadow_ctx_time + *now;
Eric B Munsonc4794292011-06-23 16:34:38 -04004900 *enabled = ctx_time - event->tstamp_enabled;
4901 *running = ctx_time - event->tstamp_running;
4902}
4903
Peter Zijlstrafa7315872013-09-19 10:16:42 +02004904static void perf_event_init_userpage(struct perf_event *event)
4905{
4906 struct perf_event_mmap_page *userpg;
4907 struct ring_buffer *rb;
4908
4909 rcu_read_lock();
4910 rb = rcu_dereference(event->rb);
4911 if (!rb)
4912 goto unlock;
4913
4914 userpg = rb->user_page;
4915
4916 /* Allow new userspace to detect that bit 0 is deprecated */
4917 userpg->cap_bit0_is_deprecated = 1;
4918 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
Alexander Shishkine8c6dea2015-01-14 14:18:10 +02004919 userpg->data_offset = PAGE_SIZE;
4920 userpg->data_size = perf_data_size(rb);
Peter Zijlstrafa7315872013-09-19 10:16:42 +02004921
4922unlock:
4923 rcu_read_unlock();
4924}
4925
Andy Lutomirskic1317ec2014-10-24 15:58:11 -07004926void __weak arch_perf_update_userpage(
4927 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
Peter Zijlstrae3f35412011-11-21 11:43:53 +01004928{
4929}
4930
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004931/*
4932 * Callers need to ensure there can be no nesting of this function, otherwise
 4933 * the seqlock logic goes bad. We cannot serialize this because the arch
4934 * code calls this from NMI context.
4935 */
4936void perf_event_update_userpage(struct perf_event *event)
4937{
4938 struct perf_event_mmap_page *userpg;
Frederic Weisbecker76369132011-05-19 19:55:04 +02004939 struct ring_buffer *rb;
Peter Zijlstrae3f35412011-11-21 11:43:53 +01004940 u64 enabled, running, now;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004941
4942 rcu_read_lock();
Peter Zijlstra5ec4c592013-08-02 21:16:30 +02004943 rb = rcu_dereference(event->rb);
4944 if (!rb)
4945 goto unlock;
4946
Eric B Munson0d641202011-06-24 12:26:26 -04004947 /*
4948 * compute total_time_enabled, total_time_running
4949 * based on snapshot values taken when the event
4950 * was last scheduled in.
4951 *
 4952	 * we cannot simply call update_context_time()
 4953	 * because of locking issues, as we can be called in
 4954	 * NMI context
4955 */
Peter Zijlstrae3f35412011-11-21 11:43:53 +01004956 calc_timer_values(event, &now, &enabled, &running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004957
Frederic Weisbecker76369132011-05-19 19:55:04 +02004958 userpg = rb->user_page;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004959 /*
4960 * Disable preemption so as to not let the corresponding user-space
4961 * spin too long if we get preempted.
4962 */
4963 preempt_disable();
4964 ++userpg->lock;
4965 barrier();
4966 userpg->index = perf_event_index(event);
Peter Zijlstrab5e58792010-05-21 14:43:12 +02004967 userpg->offset = perf_event_count(event);
Peter Zijlstra365a4032011-11-21 20:58:59 +01004968 if (userpg->index)
Peter Zijlstrae7850592010-05-21 14:43:08 +02004969 userpg->offset -= local64_read(&event->hw.prev_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004970
Eric B Munson0d641202011-06-24 12:26:26 -04004971 userpg->time_enabled = enabled +
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004972 atomic64_read(&event->child_total_time_enabled);
4973
Eric B Munson0d641202011-06-24 12:26:26 -04004974 userpg->time_running = running +
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004975 atomic64_read(&event->child_total_time_running);
4976
Andy Lutomirskic1317ec2014-10-24 15:58:11 -07004977 arch_perf_update_userpage(event, userpg, now);
Peter Zijlstrae3f35412011-11-21 11:43:53 +01004978
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004979 barrier();
4980 ++userpg->lock;
4981 preempt_enable();
4982unlock:
4983 rcu_read_unlock();
4984}
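/*
 * Illustrative sketch of the matching userspace side: readers of the
 * mmap()ed page pair with the ++lock/barrier() sequence above using a
 * seqcount-style retry loop, roughly (pc being the mapped
 * struct perf_event_mmap_page, rdpmc() an arch-specific counter read):
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx = pc->index;
 *		count = pc->offset;
 *		if (idx)
 *			count += rdpmc(idx - 1);
 *		barrier();
 *	} while (pc->lock != seq);
 */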
4985
Dave Jiang11bac802017-02-24 14:56:41 -08004986static int perf_mmap_fault(struct vm_fault *vmf)
Peter Zijlstra906010b2009-09-21 16:08:49 +02004987{
Dave Jiang11bac802017-02-24 14:56:41 -08004988 struct perf_event *event = vmf->vma->vm_file->private_data;
Frederic Weisbecker76369132011-05-19 19:55:04 +02004989 struct ring_buffer *rb;
Peter Zijlstra906010b2009-09-21 16:08:49 +02004990 int ret = VM_FAULT_SIGBUS;
4991
4992 if (vmf->flags & FAULT_FLAG_MKWRITE) {
4993 if (vmf->pgoff == 0)
4994 ret = 0;
4995 return ret;
4996 }
4997
4998 rcu_read_lock();
Frederic Weisbecker76369132011-05-19 19:55:04 +02004999 rb = rcu_dereference(event->rb);
5000 if (!rb)
Peter Zijlstra906010b2009-09-21 16:08:49 +02005001 goto unlock;
5002
5003 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
5004 goto unlock;
5005
Frederic Weisbecker76369132011-05-19 19:55:04 +02005006 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
Peter Zijlstra906010b2009-09-21 16:08:49 +02005007 if (!vmf->page)
5008 goto unlock;
5009
5010 get_page(vmf->page);
Dave Jiang11bac802017-02-24 14:56:41 -08005011 vmf->page->mapping = vmf->vma->vm_file->f_mapping;
Peter Zijlstra906010b2009-09-21 16:08:49 +02005012 vmf->page->index = vmf->pgoff;
5013
5014 ret = 0;
5015unlock:
5016 rcu_read_unlock();
5017
5018 return ret;
5019}
5020
Peter Zijlstra10c6db12011-11-26 02:47:31 +01005021static void ring_buffer_attach(struct perf_event *event,
5022 struct ring_buffer *rb)
5023{
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005024 struct ring_buffer *old_rb = NULL;
Peter Zijlstra10c6db12011-11-26 02:47:31 +01005025 unsigned long flags;
5026
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005027 if (event->rb) {
5028 /*
5029 * Should be impossible, we set this when removing
5030 * event->rb_entry and wait/clear when adding event->rb_entry.
5031 */
5032 WARN_ON_ONCE(event->rcu_pending);
Peter Zijlstra10c6db12011-11-26 02:47:31 +01005033
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005034 old_rb = event->rb;
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005035 spin_lock_irqsave(&old_rb->event_lock, flags);
5036 list_del_rcu(&event->rb_entry);
5037 spin_unlock_irqrestore(&old_rb->event_lock, flags);
Peter Zijlstra10c6db12011-11-26 02:47:31 +01005038
Oleg Nesterov2f993cf2015-05-30 22:04:25 +02005039 event->rcu_batches = get_state_synchronize_rcu();
5040 event->rcu_pending = 1;
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005041 }
Peter Zijlstra10c6db12011-11-26 02:47:31 +01005042
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005043 if (rb) {
Oleg Nesterov2f993cf2015-05-30 22:04:25 +02005044 if (event->rcu_pending) {
5045 cond_synchronize_rcu(event->rcu_batches);
5046 event->rcu_pending = 0;
5047 }
5048
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005049 spin_lock_irqsave(&rb->event_lock, flags);
5050 list_add_rcu(&event->rb_entry, &rb->event_list);
5051 spin_unlock_irqrestore(&rb->event_lock, flags);
5052 }
5053
Alexander Shishkin767ae082016-09-06 16:23:49 +03005054 /*
5055 * Avoid racing with perf_mmap_close(AUX): stop the event
5056 * before swizzling the event::rb pointer; if it's getting
5057 * unmapped, its aux_mmap_count will be 0 and it won't
5058 * restart. See the comment in __perf_pmu_output_stop().
5059 *
5060 * Data will inevitably be lost when set_output is done in
5061 * mid-air, but then again, whoever does it like this is
5062 * not in for the data anyway.
5063 */
5064 if (has_aux(event))
5065 perf_event_stop(event, 0);
5066
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005067 rcu_assign_pointer(event->rb, rb);
5068
5069 if (old_rb) {
5070 ring_buffer_put(old_rb);
5071 /*
5072 * Since we detached before setting the new rb, so that we
5073 * could attach the new rb, we could have missed a wakeup.
5074 * Provide it now.
5075 */
5076 wake_up_all(&event->waitq);
5077 }
Peter Zijlstra10c6db12011-11-26 02:47:31 +01005078}
5079
5080static void ring_buffer_wakeup(struct perf_event *event)
5081{
5082 struct ring_buffer *rb;
5083
5084 rcu_read_lock();
5085 rb = rcu_dereference(event->rb);
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005086 if (rb) {
5087 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
5088 wake_up_all(&event->waitq);
5089 }
Peter Zijlstra10c6db12011-11-26 02:47:31 +01005090 rcu_read_unlock();
5091}
5092
Alexander Shishkinfdc26702015-01-14 14:18:16 +02005093struct ring_buffer *ring_buffer_get(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005094{
Frederic Weisbecker76369132011-05-19 19:55:04 +02005095 struct ring_buffer *rb;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005096
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005097 rcu_read_lock();
Frederic Weisbecker76369132011-05-19 19:55:04 +02005098 rb = rcu_dereference(event->rb);
5099 if (rb) {
5100 if (!atomic_inc_not_zero(&rb->refcount))
5101 rb = NULL;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005102 }
5103 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005104
Frederic Weisbecker76369132011-05-19 19:55:04 +02005105 return rb;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005106}
5107
Alexander Shishkinfdc26702015-01-14 14:18:16 +02005108void ring_buffer_put(struct ring_buffer *rb)
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005109{
Frederic Weisbecker76369132011-05-19 19:55:04 +02005110 if (!atomic_dec_and_test(&rb->refcount))
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005111 return;
5112
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005113 WARN_ON_ONCE(!list_empty(&rb->event_list));
Peter Zijlstra10c6db12011-11-26 02:47:31 +01005114
Frederic Weisbecker76369132011-05-19 19:55:04 +02005115 call_rcu(&rb->rcu_head, rb_free_rcu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005116}
5117
5118static void perf_mmap_open(struct vm_area_struct *vma)
5119{
5120 struct perf_event *event = vma->vm_file->private_data;
5121
5122 atomic_inc(&event->mmap_count);
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005123 atomic_inc(&event->rb->mmap_count);
Andy Lutomirski1e0fb9e2014-10-24 15:58:10 -07005124
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005125 if (vma->vm_pgoff)
5126 atomic_inc(&event->rb->aux_mmap_count);
5127
Andy Lutomirski1e0fb9e2014-10-24 15:58:10 -07005128 if (event->pmu->event_mapped)
Peter Zijlstrabfe334922017-08-02 19:39:30 +02005129 event->pmu->event_mapped(event, vma->vm_mm);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005130}
5131
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02005132static void perf_pmu_output_stop(struct perf_event *event);
5133
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005134/*
5135 * A buffer can be mmap()ed multiple times; either directly through the same
5136 * event, or through other events by use of perf_event_set_output().
5137 *
5138 * In order to undo the VM accounting done by perf_mmap() we need to destroy
5139 * the buffer here, where we still have a VM context. This means we need
5140 * to detach all events redirecting to us.
5141 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005142static void perf_mmap_close(struct vm_area_struct *vma)
5143{
5144 struct perf_event *event = vma->vm_file->private_data;
5145
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005146 struct ring_buffer *rb = ring_buffer_get(event);
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005147 struct user_struct *mmap_user = rb->mmap_user;
5148 int mmap_locked = rb->mmap_locked;
5149 unsigned long size = perf_data_size(rb);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005150
Andy Lutomirski1e0fb9e2014-10-24 15:58:10 -07005151 if (event->pmu->event_unmapped)
Peter Zijlstrabfe334922017-08-02 19:39:30 +02005152 event->pmu->event_unmapped(event, vma->vm_mm);
Andy Lutomirski1e0fb9e2014-10-24 15:58:10 -07005153
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005154 /*
5155 * rb->aux_mmap_count will always drop before rb->mmap_count and
5156 * event->mmap_count, so it is ok to use event->mmap_mutex to
5157 * serialize with perf_mmap here.
5158 */
5159 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
5160 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02005161 /*
5162 * Stop all AUX events that are writing to this buffer,
5163 * so that we can free its AUX pages and corresponding PMU
5164 * data. Note that after rb::aux_mmap_count dropped to zero,
5165 * they won't start any more (see perf_aux_output_begin()).
5166 */
5167 perf_pmu_output_stop(event);
5168
5169 /* now it's safe to free the pages */
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005170 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
5171 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
5172
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02005173 /* this has to be the last one */
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005174 rb_free_aux(rb);
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02005175 WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
5176
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005177 mutex_unlock(&event->mmap_mutex);
5178 }
5179
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005180 atomic_dec(&rb->mmap_count);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005181
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005182 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005183 goto out_put;
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005184
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005185 ring_buffer_attach(event, NULL);
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005186 mutex_unlock(&event->mmap_mutex);
5187
5188 /* If there's still other mmap()s of this buffer, we're done. */
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005189 if (atomic_read(&rb->mmap_count))
5190 goto out_put;
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005191
5192 /*
5193 * No other mmap()s, detach from all other events that might redirect
5194 * into the now unreachable buffer. Somewhat complicated by the
5195 * fact that rb::event_lock otherwise nests inside mmap_mutex.
5196 */
5197again:
5198 rcu_read_lock();
5199 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
5200 if (!atomic_long_inc_not_zero(&event->refcount)) {
5201 /*
5202 * This event is en-route to free_event() which will
5203 * detach it and remove it from the list.
5204 */
5205 continue;
5206 }
5207 rcu_read_unlock();
5208
5209 mutex_lock(&event->mmap_mutex);
5210 /*
5211 * Check we didn't race with perf_event_set_output() which can
5212 * swizzle the rb from under us while we were waiting to
5213 * acquire mmap_mutex.
5214 *
5215		 * If we find a different rb, ignore this event; the next
5216		 * iteration will no longer find it on the list. We have to
5217 * still restart the iteration to make sure we're not now
5218 * iterating the wrong list.
5219 */
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005220 if (event->rb == rb)
5221 ring_buffer_attach(event, NULL);
5222
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005223 mutex_unlock(&event->mmap_mutex);
5224 put_event(event);
5225
5226 /*
5227 * Restart the iteration; either we're on the wrong list or
5228		 * we destroyed its integrity by doing a deletion.
5229 */
5230 goto again;
5231 }
5232 rcu_read_unlock();
5233
5234 /*
5235	 * It could be that there are still a few 0-ref events on the list; they'll
5236 * get cleaned up by free_event() -- they'll also still have their
5237 * ref on the rb and will free it whenever they are done with it.
5238 *
5239 * Aside from that, this buffer is 'fully' detached and unmapped,
5240 * undo the VM accounting.
5241 */
5242
5243 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
5244 vma->vm_mm->pinned_vm -= mmap_locked;
5245 free_uid(mmap_user);
5246
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005247out_put:
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005248 ring_buffer_put(rb); /* could be last */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005249}
5250
Alexey Dobriyanf0f37e2f2009-09-27 22:29:37 +04005251static const struct vm_operations_struct perf_mmap_vmops = {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005252 .open = perf_mmap_open,
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005253	.close		= perf_mmap_close, /* non mergeable */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005254 .fault = perf_mmap_fault,
5255 .page_mkwrite = perf_mmap_fault,
5256};
5257
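/*
 * A minimal sketch of how user space might set up these mappings,
 * assuming 'fd' comes from the perf_event_open() syscall; the AUX
 * offset/size values below are purely illustrative:
 *
 *	// data area: one user page plus 2^n data pages, at pgoff 0
 *	struct perf_event_mmap_page *up;
 *	void *base = mmap(NULL, (1 + 8) * page_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	up = base;
 *
 *	// AUX area: advertise its placement, then map it at that offset
 *	up->aux_offset = (1 + 8) * page_size;
 *	up->aux_size   = 16 * page_size;
 *	void *aux = mmap(NULL, up->aux_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, up->aux_offset);
 *
 * Both mappings must be MAP_SHARED, and the number of data pages and
 * AUX pages must each be a power of two, matching the checks below.
 */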
5258static int perf_mmap(struct file *file, struct vm_area_struct *vma)
5259{
5260 struct perf_event *event = file->private_data;
5261 unsigned long user_locked, user_lock_limit;
5262 struct user_struct *user = current_user();
5263 unsigned long locked, lock_limit;
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005264 struct ring_buffer *rb = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005265 unsigned long vma_size;
5266 unsigned long nr_pages;
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005267 long user_extra = 0, extra = 0;
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02005268 int ret = 0, flags = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005269
Peter Zijlstrac7920612010-05-18 10:33:24 +02005270 /*
5271 * Don't allow mmap() of inherited per-task counters. This would
5272 * create a performance issue due to all children writing to the
Frederic Weisbecker76369132011-05-19 19:55:04 +02005273 * same rb.
Peter Zijlstrac7920612010-05-18 10:33:24 +02005274 */
5275 if (event->cpu == -1 && event->attr.inherit)
5276 return -EINVAL;
5277
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005278 if (!(vma->vm_flags & VM_SHARED))
5279 return -EINVAL;
5280
5281 vma_size = vma->vm_end - vma->vm_start;
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005282
5283 if (vma->vm_pgoff == 0) {
5284 nr_pages = (vma_size / PAGE_SIZE) - 1;
5285 } else {
5286 /*
5287 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
5288		 * mapped; all subsequent mappings should have the same size
5289 * and offset. Must be above the normal perf buffer.
5290 */
5291 u64 aux_offset, aux_size;
5292
5293 if (!event->rb)
5294 return -EINVAL;
5295
5296 nr_pages = vma_size / PAGE_SIZE;
5297
5298 mutex_lock(&event->mmap_mutex);
5299 ret = -EINVAL;
5300
5301 rb = event->rb;
5302 if (!rb)
5303 goto aux_unlock;
5304
5305 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
5306 aux_size = ACCESS_ONCE(rb->user_page->aux_size);
5307
5308 if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
5309 goto aux_unlock;
5310
5311 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
5312 goto aux_unlock;
5313
5314 /* already mapped with a different offset */
5315 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
5316 goto aux_unlock;
5317
5318 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
5319 goto aux_unlock;
5320
5321 /* already mapped with a different size */
5322 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
5323 goto aux_unlock;
5324
5325 if (!is_power_of_2(nr_pages))
5326 goto aux_unlock;
5327
5328 if (!atomic_inc_not_zero(&rb->mmap_count))
5329 goto aux_unlock;
5330
5331 if (rb_has_aux(rb)) {
5332 atomic_inc(&rb->aux_mmap_count);
5333 ret = 0;
5334 goto unlock;
5335 }
5336
5337 atomic_set(&rb->aux_mmap_count, 1);
5338 user_extra = nr_pages;
5339
5340 goto accounting;
5341 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005342
5343 /*
Frederic Weisbecker76369132011-05-19 19:55:04 +02005344 * If we have rb pages ensure they're a power-of-two number, so we
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005345 * can do bitmasks instead of modulo.
5346 */
Kan Liang2ed11312015-03-02 02:14:26 -05005347 if (nr_pages != 0 && !is_power_of_2(nr_pages))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005348 return -EINVAL;
5349
5350 if (vma_size != PAGE_SIZE * (1 + nr_pages))
5351 return -EINVAL;
5352
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005353 WARN_ON_ONCE(event->ctx->parent_ctx);
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005354again:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005355 mutex_lock(&event->mmap_mutex);
Frederic Weisbecker76369132011-05-19 19:55:04 +02005356 if (event->rb) {
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005357 if (event->rb->nr_pages != nr_pages) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005358 ret = -EINVAL;
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005359 goto unlock;
5360 }
5361
5362 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
5363 /*
5364 * Raced against perf_mmap_close() through
5365 * perf_event_set_output(). Try again, hope for better
5366 * luck.
5367 */
5368 mutex_unlock(&event->mmap_mutex);
5369 goto again;
5370 }
5371
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005372 goto unlock;
5373 }
5374
5375 user_extra = nr_pages + 1;
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005376
5377accounting:
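	/*
	 * Illustrative numbers for the accounting below: with the default
	 * perf_event_mlock limit (512 KiB plus one page per user, scaled
	 * by the number of online CPUs), mapping 1 + 128 data pages
	 * charges 129 pages to user->locked_vm; whatever exceeds the
	 * per-user limit is charged to mm->pinned_vm instead and checked
	 * against RLIMIT_MEMLOCK.
	 */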
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005378 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
5379
5380 /*
5381 * Increase the limit linearly with more CPUs:
5382 */
5383 user_lock_limit *= num_online_cpus();
5384
5385 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
5386
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005387 if (user_locked > user_lock_limit)
5388 extra = user_locked - user_lock_limit;
5389
Jiri Slaby78d7d402010-03-05 13:42:54 -08005390 lock_limit = rlimit(RLIMIT_MEMLOCK);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005391 lock_limit >>= PAGE_SHIFT;
Christoph Lameterbc3e53f2011-10-31 17:07:30 -07005392 locked = vma->vm_mm->pinned_vm + extra;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005393
5394 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
5395 !capable(CAP_IPC_LOCK)) {
5396 ret = -EPERM;
5397 goto unlock;
5398 }
5399
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005400 WARN_ON(!rb && event->rb);
Peter Zijlstra906010b2009-09-21 16:08:49 +02005401
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02005402 if (vma->vm_flags & VM_WRITE)
Frederic Weisbecker76369132011-05-19 19:55:04 +02005403 flags |= RING_BUFFER_WRITABLE;
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02005404
Frederic Weisbecker76369132011-05-19 19:55:04 +02005405 if (!rb) {
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005406 rb = rb_alloc(nr_pages,
5407 event->attr.watermark ? event->attr.wakeup_watermark : 0,
5408 event->cpu, flags);
5409
5410 if (!rb) {
5411 ret = -ENOMEM;
5412 goto unlock;
5413 }
5414
5415 atomic_set(&rb->mmap_count, 1);
5416 rb->mmap_user = get_current_user();
5417 rb->mmap_locked = extra;
5418
5419 ring_buffer_attach(event, rb);
5420
5421 perf_event_init_userpage(event);
5422 perf_event_update_userpage(event);
5423 } else {
Alexander Shishkin1a594132015-01-14 14:18:18 +02005424 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
5425 event->attr.aux_watermark, flags);
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005426 if (!ret)
5427 rb->aux_mmap_locked = extra;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005428 }
Peter Zijlstra26cb63a2013-05-28 10:55:48 +02005429
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005430unlock:
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005431 if (!ret) {
5432 atomic_long_add(user_extra, &user->locked_vm);
5433 vma->vm_mm->pinned_vm += extra;
5434
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005435 atomic_inc(&event->mmap_count);
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005436 } else if (rb) {
5437 atomic_dec(&rb->mmap_count);
5438 }
5439aux_unlock:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005440 mutex_unlock(&event->mmap_mutex);
5441
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005442 /*
5443	 * Since pinned accounting is per-vm, we cannot allow fork() to copy our
5444 * vma.
5445 */
Peter Zijlstra26cb63a2013-05-28 10:55:48 +02005446 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005447 vma->vm_ops = &perf_mmap_vmops;
5448
Andy Lutomirski1e0fb9e2014-10-24 15:58:10 -07005449 if (event->pmu->event_mapped)
Peter Zijlstrabfe334922017-08-02 19:39:30 +02005450 event->pmu->event_mapped(event, vma->vm_mm);
Andy Lutomirski1e0fb9e2014-10-24 15:58:10 -07005451
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005452 return ret;
5453}
5454
5455static int perf_fasync(int fd, struct file *filp, int on)
5456{
Al Viro496ad9a2013-01-23 17:07:38 -05005457 struct inode *inode = file_inode(filp);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005458 struct perf_event *event = filp->private_data;
5459 int retval;
5460
Al Viro59551022016-01-22 15:40:57 -05005461 inode_lock(inode);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005462 retval = fasync_helper(fd, filp, on, &event->fasync);
Al Viro59551022016-01-22 15:40:57 -05005463 inode_unlock(inode);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005464
5465 if (retval < 0)
5466 return retval;
5467
5468 return 0;
5469}
5470
5471static const struct file_operations perf_fops = {
Arnd Bergmann3326c1c2010-03-23 19:09:33 +01005472 .llseek = no_llseek,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005473 .release = perf_release,
5474 .read = perf_read,
5475 .poll = perf_poll,
5476 .unlocked_ioctl = perf_ioctl,
Pawel Mollb3f20782014-06-13 16:03:32 +01005477 .compat_ioctl = perf_compat_ioctl,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005478 .mmap = perf_mmap,
5479 .fasync = perf_fasync,
5480};
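
/*
 * Rough sketch of the user-space life cycle against these file
 * operations (fd obtained via the perf_event_open() syscall):
 *
 *	fd = syscall(__NR_perf_event_open, &attr, pid, cpu, group_fd, flags);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);		-> perf_ioctl()
 *	base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);			-> perf_mmap()
 *	poll(&pfd, 1, timeout);				-> perf_poll()
 *	read(fd, &count, sizeof(count));		-> perf_read()
 *	close(fd);					-> perf_release()
 */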
5481
5482/*
5483 * Perf event wakeup
5484 *
5485 * If there's data, ensure we set the poll() state and publish everything
5486 * to user-space before waking everybody up.
5487 */
5488
Peter Zijlstrafed66e2cd2015-06-11 10:32:01 +02005489static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
5490{
5491 /* only the parent has fasync state */
5492 if (event->parent)
5493 event = event->parent;
5494 return &event->fasync;
5495}
5496
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005497void perf_event_wakeup(struct perf_event *event)
5498{
Peter Zijlstra10c6db12011-11-26 02:47:31 +01005499 ring_buffer_wakeup(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005500
5501 if (event->pending_kill) {
Peter Zijlstrafed66e2cd2015-06-11 10:32:01 +02005502 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005503 event->pending_kill = 0;
5504 }
5505}
5506
Peter Zijlstrae360adb2010-10-14 14:01:34 +08005507static void perf_pending_event(struct irq_work *entry)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005508{
5509 struct perf_event *event = container_of(entry,
5510 struct perf_event, pending);
Peter Zijlstrad5252112015-02-19 18:03:11 +01005511 int rctx;
5512
5513 rctx = perf_swevent_get_recursion_context();
5514 /*
5515 * If we 'fail' here, that's OK, it means recursion is already disabled
5516 * and we won't recurse 'further'.
5517 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005518
5519 if (event->pending_disable) {
5520 event->pending_disable = 0;
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01005521 perf_event_disable_local(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005522 }
5523
5524 if (event->pending_wakeup) {
5525 event->pending_wakeup = 0;
5526 perf_event_wakeup(event);
5527 }
Peter Zijlstrad5252112015-02-19 18:03:11 +01005528
5529 if (rctx >= 0)
5530 perf_swevent_put_recursion_context(rctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005531}
5532
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005533/*
Zhang, Yanmin39447b32010-04-19 13:32:41 +08005534 * We assume there is only KVM supporting the callbacks.
5535 * Later on, we might change it to a list if there is
5536 * another virtualization implementation supporting the callbacks.
5537 */
5538struct perf_guest_info_callbacks *perf_guest_cbs;
5539
5540int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5541{
5542 perf_guest_cbs = cbs;
5543 return 0;
5544}
5545EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
5546
5547int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5548{
5549 perf_guest_cbs = NULL;
5550 return 0;
5551}
5552EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
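
/*
 * A hypervisor would typically wire these up once at init time; a
 * minimal sketch (the kvm_* helpers below are placeholders, not the
 * actual KVM implementation):
 *
 *	static struct perf_guest_info_callbacks kvm_guest_cbs = {
 *		.is_in_guest	= kvm_is_in_guest,
 *		.is_user_mode	= kvm_is_user_mode,
 *		.get_guest_ip	= kvm_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&kvm_guest_cbs);
 *	...
 *	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
 */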
5553
Jiri Olsa40189942012-08-07 15:20:37 +02005554static void
5555perf_output_sample_regs(struct perf_output_handle *handle,
5556 struct pt_regs *regs, u64 mask)
5557{
5558 int bit;
Madhavan Srinivasan29dd3282016-08-17 15:06:08 +05305559 DECLARE_BITMAP(_mask, 64);
Jiri Olsa40189942012-08-07 15:20:37 +02005560
Madhavan Srinivasan29dd3282016-08-17 15:06:08 +05305561 bitmap_from_u64(_mask, mask);
5562 for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) {
Jiri Olsa40189942012-08-07 15:20:37 +02005563 u64 val;
5564
5565 val = perf_reg_value(regs, bit);
5566 perf_output_put(handle, val);
5567 }
5568}
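
/*
 * For example (illustrative mask): with mask == 0x11, bits 0 and 4 are
 * set, so exactly two u64 values -- perf_reg_value(regs, 0) and
 * perf_reg_value(regs, 4) -- are emitted, in ascending bit order.
 */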
5569
Stephane Eranian60e23642014-09-24 13:48:37 +02005570static void perf_sample_regs_user(struct perf_regs *regs_user,
Andy Lutomirski88a7c262015-01-04 10:36:19 -08005571 struct pt_regs *regs,
5572 struct pt_regs *regs_user_copy)
Jiri Olsa40189942012-08-07 15:20:37 +02005573{
Andy Lutomirski88a7c262015-01-04 10:36:19 -08005574 if (user_mode(regs)) {
5575 regs_user->abi = perf_reg_abi(current);
Peter Zijlstra25657112014-09-24 13:48:42 +02005576 regs_user->regs = regs;
Andy Lutomirski88a7c262015-01-04 10:36:19 -08005577 } else if (current->mm) {
5578 perf_get_regs_user(regs_user, regs, regs_user_copy);
Peter Zijlstra25657112014-09-24 13:48:42 +02005579 } else {
5580 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
5581 regs_user->regs = NULL;
Jiri Olsa40189942012-08-07 15:20:37 +02005582 }
5583}
5584
Stephane Eranian60e23642014-09-24 13:48:37 +02005585static void perf_sample_regs_intr(struct perf_regs *regs_intr,
5586 struct pt_regs *regs)
5587{
5588 regs_intr->regs = regs;
5589 regs_intr->abi = perf_reg_abi(current);
5590}
5591
5592
Jiri Olsac5ebced2012-08-07 15:20:40 +02005593/*
5594 * Get remaining task size from user stack pointer.
5595 *
5596 * It'd be better to take stack vma map and limit this more
5597 * precisely, but there's no way to get it safely under interrupt,
5598 * so using TASK_SIZE as limit.
5599 */
5600static u64 perf_ustack_task_size(struct pt_regs *regs)
5601{
5602 unsigned long addr = perf_user_stack_pointer(regs);
5603
5604 if (!addr || addr >= TASK_SIZE)
5605 return 0;
5606
5607 return TASK_SIZE - addr;
5608}
5609
5610static u16
5611perf_sample_ustack_size(u16 stack_size, u16 header_size,
5612 struct pt_regs *regs)
5613{
5614 u64 task_size;
5615
5616 /* No regs, no stack pointer, no dump. */
5617 if (!regs)
5618 return 0;
5619
5620 /*
5621	 * Check whether the requested stack size fits into the:
5622	 * - TASK_SIZE
5623	 *   If it doesn't, we limit the size to TASK_SIZE.
5624	 *
5625	 * - remaining sample size
5626	 *   If it doesn't, we shrink the stack size to
5627	 *   fit into the remaining sample size.
5628 */
5629
5630 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
5631 stack_size = min(stack_size, (u16) task_size);
5632
5633 /* Current header size plus static size and dynamic size. */
5634 header_size += 2 * sizeof(u64);
5635
5636 /* Do we fit in with the current stack dump size? */
5637 if ((u16) (header_size + stack_size) < header_size) {
5638 /*
5639 * If we overflow the maximum size for the sample,
5640 * we customize the stack dump size to fit in.
5641 */
5642 stack_size = USHRT_MAX - header_size - sizeof(u64);
5643 stack_size = round_up(stack_size, sizeof(u64));
5644 }
5645
5646 return stack_size;
5647}
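
/*
 * Worked example (illustrative numbers): a request for a 16384 byte
 * dump is first clamped to the user stack remaining below TASK_SIZE;
 * if the header is already 60000 bytes, header_size becomes 60016 and
 * 60016 + 16384 overflows the u16 sample size, so the dump shrinks to
 * USHRT_MAX - 60016 - 8 = 5511 bytes, rounded up to 5512.
 */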
5648
5649static void
5650perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
5651 struct pt_regs *regs)
5652{
5653 /* Case of a kernel thread, nothing to dump */
5654 if (!regs) {
5655 u64 size = 0;
5656 perf_output_put(handle, size);
5657 } else {
5658 unsigned long sp;
5659 unsigned int rem;
5660 u64 dyn_size;
5661
5662 /*
5663 * We dump:
5664 * static size
5665 * - the size requested by user or the best one we can fit
5666	 *     into the sample max size
5667 * data
5668 * - user stack dump data
5669 * dynamic size
5670 * - the actual dumped size
5671 */
5672
5673 /* Static size. */
5674 perf_output_put(handle, dump_size);
5675
5676 /* Data. */
5677 sp = perf_user_stack_pointer(regs);
5678 rem = __output_copy_user(handle, (void *) sp, dump_size);
5679 dyn_size = dump_size - rem;
5680
5681 perf_output_skip(handle, rem);
5682
5683 /* Dynamic size. */
5684 perf_output_put(handle, dyn_size);
5685 }
5686}
5687
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02005688static void __perf_event_header__init_id(struct perf_event_header *header,
5689 struct perf_sample_data *data,
5690 struct perf_event *event)
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02005691{
5692 u64 sample_type = event->attr.sample_type;
5693
5694 data->type = sample_type;
5695 header->size += event->id_header_size;
5696
5697 if (sample_type & PERF_SAMPLE_TID) {
5698 /* namespace issues */
5699 data->tid_entry.pid = perf_event_pid(event, current);
5700 data->tid_entry.tid = perf_event_tid(event, current);
5701 }
5702
5703 if (sample_type & PERF_SAMPLE_TIME)
Peter Zijlstra34f43922015-02-20 14:05:38 +01005704 data->time = perf_event_clock(event);
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02005705
Adrian Hunterff3d5272013-08-27 11:23:07 +03005706 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02005707 data->id = primary_event_id(event);
5708
5709 if (sample_type & PERF_SAMPLE_STREAM_ID)
5710 data->stream_id = event->id;
5711
5712 if (sample_type & PERF_SAMPLE_CPU) {
5713 data->cpu_entry.cpu = raw_smp_processor_id();
5714 data->cpu_entry.reserved = 0;
5715 }
5716}
5717
Frederic Weisbecker76369132011-05-19 19:55:04 +02005718void perf_event_header__init_id(struct perf_event_header *header,
5719 struct perf_sample_data *data,
5720 struct perf_event *event)
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02005721{
5722 if (event->attr.sample_id_all)
5723 __perf_event_header__init_id(header, data, event);
5724}
5725
5726static void __perf_event__output_id_sample(struct perf_output_handle *handle,
5727 struct perf_sample_data *data)
5728{
5729 u64 sample_type = data->type;
5730
5731 if (sample_type & PERF_SAMPLE_TID)
5732 perf_output_put(handle, data->tid_entry);
5733
5734 if (sample_type & PERF_SAMPLE_TIME)
5735 perf_output_put(handle, data->time);
5736
5737 if (sample_type & PERF_SAMPLE_ID)
5738 perf_output_put(handle, data->id);
5739
5740 if (sample_type & PERF_SAMPLE_STREAM_ID)
5741 perf_output_put(handle, data->stream_id);
5742
5743 if (sample_type & PERF_SAMPLE_CPU)
5744 perf_output_put(handle, data->cpu_entry);
Adrian Hunterff3d5272013-08-27 11:23:07 +03005745
5746 if (sample_type & PERF_SAMPLE_IDENTIFIER)
5747 perf_output_put(handle, data->id);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02005748}
5749
Frederic Weisbecker76369132011-05-19 19:55:04 +02005750void perf_event__output_id_sample(struct perf_event *event,
5751 struct perf_output_handle *handle,
5752 struct perf_sample_data *sample)
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02005753{
5754 if (event->attr.sample_id_all)
5755 __perf_event__output_id_sample(handle, sample);
5756}
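
/*
 * Rough sketch of the resulting sample_id trailer appended to
 * non-SAMPLE records when attr.sample_id_all is set (field order as
 * emitted above):
 *
 *	{ [pid, tid], [time], [id], [stream_id], [cpu, res], [identifier] }
 *
 * where each bracketed field is present iff the corresponding
 * PERF_SAMPLE_* bit is set in attr.sample_type.
 */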
5757
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005758static void perf_output_read_one(struct perf_output_handle *handle,
Stephane Eranianeed01522010-10-26 16:08:01 +02005759 struct perf_event *event,
5760 u64 enabled, u64 running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005761{
5762 u64 read_format = event->attr.read_format;
5763 u64 values[4];
5764 int n = 0;
5765
Peter Zijlstrab5e58792010-05-21 14:43:12 +02005766 values[n++] = perf_event_count(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005767 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
Stephane Eranianeed01522010-10-26 16:08:01 +02005768 values[n++] = enabled +
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005769 atomic64_read(&event->child_total_time_enabled);
5770 }
5771 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
Stephane Eranianeed01522010-10-26 16:08:01 +02005772 values[n++] = running +
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005773 atomic64_read(&event->child_total_time_running);
5774 }
5775 if (read_format & PERF_FORMAT_ID)
5776 values[n++] = primary_event_id(event);
5777
Frederic Weisbecker76369132011-05-19 19:55:04 +02005778 __output_copy(handle, values, n * sizeof(u64));
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005779}
5780
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005781static void perf_output_read_group(struct perf_output_handle *handle,
Stephane Eranianeed01522010-10-26 16:08:01 +02005782 struct perf_event *event,
5783 u64 enabled, u64 running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005784{
5785 struct perf_event *leader = event->group_leader, *sub;
5786 u64 read_format = event->attr.read_format;
5787 u64 values[5];
5788 int n = 0;
5789
5790 values[n++] = 1 + leader->nr_siblings;
5791
5792 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
Stephane Eranianeed01522010-10-26 16:08:01 +02005793 values[n++] = enabled;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005794
5795 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
Stephane Eranianeed01522010-10-26 16:08:01 +02005796 values[n++] = running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005797
5798 if (leader != event)
5799 leader->pmu->read(leader);
5800
Peter Zijlstrab5e58792010-05-21 14:43:12 +02005801 values[n++] = perf_event_count(leader);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005802 if (read_format & PERF_FORMAT_ID)
5803 values[n++] = primary_event_id(leader);
5804
Frederic Weisbecker76369132011-05-19 19:55:04 +02005805 __output_copy(handle, values, n * sizeof(u64));
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005806
5807 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
5808 n = 0;
5809
Jiri Olsa6f5ab002012-10-15 20:13:45 +02005810 if ((sub != event) &&
5811 (sub->state == PERF_EVENT_STATE_ACTIVE))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005812 sub->pmu->read(sub);
5813
Peter Zijlstrab5e58792010-05-21 14:43:12 +02005814 values[n++] = perf_event_count(sub);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005815 if (read_format & PERF_FORMAT_ID)
5816 values[n++] = primary_event_id(sub);
5817
Frederic Weisbecker76369132011-05-19 19:55:04 +02005818 __output_copy(handle, values, n * sizeof(u64));
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005819 }
5820}
5821
Stephane Eranianeed01522010-10-26 16:08:01 +02005822#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
5823 PERF_FORMAT_TOTAL_TIME_RUNNING)
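
/*
 * The layout emitted by perf_output_read_one()/_group() above mirrors
 * the read_format description in the perf_event_open() uapi; as a
 * rough sketch:
 *
 *	!PERF_FORMAT_GROUP:	{ value, [time_enabled], [time_running], [id] }
 *	 PERF_FORMAT_GROUP:	{ nr, [time_enabled], [time_running],
 *				  { value, [id] } * nr }
 *
 * where the bracketed fields are present iff the corresponding
 * PERF_FORMAT_* bit is set.
 */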
5824
Peter Zijlstraba5213a2017-05-30 11:45:12 +02005825/*
5826 * XXX PERF_SAMPLE_READ vs inherited events seems difficult.
5827 *
5828 * The problem is that it's both hard and excessively expensive to iterate the
5829 * child list, not to mention that it's impossible to IPI the children running
5830 * on another CPU, from interrupt/NMI context.
5831 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005832static void perf_output_read(struct perf_output_handle *handle,
5833 struct perf_event *event)
5834{
Peter Zijlstrae3f35412011-11-21 11:43:53 +01005835 u64 enabled = 0, running = 0, now;
Stephane Eranianeed01522010-10-26 16:08:01 +02005836 u64 read_format = event->attr.read_format;
5837
5838 /*
5839 * compute total_time_enabled, total_time_running
5840 * based on snapshot values taken when the event
5841 * was last scheduled in.
5842 *
5843	 * we cannot simply call update_context_time()
5844	 * because of locking issues, as we are called in
5845	 * NMI context
5846 */
Eric B Munsonc4794292011-06-23 16:34:38 -04005847 if (read_format & PERF_FORMAT_TOTAL_TIMES)
Peter Zijlstrae3f35412011-11-21 11:43:53 +01005848 calc_timer_values(event, &now, &enabled, &running);
Stephane Eranianeed01522010-10-26 16:08:01 +02005849
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005850 if (event->attr.read_format & PERF_FORMAT_GROUP)
Stephane Eranianeed01522010-10-26 16:08:01 +02005851 perf_output_read_group(handle, event, enabled, running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005852 else
Stephane Eranianeed01522010-10-26 16:08:01 +02005853 perf_output_read_one(handle, event, enabled, running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005854}
5855
5856void perf_output_sample(struct perf_output_handle *handle,
5857 struct perf_event_header *header,
5858 struct perf_sample_data *data,
5859 struct perf_event *event)
5860{
5861 u64 sample_type = data->type;
5862
5863 perf_output_put(handle, *header);
5864
Adrian Hunterff3d5272013-08-27 11:23:07 +03005865 if (sample_type & PERF_SAMPLE_IDENTIFIER)
5866 perf_output_put(handle, data->id);
5867
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005868 if (sample_type & PERF_SAMPLE_IP)
5869 perf_output_put(handle, data->ip);
5870
5871 if (sample_type & PERF_SAMPLE_TID)
5872 perf_output_put(handle, data->tid_entry);
5873
5874 if (sample_type & PERF_SAMPLE_TIME)
5875 perf_output_put(handle, data->time);
5876
5877 if (sample_type & PERF_SAMPLE_ADDR)
5878 perf_output_put(handle, data->addr);
5879
5880 if (sample_type & PERF_SAMPLE_ID)
5881 perf_output_put(handle, data->id);
5882
5883 if (sample_type & PERF_SAMPLE_STREAM_ID)
5884 perf_output_put(handle, data->stream_id);
5885
5886 if (sample_type & PERF_SAMPLE_CPU)
5887 perf_output_put(handle, data->cpu_entry);
5888
5889 if (sample_type & PERF_SAMPLE_PERIOD)
5890 perf_output_put(handle, data->period);
5891
5892 if (sample_type & PERF_SAMPLE_READ)
5893 perf_output_read(handle, event);
5894
5895 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5896 if (data->callchain) {
5897 int size = 1;
5898
5899 if (data->callchain)
5900 size += data->callchain->nr;
5901
5902 size *= sizeof(u64);
5903
Frederic Weisbecker76369132011-05-19 19:55:04 +02005904 __output_copy(handle, data->callchain, size);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005905 } else {
5906 u64 nr = 0;
5907 perf_output_put(handle, nr);
5908 }
5909 }
5910
5911 if (sample_type & PERF_SAMPLE_RAW) {
Daniel Borkmann7e3f9772016-07-14 18:08:03 +02005912 struct perf_raw_record *raw = data->raw;
Alexei Starovoitovfa128e62015-10-20 20:02:33 -07005913
Daniel Borkmann7e3f9772016-07-14 18:08:03 +02005914 if (raw) {
5915 struct perf_raw_frag *frag = &raw->frag;
5916
5917 perf_output_put(handle, raw->size);
5918 do {
5919 if (frag->copy) {
5920 __output_custom(handle, frag->copy,
5921 frag->data, frag->size);
5922 } else {
5923 __output_copy(handle, frag->data,
5924 frag->size);
5925 }
5926 if (perf_raw_frag_last(frag))
5927 break;
5928 frag = frag->next;
5929 } while (1);
5930 if (frag->pad)
5931 __output_skip(handle, NULL, frag->pad);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005932 } else {
5933 struct {
5934 u32 size;
5935 u32 data;
5936 } raw = {
5937 .size = sizeof(u32),
5938 .data = 0,
5939 };
5940 perf_output_put(handle, raw);
5941 }
5942 }
Peter Zijlstraa7ac67e2011-06-27 16:47:16 +02005943
Stephane Eranianbce38cd2012-02-09 23:20:51 +01005944 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5945 if (data->br_stack) {
5946 size_t size;
5947
5948 size = data->br_stack->nr
5949 * sizeof(struct perf_branch_entry);
5950
5951 perf_output_put(handle, data->br_stack->nr);
5952 perf_output_copy(handle, data->br_stack->entries, size);
5953 } else {
5954 /*
5955 * we always store at least the value of nr
5956 */
5957 u64 nr = 0;
5958 perf_output_put(handle, nr);
5959 }
5960 }
Jiri Olsa40189942012-08-07 15:20:37 +02005961
5962 if (sample_type & PERF_SAMPLE_REGS_USER) {
5963 u64 abi = data->regs_user.abi;
5964
5965 /*
5966 * If there are no regs to dump, notice it through
5967		 * the first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5968 */
5969 perf_output_put(handle, abi);
5970
5971 if (abi) {
5972 u64 mask = event->attr.sample_regs_user;
5973 perf_output_sample_regs(handle,
5974 data->regs_user.regs,
5975 mask);
5976 }
5977 }
Jiri Olsac5ebced2012-08-07 15:20:40 +02005978
Peter Zijlstraa5cdd402013-07-16 17:09:07 +02005979 if (sample_type & PERF_SAMPLE_STACK_USER) {
Jiri Olsac5ebced2012-08-07 15:20:40 +02005980 perf_output_sample_ustack(handle,
5981 data->stack_user_size,
5982 data->regs_user.regs);
Peter Zijlstraa5cdd402013-07-16 17:09:07 +02005983 }
Andi Kleenc3feedf2013-01-24 16:10:28 +01005984
5985 if (sample_type & PERF_SAMPLE_WEIGHT)
5986 perf_output_put(handle, data->weight);
Stephane Eraniand6be9ad2013-01-24 16:10:31 +01005987
5988 if (sample_type & PERF_SAMPLE_DATA_SRC)
5989 perf_output_put(handle, data->data_src.val);
Peter Zijlstraa5cdd402013-07-16 17:09:07 +02005990
Andi Kleenfdfbbd02013-09-20 07:40:39 -07005991 if (sample_type & PERF_SAMPLE_TRANSACTION)
5992 perf_output_put(handle, data->txn);
5993
Stephane Eranian60e23642014-09-24 13:48:37 +02005994 if (sample_type & PERF_SAMPLE_REGS_INTR) {
5995 u64 abi = data->regs_intr.abi;
5996 /*
5997		 * the first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5998 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5999 */
6000 perf_output_put(handle, abi);
6001
6002 if (abi) {
6003 u64 mask = event->attr.sample_regs_intr;
6004
6005 perf_output_sample_regs(handle,
6006 data->regs_intr.regs,
6007 mask);
6008 }
6009 }
6010
Kan Liangfc7ce9c2017-08-28 20:52:49 -04006011 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
6012 perf_output_put(handle, data->phys_addr);
6013
Peter Zijlstraa5cdd402013-07-16 17:09:07 +02006014 if (!event->attr.watermark) {
6015 int wakeup_events = event->attr.wakeup_events;
6016
6017 if (wakeup_events) {
6018 struct ring_buffer *rb = handle->rb;
6019 int events = local_inc_return(&rb->events);
6020
6021 if (events >= wakeup_events) {
6022 local_sub(wakeup_events, &rb->events);
6023 local_inc(&rb->wakeup);
6024 }
6025 }
6026 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006027}
6028
Kan Liangfc7ce9c2017-08-28 20:52:49 -04006029static u64 perf_virt_to_phys(u64 virt)
6030{
6031 u64 phys_addr = 0;
6032 struct page *p = NULL;
6033
6034 if (!virt)
6035 return 0;
6036
6037 if (virt >= TASK_SIZE) {
6038 /* If it's vmalloc()d memory, leave phys_addr as 0 */
6039 if (virt_addr_valid((void *)(uintptr_t)virt) &&
6040 !(virt >= VMALLOC_START && virt < VMALLOC_END))
6041 phys_addr = (u64)virt_to_phys((void *)(uintptr_t)virt);
6042 } else {
6043 /*
6044		 * Walking the page tables for a user address.
6045		 * Interrupts are disabled, which prevents any
6046		 * teardown of the page tables.
6047		 * Try the IRQ-safe __get_user_pages_fast() first;
6048		 * if it fails, leave phys_addr as 0.
6049 */
6050 if ((current->mm != NULL) &&
6051 (__get_user_pages_fast(virt, 1, 0, &p) == 1))
6052 phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
6053
6054 if (p)
6055 put_page(p);
6056 }
6057
6058 return phys_addr;
6059}
6060
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006061void perf_prepare_sample(struct perf_event_header *header,
6062 struct perf_sample_data *data,
6063 struct perf_event *event,
6064 struct pt_regs *regs)
6065{
6066 u64 sample_type = event->attr.sample_type;
6067
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006068 header->type = PERF_RECORD_SAMPLE;
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02006069 header->size = sizeof(*header) + event->header_size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006070
6071 header->misc = 0;
6072 header->misc |= perf_misc_flags(regs);
6073
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006074 __perf_event_header__init_id(header, data, event);
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02006075
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02006076 if (sample_type & PERF_SAMPLE_IP)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006077 data->ip = perf_instruction_pointer(regs);
6078
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006079 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
6080 int size = 1;
6081
Andrew Vagine6dab5f2012-07-11 18:14:58 +04006082 data->callchain = perf_callchain(event, regs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006083
6084 if (data->callchain)
6085 size += data->callchain->nr;
6086
6087 header->size += size * sizeof(u64);
6088 }
6089
6090 if (sample_type & PERF_SAMPLE_RAW) {
Daniel Borkmann7e3f9772016-07-14 18:08:03 +02006091 struct perf_raw_record *raw = data->raw;
6092 int size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006093
Daniel Borkmann7e3f9772016-07-14 18:08:03 +02006094 if (raw) {
6095 struct perf_raw_frag *frag = &raw->frag;
6096 u32 sum = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006097
Daniel Borkmann7e3f9772016-07-14 18:08:03 +02006098 do {
6099 sum += frag->size;
6100 if (perf_raw_frag_last(frag))
6101 break;
6102 frag = frag->next;
6103 } while (1);
6104
6105 size = round_up(sum + sizeof(u32), sizeof(u64));
6106 raw->size = size - sizeof(u32);
6107 frag->pad = raw->size - sum;
6108 } else {
6109 size = sizeof(u64);
6110 }
6111
6112 header->size += size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006113 }
Stephane Eranianbce38cd2012-02-09 23:20:51 +01006114
6115 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
6116 int size = sizeof(u64); /* nr */
6117 if (data->br_stack) {
6118 size += data->br_stack->nr
6119 * sizeof(struct perf_branch_entry);
6120 }
6121 header->size += size;
6122 }
Jiri Olsa40189942012-08-07 15:20:37 +02006123
Peter Zijlstra25657112014-09-24 13:48:42 +02006124 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
Andy Lutomirski88a7c262015-01-04 10:36:19 -08006125 perf_sample_regs_user(&data->regs_user, regs,
6126 &data->regs_user_copy);
Peter Zijlstra25657112014-09-24 13:48:42 +02006127
Jiri Olsa40189942012-08-07 15:20:37 +02006128 if (sample_type & PERF_SAMPLE_REGS_USER) {
6129 /* regs dump ABI info */
6130 int size = sizeof(u64);
6131
Jiri Olsa40189942012-08-07 15:20:37 +02006132 if (data->regs_user.regs) {
6133 u64 mask = event->attr.sample_regs_user;
6134 size += hweight64(mask) * sizeof(u64);
6135 }
6136
6137 header->size += size;
6138 }
Jiri Olsac5ebced2012-08-07 15:20:40 +02006139
6140 if (sample_type & PERF_SAMPLE_STACK_USER) {
6141 /*
6142		 * Either the PERF_SAMPLE_STACK_USER bit needs to always be
6143		 * processed as the last one, or an additional check must be
6144		 * added when a new sample type is introduced, because we could
6145		 * eat up the rest of the sample size.
6146 */
Jiri Olsac5ebced2012-08-07 15:20:40 +02006147 u16 stack_size = event->attr.sample_stack_user;
6148 u16 size = sizeof(u64);
6149
Jiri Olsac5ebced2012-08-07 15:20:40 +02006150 stack_size = perf_sample_ustack_size(stack_size, header->size,
Peter Zijlstra25657112014-09-24 13:48:42 +02006151 data->regs_user.regs);
Jiri Olsac5ebced2012-08-07 15:20:40 +02006152
6153 /*
6154 * If there is something to dump, add space for the dump
6155 * itself and for the field that tells the dynamic size,
6156		 * which is how many bytes were actually dumped.
6157 */
6158 if (stack_size)
6159 size += sizeof(u64) + stack_size;
6160
6161 data->stack_user_size = stack_size;
6162 header->size += size;
6163 }
Stephane Eranian60e23642014-09-24 13:48:37 +02006164
6165 if (sample_type & PERF_SAMPLE_REGS_INTR) {
6166 /* regs dump ABI info */
6167 int size = sizeof(u64);
6168
6169 perf_sample_regs_intr(&data->regs_intr, regs);
6170
6171 if (data->regs_intr.regs) {
6172 u64 mask = event->attr.sample_regs_intr;
6173
6174 size += hweight64(mask) * sizeof(u64);
6175 }
6176
6177 header->size += size;
6178 }
Kan Liangfc7ce9c2017-08-28 20:52:49 -04006179
6180 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
6181 data->phys_addr = perf_virt_to_phys(data->addr);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006182}
6183
Wang Nan9ecda412016-04-05 14:11:18 +00006184static void __always_inline
6185__perf_event_output(struct perf_event *event,
6186 struct perf_sample_data *data,
6187 struct pt_regs *regs,
6188 int (*output_begin)(struct perf_output_handle *,
6189 struct perf_event *,
6190 unsigned int))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006191{
6192 struct perf_output_handle handle;
6193 struct perf_event_header header;
6194
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02006195 /* protect the callchain buffers */
6196 rcu_read_lock();
6197
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006198 perf_prepare_sample(&header, data, event, regs);
6199
Wang Nan9ecda412016-04-05 14:11:18 +00006200 if (output_begin(&handle, event, header.size))
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02006201 goto exit;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006202
6203 perf_output_sample(&handle, &header, data, event);
6204
6205 perf_output_end(&handle);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02006206
6207exit:
6208 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006209}
6210
Wang Nan9ecda412016-04-05 14:11:18 +00006211void
6212perf_event_output_forward(struct perf_event *event,
6213 struct perf_sample_data *data,
6214 struct pt_regs *regs)
6215{
6216 __perf_event_output(event, data, regs, perf_output_begin_forward);
6217}
6218
6219void
6220perf_event_output_backward(struct perf_event *event,
6221 struct perf_sample_data *data,
6222 struct pt_regs *regs)
6223{
6224 __perf_event_output(event, data, regs, perf_output_begin_backward);
6225}
6226
6227void
6228perf_event_output(struct perf_event *event,
6229 struct perf_sample_data *data,
6230 struct pt_regs *regs)
6231{
6232 __perf_event_output(event, data, regs, perf_output_begin);
6233}
6234
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006235/*
6236 * read event_id
6237 */
6238
6239struct perf_read_event {
6240 struct perf_event_header header;
6241
6242 u32 pid;
6243 u32 tid;
6244};
6245
6246static void
6247perf_event_read_event(struct perf_event *event,
6248 struct task_struct *task)
6249{
6250 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006251 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006252 struct perf_read_event read_event = {
6253 .header = {
6254 .type = PERF_RECORD_READ,
6255 .misc = 0,
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02006256 .size = sizeof(read_event) + event->read_size,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006257 },
6258 .pid = perf_event_pid(event, task),
6259 .tid = perf_event_tid(event, task),
6260 };
6261 int ret;
6262
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006263 perf_event_header__init_id(&read_event.header, &sample, event);
Peter Zijlstraa7ac67e2011-06-27 16:47:16 +02006264 ret = perf_output_begin(&handle, event, read_event.header.size);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006265 if (ret)
6266 return;
6267
6268 perf_output_put(&handle, read_event);
6269 perf_output_read(&handle, event);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006270 perf_event__output_id_sample(event, &handle, &sample);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006271
6272 perf_output_end(&handle);
6273}
6274
Peter Zijlstraaab5b712016-05-12 17:26:46 +02006275typedef void (perf_iterate_f)(struct perf_event *event, void *data);
Jiri Olsa52d857a2013-05-06 18:27:18 +02006276
6277static void
Peter Zijlstraaab5b712016-05-12 17:26:46 +02006278perf_iterate_ctx(struct perf_event_context *ctx,
6279 perf_iterate_f output,
Alexander Shishkinb73e4fe2016-04-27 18:44:45 +03006280 void *data, bool all)
Jiri Olsa52d857a2013-05-06 18:27:18 +02006281{
6282 struct perf_event *event;
6283
6284 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
Alexander Shishkinb73e4fe2016-04-27 18:44:45 +03006285 if (!all) {
6286 if (event->state < PERF_EVENT_STATE_INACTIVE)
6287 continue;
6288 if (!event_filter_match(event))
6289 continue;
6290 }
6291
Jiri Olsa67516842013-07-09 18:56:31 +02006292 output(event, data);
Jiri Olsa52d857a2013-05-06 18:27:18 +02006293 }
6294}
6295
Peter Zijlstraaab5b712016-05-12 17:26:46 +02006296static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
Kan Liangf2fb6be2016-03-23 11:24:37 -07006297{
6298 struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
6299 struct perf_event *event;
6300
6301 list_for_each_entry_rcu(event, &pel->list, sb_list) {
Peter Zijlstra0b8f1e22016-08-04 14:37:24 +02006302 /*
6303 * Skip events that are not fully formed yet; ensure that
6304 * if we observe event->ctx, both event and ctx will be
6305 * complete enough. See perf_install_in_context().
6306 */
6307 if (!smp_load_acquire(&event->ctx))
6308 continue;
6309
Kan Liangf2fb6be2016-03-23 11:24:37 -07006310 if (event->state < PERF_EVENT_STATE_INACTIVE)
6311 continue;
6312 if (!event_filter_match(event))
6313 continue;
6314 output(event, data);
6315 }
6316}
6317
Peter Zijlstraaab5b712016-05-12 17:26:46 +02006318/*
6319 * Iterate all events that need to receive side-band events.
6320 *
6321 * For new callers: ensure that account_pmu_sb_event() includes
6322 * your event, otherwise it might not get delivered.
6323 */
Jiri Olsa4e93ad62015-11-04 16:00:05 +01006324static void
Peter Zijlstraaab5b712016-05-12 17:26:46 +02006325perf_iterate_sb(perf_iterate_f output, void *data,
Jiri Olsa52d857a2013-05-06 18:27:18 +02006326 struct perf_event_context *task_ctx)
6327{
Jiri Olsa52d857a2013-05-06 18:27:18 +02006328 struct perf_event_context *ctx;
Jiri Olsa52d857a2013-05-06 18:27:18 +02006329 int ctxn;
6330
Peter Zijlstraaab5b712016-05-12 17:26:46 +02006331 rcu_read_lock();
6332 preempt_disable();
6333
Jiri Olsa4e93ad62015-11-04 16:00:05 +01006334 /*
Peter Zijlstraaab5b712016-05-12 17:26:46 +02006335 * If we have task_ctx != NULL we only notify the task context itself.
6336 * The task_ctx is set only for EXIT events before releasing task
Jiri Olsa4e93ad62015-11-04 16:00:05 +01006337 * context.
6338 */
6339 if (task_ctx) {
Peter Zijlstraaab5b712016-05-12 17:26:46 +02006340 perf_iterate_ctx(task_ctx, output, data, false);
6341 goto done;
Jiri Olsa4e93ad62015-11-04 16:00:05 +01006342 }
6343
Peter Zijlstraaab5b712016-05-12 17:26:46 +02006344 perf_iterate_sb_cpu(output, data);
Kan Liangf2fb6be2016-03-23 11:24:37 -07006345
6346 for_each_task_context_nr(ctxn) {
Jiri Olsa52d857a2013-05-06 18:27:18 +02006347 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
6348 if (ctx)
Peter Zijlstraaab5b712016-05-12 17:26:46 +02006349 perf_iterate_ctx(ctx, output, data, false);
Jiri Olsa52d857a2013-05-06 18:27:18 +02006350 }
Peter Zijlstraaab5b712016-05-12 17:26:46 +02006351done:
Kan Liangf2fb6be2016-03-23 11:24:37 -07006352 preempt_enable();
Jiri Olsa52d857a2013-05-06 18:27:18 +02006353 rcu_read_unlock();
6354}
6355
Alexander Shishkin375637b2016-04-27 18:44:46 +03006356/*
6357 * Clear all file-based filters at exec; they'll have to be
6358 * reinstated when/if these objects are mmapped again.
6359 */
6360static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
6361{
6362 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
6363 struct perf_addr_filter *filter;
6364 unsigned int restart = 0, count = 0;
6365 unsigned long flags;
6366
6367 if (!has_addr_filter(event))
6368 return;
6369
6370 raw_spin_lock_irqsave(&ifh->lock, flags);
6371 list_for_each_entry(filter, &ifh->list, entry) {
6372 if (filter->inode) {
6373 event->addr_filters_offs[count] = 0;
6374 restart++;
6375 }
6376
6377 count++;
6378 }
6379
6380 if (restart)
6381 event->addr_filters_gen++;
6382 raw_spin_unlock_irqrestore(&ifh->lock, flags);
6383
6384 if (restart)
Alexander Shishkin767ae082016-09-06 16:23:49 +03006385 perf_event_stop(event, 1);
Alexander Shishkin375637b2016-04-27 18:44:46 +03006386}
6387
6388void perf_event_exec(void)
6389{
6390 struct perf_event_context *ctx;
6391 int ctxn;
6392
6393 rcu_read_lock();
6394 for_each_task_context_nr(ctxn) {
6395 ctx = current->perf_event_ctxp[ctxn];
6396 if (!ctx)
6397 continue;
6398
6399 perf_event_enable_on_exec(ctxn);
6400
Peter Zijlstraaab5b712016-05-12 17:26:46 +02006401 perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL,
Alexander Shishkin375637b2016-04-27 18:44:46 +03006402 true);
6403 }
6404 rcu_read_unlock();
6405}
6406
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02006407struct remote_output {
6408 struct ring_buffer *rb;
6409 int err;
6410};
6411
6412static void __perf_event_output_stop(struct perf_event *event, void *data)
6413{
6414 struct perf_event *parent = event->parent;
6415 struct remote_output *ro = data;
6416 struct ring_buffer *rb = ro->rb;
Alexander Shishkin375637b2016-04-27 18:44:46 +03006417 struct stop_event_data sd = {
6418 .event = event,
6419 };
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02006420
6421 if (!has_aux(event))
6422 return;
6423
6424 if (!parent)
6425 parent = event;
6426
6427 /*
6428 * In case of inheritance, it will be the parent that links to the
Alexander Shishkin767ae082016-09-06 16:23:49 +03006429 * ring-buffer, but it will be the child that's actually using it.
6430 *
6431	 * We are using event::rb to determine if the event should be stopped;
6432	 * however, this may race with ring_buffer_attach() (through set_output),
6433 * which will make us skip the event that actually needs to be stopped.
6434 * So ring_buffer_attach() has to stop an aux event before re-assigning
6435 * its rb pointer.
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02006436 */
6437 if (rcu_dereference(parent->rb) == rb)
Alexander Shishkin375637b2016-04-27 18:44:46 +03006438 ro->err = __perf_event_stop(&sd);
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02006439}
6440
6441static int __perf_pmu_output_stop(void *info)
6442{
6443 struct perf_event *event = info;
6444 struct pmu *pmu = event->pmu;
Will Deacon8b6a3fe2016-08-24 10:07:14 +01006445 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02006446 struct remote_output ro = {
6447 .rb = event->rb,
6448 };
6449
6450 rcu_read_lock();
Peter Zijlstraaab5b712016-05-12 17:26:46 +02006451 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02006452 if (cpuctx->task_ctx)
Peter Zijlstraaab5b712016-05-12 17:26:46 +02006453 perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
Alexander Shishkinb73e4fe2016-04-27 18:44:45 +03006454 &ro, false);
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02006455 rcu_read_unlock();
6456
6457 return ro.err;
6458}
6459
6460static void perf_pmu_output_stop(struct perf_event *event)
6461{
6462 struct perf_event *iter;
6463 int err, cpu;
6464
6465restart:
6466 rcu_read_lock();
6467 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
6468 /*
6469 * For per-CPU events, we need to make sure that neither they
6470 * nor their children are running; for cpu==-1 events it's
6471 * sufficient to stop the event itself if it's active, since
6472 * it can't have children.
6473 */
6474 cpu = iter->cpu;
6475 if (cpu == -1)
6476 cpu = READ_ONCE(iter->oncpu);
6477
6478 if (cpu == -1)
6479 continue;
6480
6481 err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
6482 if (err == -EAGAIN) {
6483 rcu_read_unlock();
6484 goto restart;
6485 }
6486 }
6487 rcu_read_unlock();
6488}
6489
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006490/*
6491 * task tracking -- fork/exit
6492 *
Stephane Eranian13d7a242013-08-21 12:10:24 +02006493 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006494 */
6495
6496struct perf_task_event {
6497 struct task_struct *task;
6498 struct perf_event_context *task_ctx;
6499
6500 struct {
6501 struct perf_event_header header;
6502
6503 u32 pid;
6504 u32 ppid;
6505 u32 tid;
6506 u32 ptid;
6507 u64 time;
6508 } event_id;
6509};
6510
Jiri Olsa67516842013-07-09 18:56:31 +02006511static int perf_event_task_match(struct perf_event *event)
6512{
Stephane Eranian13d7a242013-08-21 12:10:24 +02006513 return event->attr.comm || event->attr.mmap ||
6514 event->attr.mmap2 || event->attr.mmap_data ||
6515 event->attr.task;
Jiri Olsa67516842013-07-09 18:56:31 +02006516}
6517
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006518static void perf_event_task_output(struct perf_event *event,
Jiri Olsa52d857a2013-05-06 18:27:18 +02006519 void *data)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006520{
Jiri Olsa52d857a2013-05-06 18:27:18 +02006521 struct perf_task_event *task_event = data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006522 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006523 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006524 struct task_struct *task = task_event->task;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006525 int ret, size = task_event->event_id.header.size;
Mike Galbraith8bb39f92010-03-26 11:11:33 +01006526
Jiri Olsa67516842013-07-09 18:56:31 +02006527 if (!perf_event_task_match(event))
6528 return;
6529
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006530 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006531
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006532 ret = perf_output_begin(&handle, event,
Peter Zijlstraa7ac67e2011-06-27 16:47:16 +02006533 task_event->event_id.header.size);
Peter Zijlstraef607772010-05-18 10:50:41 +02006534 if (ret)
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006535 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006536
6537 task_event->event_id.pid = perf_event_pid(event, task);
6538 task_event->event_id.ppid = perf_event_pid(event, current);
6539
6540 task_event->event_id.tid = perf_event_tid(event, task);
6541 task_event->event_id.ptid = perf_event_tid(event, current);
6542
Peter Zijlstra34f43922015-02-20 14:05:38 +01006543 task_event->event_id.time = perf_event_clock(event);
6544
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006545 perf_output_put(&handle, task_event->event_id);
6546
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006547 perf_event__output_id_sample(event, &handle, &sample);
6548
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006549 perf_output_end(&handle);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006550out:
6551 task_event->event_id.header.size = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006552}
6553
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006554static void perf_event_task(struct task_struct *task,
6555 struct perf_event_context *task_ctx,
6556 int new)
6557{
6558 struct perf_task_event task_event;
6559
6560 if (!atomic_read(&nr_comm_events) &&
6561 !atomic_read(&nr_mmap_events) &&
6562 !atomic_read(&nr_task_events))
6563 return;
6564
6565 task_event = (struct perf_task_event){
6566 .task = task,
6567 .task_ctx = task_ctx,
6568 .event_id = {
6569 .header = {
6570 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
6571 .misc = 0,
6572 .size = sizeof(task_event.event_id),
6573 },
6574 /* .pid */
6575 /* .ppid */
6576 /* .tid */
6577 /* .ptid */
Peter Zijlstra34f43922015-02-20 14:05:38 +01006578 /* .time */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006579 },
6580 };
6581
Peter Zijlstraaab5b712016-05-12 17:26:46 +02006582 perf_iterate_sb(perf_event_task_output,
Jiri Olsa52d857a2013-05-06 18:27:18 +02006583 &task_event,
6584 task_ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006585}
6586
6587void perf_event_fork(struct task_struct *task)
6588{
6589 perf_event_task(task, NULL, 1);
Hari Bathinie4222672017-03-08 02:11:36 +05306590 perf_event_namespaces(task);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006591}
6592
6593/*
6594 * comm tracking
6595 */
6596
6597struct perf_comm_event {
6598 struct task_struct *task;
6599 char *comm;
6600 int comm_size;
6601
6602 struct {
6603 struct perf_event_header header;
6604
6605 u32 pid;
6606 u32 tid;
6607 } event_id;
6608};
6609
Jiri Olsa67516842013-07-09 18:56:31 +02006610static int perf_event_comm_match(struct perf_event *event)
6611{
6612 return event->attr.comm;
6613}
6614
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006615static void perf_event_comm_output(struct perf_event *event,
Jiri Olsa52d857a2013-05-06 18:27:18 +02006616 void *data)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006617{
Jiri Olsa52d857a2013-05-06 18:27:18 +02006618 struct perf_comm_event *comm_event = data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006619 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006620 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006621 int size = comm_event->event_id.header.size;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006622 int ret;
6623
Jiri Olsa67516842013-07-09 18:56:31 +02006624 if (!perf_event_comm_match(event))
6625 return;
6626
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006627 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
6628 ret = perf_output_begin(&handle, event,
Peter Zijlstraa7ac67e2011-06-27 16:47:16 +02006629 comm_event->event_id.header.size);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006630
6631 if (ret)
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006632 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006633
6634 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
6635 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
6636
6637 perf_output_put(&handle, comm_event->event_id);
Frederic Weisbecker76369132011-05-19 19:55:04 +02006638 __output_copy(&handle, comm_event->comm,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006639 comm_event->comm_size);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006640
6641 perf_event__output_id_sample(event, &handle, &sample);
6642
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006643 perf_output_end(&handle);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006644out:
6645 comm_event->event_id.header.size = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006646}
6647
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006648static void perf_event_comm_event(struct perf_comm_event *comm_event)
6649{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006650 char comm[TASK_COMM_LEN];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006651 unsigned int size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006652
6653 memset(comm, 0, sizeof(comm));
Márton Németh96b02d72009-11-21 23:10:15 +01006654 strlcpy(comm, comm_event->task->comm, sizeof(comm));
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006655 size = ALIGN(strlen(comm)+1, sizeof(u64));
6656
6657 comm_event->comm = comm;
6658 comm_event->comm_size = size;
6659
6660 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02006661
Peter Zijlstraaab5b712016-05-12 17:26:46 +02006662 perf_iterate_sb(perf_event_comm_output,
Jiri Olsa52d857a2013-05-06 18:27:18 +02006663 comm_event,
6664 NULL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006665}
6666
Adrian Hunter82b89772014-05-28 11:45:04 +03006667void perf_event_comm(struct task_struct *task, bool exec)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006668{
6669 struct perf_comm_event comm_event;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006670
6671 if (!atomic_read(&nr_comm_events))
6672 return;
6673
6674 comm_event = (struct perf_comm_event){
6675 .task = task,
6676 /* .comm */
6677 /* .comm_size */
6678 .event_id = {
6679 .header = {
6680 .type = PERF_RECORD_COMM,
Adrian Hunter82b89772014-05-28 11:45:04 +03006681 .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006682 /* .size */
6683 },
6684 /* .pid */
6685 /* .tid */
6686 },
6687 };
6688
6689 perf_event_comm_event(&comm_event);
6690}
6691
6692/*
Hari Bathinie4222672017-03-08 02:11:36 +05306693 * namespaces tracking
6694 */
6695
6696struct perf_namespaces_event {
6697 struct task_struct *task;
6698
6699 struct {
6700 struct perf_event_header header;
6701
6702 u32 pid;
6703 u32 tid;
6704 u64 nr_namespaces;
6705 struct perf_ns_link_info link_info[NR_NAMESPACES];
6706 } event_id;
6707};
6708
6709static int perf_event_namespaces_match(struct perf_event *event)
6710{
6711 return event->attr.namespaces;
6712}
6713
6714static void perf_event_namespaces_output(struct perf_event *event,
6715 void *data)
6716{
6717 struct perf_namespaces_event *namespaces_event = data;
6718 struct perf_output_handle handle;
6719 struct perf_sample_data sample;
6720 int ret;
6721
6722 if (!perf_event_namespaces_match(event))
6723 return;
6724
6725 perf_event_header__init_id(&namespaces_event->event_id.header,
6726 &sample, event);
6727 ret = perf_output_begin(&handle, event,
6728 namespaces_event->event_id.header.size);
6729 if (ret)
6730 return;
6731
6732 namespaces_event->event_id.pid = perf_event_pid(event,
6733 namespaces_event->task);
6734 namespaces_event->event_id.tid = perf_event_tid(event,
6735 namespaces_event->task);
6736
6737 perf_output_put(&handle, namespaces_event->event_id);
6738
6739 perf_event__output_id_sample(event, &handle, &sample);
6740
6741 perf_output_end(&handle);
6742}
6743
6744static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
6745 struct task_struct *task,
6746 const struct proc_ns_operations *ns_ops)
6747{
6748 struct path ns_path;
6749 struct inode *ns_inode;
6750 void *error;
6751
6752 error = ns_get_path(&ns_path, task, ns_ops);
6753 if (!error) {
6754 ns_inode = ns_path.dentry->d_inode;
6755 ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev);
6756 ns_link_info->ino = ns_inode->i_ino;
6757 }
6758}
6759
6760void perf_event_namespaces(struct task_struct *task)
6761{
6762 struct perf_namespaces_event namespaces_event;
6763 struct perf_ns_link_info *ns_link_info;
6764
6765 if (!atomic_read(&nr_namespaces_events))
6766 return;
6767
6768 namespaces_event = (struct perf_namespaces_event){
6769 .task = task,
6770 .event_id = {
6771 .header = {
6772 .type = PERF_RECORD_NAMESPACES,
6773 .misc = 0,
6774 .size = sizeof(namespaces_event.event_id),
6775 },
6776 /* .pid */
6777 /* .tid */
6778 .nr_namespaces = NR_NAMESPACES,
6779 /* .link_info[NR_NAMESPACES] */
6780 },
6781 };
6782
6783 ns_link_info = namespaces_event.event_id.link_info;
6784
6785 perf_fill_ns_link_info(&ns_link_info[MNT_NS_INDEX],
6786 task, &mntns_operations);
6787
6788#ifdef CONFIG_USER_NS
6789 perf_fill_ns_link_info(&ns_link_info[USER_NS_INDEX],
6790 task, &userns_operations);
6791#endif
6792#ifdef CONFIG_NET_NS
6793 perf_fill_ns_link_info(&ns_link_info[NET_NS_INDEX],
6794 task, &netns_operations);
6795#endif
6796#ifdef CONFIG_UTS_NS
6797 perf_fill_ns_link_info(&ns_link_info[UTS_NS_INDEX],
6798 task, &utsns_operations);
6799#endif
6800#ifdef CONFIG_IPC_NS
6801 perf_fill_ns_link_info(&ns_link_info[IPC_NS_INDEX],
6802 task, &ipcns_operations);
6803#endif
6804#ifdef CONFIG_PID_NS
6805 perf_fill_ns_link_info(&ns_link_info[PID_NS_INDEX],
6806 task, &pidns_operations);
6807#endif
6808#ifdef CONFIG_CGROUPS
6809 perf_fill_ns_link_info(&ns_link_info[CGROUP_NS_INDEX],
6810 task, &cgroupns_operations);
6811#endif
6812
6813 perf_iterate_sb(perf_event_namespaces_output,
6814 &namespaces_event,
6815 NULL);
6816}
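
/*
 * Illustrative sketch (not part of this file) of how a ring-buffer consumer
 * might read the record emitted above. The mirrored struct follows event_id;
 * perf_ns_link_info and the *_NS_INDEX constants come from
 * <linux/perf_event.h>, which remains the authoritative layout. The dev/ino
 * pair names the nsfs inode that also backs /proc/<pid>/ns/<name>; when
 * sample_id_all is set, sample_id data follows the array.
 *
 *	#include <stdio.h>
 *	#include <linux/perf_event.h>
 *
 *	struct namespaces_record {
 *		struct perf_event_header header;
 *		__u32 pid, tid;
 *		__u64 nr_namespaces;
 *		struct perf_ns_link_info link_info[];
 *	};
 *
 *	static void print_net_ns(const struct namespaces_record *rec)
 *	{
 *		if (NET_NS_INDEX < rec->nr_namespaces)
 *			printf("net ns: dev %llu ino %llu\n",
 *			       (unsigned long long)rec->link_info[NET_NS_INDEX].dev,
 *			       (unsigned long long)rec->link_info[NET_NS_INDEX].ino);
 *	}
 */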
6817
6818/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006819 * mmap tracking
6820 */
6821
6822struct perf_mmap_event {
6823 struct vm_area_struct *vma;
6824
6825 const char *file_name;
6826 int file_size;
Stephane Eranian13d7a242013-08-21 12:10:24 +02006827 int maj, min;
6828 u64 ino;
6829 u64 ino_generation;
Peter Zijlstraf972eb62014-05-19 15:13:47 -04006830 u32 prot, flags;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006831
6832 struct {
6833 struct perf_event_header header;
6834
6835 u32 pid;
6836 u32 tid;
6837 u64 start;
6838 u64 len;
6839 u64 pgoff;
6840 } event_id;
6841};
6842
Jiri Olsa67516842013-07-09 18:56:31 +02006843static int perf_event_mmap_match(struct perf_event *event,
6844 void *data)
6845{
6846 struct perf_mmap_event *mmap_event = data;
6847 struct vm_area_struct *vma = mmap_event->vma;
6848 int executable = vma->vm_flags & VM_EXEC;
6849
6850 return (!executable && event->attr.mmap_data) ||
Stephane Eranian13d7a242013-08-21 12:10:24 +02006851 (executable && (event->attr.mmap || event->attr.mmap2));
Jiri Olsa67516842013-07-09 18:56:31 +02006852}
6853
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006854static void perf_event_mmap_output(struct perf_event *event,
Jiri Olsa52d857a2013-05-06 18:27:18 +02006855 void *data)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006856{
Jiri Olsa52d857a2013-05-06 18:27:18 +02006857 struct perf_mmap_event *mmap_event = data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006858 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006859 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006860 int size = mmap_event->event_id.header.size;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006861 int ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006862
Jiri Olsa67516842013-07-09 18:56:31 +02006863 if (!perf_event_mmap_match(event, data))
6864 return;
6865
Stephane Eranian13d7a242013-08-21 12:10:24 +02006866 if (event->attr.mmap2) {
6867 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
6868 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
6869 mmap_event->event_id.header.size += sizeof(mmap_event->min);
6870 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
Arnaldo Carvalho de Melod008d522013-09-10 10:24:05 -03006871 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
Peter Zijlstraf972eb62014-05-19 15:13:47 -04006872 mmap_event->event_id.header.size += sizeof(mmap_event->prot);
6873 mmap_event->event_id.header.size += sizeof(mmap_event->flags);
Stephane Eranian13d7a242013-08-21 12:10:24 +02006874 }
6875
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006876 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
6877 ret = perf_output_begin(&handle, event,
Peter Zijlstraa7ac67e2011-06-27 16:47:16 +02006878 mmap_event->event_id.header.size);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006879 if (ret)
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006880 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006881
6882 mmap_event->event_id.pid = perf_event_pid(event, current);
6883 mmap_event->event_id.tid = perf_event_tid(event, current);
6884
6885 perf_output_put(&handle, mmap_event->event_id);
Stephane Eranian13d7a242013-08-21 12:10:24 +02006886
6887 if (event->attr.mmap2) {
6888 perf_output_put(&handle, mmap_event->maj);
6889 perf_output_put(&handle, mmap_event->min);
6890 perf_output_put(&handle, mmap_event->ino);
6891 perf_output_put(&handle, mmap_event->ino_generation);
Peter Zijlstraf972eb62014-05-19 15:13:47 -04006892 perf_output_put(&handle, mmap_event->prot);
6893 perf_output_put(&handle, mmap_event->flags);
Stephane Eranian13d7a242013-08-21 12:10:24 +02006894 }
6895
Frederic Weisbecker76369132011-05-19 19:55:04 +02006896 __output_copy(&handle, mmap_event->file_name,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006897 mmap_event->file_size);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006898
6899 perf_event__output_id_sample(event, &handle, &sample);
6900
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006901 perf_output_end(&handle);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006902out:
6903 mmap_event->event_id.header.size = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006904}
6905
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006906static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
6907{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006908 struct vm_area_struct *vma = mmap_event->vma;
6909 struct file *file = vma->vm_file;
Stephane Eranian13d7a242013-08-21 12:10:24 +02006910 int maj = 0, min = 0;
6911 u64 ino = 0, gen = 0;
Peter Zijlstraf972eb62014-05-19 15:13:47 -04006912 u32 prot = 0, flags = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006913 unsigned int size;
6914 char tmp[16];
6915 char *buf = NULL;
Peter Zijlstra2c42cfbf2013-10-17 00:06:46 +02006916 char *name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006917
Peter Zijlstra0b3589b2017-01-26 23:15:08 +01006918 if (vma->vm_flags & VM_READ)
6919 prot |= PROT_READ;
6920 if (vma->vm_flags & VM_WRITE)
6921 prot |= PROT_WRITE;
6922 if (vma->vm_flags & VM_EXEC)
6923 prot |= PROT_EXEC;
6924
6925 if (vma->vm_flags & VM_MAYSHARE)
6926 flags = MAP_SHARED;
6927 else
6928 flags = MAP_PRIVATE;
6929
6930 if (vma->vm_flags & VM_DENYWRITE)
6931 flags |= MAP_DENYWRITE;
6932 if (vma->vm_flags & VM_MAYEXEC)
6933 flags |= MAP_EXECUTABLE;
6934 if (vma->vm_flags & VM_LOCKED)
6935 flags |= MAP_LOCKED;
6936 if (vma->vm_flags & VM_HUGETLB)
6937 flags |= MAP_HUGETLB;
6938
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006939 if (file) {
Stephane Eranian13d7a242013-08-21 12:10:24 +02006940 struct inode *inode;
6941 dev_t dev;
Oleg Nesterov3ea2f2b2013-10-16 22:10:04 +02006942
Peter Zijlstra2c42cfbf2013-10-17 00:06:46 +02006943 buf = kmalloc(PATH_MAX, GFP_KERNEL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006944 if (!buf) {
Oleg Nesterovc7e548b2013-10-17 20:24:17 +02006945 name = "//enomem";
6946 goto cpy_name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006947 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006948 /*
Oleg Nesterov3ea2f2b2013-10-16 22:10:04 +02006949 * d_path() works from the end of the buffer backwards, so we
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006950 * need to add enough zero bytes after the string to handle
6951 * the 64bit alignment we do later.
6952 */
Miklos Szeredi9bf39ab2015-06-19 10:29:13 +02006953 name = file_path(file, buf, PATH_MAX - sizeof(u64));
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006954 if (IS_ERR(name)) {
Oleg Nesterovc7e548b2013-10-17 20:24:17 +02006955 name = "//toolong";
6956 goto cpy_name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006957 }
Stephane Eranian13d7a242013-08-21 12:10:24 +02006958 inode = file_inode(vma->vm_file);
6959 dev = inode->i_sb->s_dev;
6960 ino = inode->i_ino;
6961 gen = inode->i_generation;
6962 maj = MAJOR(dev);
6963 min = MINOR(dev);
Peter Zijlstraf972eb62014-05-19 15:13:47 -04006964
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006965 goto got_name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006966 } else {
Jiri Olsafbe26ab2014-07-14 17:57:19 +02006967 if (vma->vm_ops && vma->vm_ops->name) {
6968 name = (char *) vma->vm_ops->name(vma);
6969 if (name)
6970 goto cpy_name;
6971 }
6972
Peter Zijlstra2c42cfbf2013-10-17 00:06:46 +02006973 name = (char *)arch_vma_name(vma);
Oleg Nesterovc7e548b2013-10-17 20:24:17 +02006974 if (name)
6975 goto cpy_name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006976
Oleg Nesterov32c5fb72013-10-16 22:09:45 +02006977 if (vma->vm_start <= vma->vm_mm->start_brk &&
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006978 vma->vm_end >= vma->vm_mm->brk) {
Oleg Nesterovc7e548b2013-10-17 20:24:17 +02006979 name = "[heap]";
6980 goto cpy_name;
Oleg Nesterov32c5fb72013-10-16 22:09:45 +02006981 }
6982 if (vma->vm_start <= vma->vm_mm->start_stack &&
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006983 vma->vm_end >= vma->vm_mm->start_stack) {
Oleg Nesterovc7e548b2013-10-17 20:24:17 +02006984 name = "[stack]";
6985 goto cpy_name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006986 }
6987
Oleg Nesterovc7e548b2013-10-17 20:24:17 +02006988 name = "//anon";
6989 goto cpy_name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006990 }
6991
Oleg Nesterovc7e548b2013-10-17 20:24:17 +02006992cpy_name:
6993 strlcpy(tmp, name, sizeof(tmp));
6994 name = tmp;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006995got_name:
Peter Zijlstra2c42cfbf2013-10-17 00:06:46 +02006996 /*
6997 * Since our buffer works in 8 byte units we need to align our string
6998 * size to a multiple of 8. However, we must guarantee the tail end is
6999 * zero'd out to avoid leaking random bits to userspace.
7000 */
7001 size = strlen(name)+1;
7002 while (!IS_ALIGNED(size, sizeof(u64)))
7003 name[size++] = '\0';
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007004
7005 mmap_event->file_name = name;
7006 mmap_event->file_size = size;
Stephane Eranian13d7a242013-08-21 12:10:24 +02007007 mmap_event->maj = maj;
7008 mmap_event->min = min;
7009 mmap_event->ino = ino;
7010 mmap_event->ino_generation = gen;
Peter Zijlstraf972eb62014-05-19 15:13:47 -04007011 mmap_event->prot = prot;
7012 mmap_event->flags = flags;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007013
Stephane Eranian2fe85422013-01-24 16:10:39 +01007014 if (!(vma->vm_flags & VM_EXEC))
7015 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
7016
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007017 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
7018
Peter Zijlstraaab5b712016-05-12 17:26:46 +02007019 perf_iterate_sb(perf_event_mmap_output,
Jiri Olsa52d857a2013-05-06 18:27:18 +02007020 mmap_event,
7021 NULL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007022
7023 kfree(buf);
7024}
7025
Alexander Shishkin375637b2016-04-27 18:44:46 +03007026/*
Alexander Shishkin375637b2016-04-27 18:44:46 +03007027 * Check whether inode and address range match filter criteria.
7028 */
7029static bool perf_addr_filter_match(struct perf_addr_filter *filter,
7030 struct file *file, unsigned long offset,
7031 unsigned long size)
7032{
Al Viro45063092016-12-04 18:24:56 -05007033 if (filter->inode != file_inode(file))
Alexander Shishkin375637b2016-04-27 18:44:46 +03007034 return false;
7035
7036 if (filter->offset > offset + size)
7037 return false;
7038
7039 if (filter->offset + filter->size < offset)
7040 return false;
7041
7042 return true;
7043}
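
/*
 * Worked example of the overlap test above (illustrative numbers): a filter
 * covering file offsets [0x1000, 0x1200] matches a mapping of the same inode
 * with offset 0 and size 0x2000, because 0x1000 <= 0 + 0x2000 and
 * 0x1200 >= 0; a mapping starting at file offset 0x4000 does not match,
 * since filter->offset + filter->size (0x1200) < 0x4000.
 */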
7044
7045static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
7046{
7047 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
7048 struct vm_area_struct *vma = data;
7049 unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags;
7050 struct file *file = vma->vm_file;
7051 struct perf_addr_filter *filter;
7052 unsigned int restart = 0, count = 0;
7053
7054 if (!has_addr_filter(event))
7055 return;
7056
7057 if (!file)
7058 return;
7059
7060 raw_spin_lock_irqsave(&ifh->lock, flags);
7061 list_for_each_entry(filter, &ifh->list, entry) {
7062 if (perf_addr_filter_match(filter, file, off,
7063 vma->vm_end - vma->vm_start)) {
7064 event->addr_filters_offs[count] = vma->vm_start;
7065 restart++;
7066 }
7067
7068 count++;
7069 }
7070
7071 if (restart)
7072 event->addr_filters_gen++;
7073 raw_spin_unlock_irqrestore(&ifh->lock, flags);
7074
7075 if (restart)
Alexander Shishkin767ae082016-09-06 16:23:49 +03007076 perf_event_stop(event, 1);
Alexander Shishkin375637b2016-04-27 18:44:46 +03007077}
7078
7079/*
7080 * Adjust all task's events' filters to the new vma
7081 */
7082static void perf_addr_filters_adjust(struct vm_area_struct *vma)
7083{
7084 struct perf_event_context *ctx;
7085 int ctxn;
7086
Mathieu Poirier12b40a22016-07-18 10:43:06 -06007087 /*
7088 * Data tracing isn't supported yet and as such there is no need
7089 * to keep track of anything that isn't related to executable code:
7090 */
7091 if (!(vma->vm_flags & VM_EXEC))
7092 return;
7093
Alexander Shishkin375637b2016-04-27 18:44:46 +03007094 rcu_read_lock();
7095 for_each_task_context_nr(ctxn) {
7096 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
7097 if (!ctx)
7098 continue;
7099
Peter Zijlstraaab5b712016-05-12 17:26:46 +02007100 perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
Alexander Shishkin375637b2016-04-27 18:44:46 +03007101 }
7102 rcu_read_unlock();
7103}
7104
Eric B Munson3af9e852010-05-18 15:30:49 +01007105void perf_event_mmap(struct vm_area_struct *vma)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007106{
7107 struct perf_mmap_event mmap_event;
7108
7109 if (!atomic_read(&nr_mmap_events))
7110 return;
7111
7112 mmap_event = (struct perf_mmap_event){
7113 .vma = vma,
7114 /* .file_name */
7115 /* .file_size */
7116 .event_id = {
7117 .header = {
7118 .type = PERF_RECORD_MMAP,
Zhang, Yanmin39447b32010-04-19 13:32:41 +08007119 .misc = PERF_RECORD_MISC_USER,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007120 /* .size */
7121 },
7122 /* .pid */
7123 /* .tid */
7124 .start = vma->vm_start,
7125 .len = vma->vm_end - vma->vm_start,
Peter Zijlstra3a0304e2010-02-26 10:33:41 +01007126 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007127 },
Stephane Eranian13d7a242013-08-21 12:10:24 +02007128 /* .maj (attr_mmap2 only) */
7129 /* .min (attr_mmap2 only) */
7130 /* .ino (attr_mmap2 only) */
7131 /* .ino_generation (attr_mmap2 only) */
Peter Zijlstraf972eb62014-05-19 15:13:47 -04007132 /* .prot (attr_mmap2 only) */
7133 /* .flags (attr_mmap2 only) */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007134 };
7135
Alexander Shishkin375637b2016-04-27 18:44:46 +03007136 perf_addr_filters_adjust(vma);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007137 perf_event_mmap_event(&mmap_event);
7138}
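
/*
 * Illustrative sketch (not part of this file) of the PERF_RECORD_MMAP2
 * payload a consumer sees when attr.mmap2 is set, mirroring the output
 * order in perf_event_mmap_output() above; <linux/perf_event.h> remains
 * the authoritative ABI description. When sample_id_all is set, sample_id
 * data follows the filename.
 *
 *	struct mmap2_record {
 *		struct perf_event_header header;	// PERF_RECORD_MMAP2
 *		__u32 pid, tid;
 *		__u64 addr;		// vma->vm_start
 *		__u64 len;		// vma->vm_end - vma->vm_start
 *		__u64 pgoff;		// file offset in bytes
 *		__u32 maj, min;
 *		__u64 ino, ino_generation;
 *		__u32 prot, flags;	// PROT_* / MAP_* bits built above
 *		char  filename[];	// zero-padded to a u64 boundary
 *	};
 */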
7139
Alexander Shishkin68db7e92015-01-14 14:18:15 +02007140void perf_event_aux_event(struct perf_event *event, unsigned long head,
7141 unsigned long size, u64 flags)
7142{
7143 struct perf_output_handle handle;
7144 struct perf_sample_data sample;
7145 struct perf_aux_event {
7146 struct perf_event_header header;
7147 u64 offset;
7148 u64 size;
7149 u64 flags;
7150 } rec = {
7151 .header = {
7152 .type = PERF_RECORD_AUX,
7153 .misc = 0,
7154 .size = sizeof(rec),
7155 },
7156 .offset = head,
7157 .size = size,
7158 .flags = flags,
7159 };
7160 int ret;
7161
7162 perf_event_header__init_id(&rec.header, &sample, event);
7163 ret = perf_output_begin(&handle, event, rec.header.size);
7164
7165 if (ret)
7166 return;
7167
7168 perf_output_put(&handle, rec);
7169 perf_event__output_id_sample(event, &handle, &sample);
7170
7171 perf_output_end(&handle);
7172}
7173
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007174/*
Kan Liangf38b0db2015-05-10 15:13:14 -04007175 * Lost/dropped samples logging
7176 */
7177void perf_log_lost_samples(struct perf_event *event, u64 lost)
7178{
7179 struct perf_output_handle handle;
7180 struct perf_sample_data sample;
7181 int ret;
7182
7183 struct {
7184 struct perf_event_header header;
7185 u64 lost;
7186 } lost_samples_event = {
7187 .header = {
7188 .type = PERF_RECORD_LOST_SAMPLES,
7189 .misc = 0,
7190 .size = sizeof(lost_samples_event),
7191 },
7192 .lost = lost,
7193 };
7194
7195 perf_event_header__init_id(&lost_samples_event.header, &sample, event);
7196
7197 ret = perf_output_begin(&handle, event,
7198 lost_samples_event.header.size);
7199 if (ret)
7200 return;
7201
7202 perf_output_put(&handle, lost_samples_event);
7203 perf_event__output_id_sample(event, &handle, &sample);
7204 perf_output_end(&handle);
7205}
7206
7207/*
Adrian Hunter45ac1402015-07-21 12:44:02 +03007208 * context_switch tracking
7209 */
7210
7211struct perf_switch_event {
7212 struct task_struct *task;
7213 struct task_struct *next_prev;
7214
7215 struct {
7216 struct perf_event_header header;
7217 u32 next_prev_pid;
7218 u32 next_prev_tid;
7219 } event_id;
7220};
7221
7222static int perf_event_switch_match(struct perf_event *event)
7223{
7224 return event->attr.context_switch;
7225}
7226
7227static void perf_event_switch_output(struct perf_event *event, void *data)
7228{
7229 struct perf_switch_event *se = data;
7230 struct perf_output_handle handle;
7231 struct perf_sample_data sample;
7232 int ret;
7233
7234 if (!perf_event_switch_match(event))
7235 return;
7236
7237 /* Only CPU-wide events are allowed to see next/prev pid/tid */
7238 if (event->ctx->task) {
7239 se->event_id.header.type = PERF_RECORD_SWITCH;
7240 se->event_id.header.size = sizeof(se->event_id.header);
7241 } else {
7242 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
7243 se->event_id.header.size = sizeof(se->event_id);
7244 se->event_id.next_prev_pid =
7245 perf_event_pid(event, se->next_prev);
7246 se->event_id.next_prev_tid =
7247 perf_event_tid(event, se->next_prev);
7248 }
7249
7250 perf_event_header__init_id(&se->event_id.header, &sample, event);
7251
7252 ret = perf_output_begin(&handle, event, se->event_id.header.size);
7253 if (ret)
7254 return;
7255
7256 if (event->ctx->task)
7257 perf_output_put(&handle, se->event_id.header);
7258 else
7259 perf_output_put(&handle, se->event_id);
7260
7261 perf_event__output_id_sample(event, &handle, &sample);
7262
7263 perf_output_end(&handle);
7264}
7265
7266static void perf_event_switch(struct task_struct *task,
7267 struct task_struct *next_prev, bool sched_in)
7268{
7269 struct perf_switch_event switch_event;
7270
7271 /* N.B. caller checks nr_switch_events != 0 */
7272
7273 switch_event = (struct perf_switch_event){
7274 .task = task,
7275 .next_prev = next_prev,
7276 .event_id = {
7277 .header = {
7278 /* .type */
7279 .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
7280 /* .size */
7281 },
7282 /* .next_prev_pid */
7283 /* .next_prev_tid */
7284 },
7285 };
7286
Peter Zijlstraaab5b712016-05-12 17:26:46 +02007287 perf_iterate_sb(perf_event_switch_output,
Adrian Hunter45ac1402015-07-21 12:44:02 +03007288 &switch_event,
7289 NULL);
7290}
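
/*
 * Illustrative userspace sketch (not part of this file): requesting the
 * switch records generated above. A per-task event only ever sees the bare
 * PERF_RECORD_SWITCH header, while a CPU-wide event (pid == -1) gets
 * PERF_RECORD_SWITCH_CPU_WIDE including next_prev_pid/tid; sched-out is
 * flagged with PERF_RECORD_MISC_SWITCH_OUT in header.misc.
 * (Headers as in the earlier comm sketch: <linux/perf_event.h>,
 * <sys/syscall.h>, <unistd.h>.)
 *
 *	struct perf_event_attr attr = {
 *		.size		= sizeof(attr),
 *		.type		= PERF_TYPE_SOFTWARE,
 *		.config		= PERF_COUNT_SW_DUMMY,
 *		.context_switch	= 1,
 *		.sample_id_all	= 1,
 *	};
 *	// CPU-wide on CPU 0: pid == -1, cpu == 0
 *	int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 */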
7291
7292/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007293 * IRQ throttle logging
7294 */
7295
7296static void perf_log_throttle(struct perf_event *event, int enable)
7297{
7298 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02007299 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007300 int ret;
7301
7302 struct {
7303 struct perf_event_header header;
7304 u64 time;
7305 u64 id;
7306 u64 stream_id;
7307 } throttle_event = {
7308 .header = {
7309 .type = PERF_RECORD_THROTTLE,
7310 .misc = 0,
7311 .size = sizeof(throttle_event),
7312 },
Peter Zijlstra34f43922015-02-20 14:05:38 +01007313 .time = perf_event_clock(event),
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007314 .id = primary_event_id(event),
7315 .stream_id = event->id,
7316 };
7317
7318 if (enable)
7319 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
7320
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02007321 perf_event_header__init_id(&throttle_event.header, &sample, event);
7322
7323 ret = perf_output_begin(&handle, event,
Peter Zijlstraa7ac67e2011-06-27 16:47:16 +02007324 throttle_event.header.size);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007325 if (ret)
7326 return;
7327
7328 perf_output_put(&handle, throttle_event);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02007329 perf_event__output_id_sample(event, &handle, &sample);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007330 perf_output_end(&handle);
7331}
7332
Alexander Shishkin8d4e6c42017-03-30 18:39:56 +03007333void perf_event_itrace_started(struct perf_event *event)
7334{
7335 event->attach_state |= PERF_ATTACH_ITRACE;
7336}
7337
Alexander Shishkinec0d7722015-01-14 14:18:23 +02007338static void perf_log_itrace_start(struct perf_event *event)
7339{
7340 struct perf_output_handle handle;
7341 struct perf_sample_data sample;
7342 struct perf_aux_event {
7343 struct perf_event_header header;
7344 u32 pid;
7345 u32 tid;
7346 } rec;
7347 int ret;
7348
7349 if (event->parent)
7350 event = event->parent;
7351
7352 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
Alexander Shishkin8d4e6c42017-03-30 18:39:56 +03007353 event->attach_state & PERF_ATTACH_ITRACE)
Alexander Shishkinec0d7722015-01-14 14:18:23 +02007354 return;
7355
Alexander Shishkinec0d7722015-01-14 14:18:23 +02007356 rec.header.type = PERF_RECORD_ITRACE_START;
7357 rec.header.misc = 0;
7358 rec.header.size = sizeof(rec);
7359 rec.pid = perf_event_pid(event, current);
7360 rec.tid = perf_event_tid(event, current);
7361
7362 perf_event_header__init_id(&rec.header, &sample, event);
7363 ret = perf_output_begin(&handle, event, rec.header.size);
7364
7365 if (ret)
7366 return;
7367
7368 perf_output_put(&handle, rec);
7369 perf_event__output_id_sample(event, &handle, &sample);
7370
7371 perf_output_end(&handle);
7372}
7373
Jiri Olsa475113d2016-12-28 14:31:03 +01007374static int
7375__perf_event_account_interrupt(struct perf_event *event, int throttle)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007376{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007377 struct hw_perf_event *hwc = &event->hw;
7378 int ret = 0;
Jiri Olsa475113d2016-12-28 14:31:03 +01007379 u64 seq;
Peter Zijlstra96398822010-11-24 18:55:29 +01007380
Stephane Eraniane050e3f2012-01-26 17:03:19 +01007381 seq = __this_cpu_read(perf_throttled_seq);
7382 if (seq != hwc->interrupts_seq) {
7383 hwc->interrupts_seq = seq;
7384 hwc->interrupts = 1;
7385 } else {
7386 hwc->interrupts++;
7387 if (unlikely(throttle
7388 && hwc->interrupts >= max_samples_per_tick)) {
7389 __this_cpu_inc(perf_throttled_count);
Frederic Weisbecker555e0c12015-07-16 17:42:29 +02007390 tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
Peter Zijlstra163ec432011-02-16 11:22:34 +01007391 hwc->interrupts = MAX_INTERRUPTS;
7392 perf_log_throttle(event, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007393 ret = 1;
7394 }
Stephane Eraniane050e3f2012-01-26 17:03:19 +01007395 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007396
7397 if (event->attr.freq) {
7398 u64 now = perf_clock();
Peter Zijlstraabd50712010-01-26 18:50:16 +01007399 s64 delta = now - hwc->freq_time_stamp;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007400
Peter Zijlstraabd50712010-01-26 18:50:16 +01007401 hwc->freq_time_stamp = now;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007402
Peter Zijlstraabd50712010-01-26 18:50:16 +01007403 if (delta > 0 && delta < 2*TICK_NSEC)
Stephane Eranianf39d47f2012-02-07 14:39:57 +01007404 perf_adjust_period(event, delta, hwc->last_period, true);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007405 }
7406
Jiri Olsa475113d2016-12-28 14:31:03 +01007407 return ret;
7408}
7409
7410int perf_event_account_interrupt(struct perf_event *event)
7411{
7412 return __perf_event_account_interrupt(event, 1);
7413}
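
/*
 * Background note (illustrative summary): max_samples_per_tick used above
 * follows the kernel.perf_event_max_sample_rate sysctl. Once a sampling
 * event fires more often than that within a single tick it is marked
 * MAX_INTERRUPTS, a PERF_RECORD_THROTTLE is logged, and the matching
 * UNTHROTTLE record is emitted when the tick handler re-enables it.
 */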
7414
7415/*
7416 * Generic event overflow handling, sampling.
7417 */
7418
7419static int __perf_event_overflow(struct perf_event *event,
7420 int throttle, struct perf_sample_data *data,
7421 struct pt_regs *regs)
7422{
7423 int events = atomic_read(&event->event_limit);
7424 int ret = 0;
7425
7426 /*
7427 * Non-sampling counters might still use the PMI to fold short
7428 * hardware counters, ignore those.
7429 */
7430 if (unlikely(!is_sampling_event(event)))
7431 return 0;
7432
7433 ret = __perf_event_account_interrupt(event, throttle);
7434
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007435 /*
7436 * XXX event_limit might not quite work as expected on inherited
7437 * events
7438 */
7439
7440 event->pending_kill = POLL_IN;
7441 if (events && atomic_dec_and_test(&event->event_limit)) {
7442 ret = 1;
7443 event->pending_kill = POLL_HUP;
Jiri Olsa5aab90c2016-10-26 11:48:24 +02007444
7445 perf_event_disable_inatomic(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007446 }
7447
Alexei Starovoitovaa6a5f32016-09-01 18:37:24 -07007448 READ_ONCE(event->overflow_handler)(event, data, regs);
Peter Zijlstra453f19e2009-11-20 22:19:43 +01007449
Peter Zijlstrafed66e2cd2015-06-11 10:32:01 +02007450 if (*perf_event_fasync(event) && event->pending_kill) {
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007451 event->pending_wakeup = 1;
7452 irq_work_queue(&event->pending);
Peter Zijlstraf506b3d2011-05-26 17:02:53 +02007453 }
7454
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007455 return ret;
7456}
7457
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007458int perf_event_overflow(struct perf_event *event,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007459 struct perf_sample_data *data,
7460 struct pt_regs *regs)
7461{
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007462 return __perf_event_overflow(event, 1, data, regs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007463}
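
/*
 * Illustrative userspace sketch (not part of this file) of the event_limit
 * path above: PERF_EVENT_IOC_REFRESH arms event_limit, each overflow
 * decrements it, and when it reaches zero the event is disabled and the
 * wakeup carries POLL_HUP. A self-monitoring task could do roughly:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <signal.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/perf_event.h>
 *
 *	static void arm_one_overflow(int fd)
 *	{
 *		fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *		fcntl(fd, F_SETSIG, SIGIO);
 *		fcntl(fd, F_SETOWN, getpid());
 *		ioctl(fd, PERF_EVENT_IOC_REFRESH, 1);	// allow one more overflow
 *	}
 */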
7464
7465/*
7466 * Generic software event infrastructure
7467 */
7468
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007469struct swevent_htable {
7470 struct swevent_hlist *swevent_hlist;
7471 struct mutex hlist_mutex;
7472 int hlist_refcount;
7473
7474 /* Recursion avoidance in each contexts */
7475 int recursion[PERF_NR_CONTEXTS];
7476};
7477
7478static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
7479
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007480/*
7481 * We directly increment event->count and keep a second value in
7482 * event->hw.period_left to count intervals. This period event
7483 * is kept in the range [-sample_period, 0] so that we can use the
7484 * sign as trigger.
7485 */
7486
Jiri Olsaab573842013-05-01 17:25:44 +02007487u64 perf_swevent_set_period(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007488{
7489 struct hw_perf_event *hwc = &event->hw;
7490 u64 period = hwc->last_period;
7491 u64 nr, offset;
7492 s64 old, val;
7493
7494 hwc->last_period = hwc->sample_period;
7495
7496again:
Peter Zijlstrae7850592010-05-21 14:43:08 +02007497 old = val = local64_read(&hwc->period_left);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007498 if (val < 0)
7499 return 0;
7500
7501 nr = div64_u64(period + val, period);
7502 offset = nr * period;
7503 val -= offset;
Peter Zijlstrae7850592010-05-21 14:43:08 +02007504 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007505 goto again;
7506
7507 return nr;
7508}
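
/*
 * Worked example for the function above (illustrative numbers): with
 * period == 100 and period_left having been pushed up to +250 by
 * perf_swevent_event(), nr = (100 + 250) / 100 = 3 periods have elapsed,
 * offset = 300, and period_left is moved back to 250 - 300 = -50, i.e.
 * 50 more counts until the next trigger.
 */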
7509
Peter Zijlstra0cff7842009-11-20 22:19:44 +01007510static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007511 struct perf_sample_data *data,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007512 struct pt_regs *regs)
7513{
7514 struct hw_perf_event *hwc = &event->hw;
7515 int throttle = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007516
Peter Zijlstra0cff7842009-11-20 22:19:44 +01007517 if (!overflow)
7518 overflow = perf_swevent_set_period(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007519
7520 if (hwc->interrupts == MAX_INTERRUPTS)
7521 return;
7522
7523 for (; overflow; overflow--) {
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007524 if (__perf_event_overflow(event, throttle,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007525 data, regs)) {
7526 /*
7527 * We inhibit the overflow from happening when
7528 * hwc->interrupts == MAX_INTERRUPTS.
7529 */
7530 break;
7531 }
7532 throttle = 1;
7533 }
7534}
7535
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007536static void perf_swevent_event(struct perf_event *event, u64 nr,
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007537 struct perf_sample_data *data,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007538 struct pt_regs *regs)
7539{
7540 struct hw_perf_event *hwc = &event->hw;
7541
Peter Zijlstrae7850592010-05-21 14:43:08 +02007542 local64_add(nr, &event->count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007543
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007544 if (!regs)
7545 return;
7546
Franck Bui-Huu6c7e5502010-11-23 16:21:43 +01007547 if (!is_sampling_event(event))
Peter Zijlstra0cff7842009-11-20 22:19:44 +01007548 return;
7549
Andrew Vagin5d81e5c2011-11-07 15:54:12 +03007550 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
7551 data->period = nr;
7552 return perf_swevent_overflow(event, 1, data, regs);
7553 } else
7554 data->period = event->hw.last_period;
7555
Peter Zijlstra0cff7842009-11-20 22:19:44 +01007556 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007557 return perf_swevent_overflow(event, 1, data, regs);
Peter Zijlstra0cff7842009-11-20 22:19:44 +01007558
Peter Zijlstrae7850592010-05-21 14:43:08 +02007559 if (local64_add_negative(nr, &hwc->period_left))
Peter Zijlstra0cff7842009-11-20 22:19:44 +01007560 return;
7561
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007562 perf_swevent_overflow(event, 0, data, regs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007563}
7564
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01007565static int perf_exclude_event(struct perf_event *event,
7566 struct pt_regs *regs)
7567{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007568 if (event->hw.state & PERF_HES_STOPPED)
Frederic Weisbecker91b2f482011-03-07 21:27:08 +01007569 return 1;
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007570
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01007571 if (regs) {
7572 if (event->attr.exclude_user && user_mode(regs))
7573 return 1;
7574
7575 if (event->attr.exclude_kernel && !user_mode(regs))
7576 return 1;
7577 }
7578
7579 return 0;
7580}
7581
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007582static int perf_swevent_match(struct perf_event *event,
7583 enum perf_type_id type,
Li Zefan6fb29152009-10-15 11:21:42 +08007584 u32 event_id,
7585 struct perf_sample_data *data,
7586 struct pt_regs *regs)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007587{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007588 if (event->attr.type != type)
7589 return 0;
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01007590
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007591 if (event->attr.config != event_id)
7592 return 0;
7593
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01007594 if (perf_exclude_event(event, regs))
7595 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007596
7597 return 1;
7598}
7599
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007600static inline u64 swevent_hash(u64 type, u32 event_id)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007601{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007602 u64 val = event_id | (type << 32);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007603
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007604 return hash_64(val, SWEVENT_HLIST_BITS);
7605}
7606
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007607static inline struct hlist_head *
7608__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007609{
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007610 u64 hash = swevent_hash(type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007611
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007612 return &hlist->heads[hash];
7613}
7614
7615/* For the read side: events when they trigger */
7616static inline struct hlist_head *
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007617find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007618{
7619 struct swevent_hlist *hlist;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007620
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007621 hlist = rcu_dereference(swhash->swevent_hlist);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007622 if (!hlist)
7623 return NULL;
7624
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007625 return __find_swevent_head(hlist, type, event_id);
7626}
7627
7628/* For the event head insertion and removal in the hlist */
7629static inline struct hlist_head *
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007630find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007631{
7632 struct swevent_hlist *hlist;
7633 u32 event_id = event->attr.config;
7634 u64 type = event->attr.type;
7635
7636 /*
7637 * Event scheduling is always serialized against hlist allocation
7638 * and release. Which makes the protected version suitable here.
7639 * The context lock guarantees that.
7640 */
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007641 hlist = rcu_dereference_protected(swhash->swevent_hlist,
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007642 lockdep_is_held(&event->ctx->lock));
7643 if (!hlist)
7644 return NULL;
7645
7646 return __find_swevent_head(hlist, type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007647}
7648
7649static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007650 u64 nr,
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007651 struct perf_sample_data *data,
7652 struct pt_regs *regs)
7653{
Christoph Lameter4a32fea2014-08-17 12:30:27 -05007654 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007655 struct perf_event *event;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007656 struct hlist_head *head;
7657
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007658 rcu_read_lock();
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007659 head = find_swevent_head_rcu(swhash, type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007660 if (!head)
7661 goto end;
7662
Sasha Levinb67bfe02013-02-27 17:06:00 -08007663 hlist_for_each_entry_rcu(event, head, hlist_entry) {
Li Zefan6fb29152009-10-15 11:21:42 +08007664 if (perf_swevent_match(event, type, event_id, data, regs))
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007665 perf_swevent_event(event, nr, data, regs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007666 }
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007667end:
7668 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007669}
7670
Peter Zijlstra (Intel)86038c52014-12-16 12:47:34 +01007671DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
7672
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01007673int perf_swevent_get_recursion_context(void)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007674{
Christoph Lameter4a32fea2014-08-17 12:30:27 -05007675 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01007676
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007677 return get_recursion_context(swhash->recursion);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007678}
Ingo Molnar645e8cc2009-11-22 12:20:19 +01007679EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007680
Alexei Starovoitov98b5c2c2016-04-06 18:43:25 -07007681void perf_swevent_put_recursion_context(int rctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007682{
Christoph Lameter4a32fea2014-08-17 12:30:27 -05007683 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02007684
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007685 put_recursion_context(swhash->recursion, rctx);
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01007686}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007687
Peter Zijlstra (Intel)86038c52014-12-16 12:47:34 +01007688void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007689{
Ingo Molnara4234bf2009-11-23 10:57:59 +01007690 struct perf_sample_data data;
Peter Zijlstra (Intel)86038c52014-12-16 12:47:34 +01007691
7692 if (WARN_ON_ONCE(!regs))
7693 return;
7694
7695 perf_sample_data_init(&data, addr, 0);
7696 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
7697}
7698
7699void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
7700{
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01007701 int rctx;
7702
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007703 preempt_disable_notrace();
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01007704 rctx = perf_swevent_get_recursion_context();
Peter Zijlstra (Intel)86038c52014-12-16 12:47:34 +01007705 if (unlikely(rctx < 0))
7706 goto fail;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007707
Peter Zijlstra (Intel)86038c52014-12-16 12:47:34 +01007708 ___perf_sw_event(event_id, nr, regs, addr);
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01007709
7710 perf_swevent_put_recursion_context(rctx);
Peter Zijlstra (Intel)86038c52014-12-16 12:47:34 +01007711fail:
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007712 preempt_enable_notrace();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007713}
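
/*
 * Typical call site (illustrative; the real ones live in the arch fault
 * handlers and the scheduler): the perf_sw_event() wrapper in
 * <linux/perf_event.h> checks the per-event-id static key before falling
 * through to __perf_sw_event() above, so the fast path is a single
 * patched-out branch when nobody listens.
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */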
7714
7715static void perf_swevent_read(struct perf_event *event)
7716{
7717}
7718
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007719static int perf_swevent_add(struct perf_event *event, int flags)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007720{
Christoph Lameter4a32fea2014-08-17 12:30:27 -05007721 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007722 struct hw_perf_event *hwc = &event->hw;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007723 struct hlist_head *head;
7724
Franck Bui-Huu6c7e5502010-11-23 16:21:43 +01007725 if (is_sampling_event(event)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007726 hwc->last_period = hwc->sample_period;
7727 perf_swevent_set_period(event);
7728 }
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007729
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007730 hwc->state = !(flags & PERF_EF_START);
7731
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007732 head = find_swevent_head(swhash, event);
Peter Zijlstra12ca6ad2015-12-15 13:49:05 +01007733 if (WARN_ON_ONCE(!head))
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007734 return -EINVAL;
7735
7736 hlist_add_head_rcu(&event->hlist_entry, head);
Shaohua Li6a694a62015-02-05 15:55:32 -08007737 perf_event_update_userpage(event);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007738
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007739 return 0;
7740}
7741
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007742static void perf_swevent_del(struct perf_event *event, int flags)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007743{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007744 hlist_del_rcu(&event->hlist_entry);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007745}
7746
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007747static void perf_swevent_start(struct perf_event *event, int flags)
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02007748{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007749 event->hw.state = 0;
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02007750}
7751
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007752static void perf_swevent_stop(struct perf_event *event, int flags)
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02007753{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007754 event->hw.state = PERF_HES_STOPPED;
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02007755}
7756
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007757/* Deref the hlist from the update side */
7758static inline struct swevent_hlist *
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007759swevent_hlist_deref(struct swevent_htable *swhash)
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007760{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007761 return rcu_dereference_protected(swhash->swevent_hlist,
7762 lockdep_is_held(&swhash->hlist_mutex));
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007763}
7764
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007765static void swevent_hlist_release(struct swevent_htable *swhash)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007766{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007767 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007768
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007769 if (!hlist)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007770 return;
7771
Andreea-Cristina Bernat70691d42014-08-22 16:26:05 +03007772 RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
Lai Jiangshanfa4bbc4c2011-03-18 12:08:29 +08007773 kfree_rcu(hlist, rcu_head);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007774}
7775
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007776static void swevent_hlist_put_cpu(int cpu)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007777{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007778 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007779
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007780 mutex_lock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007781
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007782 if (!--swhash->hlist_refcount)
7783 swevent_hlist_release(swhash);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007784
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007785 mutex_unlock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007786}
7787
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007788static void swevent_hlist_put(void)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007789{
7790 int cpu;
7791
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007792 for_each_possible_cpu(cpu)
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007793 swevent_hlist_put_cpu(cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007794}
7795
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007796static int swevent_hlist_get_cpu(int cpu)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007797{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007798 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007799 int err = 0;
7800
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007801 mutex_lock(&swhash->hlist_mutex);
Thomas Gleixnera63fbed2017-05-24 10:15:34 +02007802 if (!swevent_hlist_deref(swhash) &&
7803 cpumask_test_cpu(cpu, perf_online_mask)) {
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007804 struct swevent_hlist *hlist;
7805
7806 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
7807 if (!hlist) {
7808 err = -ENOMEM;
7809 goto exit;
7810 }
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007811 rcu_assign_pointer(swhash->swevent_hlist, hlist);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007812 }
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007813 swhash->hlist_refcount++;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02007814exit:
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007815 mutex_unlock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007816
7817 return err;
7818}
7819
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007820static int swevent_hlist_get(void)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007821{
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007822 int err, cpu, failed_cpu;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007823
Thomas Gleixnera63fbed2017-05-24 10:15:34 +02007824 mutex_lock(&pmus_lock);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007825 for_each_possible_cpu(cpu) {
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007826 err = swevent_hlist_get_cpu(cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007827 if (err) {
7828 failed_cpu = cpu;
7829 goto fail;
7830 }
7831 }
Thomas Gleixnera63fbed2017-05-24 10:15:34 +02007832 mutex_unlock(&pmus_lock);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007833 return 0;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02007834fail:
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007835 for_each_possible_cpu(cpu) {
7836 if (cpu == failed_cpu)
7837 break;
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007838 swevent_hlist_put_cpu(cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007839 }
Thomas Gleixnera63fbed2017-05-24 10:15:34 +02007840 mutex_unlock(&pmus_lock);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007841 return err;
7842}
7843
Ingo Molnarc5905af2012-02-24 08:31:31 +01007844struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
Frederic Weisbecker95476b62010-04-14 23:42:18 +02007845
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007846static void sw_perf_event_destroy(struct perf_event *event)
7847{
7848 u64 event_id = event->attr.config;
7849
7850 WARN_ON(event->parent);
7851
Ingo Molnarc5905af2012-02-24 08:31:31 +01007852 static_key_slow_dec(&perf_swevent_enabled[event_id]);
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007853 swevent_hlist_put();
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007854}
7855
7856static int perf_swevent_init(struct perf_event *event)
7857{
Tommi Rantala8176cce2013-04-13 22:49:14 +03007858 u64 event_id = event->attr.config;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007859
7860 if (event->attr.type != PERF_TYPE_SOFTWARE)
7861 return -ENOENT;
7862
Stephane Eranian2481c5f2012-02-09 23:20:59 +01007863 /*
7864 * no branch sampling for software events
7865 */
7866 if (has_branch_stack(event))
7867 return -EOPNOTSUPP;
7868
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007869 switch (event_id) {
7870 case PERF_COUNT_SW_CPU_CLOCK:
7871 case PERF_COUNT_SW_TASK_CLOCK:
7872 return -ENOENT;
7873
7874 default:
7875 break;
7876 }
7877
Dan Carpenterce677832010-10-24 21:50:42 +02007878 if (event_id >= PERF_COUNT_SW_MAX)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007879 return -ENOENT;
7880
7881 if (!event->parent) {
7882 int err;
7883
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007884 err = swevent_hlist_get();
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007885 if (err)
7886 return err;
7887
Ingo Molnarc5905af2012-02-24 08:31:31 +01007888 static_key_slow_inc(&perf_swevent_enabled[event_id]);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007889 event->destroy = sw_perf_event_destroy;
7890 }
7891
7892 return 0;
7893}
7894
7895static struct pmu perf_swevent = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02007896 .task_ctx_nr = perf_sw_context,
7897
Peter Zijlstra34f43922015-02-20 14:05:38 +01007898 .capabilities = PERF_PMU_CAP_NO_NMI,
7899
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007900 .event_init = perf_swevent_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007901 .add = perf_swevent_add,
7902 .del = perf_swevent_del,
7903 .start = perf_swevent_start,
7904 .stop = perf_swevent_stop,
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007905 .read = perf_swevent_read,
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007906};
Frederic Weisbecker95476b62010-04-14 23:42:18 +02007907
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007908#ifdef CONFIG_EVENT_TRACING
7909
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007910static int perf_tp_filter_match(struct perf_event *event,
Frederic Weisbecker95476b62010-04-14 23:42:18 +02007911 struct perf_sample_data *data)
7912{
Daniel Borkmann7e3f9772016-07-14 18:08:03 +02007913 void *record = data->raw->frag.data;
Frederic Weisbecker95476b62010-04-14 23:42:18 +02007914
Peter Zijlstrab71b4372015-11-02 10:50:51 +01007915 /* only top level events have filters set */
7916 if (event->parent)
7917 event = event->parent;
7918
Frederic Weisbecker95476b62010-04-14 23:42:18 +02007919 if (likely(!event->filter) || filter_match_preds(event->filter, record))
7920 return 1;
7921 return 0;
7922}
7923
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007924static int perf_tp_event_match(struct perf_event *event,
7925 struct perf_sample_data *data,
7926 struct pt_regs *regs)
7927{
Frederic Weisbeckera0f7d0f2011-03-07 21:27:09 +01007928 if (event->hw.state & PERF_HES_STOPPED)
7929 return 0;
Peter Zijlstra580d6072010-05-20 20:54:31 +02007930 /*
7931 * All tracepoints are from kernel-space.
7932 */
7933 if (event->attr.exclude_kernel)
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007934 return 0;
7935
7936 if (!perf_tp_filter_match(event, data))
7937 return 0;
7938
7939 return 1;
7940}
7941
Alexei Starovoitov85b67bc2016-04-18 20:11:50 -07007942void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
7943 struct trace_event_call *call, u64 count,
7944 struct pt_regs *regs, struct hlist_head *head,
7945 struct task_struct *task)
7946{
7947 struct bpf_prog *prog = call->prog;
7948
7949 if (prog) {
7950 *(struct pt_regs **)raw_data = regs;
7951 if (!trace_call_bpf(prog, raw_data) || hlist_empty(head)) {
7952 perf_swevent_put_recursion_context(rctx);
7953 return;
7954 }
7955 }
7956 perf_tp_event(call->event.type, count, raw_data, size, regs, head,
Zhou Chengming75e83872017-08-25 21:49:37 +08007957 rctx, task, NULL);
Alexei Starovoitov85b67bc2016-04-18 20:11:50 -07007958}
7959EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
7960
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07007961void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
Andrew Vagine6dab5f2012-07-11 18:14:58 +04007962 struct pt_regs *regs, struct hlist_head *head, int rctx,
Zhou Chengming75e83872017-08-25 21:49:37 +08007963 struct task_struct *task, struct perf_event *event)
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007964{
7965 struct perf_sample_data data;
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007966
7967 struct perf_raw_record raw = {
Daniel Borkmann7e3f9772016-07-14 18:08:03 +02007968 .frag = {
7969 .size = entry_size,
7970 .data = record,
7971 },
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007972 };
7973
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07007974 perf_sample_data_init(&data, 0, 0);
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007975 data.raw = &raw;
7976
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07007977 perf_trace_buf_update(record, event_type);
7978
Zhou Chengming75e83872017-08-25 21:49:37 +08007979 /* Use the given event instead of the hlist */
7980 if (event) {
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007981 if (perf_tp_event_match(event, &data, regs))
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007982 perf_swevent_event(event, count, &data, regs);
Zhou Chengming75e83872017-08-25 21:49:37 +08007983 } else {
7984 hlist_for_each_entry_rcu(event, head, hlist_entry) {
7985 if (perf_tp_event_match(event, &data, regs))
7986 perf_swevent_event(event, count, &data, regs);
7987 }
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007988 }
Peter Zijlstraecc55f82010-05-21 15:11:34 +02007989
Andrew Vagine6dab5f2012-07-11 18:14:58 +04007990 /*
7991 * If we got specified a target task, also iterate its context and
7992 * deliver this event there too.
7993 */
7994 if (task && task != current) {
7995 struct perf_event_context *ctx;
7996 struct trace_entry *entry = record;
7997
7998 rcu_read_lock();
7999 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
8000 if (!ctx)
8001 goto unlock;
8002
8003 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
8004 if (event->attr.type != PERF_TYPE_TRACEPOINT)
8005 continue;
8006 if (event->attr.config != entry->type)
8007 continue;
8008 if (perf_tp_event_match(event, &data, regs))
8009 perf_swevent_event(event, count, &data, regs);
8010 }
8011unlock:
8012 rcu_read_unlock();
8013 }
8014
Peter Zijlstraecc55f82010-05-21 15:11:34 +02008015 perf_swevent_put_recursion_context(rctx);
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02008016}
8017EXPORT_SYMBOL_GPL(perf_tp_event);
8018
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008019static void tp_perf_event_destroy(struct perf_event *event)
8020{
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02008021 perf_trace_destroy(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008022}
8023
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008024static int perf_tp_event_init(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008025{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02008026 int err;
8027
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008028 if (event->attr.type != PERF_TYPE_TRACEPOINT)
8029 return -ENOENT;
8030
Stephane Eranian2481c5f2012-02-09 23:20:59 +01008031 /*
8032 * no branch sampling for tracepoint events
8033 */
8034 if (has_branch_stack(event))
8035 return -EOPNOTSUPP;
8036
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02008037 err = perf_trace_init(event);
8038 if (err)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008039 return err;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008040
8041 event->destroy = tp_perf_event_destroy;
8042
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008043 return 0;
8044}
8045
8046static struct pmu perf_tracepoint = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02008047 .task_ctx_nr = perf_sw_context,
8048
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008049 .event_init = perf_tp_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008050 .add = perf_trace_add,
8051 .del = perf_trace_del,
8052 .start = perf_swevent_start,
8053 .stop = perf_swevent_stop,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008054 .read = perf_swevent_read,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008055};
8056
8057static inline void perf_tp_register(void)
8058{
Peter Zijlstra2e80a822010-11-17 23:17:36 +01008059 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008060}
Li Zefan6fb29152009-10-15 11:21:42 +08008061
Li Zefan6fb29152009-10-15 11:21:42 +08008062static void perf_event_free_filter(struct perf_event *event)
8063{
8064 ftrace_profile_free_filter(event);
8065}
8066
Alexei Starovoitovaa6a5f32016-09-01 18:37:24 -07008067#ifdef CONFIG_BPF_SYSCALL
8068static void bpf_overflow_handler(struct perf_event *event,
8069 struct perf_sample_data *data,
8070 struct pt_regs *regs)
8071{
8072 struct bpf_perf_event_data_kern ctx = {
8073 .data = data,
8074 .regs = regs,
8075 };
8076 int ret = 0;
8077
8078 preempt_disable();
8079 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
8080 goto out;
8081 rcu_read_lock();
Daniel Borkmann88575192016-11-26 01:28:04 +01008082 ret = BPF_PROG_RUN(event->prog, &ctx);
Alexei Starovoitovaa6a5f32016-09-01 18:37:24 -07008083 rcu_read_unlock();
8084out:
8085 __this_cpu_dec(bpf_prog_active);
8086 preempt_enable();
8087 if (!ret)
8088 return;
8089
8090 event->orig_overflow_handler(event, data, regs);
8091}
8092
8093static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
8094{
8095 struct bpf_prog *prog;
8096
8097 if (event->overflow_handler_context)
8098 /* hw breakpoint or kernel counter */
8099 return -EINVAL;
8100
8101 if (event->prog)
8102 return -EEXIST;
8103
8104 prog = bpf_prog_get_type(prog_fd, BPF_PROG_TYPE_PERF_EVENT);
8105 if (IS_ERR(prog))
8106 return PTR_ERR(prog);
8107
8108 event->prog = prog;
8109 event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
8110 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
8111 return 0;
8112}
8113
8114static void perf_event_free_bpf_handler(struct perf_event *event)
8115{
8116 struct bpf_prog *prog = event->prog;
8117
8118 if (!prog)
8119 return;
8120
8121 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
8122 event->prog = NULL;
8123 bpf_prog_put(prog);
8124}
8125#else
8126static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
8127{
8128 return -EOPNOTSUPP;
8129}
8130static void perf_event_free_bpf_handler(struct perf_event *event)
8131{
8132}
8133#endif
8134
Alexei Starovoitov25415172015-03-25 12:49:20 -07008135static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
8136{
Yonghong Songcf5f5ce2017-08-04 16:00:09 -07008137 bool is_kprobe, is_tracepoint, is_syscall_tp;
Alexei Starovoitov25415172015-03-25 12:49:20 -07008138 struct bpf_prog *prog;
8139
8140 if (event->attr.type != PERF_TYPE_TRACEPOINT)
Alexei Starovoitovf91840a2017-06-02 21:03:52 -07008141 return perf_event_set_bpf_handler(event, prog_fd);
Alexei Starovoitov25415172015-03-25 12:49:20 -07008142
8143 if (event->tp_event->prog)
8144 return -EEXIST;
8145
Alexei Starovoitov98b5c2c2016-04-06 18:43:25 -07008146 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
8147 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
Yonghong Songcf5f5ce2017-08-04 16:00:09 -07008148 is_syscall_tp = is_syscall_trace_event(event->tp_event);
8149 if (!is_kprobe && !is_tracepoint && !is_syscall_tp)
Alexei Starovoitov98b5c2c2016-04-06 18:43:25 -07008150		/* bpf programs can only be attached to u/kprobes, tracepoints or syscall tracepoints */
Alexei Starovoitov25415172015-03-25 12:49:20 -07008151 return -EINVAL;
8152
8153 prog = bpf_prog_get(prog_fd);
8154 if (IS_ERR(prog))
8155 return PTR_ERR(prog);
8156
Alexei Starovoitov98b5c2c2016-04-06 18:43:25 -07008157 if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) ||
Yonghong Songcf5f5ce2017-08-04 16:00:09 -07008158 (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT) ||
8159 (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT)) {
Alexei Starovoitov25415172015-03-25 12:49:20 -07008160 /* valid fd, but invalid bpf program type */
8161 bpf_prog_put(prog);
8162 return -EINVAL;
8163 }
8164
Yonghong Songcf5f5ce2017-08-04 16:00:09 -07008165 if (is_tracepoint || is_syscall_tp) {
Alexei Starovoitov32bbe002016-04-06 18:43:28 -07008166 int off = trace_event_get_offsets(event->tp_event);
8167
8168 if (prog->aux->max_ctx_offset > off) {
8169 bpf_prog_put(prog);
8170 return -EACCES;
8171 }
8172 }
Alexei Starovoitov25415172015-03-25 12:49:20 -07008173 event->tp_event->prog = prog;
Yonghong Songec9dd352017-09-18 16:38:36 -07008174 event->tp_event->bpf_prog_owner = event;
Alexei Starovoitov25415172015-03-25 12:49:20 -07008175
8176 return 0;
8177}
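The attach path above is reached from the PERF_EVENT_IOC_SET_BPF ioctl on a perf event file descriptor. A hedged userspace sketch (error handling trimmed; perf_fd is assumed to be a kprobe or tracepoint event opened with perf_event_open(), prog_fd a program loaded via bpf(BPF_PROG_LOAD, ...)):

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Illustrative: attach an already-loaded BPF program to a perf event. */
static int attach_bpf_prog(int perf_fd, int prog_fd)
{
	if (ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, prog_fd))
		return -1;	/* ends up in perf_event_set_bpf_prog() above */

	return ioctl(perf_fd, PERF_EVENT_IOC_ENABLE, 0);
}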
8178
8179static void perf_event_free_bpf_prog(struct perf_event *event)
8180{
8181 struct bpf_prog *prog;
8182
Alexei Starovoitovaa6a5f32016-09-01 18:37:24 -07008183 perf_event_free_bpf_handler(event);
8184
Alexei Starovoitov25415172015-03-25 12:49:20 -07008185 if (!event->tp_event)
8186 return;
8187
8188 prog = event->tp_event->prog;
Yonghong Songec9dd352017-09-18 16:38:36 -07008189 if (prog && event->tp_event->bpf_prog_owner == event) {
Alexei Starovoitov25415172015-03-25 12:49:20 -07008190 event->tp_event->prog = NULL;
Daniel Borkmann1aacde32016-06-30 17:24:43 +02008191 bpf_prog_put(prog);
Alexei Starovoitov25415172015-03-25 12:49:20 -07008192 }
8193}
8194
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008195#else
Li Zefan6fb29152009-10-15 11:21:42 +08008196
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008197static inline void perf_tp_register(void)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008198{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008199}
Li Zefan6fb29152009-10-15 11:21:42 +08008200
Li Zefan6fb29152009-10-15 11:21:42 +08008201static void perf_event_free_filter(struct perf_event *event)
8202{
8203}
8204
Alexei Starovoitov25415172015-03-25 12:49:20 -07008205static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
8206{
8207 return -ENOENT;
8208}
8209
8210static void perf_event_free_bpf_prog(struct perf_event *event)
8211{
8212}
Li Zefan07b139c2009-12-21 14:27:35 +08008213#endif /* CONFIG_EVENT_TRACING */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008214
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02008215#ifdef CONFIG_HAVE_HW_BREAKPOINT
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01008216void perf_bp_event(struct perf_event *bp, void *data)
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02008217{
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01008218 struct perf_sample_data sample;
8219 struct pt_regs *regs = data;
8220
Robert Richterfd0d0002012-04-02 20:19:08 +02008221 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01008222
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008223 if (!bp->hw.state && !perf_exclude_event(bp, regs))
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02008224 perf_swevent_event(bp, 1, &sample, regs);
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02008225}
8226#endif
8227
Alexander Shishkin375637b2016-04-27 18:44:46 +03008228/*
8229 * Allocate a new address filter
8230 */
8231static struct perf_addr_filter *
8232perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
8233{
8234 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
8235 struct perf_addr_filter *filter;
8236
8237 filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node);
8238 if (!filter)
8239 return NULL;
8240
8241 INIT_LIST_HEAD(&filter->entry);
8242 list_add_tail(&filter->entry, filters);
8243
8244 return filter;
8245}
8246
8247static void free_filters_list(struct list_head *filters)
8248{
8249 struct perf_addr_filter *filter, *iter;
8250
8251 list_for_each_entry_safe(filter, iter, filters, entry) {
8252 if (filter->inode)
8253 iput(filter->inode);
8254 list_del(&filter->entry);
8255 kfree(filter);
8256 }
8257}
8258
8259/*
8260 * Free existing address filters and optionally install new ones
8261 */
8262static void perf_addr_filters_splice(struct perf_event *event,
8263 struct list_head *head)
8264{
8265 unsigned long flags;
8266 LIST_HEAD(list);
8267
8268 if (!has_addr_filter(event))
8269 return;
8270
8271 /* don't bother with children, they don't have their own filters */
8272 if (event->parent)
8273 return;
8274
8275 raw_spin_lock_irqsave(&event->addr_filters.lock, flags);
8276
8277 list_splice_init(&event->addr_filters.list, &list);
8278 if (head)
8279 list_splice(head, &event->addr_filters.list);
8280
8281 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags);
8282
8283 free_filters_list(&list);
8284}
8285
8286/*
8287 * Scan through mm's vmas and see if one of them matches the
8288 * @filter; if so, adjust filter's address range.
8289 * Called with mm::mmap_sem down for reading.
8290 */
8291static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter,
8292 struct mm_struct *mm)
8293{
8294 struct vm_area_struct *vma;
8295
8296 for (vma = mm->mmap; vma; vma = vma->vm_next) {
8297 struct file *file = vma->vm_file;
8298 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
8299 unsigned long vma_size = vma->vm_end - vma->vm_start;
8300
8301 if (!file)
8302 continue;
8303
8304 if (!perf_addr_filter_match(filter, file, off, vma_size))
8305 continue;
8306
8307 return vma->vm_start;
8308 }
8309
8310 return 0;
8311}
8312
8313/*
8314 * Update event's address range filters based on the
8315 * task's existing mappings, if any.
8316 */
8317static void perf_event_addr_filters_apply(struct perf_event *event)
8318{
8319 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
8320 struct task_struct *task = READ_ONCE(event->ctx->task);
8321 struct perf_addr_filter *filter;
8322 struct mm_struct *mm = NULL;
8323 unsigned int count = 0;
8324 unsigned long flags;
8325
8326 /*
8327 * We may observe TASK_TOMBSTONE, which means that the event tear-down
8328 * will stop on the parent's child_mutex that our caller is also holding
8329 */
8330 if (task == TASK_TOMBSTONE)
8331 return;
8332
Alexander Shishkin6ce77bf2017-01-26 11:40:57 +02008333 if (!ifh->nr_file_filters)
8334 return;
8335
Alexander Shishkin375637b2016-04-27 18:44:46 +03008336 mm = get_task_mm(event->ctx->task);
8337 if (!mm)
8338 goto restart;
8339
8340 down_read(&mm->mmap_sem);
8341
8342 raw_spin_lock_irqsave(&ifh->lock, flags);
8343 list_for_each_entry(filter, &ifh->list, entry) {
8344 event->addr_filters_offs[count] = 0;
8345
Mathieu Poirier99f5bc92016-07-18 10:43:07 -06008346 /*
8347 * Adjust base offset if the filter is associated to a binary
8348 * that needs to be mapped:
8349 */
8350 if (filter->inode)
Alexander Shishkin375637b2016-04-27 18:44:46 +03008351 event->addr_filters_offs[count] =
8352 perf_addr_filter_apply(filter, mm);
8353
8354 count++;
8355 }
8356
8357 event->addr_filters_gen++;
8358 raw_spin_unlock_irqrestore(&ifh->lock, flags);
8359
8360 up_read(&mm->mmap_sem);
8361
8362 mmput(mm);
8363
8364restart:
Alexander Shishkin767ae082016-09-06 16:23:49 +03008365 perf_event_stop(event, 1);
Alexander Shishkin375637b2016-04-27 18:44:46 +03008366}
8367
8368/*
8369 * Address range filtering: limiting the data to certain
8370 * instruction address ranges. Filters are ioctl()ed to us from
8371 * userspace as ascii strings.
8372 *
8373 * Filter string format:
8374 *
8375 * ACTION RANGE_SPEC
8376 * where ACTION is one of the
8377 * * "filter": limit the trace to this region
8378 * * "start": start tracing from this address
8379 * * "stop": stop tracing at this address/region;
8380 * RANGE_SPEC is
8381 * * for kernel addresses: <start address>[/<size>]
8382 * * for object files: <start address>[/<size>]@</path/to/object/file>
8383 *
8384 * if <size> is not specified, the range is treated as a single address.
8385 */
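For illustration (the addresses and path below are made up, not from this file), strings matching the grammar above look like:

	filter 0x42000/0x1000@/usr/bin/foo	- trace only this 4KiB range of foo
	start 0xffffffff81000000		- start tracing at this kernel address
	stop 0xffffffff81000000/0x4000		- stop tracing in this kernel range

Hex is accepted because the offsets and sizes are parsed with kstrtoul(..., 0, ...) below.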
8386enum {
Alexander Shishkine96271f2016-11-18 13:38:43 +02008387 IF_ACT_NONE = -1,
Alexander Shishkin375637b2016-04-27 18:44:46 +03008388 IF_ACT_FILTER,
8389 IF_ACT_START,
8390 IF_ACT_STOP,
8391 IF_SRC_FILE,
8392 IF_SRC_KERNEL,
8393 IF_SRC_FILEADDR,
8394 IF_SRC_KERNELADDR,
8395};
8396
8397enum {
8398 IF_STATE_ACTION = 0,
8399 IF_STATE_SOURCE,
8400 IF_STATE_END,
8401};
8402
8403static const match_table_t if_tokens = {
8404 { IF_ACT_FILTER, "filter" },
8405 { IF_ACT_START, "start" },
8406 { IF_ACT_STOP, "stop" },
8407 { IF_SRC_FILE, "%u/%u@%s" },
8408 { IF_SRC_KERNEL, "%u/%u" },
8409 { IF_SRC_FILEADDR, "%u@%s" },
8410 { IF_SRC_KERNELADDR, "%u" },
Alexander Shishkine96271f2016-11-18 13:38:43 +02008411 { IF_ACT_NONE, NULL },
Alexander Shishkin375637b2016-04-27 18:44:46 +03008412};
8413
8414/*
8415 * Address filter string parser
8416 */
8417static int
8418perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
8419 struct list_head *filters)
8420{
8421 struct perf_addr_filter *filter = NULL;
8422 char *start, *orig, *filename = NULL;
8423 struct path path;
8424 substring_t args[MAX_OPT_ARGS];
8425 int state = IF_STATE_ACTION, token;
8426 unsigned int kernel = 0;
8427 int ret = -EINVAL;
8428
8429 orig = fstr = kstrdup(fstr, GFP_KERNEL);
8430 if (!fstr)
8431 return -ENOMEM;
8432
8433 while ((start = strsep(&fstr, " ,\n")) != NULL) {
8434 ret = -EINVAL;
8435
8436 if (!*start)
8437 continue;
8438
8439 /* filter definition begins */
8440 if (state == IF_STATE_ACTION) {
8441 filter = perf_addr_filter_new(event, filters);
8442 if (!filter)
8443 goto fail;
8444 }
8445
8446 token = match_token(start, if_tokens, args);
8447 switch (token) {
8448 case IF_ACT_FILTER:
8449 case IF_ACT_START:
8450 filter->filter = 1;
8451			/* fall through */
8452 case IF_ACT_STOP:
8453 if (state != IF_STATE_ACTION)
8454 goto fail;
8455
8456 state = IF_STATE_SOURCE;
8457 break;
8458
8459 case IF_SRC_KERNELADDR:
8460 case IF_SRC_KERNEL:
8461 kernel = 1;
8462			/* fall through */
8463 case IF_SRC_FILEADDR:
8464 case IF_SRC_FILE:
8465 if (state != IF_STATE_SOURCE)
8466 goto fail;
8467
8468 if (token == IF_SRC_FILE || token == IF_SRC_KERNEL)
8469 filter->range = 1;
8470
8471 *args[0].to = 0;
8472 ret = kstrtoul(args[0].from, 0, &filter->offset);
8473 if (ret)
8474 goto fail;
8475
8476 if (filter->range) {
8477 *args[1].to = 0;
8478 ret = kstrtoul(args[1].from, 0, &filter->size);
8479 if (ret)
8480 goto fail;
8481 }
8482
Mathieu Poirier4059ffd2016-07-18 10:43:05 -06008483 if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
8484 int fpos = filter->range ? 2 : 1;
8485
8486 filename = match_strdup(&args[fpos]);
Alexander Shishkin375637b2016-04-27 18:44:46 +03008487 if (!filename) {
8488 ret = -ENOMEM;
8489 goto fail;
8490 }
8491 }
8492
8493 state = IF_STATE_END;
8494 break;
8495
8496 default:
8497 goto fail;
8498 }
8499
8500 /*
8501 * Filter definition is fully parsed, validate and install it.
8502 * Make sure that it doesn't contradict itself or the event's
8503 * attribute.
8504 */
8505 if (state == IF_STATE_END) {
Alexander Shishkin9ccbfbb2017-01-26 11:40:56 +02008506 ret = -EINVAL;
Alexander Shishkin375637b2016-04-27 18:44:46 +03008507 if (kernel && event->attr.exclude_kernel)
8508 goto fail;
8509
8510 if (!kernel) {
8511 if (!filename)
8512 goto fail;
8513
Alexander Shishkin6ce77bf2017-01-26 11:40:57 +02008514 /*
8515 * For now, we only support file-based filters
8516 * in per-task events; doing so for CPU-wide
8517 * events requires additional context switching
8518				 * trickery, since the same object code will be
8519 * mapped at different virtual addresses in
8520 * different processes.
8521 */
8522 ret = -EOPNOTSUPP;
8523 if (!event->ctx->task)
8524 goto fail_free_name;
8525
Alexander Shishkin375637b2016-04-27 18:44:46 +03008526 /* look up the path and grab its inode */
8527 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
8528 if (ret)
8529 goto fail_free_name;
8530
8531 filter->inode = igrab(d_inode(path.dentry));
8532 path_put(&path);
8533 kfree(filename);
8534 filename = NULL;
8535
8536 ret = -EINVAL;
8537 if (!filter->inode ||
8538 !S_ISREG(filter->inode->i_mode))
8539 /* free_filters_list() will iput() */
8540 goto fail;
Alexander Shishkin6ce77bf2017-01-26 11:40:57 +02008541
8542 event->addr_filters.nr_file_filters++;
Alexander Shishkin375637b2016-04-27 18:44:46 +03008543 }
8544
8545 /* ready to consume more filters */
8546 state = IF_STATE_ACTION;
8547 filter = NULL;
8548 }
8549 }
8550
8551 if (state != IF_STATE_ACTION)
8552 goto fail;
8553
8554 kfree(orig);
8555
8556 return 0;
8557
8558fail_free_name:
8559 kfree(filename);
8560fail:
8561 free_filters_list(filters);
8562 kfree(orig);
8563
8564 return ret;
8565}
8566
8567static int
8568perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
8569{
8570 LIST_HEAD(filters);
8571 int ret;
8572
8573 /*
8574	 * Since this is called in the perf_ioctl() path, we're already holding
8575 * ctx::mutex.
8576 */
8577 lockdep_assert_held(&event->ctx->mutex);
8578
8579 if (WARN_ON_ONCE(event->parent))
8580 return -EINVAL;
8581
Alexander Shishkin375637b2016-04-27 18:44:46 +03008582 ret = perf_event_parse_addr_filter(event, filter_str, &filters);
8583 if (ret)
Alexander Shishkin6ce77bf2017-01-26 11:40:57 +02008584 goto fail_clear_files;
Alexander Shishkin375637b2016-04-27 18:44:46 +03008585
8586 ret = event->pmu->addr_filters_validate(&filters);
Alexander Shishkin6ce77bf2017-01-26 11:40:57 +02008587 if (ret)
8588 goto fail_free_filters;
Alexander Shishkin375637b2016-04-27 18:44:46 +03008589
8590 /* remove existing filters, if any */
8591 perf_addr_filters_splice(event, &filters);
8592
8593 /* install new filters */
8594 perf_event_for_each_child(event, perf_event_addr_filters_apply);
8595
8596 return ret;
Alexander Shishkin6ce77bf2017-01-26 11:40:57 +02008597
8598fail_free_filters:
8599 free_filters_list(&filters);
8600
8601fail_clear_files:
8602 event->addr_filters.nr_file_filters = 0;
8603
8604 return ret;
Alexander Shishkin375637b2016-04-27 18:44:46 +03008605}
8606
Alexander Shishkinc796bbb2016-04-27 18:44:42 +03008607static int perf_event_set_filter(struct perf_event *event, void __user *arg)
8608{
8609 char *filter_str;
8610 int ret = -EINVAL;
8611
Alexander Shishkin375637b2016-04-27 18:44:46 +03008612 if ((event->attr.type != PERF_TYPE_TRACEPOINT ||
8613 !IS_ENABLED(CONFIG_EVENT_TRACING)) &&
8614 !has_addr_filter(event))
Alexander Shishkinc796bbb2016-04-27 18:44:42 +03008615 return -EINVAL;
8616
8617 filter_str = strndup_user(arg, PAGE_SIZE);
8618 if (IS_ERR(filter_str))
8619 return PTR_ERR(filter_str);
8620
8621 if (IS_ENABLED(CONFIG_EVENT_TRACING) &&
8622 event->attr.type == PERF_TYPE_TRACEPOINT)
8623 ret = ftrace_profile_set_filter(event, event->attr.config,
8624 filter_str);
Alexander Shishkin375637b2016-04-27 18:44:46 +03008625 else if (has_addr_filter(event))
8626 ret = perf_event_set_addr_filter(event, filter_str);
Alexander Shishkinc796bbb2016-04-27 18:44:42 +03008627
8628 kfree(filter_str);
8629 return ret;
8630}
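Both branches above are reached through the PERF_EVENT_IOC_SET_FILTER ioctl. A sketch of the userspace side, assuming fd is an already-open event whose PMU supports address filters (for example an AUX-area PMU); the filter string itself is illustrative:

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Illustrative: install one file-based address range filter on an event. */
static int set_addr_filter(int fd)
{
	return ioctl(fd, PERF_EVENT_IOC_SET_FILTER,
		     "filter 0x42000/0x1000@/usr/bin/foo");
}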
8631
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008632/*
8633 * hrtimer based swevent callback
8634 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008635
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008636static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008637{
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008638 enum hrtimer_restart ret = HRTIMER_RESTART;
8639 struct perf_sample_data data;
8640 struct pt_regs *regs;
8641 struct perf_event *event;
8642 u64 period;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008643
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008644 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
Peter Zijlstraba3dd362011-02-15 12:41:46 +01008645
8646 if (event->state != PERF_EVENT_STATE_ACTIVE)
8647 return HRTIMER_NORESTART;
8648
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008649 event->pmu->read(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008650
Robert Richterfd0d0002012-04-02 20:19:08 +02008651 perf_sample_data_init(&data, 0, event->hw.last_period);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008652 regs = get_irq_regs();
8653
8654 if (regs && !perf_exclude_event(event, regs)) {
Paul E. McKenney77aeeeb2011-11-10 16:02:52 -08008655 if (!(event->attr.exclude_idle && is_idle_task(current)))
Robert Richter33b07b82012-04-05 18:24:43 +02008656 if (__perf_event_overflow(event, 1, &data, regs))
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008657 ret = HRTIMER_NORESTART;
8658 }
8659
8660 period = max_t(u64, 10000, event->hw.sample_period);
8661 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
8662
8663 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008664}
8665
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008666static void perf_swevent_start_hrtimer(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008667{
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008668 struct hw_perf_event *hwc = &event->hw;
Franck Bui-Huu5d508e82010-11-23 16:21:45 +01008669 s64 period;
8670
8671 if (!is_sampling_event(event))
8672 return;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008673
Franck Bui-Huu5d508e82010-11-23 16:21:45 +01008674 period = local64_read(&hwc->period_left);
8675 if (period) {
8676 if (period < 0)
8677 period = 10000;
Peter Zijlstrafa407f32010-06-24 12:35:12 +02008678
Franck Bui-Huu5d508e82010-11-23 16:21:45 +01008679 local64_set(&hwc->period_left, 0);
8680 } else {
8681 period = max_t(u64, 10000, hwc->sample_period);
8682 }
Thomas Gleixner3497d202015-04-14 21:09:03 +00008683 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
8684 HRTIMER_MODE_REL_PINNED);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008685}
8686
8687static void perf_swevent_cancel_hrtimer(struct perf_event *event)
8688{
8689 struct hw_perf_event *hwc = &event->hw;
8690
Franck Bui-Huu6c7e5502010-11-23 16:21:43 +01008691 if (is_sampling_event(event)) {
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008692 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
Peter Zijlstrafa407f32010-06-24 12:35:12 +02008693 local64_set(&hwc->period_left, ktime_to_ns(remaining));
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008694
8695 hrtimer_cancel(&hwc->hrtimer);
8696 }
8697}
8698
Peter Zijlstraba3dd362011-02-15 12:41:46 +01008699static void perf_swevent_init_hrtimer(struct perf_event *event)
8700{
8701 struct hw_perf_event *hwc = &event->hw;
8702
8703 if (!is_sampling_event(event))
8704 return;
8705
8706 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
8707 hwc->hrtimer.function = perf_swevent_hrtimer;
8708
8709 /*
8710 * Since hrtimers have a fixed rate, we can do a static freq->period
8711 * mapping and avoid the whole period adjust feedback stuff.
8712 */
8713 if (event->attr.freq) {
8714 long freq = event->attr.sample_freq;
8715
8716 event->attr.sample_period = NSEC_PER_SEC / freq;
8717 hwc->sample_period = event->attr.sample_period;
8718 local64_set(&hwc->period_left, hwc->sample_period);
Namhyung Kim778141e2013-03-18 11:41:46 +09008719 hwc->last_period = hwc->sample_period;
Peter Zijlstraba3dd362011-02-15 12:41:46 +01008720 event->attr.freq = 0;
8721 }
8722}
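As a worked example of the static mapping above (the numbers are illustrative): attr.sample_freq = 4000 yields sample_period = NSEC_PER_SEC / 4000 = 250000, i.e. the hrtimer fires every 250 us, and because the underlying clock advances at a fixed rate no runtime period adjustment is needed.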
8723
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008724/*
8725 * Software event: cpu wall time clock
8726 */
8727
8728static void cpu_clock_event_update(struct perf_event *event)
8729{
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008730 s64 prev;
8731 u64 now;
8732
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008733 now = local_clock();
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008734 prev = local64_xchg(&event->hw.prev_count, now);
8735 local64_add(now - prev, &event->count);
8736}
8737
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008738static void cpu_clock_event_start(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008739{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008740 local64_set(&event->hw.prev_count, local_clock());
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008741 perf_swevent_start_hrtimer(event);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008742}
8743
8744static void cpu_clock_event_stop(struct perf_event *event, int flags)
8745{
8746 perf_swevent_cancel_hrtimer(event);
8747 cpu_clock_event_update(event);
8748}
8749
8750static int cpu_clock_event_add(struct perf_event *event, int flags)
8751{
8752 if (flags & PERF_EF_START)
8753 cpu_clock_event_start(event, flags);
Shaohua Li6a694a62015-02-05 15:55:32 -08008754 perf_event_update_userpage(event);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008755
8756 return 0;
8757}
8758
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008759static void cpu_clock_event_del(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008760{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008761 cpu_clock_event_stop(event, flags);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008762}
8763
8764static void cpu_clock_event_read(struct perf_event *event)
8765{
8766 cpu_clock_event_update(event);
8767}
8768
8769static int cpu_clock_event_init(struct perf_event *event)
8770{
8771 if (event->attr.type != PERF_TYPE_SOFTWARE)
8772 return -ENOENT;
8773
8774 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
8775 return -ENOENT;
8776
Stephane Eranian2481c5f2012-02-09 23:20:59 +01008777 /*
8778 * no branch sampling for software events
8779 */
8780 if (has_branch_stack(event))
8781 return -EOPNOTSUPP;
8782
Peter Zijlstraba3dd362011-02-15 12:41:46 +01008783 perf_swevent_init_hrtimer(event);
8784
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008785 return 0;
8786}
8787
8788static struct pmu perf_cpu_clock = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02008789 .task_ctx_nr = perf_sw_context,
8790
Peter Zijlstra34f43922015-02-20 14:05:38 +01008791 .capabilities = PERF_PMU_CAP_NO_NMI,
8792
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008793 .event_init = cpu_clock_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008794 .add = cpu_clock_event_add,
8795 .del = cpu_clock_event_del,
8796 .start = cpu_clock_event_start,
8797 .stop = cpu_clock_event_stop,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008798 .read = cpu_clock_event_read,
8799};
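From userspace this PMU backs PERF_COUNT_SW_CPU_CLOCK. A hedged sketch of requesting it in frequency-sampling mode (the perf_event_open() call and error handling are omitted):

#include <string.h>
#include <linux/perf_event.h>

/* Illustrative: a 1 kHz cpu-clock sampling event, driven by the hrtimer path above. */
static void init_cpu_clock_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size        = sizeof(*attr);
	attr->type        = PERF_TYPE_SOFTWARE;
	attr->config      = PERF_COUNT_SW_CPU_CLOCK;
	attr->freq        = 1;		/* interpret sample_freq, not sample_period */
	attr->sample_freq = 1000;
}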
8800
8801/*
8802 * Software event: task time clock
8803 */
8804
8805static void task_clock_event_update(struct perf_event *event, u64 now)
8806{
8807 u64 prev;
8808 s64 delta;
8809
8810 prev = local64_xchg(&event->hw.prev_count, now);
8811 delta = now - prev;
8812 local64_add(delta, &event->count);
8813}
8814
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008815static void task_clock_event_start(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008816{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008817 local64_set(&event->hw.prev_count, event->ctx->time);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008818 perf_swevent_start_hrtimer(event);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008819}
8820
8821static void task_clock_event_stop(struct perf_event *event, int flags)
8822{
8823 perf_swevent_cancel_hrtimer(event);
8824 task_clock_event_update(event, event->ctx->time);
8825}
8826
8827static int task_clock_event_add(struct perf_event *event, int flags)
8828{
8829 if (flags & PERF_EF_START)
8830 task_clock_event_start(event, flags);
Shaohua Li6a694a62015-02-05 15:55:32 -08008831 perf_event_update_userpage(event);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008832
8833 return 0;
8834}
8835
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008836static void task_clock_event_del(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008837{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008838 task_clock_event_stop(event, PERF_EF_UPDATE);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008839}
8840
8841static void task_clock_event_read(struct perf_event *event)
8842{
Peter Zijlstra768a06e2011-02-22 16:52:24 +01008843 u64 now = perf_clock();
8844 u64 delta = now - event->ctx->timestamp;
8845 u64 time = event->ctx->time + delta;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008846
8847 task_clock_event_update(event, time);
8848}
8849
8850static int task_clock_event_init(struct perf_event *event)
8851{
8852 if (event->attr.type != PERF_TYPE_SOFTWARE)
8853 return -ENOENT;
8854
8855 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
8856 return -ENOENT;
8857
Stephane Eranian2481c5f2012-02-09 23:20:59 +01008858 /*
8859 * no branch sampling for software events
8860 */
8861 if (has_branch_stack(event))
8862 return -EOPNOTSUPP;
8863
Peter Zijlstraba3dd362011-02-15 12:41:46 +01008864 perf_swevent_init_hrtimer(event);
8865
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008866 return 0;
8867}
8868
8869static struct pmu perf_task_clock = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02008870 .task_ctx_nr = perf_sw_context,
8871
Peter Zijlstra34f43922015-02-20 14:05:38 +01008872 .capabilities = PERF_PMU_CAP_NO_NMI,
8873
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008874 .event_init = task_clock_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008875 .add = task_clock_event_add,
8876 .del = task_clock_event_del,
8877 .start = task_clock_event_start,
8878 .stop = task_clock_event_stop,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008879 .read = task_clock_event_read,
8880};
8881
Peter Zijlstraad5133b2010-06-15 12:22:39 +02008882static void perf_pmu_nop_void(struct pmu *pmu)
8883{
8884}
8885
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07008886static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
8887{
8888}
8889
Peter Zijlstraad5133b2010-06-15 12:22:39 +02008890static int perf_pmu_nop_int(struct pmu *pmu)
8891{
8892 return 0;
8893}
8894
Geliang Tang18ab2cd2015-09-27 23:25:50 +08008895static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07008896
8897static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
Peter Zijlstraad5133b2010-06-15 12:22:39 +02008898{
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07008899 __this_cpu_write(nop_txn_flags, flags);
8900
8901 if (flags & ~PERF_PMU_TXN_ADD)
8902 return;
8903
Peter Zijlstraad5133b2010-06-15 12:22:39 +02008904 perf_pmu_disable(pmu);
8905}
8906
8907static int perf_pmu_commit_txn(struct pmu *pmu)
8908{
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07008909 unsigned int flags = __this_cpu_read(nop_txn_flags);
8910
8911 __this_cpu_write(nop_txn_flags, 0);
8912
8913 if (flags & ~PERF_PMU_TXN_ADD)
8914 return 0;
8915
Peter Zijlstraad5133b2010-06-15 12:22:39 +02008916 perf_pmu_enable(pmu);
8917 return 0;
8918}
8919
8920static void perf_pmu_cancel_txn(struct pmu *pmu)
8921{
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07008922 unsigned int flags = __this_cpu_read(nop_txn_flags);
8923
8924 __this_cpu_write(nop_txn_flags, 0);
8925
8926 if (flags & ~PERF_PMU_TXN_ADD)
8927 return;
8928
Peter Zijlstraad5133b2010-06-15 12:22:39 +02008929 perf_pmu_enable(pmu);
8930}
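Together these defaults give every PMU a usable transaction interface: for a PMU that only provides pmu_enable()/pmu_disable(), an ADD transaction collapses into a disable/enable pair bracketing the individual ->add() calls. An illustrative sequence, modelled on how group scheduling drives these hooks (not copied from this file):

	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);	/* perf_pmu_start_txn(): perf_pmu_disable() */
	pmu->add(leader, PERF_EF_START);
	pmu->add(sibling, PERF_EF_START);
	pmu->commit_txn(pmu);			/* perf_pmu_commit_txn(): perf_pmu_enable(), returns 0 */

Had any ->add() failed, the caller would unwind with pmu->cancel_txn(), which for these stubs likewise just re-enables the PMU.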
8931
Peter Zijlstra35edc2a2011-11-20 20:36:02 +01008932static int perf_event_idx_default(struct perf_event *event)
8933{
Peter Zijlstrac719f562014-10-21 11:10:21 +02008934 return 0;
Peter Zijlstra35edc2a2011-11-20 20:36:02 +01008935}
8936
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02008937/*
8938 * Ensures all contexts with the same task_ctx_nr have the same
8939 * pmu_cpu_context too.
8940 */
Mark Rutland9e317042014-02-10 17:44:18 +00008941static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02008942{
8943 struct pmu *pmu;
8944
8945 if (ctxn < 0)
8946 return NULL;
8947
8948 list_for_each_entry(pmu, &pmus, entry) {
8949 if (pmu->task_ctx_nr == ctxn)
8950 return pmu->pmu_cpu_context;
8951 }
8952
8953 return NULL;
8954}
8955
Peter Zijlstra51676952010-12-07 14:18:20 +01008956static void free_pmu_context(struct pmu *pmu)
8957{
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02008958 mutex_lock(&pmus_lock);
Peter Zijlstra51676952010-12-07 14:18:20 +01008959 free_percpu(pmu->pmu_cpu_context);
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02008960 mutex_unlock(&pmus_lock);
8961}
Alexander Shishkin6e855cd2016-04-27 18:44:48 +03008962
8963/*
8964 * Let userspace know that this PMU supports address range filtering:
8965 */
8966static ssize_t nr_addr_filters_show(struct device *dev,
8967 struct device_attribute *attr,
8968 char *page)
8969{
8970 struct pmu *pmu = dev_get_drvdata(dev);
8971
8972 return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
8973}
8974DEVICE_ATTR_RO(nr_addr_filters);
8975
Peter Zijlstra2e80a822010-11-17 23:17:36 +01008976static struct idr pmu_idr;
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02008977
Peter Zijlstraabe43402010-11-17 23:17:37 +01008978static ssize_t
8979type_show(struct device *dev, struct device_attribute *attr, char *page)
8980{
8981 struct pmu *pmu = dev_get_drvdata(dev);
8982
8983 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
8984}
Greg Kroah-Hartman90826ca2013-08-23 14:24:40 -07008985static DEVICE_ATTR_RO(type);
Peter Zijlstraabe43402010-11-17 23:17:37 +01008986
Stephane Eranian62b85632013-04-03 14:21:34 +02008987static ssize_t
8988perf_event_mux_interval_ms_show(struct device *dev,
8989 struct device_attribute *attr,
8990 char *page)
8991{
8992 struct pmu *pmu = dev_get_drvdata(dev);
8993
8994 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
8995}
8996
Peter Zijlstra272325c2015-04-15 11:41:58 +02008997static DEFINE_MUTEX(mux_interval_mutex);
8998
Stephane Eranian62b85632013-04-03 14:21:34 +02008999static ssize_t
9000perf_event_mux_interval_ms_store(struct device *dev,
9001 struct device_attribute *attr,
9002 const char *buf, size_t count)
9003{
9004 struct pmu *pmu = dev_get_drvdata(dev);
9005 int timer, cpu, ret;
9006
9007 ret = kstrtoint(buf, 0, &timer);
9008 if (ret)
9009 return ret;
9010
9011 if (timer < 1)
9012 return -EINVAL;
9013
9014	/* same value, nothing to do */
9015 if (timer == pmu->hrtimer_interval_ms)
9016 return count;
9017
Peter Zijlstra272325c2015-04-15 11:41:58 +02009018 mutex_lock(&mux_interval_mutex);
Stephane Eranian62b85632013-04-03 14:21:34 +02009019 pmu->hrtimer_interval_ms = timer;
9020
9021 /* update all cpuctx for this PMU */
Thomas Gleixnera63fbed2017-05-24 10:15:34 +02009022 cpus_read_lock();
Peter Zijlstra272325c2015-04-15 11:41:58 +02009023 for_each_online_cpu(cpu) {
Stephane Eranian62b85632013-04-03 14:21:34 +02009024 struct perf_cpu_context *cpuctx;
9025 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
9026 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
9027
Peter Zijlstra272325c2015-04-15 11:41:58 +02009028 cpu_function_call(cpu,
9029 (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
Stephane Eranian62b85632013-04-03 14:21:34 +02009030 }
Thomas Gleixnera63fbed2017-05-24 10:15:34 +02009031 cpus_read_unlock();
Peter Zijlstra272325c2015-04-15 11:41:58 +02009032 mutex_unlock(&mux_interval_mutex);
Stephane Eranian62b85632013-04-03 14:21:34 +02009033
9034 return count;
9035}
Greg Kroah-Hartman90826ca2013-08-23 14:24:40 -07009036static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
Stephane Eranian62b85632013-04-03 14:21:34 +02009037
Greg Kroah-Hartman90826ca2013-08-23 14:24:40 -07009038static struct attribute *pmu_dev_attrs[] = {
9039 &dev_attr_type.attr,
9040 &dev_attr_perf_event_mux_interval_ms.attr,
9041 NULL,
Peter Zijlstraabe43402010-11-17 23:17:37 +01009042};
Greg Kroah-Hartman90826ca2013-08-23 14:24:40 -07009043ATTRIBUTE_GROUPS(pmu_dev);
Peter Zijlstraabe43402010-11-17 23:17:37 +01009044
9045static int pmu_bus_running;
9046static struct bus_type pmu_bus = {
9047 .name = "event_source",
Greg Kroah-Hartman90826ca2013-08-23 14:24:40 -07009048 .dev_groups = pmu_dev_groups,
Peter Zijlstraabe43402010-11-17 23:17:37 +01009049};
9050
9051static void pmu_dev_release(struct device *dev)
9052{
9053 kfree(dev);
9054}
9055
9056static int pmu_dev_alloc(struct pmu *pmu)
9057{
9058 int ret = -ENOMEM;
9059
9060 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
9061 if (!pmu->dev)
9062 goto out;
9063
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01009064 pmu->dev->groups = pmu->attr_groups;
Peter Zijlstraabe43402010-11-17 23:17:37 +01009065 device_initialize(pmu->dev);
9066 ret = dev_set_name(pmu->dev, "%s", pmu->name);
9067 if (ret)
9068 goto free_dev;
9069
9070 dev_set_drvdata(pmu->dev, pmu);
9071 pmu->dev->bus = &pmu_bus;
9072 pmu->dev->release = pmu_dev_release;
9073 ret = device_add(pmu->dev);
9074 if (ret)
9075 goto free_dev;
9076
Alexander Shishkin6e855cd2016-04-27 18:44:48 +03009077 /* For PMUs with address filters, throw in an extra attribute: */
9078 if (pmu->nr_addr_filters)
9079 ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
9080
9081 if (ret)
9082 goto del_dev;
9083
Peter Zijlstraabe43402010-11-17 23:17:37 +01009084out:
9085 return ret;
9086
Alexander Shishkin6e855cd2016-04-27 18:44:48 +03009087del_dev:
9088 device_del(pmu->dev);
9089
Peter Zijlstraabe43402010-11-17 23:17:37 +01009090free_dev:
9091 put_device(pmu->dev);
9092 goto out;
9093}
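The device registered here is what appears under the event_source bus in sysfs; for a PMU named "intel_pt", for instance, the attributes above surface roughly as (paths shown for illustration, and nr_addr_filters only exists when the PMU declares address filters):

	/sys/bus/event_source/devices/intel_pt/type
	/sys/bus/event_source/devices/intel_pt/perf_event_mux_interval_ms
	/sys/bus/event_source/devices/intel_pt/nr_addr_filters

Writing a millisecond value to perf_event_mux_interval_ms lands in perf_event_mux_interval_ms_store() above, e.g. "echo 2 > .../perf_event_mux_interval_ms".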
9094
Peter Zijlstra547e9fd2011-01-19 12:51:39 +01009095static struct lock_class_key cpuctx_mutex;
Peter Zijlstrafacc4302011-04-09 21:17:42 +02009096static struct lock_class_key cpuctx_lock;
Peter Zijlstra547e9fd2011-01-19 12:51:39 +01009097
Mischa Jonker03d8e802013-06-04 11:45:48 +02009098int perf_pmu_register(struct pmu *pmu, const char *name, int type)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009099{
Peter Zijlstra108b02c2010-09-06 14:32:03 +02009100 int cpu, ret;
Peter Zijlstra33696fc2010-06-14 08:49:00 +02009101
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009102 mutex_lock(&pmus_lock);
Peter Zijlstra33696fc2010-06-14 08:49:00 +02009103 ret = -ENOMEM;
9104 pmu->pmu_disable_count = alloc_percpu(int);
9105 if (!pmu->pmu_disable_count)
9106 goto unlock;
Peter Zijlstraad5133b2010-06-15 12:22:39 +02009107
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009108 pmu->type = -1;
9109 if (!name)
9110 goto skip_type;
9111 pmu->name = name;
9112
9113 if (type < 0) {
Tejun Heo0e9c3be2013-02-27 17:04:55 -08009114 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
9115 if (type < 0) {
9116 ret = type;
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009117 goto free_pdc;
9118 }
9119 }
9120 pmu->type = type;
9121
Peter Zijlstraabe43402010-11-17 23:17:37 +01009122 if (pmu_bus_running) {
9123 ret = pmu_dev_alloc(pmu);
9124 if (ret)
9125 goto free_idr;
9126 }
9127
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009128skip_type:
Peter Zijlstra26657842016-03-22 22:09:18 +01009129 if (pmu->task_ctx_nr == perf_hw_context) {
9130 static int hw_context_taken = 0;
9131
Mark Rutland5101ef22016-04-26 11:33:46 +01009132 /*
9133 * Other than systems with heterogeneous CPUs, it never makes
9134 * sense for two PMUs to share perf_hw_context. PMUs which are
9135 * uncore must use perf_invalid_context.
9136 */
9137 if (WARN_ON_ONCE(hw_context_taken &&
9138 !(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS)))
Peter Zijlstra26657842016-03-22 22:09:18 +01009139 pmu->task_ctx_nr = perf_invalid_context;
9140
9141 hw_context_taken = 1;
9142 }
9143
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02009144 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
9145 if (pmu->pmu_cpu_context)
9146 goto got_cpu_context;
9147
Wei Yongjunc4814202013-04-12 11:05:54 +08009148 ret = -ENOMEM;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02009149 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
9150 if (!pmu->pmu_cpu_context)
Peter Zijlstraabe43402010-11-17 23:17:37 +01009151 goto free_dev;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02009152
9153 for_each_possible_cpu(cpu) {
9154 struct perf_cpu_context *cpuctx;
9155
9156 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
Peter Zijlstraeb184472010-09-07 15:55:13 +02009157 __perf_event_init_context(&cpuctx->ctx);
Peter Zijlstra547e9fd2011-01-19 12:51:39 +01009158 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
Peter Zijlstrafacc4302011-04-09 21:17:42 +02009159 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02009160 cpuctx->ctx.pmu = pmu;
Thomas Gleixnera63fbed2017-05-24 10:15:34 +02009161 cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
Stephane Eranian9e630202013-04-03 14:21:33 +02009162
Peter Zijlstra272325c2015-04-15 11:41:58 +02009163 __perf_mux_hrtimer_init(cpuctx, cpu);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02009164 }
9165
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02009166got_cpu_context:
Peter Zijlstraad5133b2010-06-15 12:22:39 +02009167 if (!pmu->start_txn) {
9168 if (pmu->pmu_enable) {
9169 /*
9170 * If we have pmu_enable/pmu_disable calls, install
9171 * transaction stubs that use that to try and batch
9172 * hardware accesses.
9173 */
9174 pmu->start_txn = perf_pmu_start_txn;
9175 pmu->commit_txn = perf_pmu_commit_txn;
9176 pmu->cancel_txn = perf_pmu_cancel_txn;
9177 } else {
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07009178 pmu->start_txn = perf_pmu_nop_txn;
Peter Zijlstraad5133b2010-06-15 12:22:39 +02009179 pmu->commit_txn = perf_pmu_nop_int;
9180 pmu->cancel_txn = perf_pmu_nop_void;
9181 }
9182 }
9183
9184 if (!pmu->pmu_enable) {
9185 pmu->pmu_enable = perf_pmu_nop_void;
9186 pmu->pmu_disable = perf_pmu_nop_void;
9187 }
9188
Peter Zijlstra35edc2a2011-11-20 20:36:02 +01009189 if (!pmu->event_idx)
9190 pmu->event_idx = perf_event_idx_default;
9191
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009192 list_add_rcu(&pmu->entry, &pmus);
Alexander Shishkinbed5b252015-01-30 12:31:06 +02009193 atomic_set(&pmu->exclusive_cnt, 0);
Peter Zijlstra33696fc2010-06-14 08:49:00 +02009194 ret = 0;
9195unlock:
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009196 mutex_unlock(&pmus_lock);
9197
Peter Zijlstra33696fc2010-06-14 08:49:00 +02009198 return ret;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02009199
Peter Zijlstraabe43402010-11-17 23:17:37 +01009200free_dev:
9201 device_del(pmu->dev);
9202 put_device(pmu->dev);
9203
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009204free_idr:
9205 if (pmu->type >= PERF_TYPE_MAX)
9206 idr_remove(&pmu_idr, pmu->type);
9207
Peter Zijlstra108b02c2010-09-06 14:32:03 +02009208free_pdc:
9209 free_percpu(pmu->pmu_disable_count);
9210 goto unlock;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009211}
Yan, Zhengc464c762014-03-18 16:56:41 +08009212EXPORT_SYMBOL_GPL(perf_pmu_register);
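For reference, a minimal sketch of the driver side of this interface; the my_* callbacks are hypothetical and assumed to be defined elsewhere, and passing -1 as the type asks perf_pmu_register() to allocate a dynamic type from pmu_idr, as in the code above:

static struct pmu my_pmu = {
	.task_ctx_nr	= perf_invalid_context,	/* uncore-style: no task context */
	.event_init	= my_pmu_event_init,
	.add		= my_pmu_add,
	.del		= my_pmu_del,
	.start		= my_pmu_start,
	.stop		= my_pmu_stop,
	.read		= my_pmu_read,
};

static int __init my_pmu_module_init(void)
{
	return perf_pmu_register(&my_pmu, "my_pmu", -1);
}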
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009213
9214void perf_pmu_unregister(struct pmu *pmu)
9215{
Jiri Olsa09338402016-10-20 13:10:11 +02009216 int remove_device;
9217
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009218 mutex_lock(&pmus_lock);
Jiri Olsa09338402016-10-20 13:10:11 +02009219 remove_device = pmu_bus_running;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009220 list_del_rcu(&pmu->entry);
9221 mutex_unlock(&pmus_lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009222
9223 /*
Peter Zijlstracde8e882010-09-13 11:06:55 +02009224 * We dereference the pmu list under both SRCU and regular RCU, so
9225 * synchronize against both of those.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009226 */
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009227 synchronize_srcu(&pmus_srcu);
Peter Zijlstracde8e882010-09-13 11:06:55 +02009228 synchronize_rcu();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009229
Peter Zijlstra33696fc2010-06-14 08:49:00 +02009230 free_percpu(pmu->pmu_disable_count);
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009231 if (pmu->type >= PERF_TYPE_MAX)
9232 idr_remove(&pmu_idr, pmu->type);
Jiri Olsa09338402016-10-20 13:10:11 +02009233 if (remove_device) {
9234 if (pmu->nr_addr_filters)
9235 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
9236 device_del(pmu->dev);
9237 put_device(pmu->dev);
9238 }
Peter Zijlstra51676952010-12-07 14:18:20 +01009239 free_pmu_context(pmu);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009240}
Yan, Zhengc464c762014-03-18 16:56:41 +08009241EXPORT_SYMBOL_GPL(perf_pmu_unregister);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009242
Mark Rutlandcc34b982015-01-07 14:56:51 +00009243static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
9244{
Peter Zijlstraccd41c82015-02-25 15:56:04 +01009245 struct perf_event_context *ctx = NULL;
Mark Rutlandcc34b982015-01-07 14:56:51 +00009246 int ret;
9247
9248 if (!try_module_get(pmu->module))
9249 return -ENODEV;
Peter Zijlstraccd41c82015-02-25 15:56:04 +01009250
9251 if (event->group_leader != event) {
Peter Zijlstra8b10c5e22015-05-01 16:08:46 +02009252 /*
9253 * This ctx->mutex can nest when we're called through
9254 * inheritance. See the perf_event_ctx_lock_nested() comment.
9255 */
9256 ctx = perf_event_ctx_lock_nested(event->group_leader,
9257 SINGLE_DEPTH_NESTING);
Peter Zijlstraccd41c82015-02-25 15:56:04 +01009258 BUG_ON(!ctx);
9259 }
9260
Mark Rutlandcc34b982015-01-07 14:56:51 +00009261 event->pmu = pmu;
9262 ret = pmu->event_init(event);
Peter Zijlstraccd41c82015-02-25 15:56:04 +01009263
9264 if (ctx)
9265 perf_event_ctx_unlock(event->group_leader, ctx);
9266
Mark Rutlandcc34b982015-01-07 14:56:51 +00009267 if (ret)
9268 module_put(pmu->module);
9269
9270 return ret;
9271}
9272
Geliang Tang18ab2cd2015-09-27 23:25:50 +08009273static struct pmu *perf_init_event(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009274{
Dan Carpenter85c617a2017-05-22 12:03:49 +03009275 struct pmu *pmu;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009276 int idx;
Lin Ming940c5b22011-02-27 21:13:31 +08009277 int ret;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02009278
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009279 idx = srcu_read_lock(&pmus_srcu);
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009280
Kan Liang40999312017-01-18 08:21:01 -05009281 /* Try parent's PMU first: */
9282 if (event->parent && event->parent->pmu) {
9283 pmu = event->parent->pmu;
9284 ret = perf_try_init_event(pmu, event);
9285 if (!ret)
9286 goto unlock;
9287 }
9288
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009289 rcu_read_lock();
9290 pmu = idr_find(&pmu_idr, event->attr.type);
9291 rcu_read_unlock();
Lin Ming940c5b22011-02-27 21:13:31 +08009292 if (pmu) {
Mark Rutlandcc34b982015-01-07 14:56:51 +00009293 ret = perf_try_init_event(pmu, event);
Lin Ming940c5b22011-02-27 21:13:31 +08009294 if (ret)
9295 pmu = ERR_PTR(ret);
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009296 goto unlock;
Lin Ming940c5b22011-02-27 21:13:31 +08009297 }
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009298
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009299 list_for_each_entry_rcu(pmu, &pmus, entry) {
Mark Rutlandcc34b982015-01-07 14:56:51 +00009300 ret = perf_try_init_event(pmu, event);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009301 if (!ret)
Peter Zijlstrae5f4d332010-09-10 17:38:06 +02009302 goto unlock;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02009303
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009304 if (ret != -ENOENT) {
9305 pmu = ERR_PTR(ret);
Peter Zijlstrae5f4d332010-09-10 17:38:06 +02009306 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009307 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009308 }
Peter Zijlstrae5f4d332010-09-10 17:38:06 +02009309 pmu = ERR_PTR(-ENOENT);
9310unlock:
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009311 srcu_read_unlock(&pmus_srcu, idx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009312
9313 return pmu;
9314}
9315
Kan Liangf2fb6be2016-03-23 11:24:37 -07009316static void attach_sb_event(struct perf_event *event)
9317{
9318 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
9319
9320 raw_spin_lock(&pel->lock);
9321 list_add_rcu(&event->sb_list, &pel->list);
9322 raw_spin_unlock(&pel->lock);
9323}
9324
Peter Zijlstraaab5b712016-05-12 17:26:46 +02009325/*
9326 * We keep a list of all !task (and therefore per-cpu) events
9327 * that need to receive side-band records.
9328 *
9329 * This avoids having to scan all the various PMU per-cpu contexts
9330 * looking for them.
9331 */
Kan Liangf2fb6be2016-03-23 11:24:37 -07009332static void account_pmu_sb_event(struct perf_event *event)
9333{
David Carrillo-Cisnerosa4f144e2016-06-01 12:33:05 -07009334 if (is_sb_event(event))
Kan Liangf2fb6be2016-03-23 11:24:37 -07009335 attach_sb_event(event);
9336}
9337
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02009338static void account_event_cpu(struct perf_event *event, int cpu)
9339{
9340 if (event->parent)
9341 return;
9342
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02009343 if (is_cgroup_event(event))
9344 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
9345}
9346
Frederic Weisbecker555e0c12015-07-16 17:42:29 +02009347/* Freq events need the tick to stay alive (see perf_event_task_tick). */
9348static void account_freq_event_nohz(void)
9349{
9350#ifdef CONFIG_NO_HZ_FULL
9351 /* Lock so we don't race with concurrent unaccount */
9352 spin_lock(&nr_freq_lock);
9353 if (atomic_inc_return(&nr_freq_events) == 1)
9354 tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
9355 spin_unlock(&nr_freq_lock);
9356#endif
9357}
9358
9359static void account_freq_event(void)
9360{
9361 if (tick_nohz_full_enabled())
9362 account_freq_event_nohz();
9363 else
9364 atomic_inc(&nr_freq_events);
9365}
9366
9367
Frederic Weisbecker766d6c02013-07-23 02:31:01 +02009368static void account_event(struct perf_event *event)
9369{
Peter Zijlstra25432ae2016-01-08 11:05:09 +01009370 bool inc = false;
9371
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02009372 if (event->parent)
9373 return;
9374
Frederic Weisbecker766d6c02013-07-23 02:31:01 +02009375 if (event->attach_state & PERF_ATTACH_TASK)
Peter Zijlstra25432ae2016-01-08 11:05:09 +01009376 inc = true;
Frederic Weisbecker766d6c02013-07-23 02:31:01 +02009377 if (event->attr.mmap || event->attr.mmap_data)
9378 atomic_inc(&nr_mmap_events);
9379 if (event->attr.comm)
9380 atomic_inc(&nr_comm_events);
Hari Bathinie4222672017-03-08 02:11:36 +05309381 if (event->attr.namespaces)
9382 atomic_inc(&nr_namespaces_events);
Frederic Weisbecker766d6c02013-07-23 02:31:01 +02009383 if (event->attr.task)
9384 atomic_inc(&nr_task_events);
Frederic Weisbecker555e0c12015-07-16 17:42:29 +02009385 if (event->attr.freq)
9386 account_freq_event();
Adrian Hunter45ac1402015-07-21 12:44:02 +03009387 if (event->attr.context_switch) {
9388 atomic_inc(&nr_switch_events);
Peter Zijlstra25432ae2016-01-08 11:05:09 +01009389 inc = true;
Adrian Hunter45ac1402015-07-21 12:44:02 +03009390 }
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02009391 if (has_branch_stack(event))
Peter Zijlstra25432ae2016-01-08 11:05:09 +01009392 inc = true;
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02009393 if (is_cgroup_event(event))
Peter Zijlstra25432ae2016-01-08 11:05:09 +01009394 inc = true;
9395
Peter Zijlstra9107c892016-02-24 18:45:45 +01009396 if (inc) {
9397 if (atomic_inc_not_zero(&perf_sched_count))
9398 goto enabled;
9399
9400 mutex_lock(&perf_sched_mutex);
9401 if (!atomic_read(&perf_sched_count)) {
9402 static_branch_enable(&perf_sched_events);
9403 /*
9404			 * Guarantee that all CPUs observe the key change and
9405 * call the perf scheduling hooks before proceeding to
9406 * install events that need them.
9407 */
9408 synchronize_sched();
9409 }
9410 /*
9411 * Now that we have waited for the sync_sched(), allow further
9412 * increments to by-pass the mutex.
9413 */
9414 atomic_inc(&perf_sched_count);
9415 mutex_unlock(&perf_sched_mutex);
9416 }
9417enabled:
Frederic Weisbecker766d6c02013-07-23 02:31:01 +02009418
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02009419 account_event_cpu(event, event->cpu);
Kan Liangf2fb6be2016-03-23 11:24:37 -07009420
9421 account_pmu_sb_event(event);
Frederic Weisbecker766d6c02013-07-23 02:31:01 +02009422}
9423
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009424/*
9425 * Allocate and initialize an event structure
9426 */
9427static struct perf_event *
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02009428perf_event_alloc(struct perf_event_attr *attr, int cpu,
Peter Zijlstrad580ff82010-10-14 17:43:23 +02009429 struct task_struct *task,
9430 struct perf_event *group_leader,
9431 struct perf_event *parent_event,
Avi Kivity4dc0da82011-06-29 18:42:35 +03009432 perf_overflow_handler_t overflow_handler,
Matt Fleming79dff512015-01-23 18:45:42 +00009433 void *context, int cgroup_fd)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009434{
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02009435 struct pmu *pmu;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009436 struct perf_event *event;
9437 struct hw_perf_event *hwc;
Frederic Weisbecker90983b12013-07-23 02:31:00 +02009438 long err = -EINVAL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009439
Oleg Nesterov66832eb2011-01-18 17:10:32 +01009440 if ((unsigned)cpu >= nr_cpu_ids) {
9441 if (!task || cpu != -1)
9442 return ERR_PTR(-EINVAL);
9443 }
9444
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02009445 event = kzalloc(sizeof(*event), GFP_KERNEL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009446 if (!event)
9447 return ERR_PTR(-ENOMEM);
9448
9449 /*
9450 * Single events are their own group leaders, with an
9451 * empty sibling list:
9452 */
9453 if (!group_leader)
9454 group_leader = event;
9455
9456 mutex_init(&event->child_mutex);
9457 INIT_LIST_HEAD(&event->child_list);
9458
9459 INIT_LIST_HEAD(&event->group_entry);
9460 INIT_LIST_HEAD(&event->event_entry);
9461 INIT_LIST_HEAD(&event->sibling_list);
Peter Zijlstra10c6db12011-11-26 02:47:31 +01009462 INIT_LIST_HEAD(&event->rb_entry);
Stephane Eranian71ad88e2013-11-12 17:58:48 +01009463 INIT_LIST_HEAD(&event->active_entry);
Alexander Shishkin375637b2016-04-27 18:44:46 +03009464 INIT_LIST_HEAD(&event->addr_filters.list);
Stephane Eranianf3ae75d2014-01-08 11:15:52 +01009465 INIT_HLIST_NODE(&event->hlist_entry);
9466
Peter Zijlstra10c6db12011-11-26 02:47:31 +01009467
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009468 init_waitqueue_head(&event->waitq);
Peter Zijlstrae360adb2010-10-14 14:01:34 +08009469 init_irq_work(&event->pending, perf_pending_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009470
9471 mutex_init(&event->mmap_mutex);
Alexander Shishkin375637b2016-04-27 18:44:46 +03009472 raw_spin_lock_init(&event->addr_filters.lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009473
Al Viroa6fa9412012-08-20 14:59:25 +01009474 atomic_long_set(&event->refcount, 1);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009475 event->cpu = cpu;
9476 event->attr = *attr;
9477 event->group_leader = group_leader;
9478 event->pmu = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009479 event->oncpu = -1;
9480
9481 event->parent = parent_event;
9482
Eric W. Biederman17cf22c2010-03-02 14:51:53 -08009483 event->ns = get_pid_ns(task_active_pid_ns(current));
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009484 event->id = atomic64_inc_return(&perf_event_id);
9485
9486 event->state = PERF_EVENT_STATE_INACTIVE;
9487
Peter Zijlstrad580ff82010-10-14 17:43:23 +02009488 if (task) {
9489 event->attach_state = PERF_ATTACH_TASK;
Peter Zijlstrad580ff82010-10-14 17:43:23 +02009490 /*
Peter Zijlstra50f16a82015-03-05 22:10:19 +01009491 * XXX pmu::event_init needs to know what task to account to
9492 * and we cannot use the ctx information because we need the
9493 * pmu before we get a ctx.
Peter Zijlstrad580ff82010-10-14 17:43:23 +02009494 */
Peter Zijlstra50f16a82015-03-05 22:10:19 +01009495 event->hw.target = task;
Peter Zijlstrad580ff82010-10-14 17:43:23 +02009496 }
9497
Peter Zijlstra34f43922015-02-20 14:05:38 +01009498 event->clock = &local_clock;
9499 if (parent_event)
9500 event->clock = parent_event->clock;
9501
Avi Kivity4dc0da82011-06-29 18:42:35 +03009502 if (!overflow_handler && parent_event) {
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01009503 overflow_handler = parent_event->overflow_handler;
Avi Kivity4dc0da82011-06-29 18:42:35 +03009504 context = parent_event->overflow_handler_context;
Arnd Bergmannf1e4ba52016-09-06 15:10:22 +02009505#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
Alexei Starovoitovaa6a5f32016-09-01 18:37:24 -07009506 if (overflow_handler == bpf_overflow_handler) {
9507 struct bpf_prog *prog = bpf_prog_inc(parent_event->prog);
9508
9509 if (IS_ERR(prog)) {
9510 err = PTR_ERR(prog);
9511 goto err_ns;
9512 }
9513 event->prog = prog;
9514 event->orig_overflow_handler =
9515 parent_event->orig_overflow_handler;
9516 }
9517#endif
Avi Kivity4dc0da82011-06-29 18:42:35 +03009518 }
Oleg Nesterov66832eb2011-01-18 17:10:32 +01009519
Wang Nan18794452016-03-28 06:41:30 +00009520 if (overflow_handler) {
9521 event->overflow_handler = overflow_handler;
9522 event->overflow_handler_context = context;
Wang Nan9ecda412016-04-05 14:11:18 +00009523 } else if (is_write_backward(event)){
9524 event->overflow_handler = perf_event_output_backward;
9525 event->overflow_handler_context = NULL;
Wang Nan18794452016-03-28 06:41:30 +00009526 } else {
Wang Nan9ecda412016-04-05 14:11:18 +00009527 event->overflow_handler = perf_event_output_forward;
Wang Nan18794452016-03-28 06:41:30 +00009528 event->overflow_handler_context = NULL;
9529 }
Frederic Weisbecker97eaf532009-10-18 15:33:50 +02009530
Jiri Olsa0231bb52013-02-01 11:23:45 +01009531 perf_event__state_init(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009532
9533 pmu = NULL;
9534
9535 hwc = &event->hw;
9536 hwc->sample_period = attr->sample_period;
9537 if (attr->freq && attr->sample_freq)
9538 hwc->sample_period = 1;
9539 hwc->last_period = hwc->sample_period;
9540
Peter Zijlstrae7850592010-05-21 14:43:08 +02009541 local64_set(&hwc->period_left, hwc->sample_period);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009542
9543 /*
Peter Zijlstraba5213a2017-05-30 11:45:12 +02009544 * We currently do not support PERF_SAMPLE_READ on inherited events.
9545 * See perf_output_read().
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009546 */
Peter Zijlstraba5213a2017-05-30 11:45:12 +02009547 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))
Frederic Weisbecker90983b12013-07-23 02:31:00 +02009548 goto err_ns;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009549
Yan, Zhenga46a2302014-11-04 21:56:06 -05009550 if (!has_branch_stack(event))
9551 event->attr.branch_sample_type = 0;
9552
Matt Fleming79dff512015-01-23 18:45:42 +00009553 if (cgroup_fd != -1) {
9554 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
9555 if (err)
9556 goto err_ns;
9557 }
9558
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009559 pmu = perf_init_event(event);
Dan Carpenter85c617a2017-05-22 12:03:49 +03009560 if (IS_ERR(pmu)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009561 err = PTR_ERR(pmu);
Frederic Weisbecker90983b12013-07-23 02:31:00 +02009562 goto err_ns;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009563 }
9564
Alexander Shishkinbed5b252015-01-30 12:31:06 +02009565 err = exclusive_event_init(event);
9566 if (err)
9567 goto err_pmu;
9568
Alexander Shishkin375637b2016-04-27 18:44:46 +03009569 if (has_addr_filter(event)) {
9570 event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
9571 sizeof(unsigned long),
9572 GFP_KERNEL);
Dan Carpenter36cc2b92017-05-22 12:04:18 +03009573 if (!event->addr_filters_offs) {
9574 err = -ENOMEM;
Alexander Shishkin375637b2016-04-27 18:44:46 +03009575 goto err_per_task;
Dan Carpenter36cc2b92017-05-22 12:04:18 +03009576 }
Alexander Shishkin375637b2016-04-27 18:44:46 +03009577
9578 /* force hw sync on the address filters */
9579 event->addr_filters_gen = 1;
9580 }
9581
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009582 if (!event->parent) {
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02009583 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
Arnaldo Carvalho de Melo97c79a32016-04-28 13:16:33 -03009584 err = get_callchain_buffers(attr->sample_max_stack);
Frederic Weisbecker90983b12013-07-23 02:31:00 +02009585 if (err)
Alexander Shishkin375637b2016-04-27 18:44:46 +03009586 goto err_addr_filters;
Stephane Eraniand010b332012-02-09 23:21:00 +01009587 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009588 }
9589
Alexander Shishkin927a5572016-03-02 13:24:14 +02009590 /* symmetric to unaccount_event() in _free_event() */
9591 account_event(event);
9592
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009593 return event;
Frederic Weisbecker90983b12013-07-23 02:31:00 +02009594
Alexander Shishkin375637b2016-04-27 18:44:46 +03009595err_addr_filters:
9596 kfree(event->addr_filters_offs);
9597
Alexander Shishkinbed5b252015-01-30 12:31:06 +02009598err_per_task:
9599 exclusive_event_destroy(event);
9600
Frederic Weisbecker90983b12013-07-23 02:31:00 +02009601err_pmu:
9602 if (event->destroy)
9603 event->destroy(event);
Yan, Zhengc464c762014-03-18 16:56:41 +08009604 module_put(pmu->module);
Frederic Weisbecker90983b12013-07-23 02:31:00 +02009605err_ns:
Matt Fleming79dff512015-01-23 18:45:42 +00009606 if (is_cgroup_event(event))
9607 perf_detach_cgroup(event);
Frederic Weisbecker90983b12013-07-23 02:31:00 +02009608 if (event->ns)
9609 put_pid_ns(event->ns);
9610 kfree(event);
9611
9612 return ERR_PTR(err);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009613}
9614
9615static int perf_copy_attr(struct perf_event_attr __user *uattr,
9616 struct perf_event_attr *attr)
9617{
9618 u32 size;
9619 int ret;
9620
9621 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
9622 return -EFAULT;
9623
9624 /*
9625 * zero the full structure, so that a short copy leaves the remaining fields zeroed.
9626 */
9627 memset(attr, 0, sizeof(*attr));
9628
9629 ret = get_user(size, &uattr->size);
9630 if (ret)
9631 return ret;
9632
9633 if (size > PAGE_SIZE) /* silly large */
9634 goto err_size;
9635
9636 if (!size) /* abi compat */
9637 size = PERF_ATTR_SIZE_VER0;
9638
9639 if (size < PERF_ATTR_SIZE_VER0)
9640 goto err_size;
9641
9642 /*
9643 * If we're handed a bigger struct than we know of,
9644 * ensure all the unknown bits are 0 - i.e. new
9645 * user-space does not rely on any kernel feature
9646 * extensions we don't know about yet.
9647 */
9648 if (size > sizeof(*attr)) {
9649 unsigned char __user *addr;
9650 unsigned char __user *end;
9651 unsigned char val;
9652
9653 addr = (void __user *)uattr + sizeof(*attr);
9654 end = (void __user *)uattr + size;
9655
9656 for (; addr < end; addr++) {
9657 ret = get_user(val, addr);
9658 if (ret)
9659 return ret;
9660 if (val)
9661 goto err_size;
9662 }
9663 size = sizeof(*attr);
9664 }
9665
9666 ret = copy_from_user(attr, uattr, size);
9667 if (ret)
9668 return -EFAULT;
9669
Meng Xuf12f42a2017-08-23 17:07:50 -04009670 attr->size = size;
9671
Mahesh Salgaonkarcd757642010-01-30 10:25:18 +05309672 if (attr->__reserved_1)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009673 return -EINVAL;
9674
9675 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
9676 return -EINVAL;
9677
9678 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
9679 return -EINVAL;
9680
Stephane Eranianbce38cd2012-02-09 23:20:51 +01009681 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
9682 u64 mask = attr->branch_sample_type;
9683
9684 /* only using defined bits */
9685 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
9686 return -EINVAL;
9687
9688 /* at least one branch bit must be set */
9689 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
9690 return -EINVAL;
9691
Stephane Eranianbce38cd2012-02-09 23:20:51 +01009692 /* propagate priv level, when not set for branch */
9693 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
9694
9695 /* exclude_kernel checked on syscall entry */
9696 if (!attr->exclude_kernel)
9697 mask |= PERF_SAMPLE_BRANCH_KERNEL;
9698
9699 if (!attr->exclude_user)
9700 mask |= PERF_SAMPLE_BRANCH_USER;
9701
9702 if (!attr->exclude_hv)
9703 mask |= PERF_SAMPLE_BRANCH_HV;
9704 /*
9705 * adjust user setting (for HW filter setup)
9706 */
9707 attr->branch_sample_type = mask;
9708 }
Stephane Eraniane7122092013-06-06 11:02:04 +02009709 /* privileged levels capture (kernel, hv): check permissions */
9710 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
Stephane Eranian2b923c82013-05-21 12:53:37 +02009711 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9712 return -EACCES;
Stephane Eranianbce38cd2012-02-09 23:20:51 +01009713 }
Jiri Olsa40189942012-08-07 15:20:37 +02009714
Jiri Olsac5ebced2012-08-07 15:20:40 +02009715 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
Jiri Olsa40189942012-08-07 15:20:37 +02009716 ret = perf_reg_validate(attr->sample_regs_user);
Jiri Olsac5ebced2012-08-07 15:20:40 +02009717 if (ret)
9718 return ret;
9719 }
9720
9721 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
9722 if (!arch_perf_have_user_stack_dump())
9723 return -ENOSYS;
9724
9725 /*
9726 * We have __u32 type for the size, but so far
9727 * we can only use __u16 as maximum due to the
9728 * __u16 sample size limit.
9729 */
9730 if (attr->sample_stack_user >= USHRT_MAX)
9731 ret = -EINVAL;
9732 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
9733 ret = -EINVAL;
9734 }
Jiri Olsa40189942012-08-07 15:20:37 +02009735
Stephane Eranian60e23642014-09-24 13:48:37 +02009736 if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
9737 ret = perf_reg_validate(attr->sample_regs_intr);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009738out:
9739 return ret;
9740
9741err_size:
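	/* Report the attr size this kernel supports so userspace can retry. */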
9742 put_user(sizeof(*attr), &uattr->size);
9743 ret = -E2BIG;
9744 goto out;
9745}
9746
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009747static int
9748perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009749{
Peter Zijlstrab69cf532014-03-14 10:50:33 +01009750 struct ring_buffer *rb = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009751 int ret = -EINVAL;
9752
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009753 if (!output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009754 goto set;
9755
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009756 /* don't allow circular references */
9757 if (event == output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009758 goto out;
9759
Peter Zijlstra0f139302010-05-20 14:35:15 +02009760 /*
9761 * Don't allow cross-cpu buffers
9762 */
9763 if (output_event->cpu != event->cpu)
9764 goto out;
9765
9766 /*
Frederic Weisbecker76369132011-05-19 19:55:04 +02009767 * If it's not a per-cpu rb, it must be the same task.
Peter Zijlstra0f139302010-05-20 14:35:15 +02009768 */
9769 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
9770 goto out;
9771
Peter Zijlstra34f43922015-02-20 14:05:38 +01009772 /*
9773 * Mixing clocks in the same buffer is trouble you don't need.
9774 */
9775 if (output_event->clock != event->clock)
9776 goto out;
9777
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02009778 /*
Wang Nan9ecda412016-04-05 14:11:18 +00009779 * The ring buffer is written either from the beginning or from the end.
9780 * Mixing the two directions is not allowed.
9781 */
9782 if (is_write_backward(output_event) != is_write_backward(event))
9783 goto out;
9784
9785 /*
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02009786 * If both events generate aux data, they must be on the same PMU
9787 */
9788 if (has_aux(event) && has_aux(output_event) &&
9789 event->pmu != output_event->pmu)
9790 goto out;
9791
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009792set:
9793 mutex_lock(&event->mmap_mutex);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009794 /* Can't redirect output if we've got an active mmap() */
9795 if (atomic_read(&event->mmap_count))
9796 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009797
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009798 if (output_event) {
Frederic Weisbecker76369132011-05-19 19:55:04 +02009799 /* get the rb we want to redirect to */
9800 rb = ring_buffer_get(output_event);
9801 if (!rb)
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009802 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009803 }
9804
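	/*
	 * With no output_event, rb is left NULL and ring_buffer_attach()
	 * just detaches whatever buffer the event was redirected to.
	 */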
Peter Zijlstrab69cf532014-03-14 10:50:33 +01009805 ring_buffer_attach(event, rb);
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02009806
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009807 ret = 0;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009808unlock:
9809 mutex_unlock(&event->mmap_mutex);
9810
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009811out:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009812 return ret;
9813}
9814
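/*
 * Take two context mutexes in address order so every caller agrees on the
 * ordering, which avoids ABBA deadlocks between concurrent lockers.
 */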
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01009815static void mutex_lock_double(struct mutex *a, struct mutex *b)
9816{
9817 if (b < a)
9818 swap(a, b);
9819
9820 mutex_lock(a);
9821 mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
9822}
9823
Peter Zijlstra34f43922015-02-20 14:05:38 +01009824static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
9825{
9826 bool nmi_safe = false;
9827
9828 switch (clk_id) {
9829 case CLOCK_MONOTONIC:
9830 event->clock = &ktime_get_mono_fast_ns;
9831 nmi_safe = true;
9832 break;
9833
9834 case CLOCK_MONOTONIC_RAW:
9835 event->clock = &ktime_get_raw_fast_ns;
9836 nmi_safe = true;
9837 break;
9838
9839 case CLOCK_REALTIME:
9840 event->clock = &ktime_get_real_ns;
9841 break;
9842
9843 case CLOCK_BOOTTIME:
9844 event->clock = &ktime_get_boot_ns;
9845 break;
9846
9847 case CLOCK_TAI:
9848 event->clock = &ktime_get_tai_ns;
9849 break;
9850
9851 default:
9852 return -EINVAL;
9853 }
9854
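	/*
	 * Clocks that are not NMI-safe may only be used with PMUs that do
	 * not take samples from NMI context (PERF_PMU_CAP_NO_NMI).
	 */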
9855 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
9856 return -EINVAL;
9857
9858 return 0;
9859}
9860
Peter Zijlstra321027c2017-01-11 21:09:50 +01009861/*
9862 * Variation on perf_event_ctx_lock_nested(), except we take two context
9863 * mutexes.
9864 */
9865static struct perf_event_context *
9866__perf_event_ctx_lock_double(struct perf_event *group_leader,
9867 struct perf_event_context *ctx)
9868{
9869 struct perf_event_context *gctx;
9870
9871again:
9872 rcu_read_lock();
9873 gctx = READ_ONCE(group_leader->ctx);
9874 if (!atomic_inc_not_zero(&gctx->refcount)) {
9875 rcu_read_unlock();
9876 goto again;
9877 }
9878 rcu_read_unlock();
9879
9880 mutex_lock_double(&gctx->mutex, &ctx->mutex);
9881
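	/*
	 * group_leader->ctx may have been swapped while we slept on the
	 * mutexes; if so, unlock, drop the reference and retry.
	 */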
9882 if (group_leader->ctx != gctx) {
9883 mutex_unlock(&ctx->mutex);
9884 mutex_unlock(&gctx->mutex);
9885 put_ctx(gctx);
9886 goto again;
9887 }
9888
9889 return gctx;
9890}
9891
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009892/**
9893 * sys_perf_event_open - open a performance event, associate it to a task/cpu
9894 *
9895 * @attr_uptr: event_id type attributes for monitoring/sampling
9896 * @pid: target pid
9897 * @cpu: target cpu
9898 * @group_fd: group leader event fd
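 * @flags: PERF_FLAG_* modifier bits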
9899 */
9900SYSCALL_DEFINE5(perf_event_open,
9901 struct perf_event_attr __user *, attr_uptr,
9902 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
9903{
Peter Zijlstrab04243e2010-09-17 11:28:48 +02009904 struct perf_event *group_leader = NULL, *output_event = NULL;
9905 struct perf_event *event, *sibling;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009906 struct perf_event_attr attr;
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01009907 struct perf_event_context *ctx, *uninitialized_var(gctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009908 struct file *event_file = NULL;
Al Viro2903ff02012-08-28 12:52:22 -04009909 struct fd group = {NULL, 0};
Matt Helsley38a81da2010-09-13 13:01:20 -07009910 struct task_struct *task = NULL;
Peter Zijlstra89a1e182010-09-07 17:34:50 +02009911 struct pmu *pmu;
Al Viroea635c62010-05-26 17:40:29 -04009912 int event_fd;
Peter Zijlstrab04243e2010-09-17 11:28:48 +02009913 int move_group = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009914 int err;
Yann Droneauda21b0b32014-01-05 21:36:33 +01009915 int f_flags = O_RDWR;
Matt Fleming79dff512015-01-23 18:45:42 +00009916 int cgroup_fd = -1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009917
9918 /* for future expandability... */
Stephane Eraniane5d13672011-02-14 11:20:01 +02009919 if (flags & ~PERF_FLAG_ALL)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009920 return -EINVAL;
9921
9922 err = perf_copy_attr(attr_uptr, &attr);
9923 if (err)
9924 return err;
9925
9926 if (!attr.exclude_kernel) {
9927 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9928 return -EACCES;
9929 }
9930
Hari Bathinie4222672017-03-08 02:11:36 +05309931 if (attr.namespaces) {
9932 if (!capable(CAP_SYS_ADMIN))
9933 return -EACCES;
9934 }
9935
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009936 if (attr.freq) {
9937 if (attr.sample_freq > sysctl_perf_event_sample_rate)
9938 return -EINVAL;
Peter Zijlstra0819b2e2014-05-15 20:23:48 +02009939 } else {
9940 if (attr.sample_period & (1ULL << 63))
9941 return -EINVAL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009942 }
9943
Kan Liangfc7ce9c2017-08-28 20:52:49 -04009944 /* Only privileged users can get physical addresses */
9945 if ((attr.sample_type & PERF_SAMPLE_PHYS_ADDR) &&
9946 perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9947 return -EACCES;
9948
Arnaldo Carvalho de Melo97c79a32016-04-28 13:16:33 -03009949 if (!attr.sample_max_stack)
9950 attr.sample_max_stack = sysctl_perf_event_max_stack;
9951
Stephane Eraniane5d13672011-02-14 11:20:01 +02009952 /*
9953 * In cgroup mode, the pid argument is used to pass the fd
9954 * opened to the cgroup directory in cgroupfs. The cpu argument
9955 * designates the cpu on which to monitor threads from that
9956 * cgroup.
9957 */
9958 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
9959 return -EINVAL;
9960
Yann Droneauda21b0b32014-01-05 21:36:33 +01009961 if (flags & PERF_FLAG_FD_CLOEXEC)
9962 f_flags |= O_CLOEXEC;
9963
9964 event_fd = get_unused_fd_flags(f_flags);
Al Viroea635c62010-05-26 17:40:29 -04009965 if (event_fd < 0)
9966 return event_fd;
9967
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009968 if (group_fd != -1) {
Al Viro2903ff02012-08-28 12:52:22 -04009969 err = perf_fget_light(group_fd, &group);
9970 if (err)
Stephane Eraniand14b12d2010-09-17 11:28:47 +02009971 goto err_fd;
Al Viro2903ff02012-08-28 12:52:22 -04009972 group_leader = group.file->private_data;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009973 if (flags & PERF_FLAG_FD_OUTPUT)
9974 output_event = group_leader;
9975 if (flags & PERF_FLAG_FD_NO_GROUP)
9976 group_leader = NULL;
9977 }
9978
Stephane Eraniane5d13672011-02-14 11:20:01 +02009979 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
Peter Zijlstrac6be5a52010-10-14 16:59:46 +02009980 task = find_lively_task_by_vpid(pid);
9981 if (IS_ERR(task)) {
9982 err = PTR_ERR(task);
9983 goto err_group_fd;
9984 }
9985 }
9986
Peter Zijlstra1f4ee502014-05-06 09:59:34 +02009987 if (task && group_leader &&
9988 group_leader->attr.inherit != attr.inherit) {
9989 err = -EINVAL;
9990 goto err_task;
9991 }
9992
Peter Zijlstra79c9ce52016-04-26 11:36:53 +02009993 if (task) {
9994 err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
9995 if (err)
Alexander Levine5aeee52017-06-03 03:39:13 +00009996 goto err_task;
Peter Zijlstra79c9ce52016-04-26 11:36:53 +02009997
9998 /*
9999 * Reuse ptrace permission checks for now.
10000 *
10001 * We must hold cred_guard_mutex across this and any potential
10002 * perf_install_in_context() call for this new event to
10003 * serialize against exec() altering our credentials (and the
10004 * perf_event_exit_task() that could imply).
10005 */
10006 err = -EACCES;
10007 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
10008 goto err_cred;
10009 }
10010
Matt Fleming79dff512015-01-23 18:45:42 +000010011 if (flags & PERF_FLAG_PID_CGROUP)
10012 cgroup_fd = pid;
10013
Avi Kivity4dc0da82011-06-29 18:42:35 +030010014 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
Matt Fleming79dff512015-01-23 18:45:42 +000010015 NULL, NULL, cgroup_fd);
Stephane Eraniand14b12d2010-09-17 11:28:47 +020010016 if (IS_ERR(event)) {
10017 err = PTR_ERR(event);
Peter Zijlstra79c9ce52016-04-26 11:36:53 +020010018 goto err_cred;
Stephane Eraniand14b12d2010-09-17 11:28:47 +020010019 }
10020
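	/* Sampling requires a PMU that can raise interrupts. */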
Vince Weaver53b25332014-05-16 17:12:12 -040010021 if (is_sampling_event(event)) {
10022 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
Vineet Guptaa1396552016-05-09 15:07:40 +053010023 err = -EOPNOTSUPP;
Vince Weaver53b25332014-05-16 17:12:12 -040010024 goto err_alloc;
10025 }
10026 }
10027
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010028 /*
Peter Zijlstra89a1e182010-09-07 17:34:50 +020010029 * Special case software events and allow them to be part of
10030 * any hardware group.
10031 */
10032 pmu = event->pmu;
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010033
Peter Zijlstra34f43922015-02-20 14:05:38 +010010034 if (attr.use_clockid) {
10035 err = perf_event_set_clock(event, attr.clockid);
10036 if (err)
10037 goto err_alloc;
10038 }
10039
David Carrillo-Cisneros4ff6a8d2016-08-17 13:55:05 -070010040 if (pmu->task_ctx_nr == perf_sw_context)
10041 event->event_caps |= PERF_EV_CAP_SOFTWARE;
10042
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010043 if (group_leader &&
10044 (is_software_event(event) != is_software_event(group_leader))) {
10045 if (is_software_event(event)) {
10046 /*
10047 * If event and group_leader are not both software
10048 * events, and event is one, then group_leader is not.
10049 *
10050 * Allow the addition of software events to !software
10051 * groups, this is safe because software events never
10052 * fail to schedule.
10053 */
10054 pmu = group_leader->pmu;
10055 } else if (is_software_event(group_leader) &&
David Carrillo-Cisneros4ff6a8d2016-08-17 13:55:05 -070010056 (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010057 /*
10058 * In case the group is a pure software group, and we
10059 * try to add a hardware event, move the whole group to
10060 * the hardware context.
10061 */
10062 move_group = 1;
10063 }
10064 }
Peter Zijlstra89a1e182010-09-07 17:34:50 +020010065
10066 /*
10067 * Get the target context (task or percpu):
10068 */
Yan, Zheng4af57ef2014-11-04 21:56:01 -050010069 ctx = find_get_context(pmu, task, event);
Peter Zijlstra89a1e182010-09-07 17:34:50 +020010070 if (IS_ERR(ctx)) {
10071 err = PTR_ERR(ctx);
Peter Zijlstrac6be5a52010-10-14 16:59:46 +020010072 goto err_alloc;
Peter Zijlstra89a1e182010-09-07 17:34:50 +020010073 }
10074
Alexander Shishkinbed5b252015-01-30 12:31:06 +020010075 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
10076 err = -EBUSY;
10077 goto err_context;
10078 }
10079
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010080 /*
10081 * Look up the group leader (we will attach this event to it):
10082 */
Peter Zijlstraac9721f2010-05-27 12:54:41 +020010083 if (group_leader) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010084 err = -EINVAL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010085
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010086 /*
10087 * Do not allow a recursive hierarchy (this new sibling
10088 * becoming part of another group-sibling):
10089 */
10090 if (group_leader->group_leader != group_leader)
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010091 goto err_context;
Peter Zijlstra34f43922015-02-20 14:05:38 +010010092
10093 /* All events in a group should have the same clock */
10094 if (group_leader->clock != event->clock)
10095 goto err_context;
10096
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010097 /*
Mark Rutland64aee2a2017-06-22 15:41:38 +010010098 * Make sure both events are for the same CPU;
10099 * grouping events for different CPUs is broken, since
10100 * you can never concurrently schedule them anyhow.
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010101 */
Mark Rutland64aee2a2017-06-22 15:41:38 +010010102 if (group_leader->cpu != event->cpu)
10103 goto err_context;
Peter Zijlstrac3c87e72015-01-23 11:19:48 +010010104
Mark Rutland64aee2a2017-06-22 15:41:38 +010010105 /*
10106 * Make sure both events are on the same task, or are both
10107 * per-CPU events.
10108 */
10109 if (group_leader->ctx->task != ctx->task)
10110 goto err_context;
10111
10112 /*
10113 * Do not allow attaching to a group in a different task
10114 * or CPU context. If we're moving SW events, we'll fix
10115 * this up later, so allow that.
10116 */
10117 if (!move_group && group_leader->ctx != ctx)
10118 goto err_context;
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010119
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010120 /*
10121 * Only a group leader can be exclusive or pinned
10122 */
10123 if (attr.exclusive || attr.pinned)
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010124 goto err_context;
Peter Zijlstraac9721f2010-05-27 12:54:41 +020010125 }
10126
10127 if (output_event) {
10128 err = perf_event_set_output(event, output_event);
10129 if (err)
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010130 goto err_context;
Peter Zijlstraac9721f2010-05-27 12:54:41 +020010131 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010132
Yann Droneauda21b0b32014-01-05 21:36:33 +010010133 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
10134 f_flags);
Al Viroea635c62010-05-26 17:40:29 -040010135 if (IS_ERR(event_file)) {
10136 err = PTR_ERR(event_file);
Alexander Shishkin201c2f82016-03-21 10:02:42 +020010137 event_file = NULL;
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010138 goto err_context;
Al Viroea635c62010-05-26 17:40:29 -040010139 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010140
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010141 if (move_group) {
Peter Zijlstra321027c2017-01-11 21:09:50 +010010142 gctx = __perf_event_ctx_lock_double(group_leader, ctx);
10143
Peter Zijlstra84c4e622016-02-24 18:45:40 +010010144 if (gctx->task == TASK_TOMBSTONE) {
10145 err = -ESRCH;
10146 goto err_locked;
10147 }
Peter Zijlstra321027c2017-01-11 21:09:50 +010010148
10149 /*
10150 * Check if we raced against another sys_perf_event_open() call
10151 * moving the software group underneath us.
10152 */
10153 if (!(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
10154 /*
10155 * If someone moved the group out from under us, check
10156 * if this new event wound up on the same ctx; if so
10157 * it's the regular !move_group case, otherwise fail.
10158 */
10159 if (gctx != ctx) {
10160 err = -EINVAL;
10161 goto err_locked;
10162 } else {
10163 perf_event_ctx_unlock(group_leader, gctx);
10164 move_group = 0;
10165 }
10166 }
Peter Zijlstraf55fc2a2015-09-09 19:06:33 +020010167 } else {
10168 mutex_lock(&ctx->mutex);
10169 }
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010170
Peter Zijlstra84c4e622016-02-24 18:45:40 +010010171 if (ctx->task == TASK_TOMBSTONE) {
10172 err = -ESRCH;
10173 goto err_locked;
10174 }
10175
Peter Zijlstraa7239682015-09-09 19:06:33 +020010176 if (!perf_event_validate_size(event)) {
10177 err = -E2BIG;
10178 goto err_locked;
10179 }
10180
Thomas Gleixnera63fbed2017-05-24 10:15:34 +020010181 if (!task) {
10182 /*
10183 * Check if the @cpu we're creating an event for is online.
10184 *
10185 * We use the perf_cpu_context::ctx::mutex to serialize against
10186 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
10187 */
10188 struct perf_cpu_context *cpuctx =
10189 container_of(ctx, struct perf_cpu_context, ctx);
10190
10191 if (!cpuctx->online) {
10192 err = -ENODEV;
10193 goto err_locked;
10194 }
10195 }
10196
10197
Peter Zijlstraf55fc2a2015-09-09 19:06:33 +020010198 /*
10199 * Must be under the same ctx::mutex as perf_install_in_context(),
10200 * because we need to serialize with concurrent event creation.
10201 */
10202 if (!exclusive_event_installable(event, ctx)) {
10203 /* exclusive and group stuff are assumed mutually exclusive */
10204 WARN_ON_ONCE(move_group);
10205
10206 err = -EBUSY;
10207 goto err_locked;
10208 }
10209
10210 WARN_ON_ONCE(ctx->parent_ctx);
10211
Peter Zijlstra79c9ce52016-04-26 11:36:53 +020010212 /*
10213 * This is the point of no return; we cannot fail hereafter. This is
10214 * where we start modifying current state.
10215 */
10216
Peter Zijlstraf55fc2a2015-09-09 19:06:33 +020010217 if (move_group) {
Peter Zijlstraf63a8da2015-01-23 12:24:14 +010010218 /*
10219 * See perf_event_ctx_lock() for comments on the details
10220 * of swizzling perf_event::ctx.
10221 */
Peter Zijlstra45a0e072016-01-26 13:09:48 +010010222 perf_remove_from_context(group_leader, 0);
Peter Zijlstra279b5162017-02-16 10:28:37 +010010223 put_ctx(gctx);
Jiri Olsa0231bb52013-02-01 11:23:45 +010010224
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010225 list_for_each_entry(sibling, &group_leader->sibling_list,
10226 group_entry) {
Peter Zijlstra45a0e072016-01-26 13:09:48 +010010227 perf_remove_from_context(sibling, 0);
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010228 put_ctx(gctx);
10229 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010230
Peter Zijlstraf63a8da2015-01-23 12:24:14 +010010231 /*
10232 * Wait for everybody to stop referencing the events through
10233 * the old lists, before installing them on the new lists.
10234 */
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010235 synchronize_rcu();
Peter Zijlstraf63a8da2015-01-23 12:24:14 +010010236
Peter Zijlstra (Intel)8f95b432015-01-27 11:53:12 +010010237 /*
10238 * Install the group siblings before the group leader.
10239 *
10240 * Because a group leader will try to install the entire group
10241 * (through the sibling list, which is still intact), we can
10242 * end up with siblings installed in the wrong context.
10243 *
10244 * By installing siblings first we NO-OP because they're not
10245 * reachable through the group lists.
10246 */
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010247 list_for_each_entry(sibling, &group_leader->sibling_list,
10248 group_entry) {
Peter Zijlstra (Intel)8f95b432015-01-27 11:53:12 +010010249 perf_event__state_init(sibling);
Jiri Olsa9fc81d82014-12-10 21:23:51 +010010250 perf_install_in_context(ctx, sibling, sibling->cpu);
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010251 get_ctx(ctx);
10252 }
Peter Zijlstra (Intel)8f95b432015-01-27 11:53:12 +010010253
10254 /*
10255 * Removing from the context ends up with a disabled
10256 * event. What we want here is the event in the initial
10257 * startup state, ready to be added into the new context.
10258 */
10259 perf_event__state_init(group_leader);
10260 perf_install_in_context(ctx, group_leader, group_leader->cpu);
10261 get_ctx(ctx);
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010262 }
10263
Peter Zijlstraf73e22a2015-09-09 20:48:22 +020010264 /*
10265 * Precalculate sample_data sizes; do while holding ctx::mutex such
10266 * that we're serialized against further additions and before
10267 * perf_install_in_context() which is the point the event is active and
10268 * can use these values.
10269 */
10270 perf_event__header_size(event);
10271 perf_event__id_header_size(event);
Alexander Shishkinbed5b252015-01-30 12:31:06 +020010272
Peter Zijlstra78cd2c72016-01-25 14:08:45 +010010273 event->owner = current;
10274
Yan, Zhenge2d37cd2012-06-15 14:31:32 +080010275 perf_install_in_context(ctx, event, event->cpu);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +010010276 perf_unpin_context(ctx);
Peter Zijlstraf63a8da2015-01-23 12:24:14 +010010277
Peter Zijlstraf55fc2a2015-09-09 19:06:33 +020010278 if (move_group)
Peter Zijlstra321027c2017-01-11 21:09:50 +010010279 perf_event_ctx_unlock(group_leader, gctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010280 mutex_unlock(&ctx->mutex);
10281
Peter Zijlstra79c9ce52016-04-26 11:36:53 +020010282 if (task) {
10283 mutex_unlock(&task->signal->cred_guard_mutex);
10284 put_task_struct(task);
10285 }
10286
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010287 mutex_lock(&current->perf_event_mutex);
10288 list_add_tail(&event->owner_entry, &current->perf_event_list);
10289 mutex_unlock(&current->perf_event_mutex);
10290
Peter Zijlstra8a495422010-05-27 15:47:49 +020010291 /*
10292 * Drop the reference on the group_event after placing the
10293 * new event on the sibling_list. This ensures destruction
10294 * of the group leader will find the pointer to itself in
10295 * perf_group_detach().
10296 */
Al Viro2903ff02012-08-28 12:52:22 -040010297 fdput(group);
Al Viroea635c62010-05-26 17:40:29 -040010298 fd_install(event_fd, event_file);
10299 return event_fd;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010300
Peter Zijlstraf55fc2a2015-09-09 19:06:33 +020010301err_locked:
10302 if (move_group)
Peter Zijlstra321027c2017-01-11 21:09:50 +010010303 perf_event_ctx_unlock(group_leader, gctx);
Peter Zijlstraf55fc2a2015-09-09 19:06:33 +020010304 mutex_unlock(&ctx->mutex);
10305/* err_file: */
10306 fput(event_file);
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010307err_context:
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +010010308 perf_unpin_context(ctx);
Al Viroea635c62010-05-26 17:40:29 -040010309 put_ctx(ctx);
Peter Zijlstrac6be5a52010-10-14 16:59:46 +020010310err_alloc:
Peter Zijlstra13005622016-02-24 18:45:41 +010010311 /*
10312 * If event_file is set, the fput() above will have called ->release()
10313 * and that will take care of freeing the event.
10314 */
10315 if (!event_file)
10316 free_event(event);
Peter Zijlstra79c9ce52016-04-26 11:36:53 +020010317err_cred:
10318 if (task)
10319 mutex_unlock(&task->signal->cred_guard_mutex);
Peter Zijlstra1f4ee502014-05-06 09:59:34 +020010320err_task:
Peter Zijlstrae7d0bc02010-10-14 16:54:51 +020010321 if (task)
10322 put_task_struct(task);
Peter Zijlstra89a1e182010-09-07 17:34:50 +020010323err_group_fd:
Al Viro2903ff02012-08-28 12:52:22 -040010324 fdput(group);
Al Viroea635c62010-05-26 17:40:29 -040010325err_fd:
10326 put_unused_fd(event_fd);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010327 return err;
10328}
10329
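/*
 * Illustrative userspace sketch (not part of this file): a minimal counting
 * use of the syscall above, assuming the uapi <linux/perf_event.h>
 * definitions and a raw syscall(2) wrapper.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.disabled	= 1,
 *		.exclude_kernel	= 1,
 *	};
 *	long long count;
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1,
 *			 PERF_FLAG_FD_CLOEXEC);
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	// ... workload under measurement ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 */
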
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010330/**
10331 * perf_event_create_kernel_counter
10332 *
10333 * @attr: attributes of the counter to create
10334 * @cpu: cpu on which the counter is bound
Matt Helsley38a81da2010-09-13 13:01:20 -070010335 * @task: task to profile (NULL for percpu)
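 * @overflow_handler: callback to call on event overflow
 * @context: context data for @overflow_handler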
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010336 */
10337struct perf_event *
10338perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
Matt Helsley38a81da2010-09-13 13:01:20 -070010339 struct task_struct *task,
Avi Kivity4dc0da82011-06-29 18:42:35 +030010340 perf_overflow_handler_t overflow_handler,
10341 void *context)
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010342{
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010343 struct perf_event_context *ctx;
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010344 struct perf_event *event;
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010345 int err;
10346
10347 /*
10348 * Get the target context (task or percpu):
10349 */
10350
Avi Kivity4dc0da82011-06-29 18:42:35 +030010351 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
Matt Fleming79dff512015-01-23 18:45:42 +000010352 overflow_handler, context, -1);
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +010010353 if (IS_ERR(event)) {
10354 err = PTR_ERR(event);
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010355 goto err;
10356 }
10357
Jiri Olsaf8697762014-08-01 14:33:01 +020010358 /* Mark owner so we can distinguish it from user events. */
Peter Zijlstra63b6da32016-01-14 16:05:37 +010010359 event->owner = TASK_TOMBSTONE;
Jiri Olsaf8697762014-08-01 14:33:01 +020010360
Yan, Zheng4af57ef2014-11-04 21:56:01 -050010361 ctx = find_get_context(event->pmu, task, event);
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010362 if (IS_ERR(ctx)) {
10363 err = PTR_ERR(ctx);
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010364 goto err_free;
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +010010365 }
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010366
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010367 WARN_ON_ONCE(ctx->parent_ctx);
10368 mutex_lock(&ctx->mutex);
Peter Zijlstra84c4e622016-02-24 18:45:40 +010010369 if (ctx->task == TASK_TOMBSTONE) {
10370 err = -ESRCH;
10371 goto err_unlock;
10372 }
10373
Thomas Gleixnera63fbed2017-05-24 10:15:34 +020010374 if (!task) {
10375 /*
10376 * Check if the @cpu we're creating an event for is online.
10377 *
10378 * We use the perf_cpu_context::ctx::mutex to serialize against
10379 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
10380 */
10381 struct perf_cpu_context *cpuctx =
10382 container_of(ctx, struct perf_cpu_context, ctx);
10383 if (!cpuctx->online) {
10384 err = -ENODEV;
10385 goto err_unlock;
10386 }
10387 }
10388
Alexander Shishkinbed5b252015-01-30 12:31:06 +020010389 if (!exclusive_event_installable(event, ctx)) {
Alexander Shishkinbed5b252015-01-30 12:31:06 +020010390 err = -EBUSY;
Peter Zijlstra84c4e622016-02-24 18:45:40 +010010391 goto err_unlock;
Alexander Shishkinbed5b252015-01-30 12:31:06 +020010392 }
10393
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010394 perf_install_in_context(ctx, event, cpu);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +010010395 perf_unpin_context(ctx);
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010396 mutex_unlock(&ctx->mutex);
10397
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010398 return event;
10399
Peter Zijlstra84c4e622016-02-24 18:45:40 +010010400err_unlock:
10401 mutex_unlock(&ctx->mutex);
10402 perf_unpin_context(ctx);
10403 put_ctx(ctx);
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010404err_free:
10405 free_event(event);
10406err:
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +010010407 return ERR_PTR(err);
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010408}
10409EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
10410
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010411void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
10412{
10413 struct perf_event_context *src_ctx;
10414 struct perf_event_context *dst_ctx;
10415 struct perf_event *event, *tmp;
10416 LIST_HEAD(events);
10417
10418 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
10419 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
10420
Peter Zijlstraf63a8da2015-01-23 12:24:14 +010010421 /*
10422 * See perf_event_ctx_lock() for comments on the details
10423 * of swizzling perf_event::ctx.
10424 */
10425 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010426 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
10427 event_entry) {
Peter Zijlstra45a0e072016-01-26 13:09:48 +010010428 perf_remove_from_context(event, 0);
Frederic Weisbecker9a545de2013-07-23 02:31:03 +020010429 unaccount_event_cpu(event, src_cpu);
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010430 put_ctx(src_ctx);
Peter Zijlstra98861672013-10-03 16:02:23 +020010431 list_add(&event->migrate_entry, &events);
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010432 }
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010433
Peter Zijlstra (Intel)8f95b432015-01-27 11:53:12 +010010434 /*
10435 * Wait for the events to quiesce before re-instating them.
10436 */
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010437 synchronize_rcu();
10438
Peter Zijlstra (Intel)8f95b432015-01-27 11:53:12 +010010439 /*
10440 * Re-instate events in 2 passes.
10441 *
10442 * Skip over group leaders and only install siblings on this first
10443 * pass; siblings will not get enabled without a leader, however a
10444 * leader will enable its siblings, even if those are still on the old
10445 * context.
10446 */
10447 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
10448 if (event->group_leader == event)
10449 continue;
10450
10451 list_del(&event->migrate_entry);
10452 if (event->state >= PERF_EVENT_STATE_OFF)
10453 event->state = PERF_EVENT_STATE_INACTIVE;
10454 account_event_cpu(event, dst_cpu);
10455 perf_install_in_context(dst_ctx, event, dst_cpu);
10456 get_ctx(dst_ctx);
10457 }
10458
10459 /*
10460 * Once all the siblings are set up properly, install the group leaders
10461 * to make it go.
10462 */
Peter Zijlstra98861672013-10-03 16:02:23 +020010463 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
10464 list_del(&event->migrate_entry);
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010465 if (event->state >= PERF_EVENT_STATE_OFF)
10466 event->state = PERF_EVENT_STATE_INACTIVE;
Frederic Weisbecker9a545de2013-07-23 02:31:03 +020010467 account_event_cpu(event, dst_cpu);
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010468 perf_install_in_context(dst_ctx, event, dst_cpu);
10469 get_ctx(dst_ctx);
10470 }
10471 mutex_unlock(&dst_ctx->mutex);
Peter Zijlstraf63a8da2015-01-23 12:24:14 +010010472 mutex_unlock(&src_ctx->mutex);
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010473}
10474EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
10475
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010476static void sync_child_event(struct perf_event *child_event,
10477 struct task_struct *child)
10478{
10479 struct perf_event *parent_event = child_event->parent;
10480 u64 child_val;
10481
10482 if (child_event->attr.inherit_stat)
10483 perf_event_read_event(child_event, child);
10484
Peter Zijlstrab5e58792010-05-21 14:43:12 +020010485 child_val = perf_event_count(child_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010486
10487 /*
10488 * Add back the child's count to the parent's count:
10489 */
Peter Zijlstraa6e6dea2010-05-21 14:27:58 +020010490 atomic64_add(child_val, &parent_event->child_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010491 atomic64_add(child_event->total_time_enabled,
10492 &parent_event->child_total_time_enabled);
10493 atomic64_add(child_event->total_time_running,
10494 &parent_event->child_total_time_running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010495}
10496
10497static void
Peter Zijlstra8ba289b2016-01-26 13:06:56 +010010498perf_event_exit_event(struct perf_event *child_event,
10499 struct perf_event_context *child_ctx,
10500 struct task_struct *child)
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010501{
Peter Zijlstra8ba289b2016-01-26 13:06:56 +010010502 struct perf_event *parent_event = child_event->parent;
10503
Peter Zijlstra1903d502014-07-15 17:27:27 +020010504 /*
10505 * Do not destroy the 'original' grouping; because of the context
10506 * switch optimization the original events could've ended up in a
10507 * random child task.
10508 *
10509 * If we were to destroy the original group, all group related
10510 * operations would cease to function properly after this random
10511 * child dies.
10512 *
10513 * Do destroy all inherited groups; we don't care about those
10514 * and being thorough is better.
10515 */
Peter Zijlstra32132a32016-01-11 15:40:59 +010010516 raw_spin_lock_irq(&child_ctx->lock);
10517 WARN_ON_ONCE(child_ctx->is_active);
10518
Peter Zijlstra8ba289b2016-01-26 13:06:56 +010010519 if (parent_event)
Peter Zijlstra32132a32016-01-11 15:40:59 +010010520 perf_group_detach(child_event);
10521 list_del_event(child_event, child_ctx);
Peter Zijlstraa69b0ca2016-02-24 18:45:44 +010010522 child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */
Peter Zijlstra32132a32016-01-11 15:40:59 +010010523 raw_spin_unlock_irq(&child_ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010524
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010525 /*
Peter Zijlstra8ba289b2016-01-26 13:06:56 +010010526 * Parent events are governed by their filedesc; retain them.
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010527 */
Peter Zijlstra8ba289b2016-01-26 13:06:56 +010010528 if (!parent_event) {
Jiri Olsa179033b2014-08-07 11:48:26 -040010529 perf_event_wakeup(child_event);
Peter Zijlstra8ba289b2016-01-26 13:06:56 +010010530 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010531 }
Peter Zijlstra8ba289b2016-01-26 13:06:56 +010010532 /*
10533 * Child events can be cleaned up.
10534 */
10535
10536 sync_child_event(child_event, child);
10537
10538 /*
10539 * Remove this event from the parent's list
10540 */
10541 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
10542 mutex_lock(&parent_event->child_mutex);
10543 list_del_init(&child_event->child_list);
10544 mutex_unlock(&parent_event->child_mutex);
10545
10546 /*
10547 * Kick perf_poll() for is_event_hup().
10548 */
10549 perf_event_wakeup(parent_event);
10550 free_event(child_event);
10551 put_event(parent_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010552}
10553
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010554static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010555{
Peter Zijlstra211de6e2014-09-30 19:23:08 +020010556 struct perf_event_context *child_ctx, *clone_ctx = NULL;
Peter Zijlstra63b6da32016-01-14 16:05:37 +010010557 struct perf_event *child_event, *next;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010558
Peter Zijlstra63b6da32016-01-14 16:05:37 +010010559 WARN_ON_ONCE(child != current);
10560
Peter Zijlstra6a3351b62016-01-25 14:09:54 +010010561 child_ctx = perf_pin_task_context(child, ctxn);
Peter Zijlstra63b6da32016-01-14 16:05:37 +010010562 if (!child_ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010563 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010564
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010565 /*
Peter Zijlstra6a3351b62016-01-25 14:09:54 +010010566 * In order to reduce the amount of tricky in ctx tear-down, we hold
10567 * ctx::mutex over the entire thing. This serializes against almost
10568 * everything that wants to access the ctx.
10569 *
10570 * The exception is sys_perf_event_open() /
10571 * perf_event_create_kernel_counter() which does find_get_context()
10572 * without ctx::mutex (it cannot because of the move_group double mutex
10573 * lock thing). See the comments in perf_install_in_context().
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010574 */
Peter Zijlstra6a3351b62016-01-25 14:09:54 +010010575 mutex_lock(&child_ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010576
10577 /*
Peter Zijlstra6a3351b62016-01-25 14:09:54 +010010578 * In a single ctx::lock section, de-schedule the events and detach the
10579 * context from the task such that we cannot ever get it scheduled back
10580 * in.
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010581 */
Peter Zijlstra6a3351b62016-01-25 14:09:54 +010010582 raw_spin_lock_irq(&child_ctx->lock);
Alexander Shishkin487f05e2017-01-19 18:43:30 +020010583 task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx, EVENT_ALL);
Peter Zijlstra4a1c0f22014-06-23 16:12:42 +020010584
10585 /*
Peter Zijlstra63b6da32016-01-14 16:05:37 +010010586 * Now that the context is inactive, destroy the task <-> ctx relation
10587 * and mark the context dead.
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010588 */
Peter Zijlstra63b6da32016-01-14 16:05:37 +010010589 RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL);
10590 put_ctx(child_ctx); /* cannot be last */
10591 WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
10592 put_task_struct(current); /* cannot be last */
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010593
Peter Zijlstra211de6e2014-09-30 19:23:08 +020010594 clone_ctx = unclone_ctx(child_ctx);
Peter Zijlstra6a3351b62016-01-25 14:09:54 +010010595 raw_spin_unlock_irq(&child_ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010596
Peter Zijlstra211de6e2014-09-30 19:23:08 +020010597 if (clone_ctx)
10598 put_ctx(clone_ctx);
Peter Zijlstra4a1c0f22014-06-23 16:12:42 +020010599
10600 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010601 * Report the task dead after unscheduling the events so that we
10602 * won't get any samples after PERF_RECORD_EXIT. We can however still
10603 * get a few PERF_RECORD_READ events.
10604 */
10605 perf_event_task(child, child_ctx, 0);
10606
Peter Zijlstraebf905f2014-05-29 19:00:24 +020010607 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
Peter Zijlstra8ba289b2016-01-26 13:06:56 +010010608 perf_event_exit_event(child_event, child_ctx, child);
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010609
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010610 mutex_unlock(&child_ctx->mutex);
10611
10612 put_ctx(child_ctx);
10613}
10614
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010615/*
10616 * When a child task exits, feed back event values to parent events.
Peter Zijlstra79c9ce52016-04-26 11:36:53 +020010617 *
10618 * Can be called with cred_guard_mutex held when called from
10619 * install_exec_creds().
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010620 */
10621void perf_event_exit_task(struct task_struct *child)
10622{
Peter Zijlstra88821352010-11-09 19:01:43 +010010623 struct perf_event *event, *tmp;
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010624 int ctxn;
10625
Peter Zijlstra88821352010-11-09 19:01:43 +010010626 mutex_lock(&child->perf_event_mutex);
10627 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
10628 owner_entry) {
10629 list_del_init(&event->owner_entry);
10630
10631 /*
10632 * Ensure the list deletion is visible before we clear
10633 * the owner; this closes a race against perf_release() where
10634 * we need to serialize on the owner->perf_event_mutex.
10635 */
Peter Zijlstraf47c02c2016-01-26 12:30:14 +010010636 smp_store_release(&event->owner, NULL);
Peter Zijlstra88821352010-11-09 19:01:43 +010010637 }
10638 mutex_unlock(&child->perf_event_mutex);
10639
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010640 for_each_task_context_nr(ctxn)
10641 perf_event_exit_task_context(child, ctxn);
Jiri Olsa4e93ad62015-11-04 16:00:05 +010010642
10643 /*
10644 * The perf_event_exit_task_context calls perf_event_task
10645 * with child's task_ctx, which generates EXIT events for
10646 * child contexts and sets child->perf_event_ctxp[] to NULL.
10647 * At this point we need to send EXIT events to cpu contexts.
10648 */
10649 perf_event_task(child, NULL, 0);
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010650}
10651
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010652static void perf_free_event(struct perf_event *event,
10653 struct perf_event_context *ctx)
10654{
10655 struct perf_event *parent = event->parent;
10656
10657 if (WARN_ON_ONCE(!parent))
10658 return;
10659
10660 mutex_lock(&parent->child_mutex);
10661 list_del_init(&event->child_list);
10662 mutex_unlock(&parent->child_mutex);
10663
Al Viroa6fa9412012-08-20 14:59:25 +010010664 put_event(parent);
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010665
Peter Zijlstra652884f2015-01-23 11:20:10 +010010666 raw_spin_lock_irq(&ctx->lock);
Peter Zijlstra8a495422010-05-27 15:47:49 +020010667 perf_group_detach(event);
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010668 list_del_event(event, ctx);
Peter Zijlstra652884f2015-01-23 11:20:10 +010010669 raw_spin_unlock_irq(&ctx->lock);
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010670 free_event(event);
10671}
10672
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010673/*
Peter Zijlstra652884f2015-01-23 11:20:10 +010010674 * Free an unexposed, unused context as created by inheritance by
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010675 * perf_event_init_task below, used by fork() in case of fail.
Peter Zijlstra652884f2015-01-23 11:20:10 +010010676 *
10677 * Not all locks are strictly required, but take them anyway to be nice and
10678 * help out with the lockdep assertions.
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010679 */
10680void perf_event_free_task(struct task_struct *task)
10681{
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010682 struct perf_event_context *ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010683 struct perf_event *event, *tmp;
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010684 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010685
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010686 for_each_task_context_nr(ctxn) {
10687 ctx = task->perf_event_ctxp[ctxn];
10688 if (!ctx)
10689 continue;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010690
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010691 mutex_lock(&ctx->mutex);
Peter Zijlstrae552a832017-03-16 13:47:48 +010010692 raw_spin_lock_irq(&ctx->lock);
10693 /*
10694 * Destroy the task <-> ctx relation and mark the context dead.
10695 *
10696 * This is important because even though the task hasn't been
10697 * exposed yet the context has been (through child_list).
10698 */
10699 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
10700 WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
10701 put_task_struct(task); /* cannot be last */
10702 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010703
Peter Zijlstra15121c72017-03-16 13:47:50 +010010704 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010705 perf_free_event(event, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010706
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010707 mutex_unlock(&ctx->mutex);
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010708 put_ctx(ctx);
10709 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010710}
10711
Peter Zijlstra4e231c72010-09-09 21:01:59 +020010712void perf_event_delayed_put(struct task_struct *task)
10713{
10714 int ctxn;
10715
10716 for_each_task_context_nr(ctxn)
10717 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
10718}
10719
Alexei Starovoitove03e7ee2016-01-25 20:59:49 -080010720struct file *perf_event_get(unsigned int fd)
Kaixu Xiaffe86902015-08-06 07:02:32 +000010721{
Alexei Starovoitove03e7ee2016-01-25 20:59:49 -080010722 struct file *file;
Kaixu Xiaffe86902015-08-06 07:02:32 +000010723
Alexei Starovoitove03e7ee2016-01-25 20:59:49 -080010724 file = fget_raw(fd);
10725 if (!file)
10726 return ERR_PTR(-EBADF);
Kaixu Xiaffe86902015-08-06 07:02:32 +000010727
Alexei Starovoitove03e7ee2016-01-25 20:59:49 -080010728 if (file->f_op != &perf_fops) {
10729 fput(file);
10730 return ERR_PTR(-EBADF);
10731 }
Kaixu Xiaffe86902015-08-06 07:02:32 +000010732
Alexei Starovoitove03e7ee2016-01-25 20:59:49 -080010733 return file;
Kaixu Xiaffe86902015-08-06 07:02:32 +000010734}
10735
10736const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
10737{
10738 if (!event)
10739 return ERR_PTR(-EINVAL);
10740
10741 return &event->attr;
10742}
10743
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010744/*
Peter Zijlstrad8a8cfc2017-03-16 13:47:51 +010010745 * Inherit an event from parent task to child task.
10746 *
10747 * Returns:
10748 * - valid pointer on success
10749 * - NULL for orphaned events
10750 * - IS_ERR() on error
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010751 */
10752static struct perf_event *
10753inherit_event(struct perf_event *parent_event,
10754 struct task_struct *parent,
10755 struct perf_event_context *parent_ctx,
10756 struct task_struct *child,
10757 struct perf_event *group_leader,
10758 struct perf_event_context *child_ctx)
10759{
Jiri Olsa1929def2014-09-12 13:18:27 +020010760 enum perf_event_active_state parent_state = parent_event->state;
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010761 struct perf_event *child_event;
Peter Zijlstracee010e2010-09-10 12:51:54 +020010762 unsigned long flags;
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010763
10764 /*
10765 * Instead of creating recursive hierarchies of events,
10766 * we link inherited events back to the original parent,
10767 * which is certain to have a filp that we use as the reference
10768 * count:
10769 */
10770 if (parent_event->parent)
10771 parent_event = parent_event->parent;
10772
10773 child_event = perf_event_alloc(&parent_event->attr,
10774 parent_event->cpu,
Peter Zijlstrad580ff82010-10-14 17:43:23 +020010775 child,
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010776 group_leader, parent_event,
Matt Fleming79dff512015-01-23 18:45:42 +000010777 NULL, NULL, -1);
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010778 if (IS_ERR(child_event))
10779 return child_event;
Al Viroa6fa9412012-08-20 14:59:25 +010010780
Peter Zijlstrac6e5b732016-01-15 16:07:41 +020010781 /*
10782 * is_orphaned_event() and list_add_tail(&parent_event->child_list)
10783 * must be under the same lock in order to serialize against
10784 * perf_event_release_kernel(), such that either we must observe
10785 * is_orphaned_event() or they will observe us on the child_list.
10786 */
10787 mutex_lock(&parent_event->child_mutex);
Jiri Olsafadfe7b2014-08-01 14:33:02 +020010788 if (is_orphaned_event(parent_event) ||
10789 !atomic_long_inc_not_zero(&parent_event->refcount)) {
Peter Zijlstrac6e5b732016-01-15 16:07:41 +020010790 mutex_unlock(&parent_event->child_mutex);
Al Viroa6fa9412012-08-20 14:59:25 +010010791 free_event(child_event);
10792 return NULL;
10793 }
10794
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010795 get_ctx(child_ctx);
10796
10797 /*
10798 * Make the child state follow the state of the parent event,
10799 * not its attr.disabled bit. We hold the parent's mutex,
10800 * so we won't race with perf_event_{en, dis}able_family.
10801 */
Jiri Olsa1929def2014-09-12 13:18:27 +020010802 if (parent_state >= PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010803 child_event->state = PERF_EVENT_STATE_INACTIVE;
10804 else
10805 child_event->state = PERF_EVENT_STATE_OFF;
10806
10807 if (parent_event->attr.freq) {
10808 u64 sample_period = parent_event->hw.sample_period;
10809 struct hw_perf_event *hwc = &child_event->hw;
10810
10811 hwc->sample_period = sample_period;
10812 hwc->last_period = sample_period;
10813
10814 local64_set(&hwc->period_left, sample_period);
10815 }
10816
10817 child_event->ctx = child_ctx;
10818 child_event->overflow_handler = parent_event->overflow_handler;
Avi Kivity4dc0da82011-06-29 18:42:35 +030010819 child_event->overflow_handler_context
10820 = parent_event->overflow_handler_context;
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010821
10822 /*
Thomas Gleixner614b6782010-12-03 16:24:32 -020010823 * Precalculate sample_data sizes
10824 */
10825 perf_event__header_size(child_event);
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -020010826 perf_event__id_header_size(child_event);
Thomas Gleixner614b6782010-12-03 16:24:32 -020010827
10828 /*
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010829 * Link it up in the child's context:
10830 */
Peter Zijlstracee010e2010-09-10 12:51:54 +020010831 raw_spin_lock_irqsave(&child_ctx->lock, flags);
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010832 add_event_to_ctx(child_event, child_ctx);
Peter Zijlstracee010e2010-09-10 12:51:54 +020010833 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010834
10835 /*
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010836 * Link this into the parent event's child list
10837 */
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010838 list_add_tail(&child_event->child_list, &parent_event->child_list);
10839 mutex_unlock(&parent_event->child_mutex);
10840
10841 return child_event;
10842}
10843
Peter Zijlstrad8a8cfc2017-03-16 13:47:51 +010010844/*
10845 * Inherits an event group.
10846 *
10847 * This will quietly suppress orphaned events; !inherit_event() is not an error.
10848 * This matches with perf_event_release_kernel() removing all child events.
10849 *
10850 * Returns:
10851 * - 0 on success
10852 * - <0 on error
10853 */
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010854static int inherit_group(struct perf_event *parent_event,
10855 struct task_struct *parent,
10856 struct perf_event_context *parent_ctx,
10857 struct task_struct *child,
10858 struct perf_event_context *child_ctx)
10859{
10860 struct perf_event *leader;
10861 struct perf_event *sub;
10862 struct perf_event *child_ctr;
10863
10864 leader = inherit_event(parent_event, parent, parent_ctx,
10865 child, NULL, child_ctx);
10866 if (IS_ERR(leader))
10867 return PTR_ERR(leader);
Peter Zijlstrad8a8cfc2017-03-16 13:47:51 +010010868 /*
10869 * @leader can be NULL here because of is_orphaned_event(). In this
10870 * case inherit_event() will create individual events, similar to what
10871 * perf_group_detach() would do anyway.
10872 */
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010873 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
10874 child_ctr = inherit_event(sub, parent, parent_ctx,
10875 child, leader, child_ctx);
10876 if (IS_ERR(child_ctr))
10877 return PTR_ERR(child_ctr);
10878 }
10879 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010880}
10881
Peter Zijlstrad8a8cfc2017-03-16 13:47:51 +010010882/*
10883 * Creates the child task context and tries to inherit the event-group.
10884 *
10885 * Clears @inherited_all on !attr.inherit or error. Note that we'll leave
10886 * inherited_all set when we 'fail' to inherit an orphaned event; this is
10887 * consistent with perf_event_release_kernel() removing all child events.
10888 *
10889 * Returns:
10890 * - 0 on success
10891 * - <0 on error
10892 */
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010893static int
10894inherit_task_group(struct perf_event *event, struct task_struct *parent,
10895 struct perf_event_context *parent_ctx,
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010896 struct task_struct *child, int ctxn,
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010897 int *inherited_all)
10898{
10899 int ret;
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010900 struct perf_event_context *child_ctx;
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010901
10902 if (!event->attr.inherit) {
10903 *inherited_all = 0;
10904 return 0;
10905 }
10906
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +010010907 child_ctx = child->perf_event_ctxp[ctxn];
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010908 if (!child_ctx) {
10909 /*
10910 * This is executed from the parent task context, so
10911 * inherit events that have been marked for cloning.
10912 * First allocate and initialize a context for the
10913 * child.
10914 */
Jiri Olsa734df5ab2013-07-09 17:44:10 +020010915 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010916 if (!child_ctx)
10917 return -ENOMEM;
10918
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010919 child->perf_event_ctxp[ctxn] = child_ctx;
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010920 }
10921
10922 ret = inherit_group(event, parent, parent_ctx,
10923 child, child_ctx);
10924
10925 if (ret)
10926 *inherited_all = 0;
10927
10928 return ret;
10929}
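
/*
 * Summary note (not in the original file): @inherited_all is left set only
 * when every event in the parent context was either inherited or quietly
 * skipped as orphaned; events with !attr.inherit clear it.  That flag is
 * what lets perf_event_init_context() below mark the whole child context as
 * a clone of the parent.
 */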
10930
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010931/*
10932 * Initialize the perf_event context in task_struct
10933 */
Jiri Olsa985c8dc2014-06-24 10:20:24 +020010934static int perf_event_init_context(struct task_struct *child, int ctxn)
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010935{
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010936 struct perf_event_context *child_ctx, *parent_ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010937 struct perf_event_context *cloned_ctx;
10938 struct perf_event *event;
10939 struct task_struct *parent = current;
10940 int inherited_all = 1;
Thomas Gleixnerdddd3372010-11-24 10:05:55 +010010941 unsigned long flags;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010942 int ret = 0;
10943
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010944 if (likely(!parent->perf_event_ctxp[ctxn]))
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010945 return 0;
10946
10947 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010948 * If the parent's context is a clone, pin it so it won't get
10949 * swapped under us.
10950 */
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010951 parent_ctx = perf_pin_task_context(parent, ctxn);
Peter Zijlstraffb4ef22014-05-05 19:12:20 +020010952 if (!parent_ctx)
10953 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010954
10955 /*
10956 * No need to check if parent_ctx != NULL here; since we saw
10957 * it non-NULL earlier, the only reason for it to become NULL
10958 * is if we exit, and since we're currently in the middle of
10959 * a fork we can't be exiting at the same time.
10960 */
10961
10962 /*
10963 * Lock the parent list. No need to lock the child - not PID
10964 * hashed yet and not running, so nobody can access it.
10965 */
10966 mutex_lock(&parent_ctx->mutex);
10967
10968 /*
10969 * We don't have to disable NMIs - we are only looking at
10970 * the list, not manipulating it:
10971 */
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010972 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010973 ret = inherit_task_group(event, parent, parent_ctx,
10974 child, ctxn, &inherited_all);
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010975 if (ret)
Peter Zijlstrae7cc4862017-03-16 13:47:49 +010010976 goto out_unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010977 }
10978
Thomas Gleixnerdddd3372010-11-24 10:05:55 +010010979 /*
10980 * We can't hold ctx->lock when iterating the ->flexible_groups list due
10981 * to allocations, but we need to prevent rotation because
10982 * rotate_ctx() will change the list from interrupt context.
10983 */
10984 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
10985 parent_ctx->rotate_disable = 1;
10986 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
10987
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010988 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010989 ret = inherit_task_group(event, parent, parent_ctx,
10990 child, ctxn, &inherited_all);
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010991 if (ret)
Peter Zijlstrae7cc4862017-03-16 13:47:49 +010010992 goto out_unlock;
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010993 }
10994
Thomas Gleixnerdddd3372010-11-24 10:05:55 +010010995 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
10996 parent_ctx->rotate_disable = 0;
Thomas Gleixnerdddd3372010-11-24 10:05:55 +010010997
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010998 child_ctx = child->perf_event_ctxp[ctxn];
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010999
Peter Zijlstra05cbaa22009-12-30 16:00:35 +010011000 if (child_ctx && inherited_all) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +020011001 /*
11002 * Mark the child context as a clone of the parent
11003 * context, or of whatever the parent is a clone of.
Peter Zijlstrac5ed5142011-01-17 13:45:37 +010011004 *
11005 * Note that if the parent is a clone, the holding of
11006 * parent_ctx->lock avoids it from being uncloned.
Ingo Molnarcdd6c482009-09-21 12:02:48 +020011007 */
Peter Zijlstrac5ed5142011-01-17 13:45:37 +010011008 cloned_ctx = parent_ctx->parent_ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020011009 if (cloned_ctx) {
11010 child_ctx->parent_ctx = cloned_ctx;
11011 child_ctx->parent_gen = parent_ctx->parent_gen;
11012 } else {
11013 child_ctx->parent_ctx = parent_ctx;
11014 child_ctx->parent_gen = parent_ctx->generation;
11015 }
11016 get_ctx(child_ctx->parent_ctx);
11017 }
11018
Peter Zijlstrac5ed5142011-01-17 13:45:37 +010011019 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
Peter Zijlstrae7cc4862017-03-16 13:47:49 +010011020out_unlock:
Ingo Molnarcdd6c482009-09-21 12:02:48 +020011021 mutex_unlock(&parent_ctx->mutex);
11022
11023 perf_unpin_context(parent_ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +010011024 put_ctx(parent_ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020011025
11026 return ret;
11027}
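
/*
 * What the clone marking buys (summary based on perf_event_context_sched_out()
 * earlier in this file, not part of the original comments): when the outgoing
 * and incoming tasks' contexts are clones of a common parent, context_equiv()
 * lets the context-switch path simply swap the two task->perf_event_ctxp[]
 * pointers instead of unscheduling and rescheduling every inherited event.
 * The parent_gen value recorded above is what context_equiv() compares to
 * detect that the contexts have since diverged.
 */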
11028
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020011029/*
11030 * Initialize the perf_event context in task_struct
11031 */
11032int perf_event_init_task(struct task_struct *child)
11033{
11034 int ctxn, ret;
11035
Oleg Nesterov8550d7c2011-01-19 19:22:28 +010011036 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
11037 mutex_init(&child->perf_event_mutex);
11038 INIT_LIST_HEAD(&child->perf_event_list);
11039
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020011040 for_each_task_context_nr(ctxn) {
11041 ret = perf_event_init_context(child, ctxn);
Peter Zijlstra6c72e3502014-10-02 16:17:02 -070011042 if (ret) {
11043 perf_event_free_task(child);
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020011044 return ret;
Peter Zijlstra6c72e3502014-10-02 16:17:02 -070011045 }
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020011046 }
11047
11048 return 0;
11049}
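
/*
 * Call-site note (not part of the original file): perf_event_init_task() is
 * invoked from copy_process() in kernel/fork.c on every fork()/clone().  A
 * non-zero return aborts the fork; perf_event_free_task() above has already
 * torn down any partially inherited contexts by that point.
 */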
11050
Paul Mackerras220b1402010-03-10 20:45:52 +110011051static void __init perf_event_init_all_cpus(void)
11052{
Peter Zijlstrab28ab832010-09-06 14:48:15 +020011053 struct swevent_htable *swhash;
Paul Mackerras220b1402010-03-10 20:45:52 +110011054 int cpu;
Paul Mackerras220b1402010-03-10 20:45:52 +110011055
Thomas Gleixnera63fbed2017-05-24 10:15:34 +020011056 zalloc_cpumask_var(&perf_online_mask, GFP_KERNEL);
11057
Paul Mackerras220b1402010-03-10 20:45:52 +110011058 for_each_possible_cpu(cpu) {
Peter Zijlstrab28ab832010-09-06 14:48:15 +020011059 swhash = &per_cpu(swevent_htable, cpu);
11060 mutex_init(&swhash->hlist_mutex);
Mark Rutland2fde4f92015-01-07 15:01:54 +000011061 INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
Kan Liangf2fb6be2016-03-23 11:24:37 -070011062
11063 INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
11064 raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
Peter Zijlstrae48c1782016-07-06 09:18:30 +020011065
David Carrillo-Cisneros058fe1c2017-01-18 11:24:53 -080011066#ifdef CONFIG_CGROUP_PERF
11067 INIT_LIST_HEAD(&per_cpu(cgrp_cpuctx_list, cpu));
11068#endif
Peter Zijlstrae48c1782016-07-06 09:18:30 +020011069 INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
Paul Mackerras220b1402010-03-10 20:45:52 +110011070 }
11071}
11072
Thomas Gleixnera63fbed2017-05-24 10:15:34 +020011073void perf_swevent_init_cpu(unsigned int cpu)
Ingo Molnarcdd6c482009-09-21 12:02:48 +020011074{
Peter Zijlstra108b02c2010-09-06 14:32:03 +020011075 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020011076
Peter Zijlstrab28ab832010-09-06 14:48:15 +020011077 mutex_lock(&swhash->hlist_mutex);
Thomas Gleixner059fcd82016-02-09 20:11:34 +000011078 if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
Frederic Weisbecker76e1d902010-04-05 15:35:57 +020011079 struct swevent_hlist *hlist;
11080
Peter Zijlstrab28ab832010-09-06 14:48:15 +020011081 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
11082 WARN_ON(!hlist);
11083 rcu_assign_pointer(swhash->swevent_hlist, hlist);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +020011084 }
Peter Zijlstrab28ab832010-09-06 14:48:15 +020011085 mutex_unlock(&swhash->hlist_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020011086}
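
/*
 * Note (assumption based on swevent_hlist_get() earlier in this file): the
 * hash list is allocated lazily; hlist_refcount is raised when a software
 * event is first created, so CPUs that never host software events keep
 * swevent_hlist NULL.
 */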
11087
Dave Young2965faa2015-09-09 15:38:55 -070011088#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
Peter Zijlstra108b02c2010-09-06 14:32:03 +020011089static void __perf_event_exit_context(void *__info)
Ingo Molnarcdd6c482009-09-21 12:02:48 +020011090{
Peter Zijlstra108b02c2010-09-06 14:32:03 +020011091 struct perf_event_context *ctx = __info;
Peter Zijlstrafae3fde2016-01-11 15:00:50 +010011092 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
11093 struct perf_event *event;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020011094
Peter Zijlstrafae3fde2016-01-11 15:00:50 +010011095 raw_spin_lock(&ctx->lock);
11096 list_for_each_entry(event, &ctx->event_list, event_entry)
Peter Zijlstra45a0e072016-01-26 13:09:48 +010011097 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
Peter Zijlstrafae3fde2016-01-11 15:00:50 +010011098 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020011099}
Peter Zijlstra108b02c2010-09-06 14:32:03 +020011100
11101static void perf_event_exit_cpu_context(int cpu)
11102{
Thomas Gleixnera63fbed2017-05-24 10:15:34 +020011103 struct perf_cpu_context *cpuctx;
Peter Zijlstra108b02c2010-09-06 14:32:03 +020011104 struct perf_event_context *ctx;
11105 struct pmu *pmu;
Peter Zijlstra108b02c2010-09-06 14:32:03 +020011106
Thomas Gleixnera63fbed2017-05-24 10:15:34 +020011107 mutex_lock(&pmus_lock);
11108 list_for_each_entry(pmu, &pmus, entry) {
11109 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
11110 ctx = &cpuctx->ctx;
Peter Zijlstra108b02c2010-09-06 14:32:03 +020011111
11112 mutex_lock(&ctx->mutex);
11113 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
Thomas Gleixnera63fbed2017-05-24 10:15:34 +020011114 cpuctx->online = 0;
Peter Zijlstra108b02c2010-09-06 14:32:03 +020011115 mutex_unlock(&ctx->mutex);
11116 }
Thomas Gleixnera63fbed2017-05-24 10:15:34 +020011117 cpumask_clear_cpu(cpu, perf_online_mask);
11118 mutex_unlock(&pmus_lock);
Peter Zijlstra108b02c2010-09-06 14:32:03 +020011119}
Thomas Gleixner00e16c32016-07-13 17:16:09 +000011120#else
Peter Zijlstra108b02c2010-09-06 14:32:03 +020011121
Thomas Gleixner00e16c32016-07-13 17:16:09 +000011122static void perf_event_exit_cpu_context(int cpu) { }
11123
11124#endif
11125
Thomas Gleixnera63fbed2017-05-24 10:15:34 +020011126int perf_event_init_cpu(unsigned int cpu)
11127{
11128 struct perf_cpu_context *cpuctx;
11129 struct perf_event_context *ctx;
11130 struct pmu *pmu;
11131
11132 perf_swevent_init_cpu(cpu);
11133
11134 mutex_lock(&pmus_lock);
11135 cpumask_set_cpu(cpu, perf_online_mask);
11136 list_for_each_entry(pmu, &pmus, entry) {
11137 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
11138 ctx = &cpuctx->ctx;
11139
11140 mutex_lock(&ctx->mutex);
11141 cpuctx->online = 1;
11142 mutex_unlock(&ctx->mutex);
11143 }
11144 mutex_unlock(&pmus_lock);
11145
11146 return 0;
11147}
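
/*
 * Hotplug wiring (assumption, lives outside this file): perf_event_init_cpu()
 * and perf_event_exit_cpu() are registered with the CPU hotplug state machine
 * in kernel/cpu.c (e.g. the CPUHP_PERF_PREPARE state), so they run for every
 * CPU that comes online or goes offline, mirroring what perf_event_init()
 * does once for the boot CPU below.
 */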
11148
Thomas Gleixner00e16c32016-07-13 17:16:09 +000011149int perf_event_exit_cpu(unsigned int cpu)
Ingo Molnarcdd6c482009-09-21 12:02:48 +020011150{
Peter Zijlstrae3703f82014-02-24 12:06:12 +010011151 perf_event_exit_cpu_context(cpu);
Thomas Gleixner00e16c32016-07-13 17:16:09 +000011152 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020011153}
Ingo Molnarcdd6c482009-09-21 12:02:48 +020011154
Peter Zijlstrac2774432010-12-08 15:29:02 +010011155static int
11156perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
11157{
11158 int cpu;
11159
11160 for_each_online_cpu(cpu)
11161 perf_event_exit_cpu(cpu);
11162
11163 return NOTIFY_OK;
11164}
11165
11166/*
11167 * Run the perf reboot notifier at the very last possible moment so that
11168 * the generic watchdog code runs as long as possible.
11169 */
11170static struct notifier_block perf_reboot_notifier = {
11171 .notifier_call = perf_reboot,
11172 .priority = INT_MIN,
11173};
11174
Ingo Molnarcdd6c482009-09-21 12:02:48 +020011175void __init perf_event_init(void)
11176{
Jason Wessel3c502e72010-11-04 17:33:01 -050011177 int ret;
11178
Peter Zijlstra2e80a822010-11-17 23:17:36 +010011179 idr_init(&pmu_idr);
11180
Paul Mackerras220b1402010-03-10 20:45:52 +110011181 perf_event_init_all_cpus();
Peter Zijlstrab0a873e2010-06-11 13:35:08 +020011182 init_srcu_struct(&pmus_srcu);
Peter Zijlstra2e80a822010-11-17 23:17:36 +010011183 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
11184 perf_pmu_register(&perf_cpu_clock, NULL, -1);
11185 perf_pmu_register(&perf_task_clock, NULL, -1);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +020011186 perf_tp_register();
Thomas Gleixner00e16c32016-07-13 17:16:09 +000011187 perf_event_init_cpu(smp_processor_id());
Peter Zijlstrac2774432010-12-08 15:29:02 +010011188 register_reboot_notifier(&perf_reboot_notifier);
Jason Wessel3c502e72010-11-04 17:33:01 -050011189
11190 ret = init_hw_breakpoint();
11191 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
Gleb Natapovb2029522011-11-27 17:59:09 +020011192
Jiri Olsab01c3a02012-03-23 15:41:20 +010011193 /*
11194 * Build-time assertion that we keep the data_head at the intended
11195 * location. IOW, validate that we got the __reserved[] size right.
11196 */
11197 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
11198 != 1024);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020011199}
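
/*
 * Call-site note (not part of the original file): perf_event_init() runs
 * early from start_kernel(), well before initcalls; that is why the sysfs
 * registration below is deferred to a separate device_initcall() until the
 * driver model is available.
 */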
Peter Zijlstraabe43402010-11-17 23:17:37 +010011200
Cody P Schaferfd979c02015-01-30 13:45:57 -080011201ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
11202 char *page)
11203{
11204 struct perf_pmu_events_attr *pmu_attr =
11205 container_of(attr, struct perf_pmu_events_attr, attr);
11206
11207 if (pmu_attr->event_str)
11208 return sprintf(page, "%s\n", pmu_attr->event_str);
11209
11210 return 0;
11211}
Thomas Gleixner675965b2016-02-22 22:19:27 +000011212EXPORT_SYMBOL_GPL(perf_event_sysfs_show);
Cody P Schaferfd979c02015-01-30 13:45:57 -080011213
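/*
 * Usage sketch (illustrative, not part of this file): PMU drivers typically
 * reach perf_event_sysfs_show() through the PMU_EVENT_ATTR_STRING() helper
 * from <linux/perf_event.h>, which fills in ->event_str, e.g.:
 *
 *   PMU_EVENT_ATTR_STRING(mem-loads, evattr_mem_loads,
 *                         "event=0xcd,umask=0x1,ldlat=3");
 *
 * so that reading .../events/mem-loads returns the event string above.
 */
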
Peter Zijlstraabe43402010-11-17 23:17:37 +010011214static int __init perf_event_sysfs_init(void)
11215{
11216 struct pmu *pmu;
11217 int ret;
11218
11219 mutex_lock(&pmus_lock);
11220
11221 ret = bus_register(&pmu_bus);
11222 if (ret)
11223 goto unlock;
11224
11225 list_for_each_entry(pmu, &pmus, entry) {
11226 if (!pmu->name || pmu->type < 0)
11227 continue;
11228
11229 ret = pmu_dev_alloc(pmu);
11230 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
11231 }
11232 pmu_bus_running = 1;
11233 ret = 0;
11234
11235unlock:
11236 mutex_unlock(&pmus_lock);
11237
11238 return ret;
11239}
11240device_initcall(perf_event_sysfs_init);
Stephane Eraniane5d13672011-02-14 11:20:01 +020011241
11242#ifdef CONFIG_CGROUP_PERF
Tejun Heoeb954192013-08-08 20:11:23 -040011243static struct cgroup_subsys_state *
11244perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
Stephane Eraniane5d13672011-02-14 11:20:01 +020011245{
11246 struct perf_cgroup *jc;
Stephane Eraniane5d13672011-02-14 11:20:01 +020011247
Li Zefan1b15d052011-03-03 14:26:06 +080011248 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
Stephane Eraniane5d13672011-02-14 11:20:01 +020011249 if (!jc)
11250 return ERR_PTR(-ENOMEM);
11251
Stephane Eraniane5d13672011-02-14 11:20:01 +020011252 jc->info = alloc_percpu(struct perf_cgroup_info);
11253 if (!jc->info) {
11254 kfree(jc);
11255 return ERR_PTR(-ENOMEM);
11256 }
11257
Stephane Eraniane5d13672011-02-14 11:20:01 +020011258 return &jc->css;
11259}
11260
Tejun Heoeb954192013-08-08 20:11:23 -040011261static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
Stephane Eraniane5d13672011-02-14 11:20:01 +020011262{
Tejun Heoeb954192013-08-08 20:11:23 -040011263 struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
11264
Stephane Eraniane5d13672011-02-14 11:20:01 +020011265 free_percpu(jc->info);
11266 kfree(jc);
11267}
11268
11269static int __perf_cgroup_move(void *info)
11270{
11271 struct task_struct *task = info;
Stephane Eranianddaaf4e2015-11-12 11:00:03 +010011272 rcu_read_lock();
Stephane Eraniane5d13672011-02-14 11:20:01 +020011273 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
Stephane Eranianddaaf4e2015-11-12 11:00:03 +010011274 rcu_read_unlock();
Stephane Eraniane5d13672011-02-14 11:20:01 +020011275 return 0;
11276}
11277
Tejun Heo1f7dd3e52015-12-03 10:18:21 -050011278static void perf_cgroup_attach(struct cgroup_taskset *tset)
Stephane Eraniane5d13672011-02-14 11:20:01 +020011279{
Tejun Heobb9d97b2011-12-12 18:12:21 -080011280 struct task_struct *task;
Tejun Heo1f7dd3e52015-12-03 10:18:21 -050011281 struct cgroup_subsys_state *css;
Tejun Heobb9d97b2011-12-12 18:12:21 -080011282
Tejun Heo1f7dd3e52015-12-03 10:18:21 -050011283 cgroup_taskset_for_each(task, css, tset)
Tejun Heobb9d97b2011-12-12 18:12:21 -080011284 task_function_call(task, __perf_cgroup_move, task);
Stephane Eraniane5d13672011-02-14 11:20:01 +020011285}
11286
Tejun Heo073219e2014-02-08 10:36:58 -050011287struct cgroup_subsys perf_event_cgrp_subsys = {
Tejun Heo92fb9742012-11-19 08:13:38 -080011288 .css_alloc = perf_cgroup_css_alloc,
11289 .css_free = perf_cgroup_css_free,
Tejun Heobb9d97b2011-12-12 18:12:21 -080011290 .attach = perf_cgroup_attach,
Tejun Heo968ebff2017-01-29 14:35:20 -050011291 /*
11292 * Implicitly enable on dfl hierarchy so that perf events can
11293 * always be filtered by cgroup2 path as long as the perf_event
11294 * controller is not mounted on a legacy hierarchy.
11295 */
11296 .implicit_on_dfl = true,
Tejun Heo8cfd8142017-07-21 11:14:51 -040011297 .threaded = true,
Stephane Eraniane5d13672011-02-14 11:20:01 +020011298};
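
/*
 * Usage note (illustrative, not part of this file): with the controller
 * implicitly enabled on the default hierarchy, tools can scope system-wide
 * counting to a cgroup path, e.g.:
 *
 *   perf stat -e cycles -a -G mygroup -- sleep 1
 *
 * where "mygroup" is resolved relative to the mounted perf_event (or
 * cgroup2) hierarchy.
 */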
11299#endif /* CONFIG_CGROUP_PERF */