| // SPDX-License-Identifier: GPL-2.0 |
| /* |
| * Performance events core code: |
| * |
| * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> |
| * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar |
| * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra |
| * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> |
| */ |
| |
| #include <linux/fs.h> |
| #include <linux/mm.h> |
| #include <linux/cpu.h> |
| #include <linux/smp.h> |
| #include <linux/idr.h> |
| #include <linux/file.h> |
| #include <linux/poll.h> |
| #include <linux/slab.h> |
| #include <linux/hash.h> |
| #include <linux/tick.h> |
| #include <linux/sysfs.h> |
| #include <linux/dcache.h> |
| #include <linux/percpu.h> |
| #include <linux/ptrace.h> |
| #include <linux/reboot.h> |
| #include <linux/vmstat.h> |
| #include <linux/device.h> |
| #include <linux/export.h> |
| #include <linux/vmalloc.h> |
| #include <linux/hardirq.h> |
| #include <linux/hugetlb.h> |
| #include <linux/rculist.h> |
| #include <linux/uaccess.h> |
| #include <linux/syscalls.h> |
| #include <linux/anon_inodes.h> |
| #include <linux/kernel_stat.h> |
| #include <linux/cgroup.h> |
| #include <linux/perf_event.h> |
| #include <linux/trace_events.h> |
| #include <linux/hw_breakpoint.h> |
| #include <linux/mm_types.h> |
| #include <linux/module.h> |
| #include <linux/mman.h> |
| #include <linux/compat.h> |
| #include <linux/bpf.h> |
| #include <linux/filter.h> |
| #include <linux/namei.h> |
| #include <linux/parser.h> |
| #include <linux/sched/clock.h> |
| #include <linux/sched/mm.h> |
| #include <linux/proc_ns.h> |
| #include <linux/mount.h> |
| #include <linux/min_heap.h> |
| #include <linux/highmem.h> |
| #include <linux/pgtable.h> |
| #include <linux/buildid.h> |
| |
| #include "internal.h" |
| |
| #include <asm/irq_regs.h> |
| |
| typedef int (*remote_function_f)(void *); |
| |
| struct remote_function_call { |
| struct task_struct *p; |
| remote_function_f func; |
| void *info; |
| int ret; |
| }; |
| |
| static void remote_function(void *data) |
| { |
| struct remote_function_call *tfc = data; |
| struct task_struct *p = tfc->p; |
| |
| if (p) { |
| /* -EAGAIN */ |
| if (task_cpu(p) != smp_processor_id()) |
| return; |
| |
| /* |
| * Now that we're on the right CPU with IRQs disabled, we can test |
| * if we hit the right task without races. |
| */ |
| |
| tfc->ret = -ESRCH; /* No such (running) process */ |
| if (p != current) |
| return; |
| } |
| |
| tfc->ret = tfc->func(tfc->info); |
| } |
| |
| /** |
| * task_function_call - call a function on the cpu on which a task runs |
| * @p: the task to evaluate |
| * @func: the function to be called |
| * @info: the function call argument |
| * |
| * Calls the function @func when the task is currently running. This might |
| * be on the current CPU, which just calls the function directly. This will |
| * retry due to any failures in smp_call_function_single(), such as if the |
| * task_cpu() goes offline concurrently. |
| * |
| * returns @func return value or -ESRCH or -ENXIO when the process isn't running |
| */ |
| static int |
| task_function_call(struct task_struct *p, remote_function_f func, void *info) |
| { |
| struct remote_function_call data = { |
| .p = p, |
| .func = func, |
| .info = info, |
| .ret = -EAGAIN, |
| }; |
| int ret; |
| |
| for (;;) { |
| ret = smp_call_function_single(task_cpu(p), remote_function, |
| &data, 1); |
| if (!ret) |
| ret = data.ret; |
| |
| if (ret != -EAGAIN) |
| break; |
| |
| cond_resched(); |
| } |
| |
| return ret; |
| } |
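| |
| /* |
| * Illustrative usage sketch (not part of the original source): a caller |
| * with a hypothetical callback my_remote_func() of type remote_function_f |
| * could run it on the CPU that task @p currently occupies with: |
| * |
| *	ret = task_function_call(p, my_remote_func, &my_arg); |
| * |
| * The retry loop above keeps going for as long as the result is -EAGAIN, |
| * which remote_function() leaves in place when the task has migrated off |
| * the targeted CPU by the time the IPI lands. |
| */ |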
| |
| /** |
| * cpu_function_call - call a function on the cpu |
| * @cpu: target cpu to queue this function |
| * @func: the function to be called |
| * @info: the function call argument |
| * |
| * Calls the function @func on the remote cpu. |
| * |
| * returns: @func return value or -ENXIO when the cpu is offline |
| */ |
| static int cpu_function_call(int cpu, remote_function_f func, void *info) |
| { |
| struct remote_function_call data = { |
| .p = NULL, |
| .func = func, |
| .info = info, |
| .ret = -ENXIO, /* No such CPU */ |
| }; |
| |
| smp_call_function_single(cpu, remote_function, &data, 1); |
| |
| return data.ret; |
| } |
| |
| static inline struct perf_cpu_context * |
| __get_cpu_context(struct perf_event_context *ctx) |
| { |
| return this_cpu_ptr(ctx->pmu->pmu_cpu_context); |
| } |
| |
| static void perf_ctx_lock(struct perf_cpu_context *cpuctx, |
| struct perf_event_context *ctx) |
| { |
| raw_spin_lock(&cpuctx->ctx.lock); |
| if (ctx) |
| raw_spin_lock(&ctx->lock); |
| } |
| |
| static void perf_ctx_unlock(struct perf_cpu_context *cpuctx, |
| struct perf_event_context *ctx) |
| { |
| if (ctx) |
| raw_spin_unlock(&ctx->lock); |
| raw_spin_unlock(&cpuctx->ctx.lock); |
| } |
| |
| #define TASK_TOMBSTONE ((void *)-1L) |
| |
| static bool is_kernel_event(struct perf_event *event) |
| { |
| return READ_ONCE(event->owner) == TASK_TOMBSTONE; |
| } |
| |
| /* |
| * On task ctx scheduling... |
| * |
| * When !ctx->nr_events a task context will not be scheduled. This means |
| * we can disable the scheduler hooks (for performance) without leaving |
| * pending task ctx state. |
| * |
| * This however results in two special cases: |
| * |
| * - removing the last event from a task ctx; this is relatively |
| * straightforward and is done in __perf_remove_from_context. |
| * |
| * - adding the first event to a task ctx; this is tricky because we cannot |
| * rely on ctx->is_active and therefore cannot use event_function_call(). |
| * See perf_install_in_context(). |
| * |
| * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set. |
| */ |
| |
| typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *, |
| struct perf_event_context *, void *); |
| |
| struct event_function_struct { |
| struct perf_event *event; |
| event_f func; |
| void *data; |
| }; |
| |
| static int event_function(void *info) |
| { |
| struct event_function_struct *efs = info; |
| struct perf_event *event = efs->event; |
| struct perf_event_context *ctx = event->ctx; |
| struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
| struct perf_event_context *task_ctx = cpuctx->task_ctx; |
| int ret = 0; |
| |
| lockdep_assert_irqs_disabled(); |
| |
| perf_ctx_lock(cpuctx, task_ctx); |
| /* |
| * Since we do the IPI call without holding ctx->lock things can have |
| * changed, double check we hit the task we set out to hit. |
| */ |
| if (ctx->task) { |
| if (ctx->task != current) { |
| ret = -ESRCH; |
| goto unlock; |
| } |
| |
| /* |
| * We only use event_function_call() on established contexts, |
| * and event_function() is only ever called when active (or |
| * rather, we'll have bailed in task_function_call() or the |
| * above ctx->task != current test), therefore we must have |
| * ctx->is_active here. |
| */ |
| WARN_ON_ONCE(!ctx->is_active); |
| /* |
| * And since we have ctx->is_active, cpuctx->task_ctx must |
| * match. |
| */ |
| WARN_ON_ONCE(task_ctx != ctx); |
| } else { |
| WARN_ON_ONCE(&cpuctx->ctx != ctx); |
| } |
| |
| efs->func(event, cpuctx, ctx, efs->data); |
| unlock: |
| perf_ctx_unlock(cpuctx, task_ctx); |
| |
| return ret; |
| } |
| |
| static void event_function_call(struct perf_event *event, event_f func, void *data) |
| { |
| struct perf_event_context *ctx = event->ctx; |
| struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */ |
| struct event_function_struct efs = { |
| .event = event, |
| .func = func, |
| .data = data, |
| }; |
| |
| if (!event->parent) { |
| /* |
| * If this is a !child event, we must hold ctx::mutex to |
| * stabilize the event->ctx relation. See |
| * perf_event_ctx_lock(). |
| */ |
| lockdep_assert_held(&ctx->mutex); |
| } |
| |
| if (!task) { |
| cpu_function_call(event->cpu, event_function, &efs); |
| return; |
| } |
| |
| if (task == TASK_TOMBSTONE) |
| return; |
| |
| again: |
| if (!task_function_call(task, event_function, &efs)) |
| return; |
| |
| raw_spin_lock_irq(&ctx->lock); |
| /* |
| * Reload the task pointer, it might have been changed by |
| * a concurrent perf_event_context_sched_out(). |
| */ |
| task = ctx->task; |
| if (task == TASK_TOMBSTONE) { |
| raw_spin_unlock_irq(&ctx->lock); |
| return; |
| } |
| if (ctx->is_active) { |
| raw_spin_unlock_irq(&ctx->lock); |
| goto again; |
| } |
| func(event, NULL, ctx, data); |
| raw_spin_unlock_irq(&ctx->lock); |
| } |
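| |
| /* |
| * Illustrative usage sketch (not from the original source): callers wrap a |
| * context-modifying operation, e.g. a hypothetical __perf_event_foo() of |
| * type event_f, and invoke it as: |
| * |
| *	event_function_call(event, __perf_event_foo, &foo_args); |
| * |
| * For CPU contexts this is a straight cpu_function_call(); for task |
| * contexts, an active context gets the callback via event_function() on |
| * the task's CPU, while an inactive one has it invoked directly here |
| * under ctx->lock with cpuctx == NULL. |
| */ |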
| |
| /* |
| * Similar to event_function_call() + event_function(), but hard assumes IRQs |
| * are already disabled and we're on the right CPU. |
| */ |
| static void event_function_local(struct perf_event *event, event_f func, void *data) |
| { |
| struct perf_event_context *ctx = event->ctx; |
| struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
| struct task_struct *task = READ_ONCE(ctx->task); |
| struct perf_event_context *task_ctx = NULL; |
| |
| lockdep_assert_irqs_disabled(); |
| |
| if (task) { |
| if (task == TASK_TOMBSTONE) |
| return; |
| |
| task_ctx = ctx; |
| } |
| |
| perf_ctx_lock(cpuctx, task_ctx); |
| |
| task = ctx->task; |
| if (task == TASK_TOMBSTONE) |
| goto unlock; |
| |
| if (task) { |
| /* |
| * We must be either inactive or active and the right task, |
| * otherwise we're screwed, since we cannot IPI to somewhere |
| * else. |
| */ |
| if (ctx->is_active) { |
| if (WARN_ON_ONCE(task != current)) |
| goto unlock; |
| |
| if (WARN_ON_ONCE(cpuctx->task_ctx != ctx)) |
| goto unlock; |
| } |
| } else { |
| WARN_ON_ONCE(&cpuctx->ctx != ctx); |
| } |
| |
| func(event, cpuctx, ctx, data); |
| unlock: |
| perf_ctx_unlock(cpuctx, task_ctx); |
| } |
| |
| #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\ |
| PERF_FLAG_FD_OUTPUT |\ |
| PERF_FLAG_PID_CGROUP |\ |
| PERF_FLAG_FD_CLOEXEC) |
| |
| /* |
| * branch priv levels that need permission checks |
| */ |
| #define PERF_SAMPLE_BRANCH_PERM_PLM \ |
| (PERF_SAMPLE_BRANCH_KERNEL |\ |
| PERF_SAMPLE_BRANCH_HV) |
| |
| enum event_type_t { |
| EVENT_FLEXIBLE = 0x1, |
| EVENT_PINNED = 0x2, |
| EVENT_TIME = 0x4, |
| /* see ctx_resched() for details */ |
| EVENT_CPU = 0x8, |
| EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED, |
| }; |
| |
| /* |
| * perf_sched_events : >0 events exist |
| * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu |
| */ |
| |
| static void perf_sched_delayed(struct work_struct *work); |
| DEFINE_STATIC_KEY_FALSE(perf_sched_events); |
| static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed); |
| static DEFINE_MUTEX(perf_sched_mutex); |
| static atomic_t perf_sched_count; |
| |
| static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); |
| static DEFINE_PER_CPU(int, perf_sched_cb_usages); |
| static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events); |
| |
| static atomic_t nr_mmap_events __read_mostly; |
| static atomic_t nr_comm_events __read_mostly; |
| static atomic_t nr_namespaces_events __read_mostly; |
| static atomic_t nr_task_events __read_mostly; |
| static atomic_t nr_freq_events __read_mostly; |
| static atomic_t nr_switch_events __read_mostly; |
| static atomic_t nr_ksymbol_events __read_mostly; |
| static atomic_t nr_bpf_events __read_mostly; |
| static atomic_t nr_cgroup_events __read_mostly; |
| static atomic_t nr_text_poke_events __read_mostly; |
| static atomic_t nr_build_id_events __read_mostly; |
| |
| static LIST_HEAD(pmus); |
| static DEFINE_MUTEX(pmus_lock); |
| static struct srcu_struct pmus_srcu; |
| static cpumask_var_t perf_online_mask; |
| static struct kmem_cache *perf_event_cache; |
| |
| /* |
| * perf event paranoia level: |
| * -1 - not paranoid at all |
| * 0 - disallow raw tracepoint access for unpriv |
| * 1 - disallow cpu events for unpriv |
| * 2 - disallow kernel profiling for unpriv |
| */ |
| int sysctl_perf_event_paranoid __read_mostly = 2; |
| |
| /* Minimum for 512 kiB + 1 user control page */ |
| int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ |
| |
| /* |
| * max perf event sample rate |
| */ |
| #define DEFAULT_MAX_SAMPLE_RATE 100000 |
| #define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE) |
| #define DEFAULT_CPU_TIME_MAX_PERCENT 25 |
| |
| int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE; |
| |
| static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ); |
| static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS; |
| |
| static int perf_sample_allowed_ns __read_mostly = |
| DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100; |
| |
| static void update_perf_cpu_limits(void) |
| { |
| u64 tmp = perf_sample_period_ns; |
| |
| tmp *= sysctl_perf_cpu_time_max_percent; |
| tmp = div_u64(tmp, 100); |
| if (!tmp) |
| tmp = 1; |
| |
| WRITE_ONCE(perf_sample_allowed_ns, tmp); |
| } |
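| |
| /* |
| * Worked example (illustrative): with the defaults above, |
| * perf_sample_period_ns = NSEC_PER_SEC / 100000 = 10000ns and |
| * sysctl_perf_cpu_time_max_percent = 25, so |
| * perf_sample_allowed_ns = 10000 * 25 / 100 = 2500ns, i.e. each sample |
| * may consume at most a quarter of its nominal period. |
| */ |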
| |
| static bool perf_rotate_context(struct perf_cpu_context *cpuctx); |
| |
| int perf_proc_update_handler(struct ctl_table *table, int write, |
| void *buffer, size_t *lenp, loff_t *ppos) |
| { |
| int ret; |
| int perf_cpu = sysctl_perf_cpu_time_max_percent; |
| /* |
| * If throttling is disabled don't allow the write: |
| */ |
| if (write && (perf_cpu == 100 || perf_cpu == 0)) |
| return -EINVAL; |
| |
| ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
| if (ret || !write) |
| return ret; |
| |
| max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ); |
| perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; |
| update_perf_cpu_limits(); |
| |
| return 0; |
| } |
| |
| int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT; |
| |
| int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, |
| void *buffer, size_t *lenp, loff_t *ppos) |
| { |
| int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
| |
| if (ret || !write) |
| return ret; |
| |
| if (sysctl_perf_cpu_time_max_percent == 100 || |
| sysctl_perf_cpu_time_max_percent == 0) { |
| printk(KERN_WARNING |
| "perf: Dynamic interrupt throttling disabled, can hang your system!\n"); |
| WRITE_ONCE(perf_sample_allowed_ns, 0); |
| } else { |
| update_perf_cpu_limits(); |
| } |
| |
| return 0; |
| } |
| |
| /* |
| * perf samples are done in some very critical code paths (NMIs). |
| * If they take too much CPU time, the system can lock up and not |
| * get any real work done. This will drop the sample rate when |
| * we detect that events are taking too long. |
| */ |
| #define NR_ACCUMULATED_SAMPLES 128 |
| static DEFINE_PER_CPU(u64, running_sample_length); |
| |
| static u64 __report_avg; |
| static u64 __report_allowed; |
| |
| static void perf_duration_warn(struct irq_work *w) |
| { |
| printk_ratelimited(KERN_INFO |
| "perf: interrupt took too long (%lld > %lld), lowering " |
| "kernel.perf_event_max_sample_rate to %d\n", |
| __report_avg, __report_allowed, |
| sysctl_perf_event_sample_rate); |
| } |
| |
| static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn); |
| |
| void perf_sample_event_took(u64 sample_len_ns) |
| { |
| u64 max_len = READ_ONCE(perf_sample_allowed_ns); |
| u64 running_len; |
| u64 avg_len; |
| u32 max; |
| |
| if (max_len == 0) |
| return; |
| |
| /* Decay the counter by 1 average sample. */ |
| running_len = __this_cpu_read(running_sample_length); |
| running_len -= running_len/NR_ACCUMULATED_SAMPLES; |
| running_len += sample_len_ns; |
| __this_cpu_write(running_sample_length, running_len); |
| |
| /* |
| * Note: this will be biased artificially low until we have |
| * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us |
| * from having to maintain a count. |
| */ |
| avg_len = running_len/NR_ACCUMULATED_SAMPLES; |
| if (avg_len <= max_len) |
| return; |
| |
| __report_avg = avg_len; |
| __report_allowed = max_len; |
| |
| /* |
| * Pad the observed average duration by 25% and recompute the allowed |
| * samples per tick from it, so the new rate leaves some headroom. |
| */ |
| avg_len += avg_len / 4; |
| max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent; |
| if (avg_len < max) |
| max /= (u32)avg_len; |
| else |
| max = 1; |
| |
| WRITE_ONCE(perf_sample_allowed_ns, avg_len); |
| WRITE_ONCE(max_samples_per_tick, max); |
| |
| sysctl_perf_event_sample_rate = max * HZ; |
| perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; |
| |
| if (!irq_work_queue(&perf_duration_work)) { |
| early_printk("perf: interrupt took too long (%lld > %lld), lowering " |
| "kernel.perf_event_max_sample_rate to %d\n", |
| __report_avg, __report_allowed, |
| sysctl_perf_event_sample_rate); |
| } |
| } |
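| |
| /* |
| * Worked example (illustrative, numbers made up): suppose |
| * perf_sample_allowed_ns is 2500ns and the decayed average sample length |
| * climbs to 4000ns. The padded average is 5000ns; with HZ=250 and |
| * sysctl_perf_cpu_time_max_percent = 25 the per-tick budget is |
| * (4000000 / 100) * 25 = 1000000ns, so max_samples_per_tick becomes |
| * 1000000 / 5000 = 200, the sample rate drops to 200 * HZ = 50000 and |
| * perf_sample_allowed_ns is raised to the padded 5000ns. |
| */ |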
| |
| static atomic64_t perf_event_id; |
| |
| static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, |
| enum event_type_t event_type); |
| |
| static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, |
| enum event_type_t event_type, |
| struct task_struct *task); |
| |
| static void update_context_time(struct perf_event_context *ctx); |
| static u64 perf_event_time(struct perf_event *event); |
| |
| void __weak perf_event_print_debug(void) { } |
| |
| static inline u64 perf_clock(void) |
| { |
| return local_clock(); |
| } |
| |
| static inline u64 perf_event_clock(struct perf_event *event) |
| { |
| return event->clock(); |
| } |
| |
| /* |
| * State based event timekeeping... |
| * |
| * The basic idea is to use event->state to determine which (if any) time |
| * fields to increment with the current delta. This means we only need to |
| * update timestamps when we change state or when they are explicitly requested |
| * (read). |
| * |
| * Event groups make things a little more complicated, but not terribly so. The |
| * rules for a group are that if the group leader is OFF the entire group is |
| * OFF, irrespective of what the group member states are. This results in |
| * __perf_effective_state(). |
| * |
| * A further ramification is that when a group leader flips between OFF and |
| * !OFF, we need to update all group member times. |
| * |
| * |
| * NOTE: perf_event_time() is based on the (cgroup) context time, and thus we |
| * need to make sure the relevant context time is updated before we try and |
| * update our timestamps. |
| */ |
| |
| static __always_inline enum perf_event_state |
| __perf_effective_state(struct perf_event *event) |
| { |
| struct perf_event *leader = event->group_leader; |
| |
| if (leader->state <= PERF_EVENT_STATE_OFF) |
| return leader->state; |
| |
| return event->state; |
| } |
| |
| static __always_inline void |
| __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) |
| { |
| enum perf_event_state state = __perf_effective_state(event); |
| u64 delta = now - event->tstamp; |
| |
| *enabled = event->total_time_enabled; |
| if (state >= PERF_EVENT_STATE_INACTIVE) |
| *enabled += delta; |
| |
| *running = event->total_time_running; |
| if (state >= PERF_EVENT_STATE_ACTIVE) |
| *running += delta; |
| } |
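| |
| /* |
| * Illustrative example: an event that spent 10ms INACTIVE and is now 5ms |
| * into being ACTIVE reports enabled = 15ms and running = 5ms. Only the |
| * delta since the last state change (event->tstamp) is in the ACTIVE |
| * state and therefore added to both totals; the earlier INACTIVE time |
| * only ever accumulated into total_time_enabled. |
| */ |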
| |
| static void perf_event_update_time(struct perf_event *event) |
| { |
| u64 now = perf_event_time(event); |
| |
| __perf_update_times(event, now, &event->total_time_enabled, |
| &event->total_time_running); |
| event->tstamp = now; |
| } |
| |
| static void perf_event_update_sibling_time(struct perf_event *leader) |
| { |
| struct perf_event *sibling; |
| |
| for_each_sibling_event(sibling, leader) |
| perf_event_update_time(sibling); |
| } |
| |
| static void |
| perf_event_set_state(struct perf_event *event, enum perf_event_state state) |
| { |
| if (event->state == state) |
| return; |
| |
| perf_event_update_time(event); |
| /* |
| * If a group leader gets enabled/disabled all its siblings |
| * are affected too. |
| */ |
| if ((event->state < 0) ^ (state < 0)) |
| perf_event_update_sibling_time(event); |
| |
| WRITE_ONCE(event->state, state); |
| } |
| |
| #ifdef CONFIG_CGROUP_PERF |
| |
| static inline bool |
| perf_cgroup_match(struct perf_event *event) |
| { |
| struct perf_event_context *ctx = event->ctx; |
| struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
| |
| /* @event doesn't care about cgroup */ |
| if (!event->cgrp) |
| return true; |
| |
| /* wants specific cgroup scope but @cpuctx isn't associated with any */ |
| if (!cpuctx->cgrp) |
| return false; |
| |
| /* |
| * Cgroup scoping is recursive. An event enabled for a cgroup is |
| * also enabled for all its descendant cgroups. If @cpuctx's |
| * cgroup is a descendant of @event's (the test covers identity |
| * case), it's a match. |
| */ |
| return cgroup_is_descendant(cpuctx->cgrp->css.cgroup, |
| event->cgrp->css.cgroup); |
| } |
| |
| static inline void perf_detach_cgroup(struct perf_event *event) |
| { |
| css_put(&event->cgrp->css); |
| event->cgrp = NULL; |
| } |
| |
| static inline int is_cgroup_event(struct perf_event *event) |
| { |
| return event->cgrp != NULL; |
| } |
| |
| static inline u64 perf_cgroup_event_time(struct perf_event *event) |
| { |
| struct perf_cgroup_info *t; |
| |
| t = per_cpu_ptr(event->cgrp->info, event->cpu); |
| return t->time; |
| } |
| |
| static inline void __update_cgrp_time(struct perf_cgroup *cgrp) |
| { |
| struct perf_cgroup_info *info; |
| u64 now; |
| |
| now = perf_clock(); |
| |
| info = this_cpu_ptr(cgrp->info); |
| |
| info->time += now - info->timestamp; |
| info->timestamp = now; |
| } |
| |
| static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) |
| { |
| struct perf_cgroup *cgrp = cpuctx->cgrp; |
| struct cgroup_subsys_state *css; |
| |
| if (cgrp) { |
| for (css = &cgrp->css; css; css = css->parent) { |
| cgrp = container_of(css, struct perf_cgroup, css); |
| __update_cgrp_time(cgrp); |
| } |
| } |
| } |
| |
| static inline void update_cgrp_time_from_event(struct perf_event *event) |
| { |
| struct perf_cgroup *cgrp; |
| |
| /* |
| * ensure we access cgroup data only when needed and |
| * when we know the cgroup is pinned (css_get) |
| */ |
| if (!is_cgroup_event(event)) |
| return; |
| |
| cgrp = perf_cgroup_from_task(current, event->ctx); |
| /* |
| * Do not update time when cgroup is not active |
| */ |
| if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) |
| __update_cgrp_time(event->cgrp); |
| } |
| |
| static inline void |
| perf_cgroup_set_timestamp(struct task_struct *task, |
| struct perf_event_context *ctx) |
| { |
| struct perf_cgroup *cgrp; |
| struct perf_cgroup_info *info; |
| struct cgroup_subsys_state *css; |
| |
| /* |
| * ctx->lock held by caller |
| * ensure we do not access cgroup data |
| * unless we have the cgroup pinned (css_get) |
| */ |
| if (!task || !ctx->nr_cgroups) |
| return; |
| |
| cgrp = perf_cgroup_from_task(task, ctx); |
| |
| for (css = &cgrp->css; css; css = css->parent) { |
| cgrp = container_of(css, struct perf_cgroup, css); |
| info = this_cpu_ptr(cgrp->info); |
| info->timestamp = ctx->timestamp; |
| } |
| } |
| |
| static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list); |
| |
| #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */ |
| #define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */ |
| |
| /* |
| * reschedule events based on the cgroup constraint of task. |
| * |
| * mode SWOUT : schedule out everything |
| * mode SWIN : schedule in based on cgroup for next |
| */ |
| static void perf_cgroup_switch(struct task_struct *task, int mode) |
| { |
| struct perf_cpu_context *cpuctx; |
| struct list_head *list; |
| unsigned long flags; |
| |
| /* |
| * Disable interrupts and preemption to keep this CPU's |
| * cgrp_cpuctx_entry from changing under us. |
| */ |
| local_irq_save(flags); |
| |
| list = this_cpu_ptr(&cgrp_cpuctx_list); |
| list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) { |
| WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0); |
| |
| perf_ctx_lock(cpuctx, cpuctx->task_ctx); |
| perf_pmu_disable(cpuctx->ctx.pmu); |
| |
| if (mode & PERF_CGROUP_SWOUT) { |
| cpu_ctx_sched_out(cpuctx, EVENT_ALL); |
| /* |
| * must not be done before ctxswout due |
| * to event_filter_match() in event_sched_out() |
| */ |
| cpuctx->cgrp = NULL; |
| } |
| |
| if (mode & PERF_CGROUP_SWIN) { |
| WARN_ON_ONCE(cpuctx->cgrp); |
| /* |
| * Set cgrp before ctxsw-in so that |
| * event_filter_match() does not have to pass |
| * the task around. |
| * We pass cpuctx->ctx to perf_cgroup_from_task() |
| * because cgroup events are only per-cpu. |
| */ |
| cpuctx->cgrp = perf_cgroup_from_task(task, |
| &cpuctx->ctx); |
| cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); |
| } |
| perf_pmu_enable(cpuctx->ctx.pmu); |
| perf_ctx_unlock(cpuctx, cpuctx->task_ctx); |
| } |
| |
| local_irq_restore(flags); |
| } |
| |
| static inline void perf_cgroup_sched_out(struct task_struct *task, |
| struct task_struct *next) |
| { |
| struct perf_cgroup *cgrp1; |
| struct perf_cgroup *cgrp2 = NULL; |
| |
| rcu_read_lock(); |
| /* |
| * We come here when we know perf_cgroup_events > 0. |
| * We do not need to pass the ctx here because we know |
| * we are holding the rcu lock. |
| */ |
| cgrp1 = perf_cgroup_from_task(task, NULL); |
| cgrp2 = perf_cgroup_from_task(next, NULL); |
| |
| /* |
| * only schedule out current cgroup events if we know |
| * that we are switching to a different cgroup. Otherwise, |
| * do not touch the cgroup events. |
| */ |
| if (cgrp1 != cgrp2) |
| perf_cgroup_switch(task, PERF_CGROUP_SWOUT); |
| |
| rcu_read_unlock(); |
| } |
| |
| static inline void perf_cgroup_sched_in(struct task_struct *prev, |
| struct task_struct *task) |
| { |
| struct perf_cgroup *cgrp1; |
| struct perf_cgroup *cgrp2 = NULL; |
| |
| rcu_read_lock(); |
| /* |
| * We come here when we know perf_cgroup_events > 0. |
| * We do not need to pass the ctx here because we know |
| * we are holding the rcu lock. |
| */ |
| cgrp1 = perf_cgroup_from_task(task, NULL); |
| cgrp2 = perf_cgroup_from_task(prev, NULL); |
| |
| /* |
| * only need to schedule in cgroup events if we are changing |
| * the cgroup during ctxsw. If the cgroup did not change, cgroup |
| * events were not scheduled out on ctxsw-out in the first place. |
| */ |
| if (cgrp1 != cgrp2) |
| perf_cgroup_switch(task, PERF_CGROUP_SWIN); |
| |
| rcu_read_unlock(); |
| } |
| |
| static int perf_cgroup_ensure_storage(struct perf_event *event, |
| struct cgroup_subsys_state *css) |
| { |
| struct perf_cpu_context *cpuctx; |
| struct perf_event **storage; |
| int cpu, heap_size, ret = 0; |
| |
| /* |
| * Allow storage to have sufficient space for an iterator for each |
| * possibly nested cgroup plus an iterator for events with no cgroup. |
| */ |
| for (heap_size = 1; css; css = css->parent) |
| heap_size++; |
| |
| for_each_possible_cpu(cpu) { |
| cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu); |
| if (heap_size <= cpuctx->heap_size) |
| continue; |
| |
| storage = kmalloc_node(heap_size * sizeof(struct perf_event *), |
| GFP_KERNEL, cpu_to_node(cpu)); |
| if (!storage) { |
| ret = -ENOMEM; |
| break; |
| } |
| |
| raw_spin_lock_irq(&cpuctx->ctx.lock); |
| if (cpuctx->heap_size < heap_size) { |
| swap(cpuctx->heap, storage); |
| if (storage == cpuctx->heap_default) |
| storage = NULL; |
| cpuctx->heap_size = heap_size; |
| } |
| raw_spin_unlock_irq(&cpuctx->ctx.lock); |
| |
| kfree(storage); |
| } |
| |
| return ret; |
| } |
| |
| static inline int perf_cgroup_connect(int fd, struct perf_event *event, |
| struct perf_event_attr *attr, |
| struct perf_event *group_leader) |
| { |
| struct perf_cgroup *cgrp; |
| struct cgroup_subsys_state *css; |
| struct fd f = fdget(fd); |
| int ret = 0; |
| |
| if (!f.file) |
| return -EBADF; |
| |
| css = css_tryget_online_from_dir(f.file->f_path.dentry, |
| &perf_event_cgrp_subsys); |
| if (IS_ERR(css)) { |
| ret = PTR_ERR(css); |
| goto out; |
| } |
| |
| ret = perf_cgroup_ensure_storage(event, css); |
| if (ret) |
| goto out; |
| |
| cgrp = container_of(css, struct perf_cgroup, css); |
| event->cgrp = cgrp; |
| |
| /* |
| * all events in a group must monitor |
| * the same cgroup because a task belongs |
| * to only one perf cgroup at a time |
| */ |
| if (group_leader && group_leader->cgrp != cgrp) { |
| perf_detach_cgroup(event); |
| ret = -EINVAL; |
| } |
| out: |
| fdput(f); |
| return ret; |
| } |
| |
| static inline void |
| perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) |
| { |
| struct perf_cgroup_info *t; |
| t = per_cpu_ptr(event->cgrp->info, event->cpu); |
| event->shadow_ctx_time = now - t->timestamp; |
| } |
| |
| static inline void |
| perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) |
| { |
| struct perf_cpu_context *cpuctx; |
| |
| if (!is_cgroup_event(event)) |
| return; |
| |
| /* |
| * Because cgroup events are always per-cpu events, |
| * @ctx == &cpuctx->ctx. |
| */ |
| cpuctx = container_of(ctx, struct perf_cpu_context, ctx); |
| |
| /* |
| * Since setting cpuctx->cgrp is conditional on the current @cgrp |
| * matching the event's cgroup, we must do this for every new event, |
| * because if the first would mismatch, the second would not try again |
| * and we would leave cpuctx->cgrp unset. |
| */ |
| if (ctx->is_active && !cpuctx->cgrp) { |
| struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx); |
| |
| if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) |
| cpuctx->cgrp = cgrp; |
| } |
| |
| if (ctx->nr_cgroups++) |
| return; |
| |
| list_add(&cpuctx->cgrp_cpuctx_entry, |
| per_cpu_ptr(&cgrp_cpuctx_list, event->cpu)); |
| } |
| |
| static inline void |
| perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) |
| { |
| struct perf_cpu_context *cpuctx; |
| |
| if (!is_cgroup_event(event)) |
| return; |
| |
| /* |
| * Because cgroup events are always per-cpu events, |
| * @ctx == &cpuctx->ctx. |
| */ |
| cpuctx = container_of(ctx, struct perf_cpu_context, ctx); |
| |
| if (--ctx->nr_cgroups) |
| return; |
| |
| if (ctx->is_active && cpuctx->cgrp) |
| cpuctx->cgrp = NULL; |
| |
| list_del(&cpuctx->cgrp_cpuctx_entry); |
| } |
| |
| #else /* !CONFIG_CGROUP_PERF */ |
| |
| static inline bool |
| perf_cgroup_match(struct perf_event *event) |
| { |
| return true; |
| } |
| |
| static inline void perf_detach_cgroup(struct perf_event *event) |
| {} |
| |
| static inline int is_cgroup_event(struct perf_event *event) |
| { |
| return 0; |
| } |
| |
| static inline void update_cgrp_time_from_event(struct perf_event *event) |
| { |
| } |
| |
| static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) |
| { |
| } |
| |
| static inline void perf_cgroup_sched_out(struct task_struct *task, |
| struct task_struct *next) |
| { |
| } |
| |
| static inline void perf_cgroup_sched_in(struct task_struct *prev, |
| struct task_struct *task) |
| { |
| } |
| |
| static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, |
| struct perf_event_attr *attr, |
| struct perf_event *group_leader) |
| { |
| return -EINVAL; |
| } |
| |
| static inline void |
| perf_cgroup_set_timestamp(struct task_struct *task, |
| struct perf_event_context *ctx) |
| { |
| } |
| |
| static inline void |
| perf_cgroup_switch(struct task_struct *task, struct task_struct *next) |
| { |
| } |
| |
| static inline void |
| perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) |
| { |
| } |
| |
| static inline u64 perf_cgroup_event_time(struct perf_event *event) |
| { |
| return 0; |
| } |
| |
| static inline void |
| perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) |
| { |
| } |
| |
| static inline void |
| perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) |
| { |
| } |
| #endif |
| |
| /* |
| * Default the multiplexing interval to one timer tick, |
| * just like the original tick-driven code. |
| */ |
| #define PERF_CPU_HRTIMER (1000 / HZ) |
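| /* |
| * Illustrative: PERF_CPU_HRTIMER is in milliseconds, so with HZ=1000 it is |
| * 1ms and with HZ=250 it is 4ms -- i.e. one scheduler tick, matching the |
| * rotation frequency of the old tick-driven code. |
| */ |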
| /* |
| * function must be called with interrupts disabled |
| */ |
| static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr) |
| { |
| struct perf_cpu_context *cpuctx; |
| bool rotations; |
| |
| lockdep_assert_irqs_disabled(); |
| |
| cpuctx = container_of(hr, struct perf_cpu_context, hrtimer); |
| rotations = perf_rotate_context(cpuctx); |
| |
| raw_spin_lock(&cpuctx->hrtimer_lock); |
| if (rotations) |
| hrtimer_forward_now(hr, cpuctx->hrtimer_interval); |
| else |
| cpuctx->hrtimer_active = 0; |
| raw_spin_unlock(&cpuctx->hrtimer_lock); |
| |
| return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART; |
| } |
| |
| static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) |
| { |
| struct hrtimer *timer = &cpuctx->hrtimer; |
| struct pmu *pmu = cpuctx->ctx.pmu; |
| u64 interval; |
| |
| /* no multiplexing needed for SW PMU */ |
| if (pmu->task_ctx_nr == perf_sw_context) |
| return; |
| |
| /* |
| * Check that the default is sane; if it is not set, force it to |
| * the default interval (1/tick). |
| */ |
| interval = pmu->hrtimer_interval_ms; |
| if (interval < 1) |
| interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER; |
| |
| cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval); |
| |
| raw_spin_lock_init(&cpuctx->hrtimer_lock); |
| hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); |
| timer->function = perf_mux_hrtimer_handler; |
| } |
| |
| static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) |
| { |
| struct hrtimer *timer = &cpuctx->hrtimer; |
| struct pmu *pmu = cpuctx->ctx.pmu; |
| unsigned long flags; |
| |
| /* not for SW PMU */ |
| if (pmu->task_ctx_nr == perf_sw_context) |
| return 0; |
| |
| raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags); |
| if (!cpuctx->hrtimer_active) { |
| cpuctx->hrtimer_active = 1; |
| hrtimer_forward_now(timer, cpuctx->hrtimer_interval); |
| hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD); |
| } |
| raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags); |
| |
| return 0; |
| } |
| |
| void perf_pmu_disable(struct pmu *pmu) |
| { |
| int *count = this_cpu_ptr(pmu->pmu_disable_count); |
| if (!(*count)++) |
| pmu->pmu_disable(pmu); |
| } |
| |
| void perf_pmu_enable(struct pmu *pmu) |
| { |
| int *count = this_cpu_ptr(pmu->pmu_disable_count); |
| if (!--(*count)) |
| pmu->pmu_enable(pmu); |
| } |
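| |
| /* |
| * Illustrative sketch: the disable count nests, so callers pair the two |
| * like a lock: |
| * |
| *	perf_pmu_disable(pmu); |
| *	... reprogram events ... |
| *	perf_pmu_enable(pmu); |
| * |
| * Only the outermost perf_pmu_disable() calls pmu->pmu_disable(), and only |
| * the matching outermost perf_pmu_enable() calls pmu->pmu_enable(). |
| */ |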
| |
| static DEFINE_PER_CPU(struct list_head, active_ctx_list); |
| |
| /* |
| * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and |
| * perf_event_task_tick() are fully serialized because they're strictly cpu |
| * affine and perf_event_ctx{activate,deactivate} are called with IRQs |
| * disabled, while perf_event_task_tick is called from IRQ context. |
| */ |
| static void perf_event_ctx_activate(struct perf_event_context *ctx) |
| { |
| struct list_head *head = this_cpu_ptr(&active_ctx_list); |
| |
| lockdep_assert_irqs_disabled(); |
| |
| WARN_ON(!list_empty(&ctx->active_ctx_list)); |
| |
| list_add(&ctx->active_ctx_list, head); |
| } |
| |
| static void perf_event_ctx_deactivate(struct perf_event_context *ctx) |
| { |
| lockdep_assert_irqs_disabled(); |
| |
| WARN_ON(list_empty(&ctx->active_ctx_list)); |
| |
| list_del_init(&ctx->active_ctx_list); |
| } |
| |
| static void get_ctx(struct perf_event_context *ctx) |
| { |
| refcount_inc(&ctx->refcount); |
| } |
| |
| static void *alloc_task_ctx_data(struct pmu *pmu) |
| { |
| if (pmu->task_ctx_cache) |
| return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL); |
| |
| return NULL; |
| } |
| |
| static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data) |
| { |
| if (pmu->task_ctx_cache && task_ctx_data) |
| kmem_cache_free(pmu->task_ctx_cache, task_ctx_data); |
| } |
| |
| static void free_ctx(struct rcu_head *head) |
| { |
| struct perf_event_context *ctx; |
| |
| ctx = container_of(head, struct perf_event_context, rcu_head); |
| free_task_ctx_data(ctx->pmu, ctx->task_ctx_data); |
| kfree(ctx); |
| } |
| |
| static void put_ctx(struct perf_event_context *ctx) |
| { |
| if (refcount_dec_and_test(&ctx->refcount)) { |
| if (ctx->parent_ctx) |
| put_ctx(ctx->parent_ctx); |
| if (ctx->task && ctx->task != TASK_TOMBSTONE) |
| put_task_struct(ctx->task); |
| call_rcu(&ctx->rcu_head, free_ctx); |
| } |
| } |
| |
| /* |
| * Because of perf_event::ctx migration in sys_perf_event_open::move_group and |
| * perf_pmu_migrate_context() we need some magic. |
| * |
| * Those places that change perf_event::ctx will hold both |
| * perf_event_ctx::mutex of the 'old' and 'new' ctx value. |
| * |
| * Lock ordering is by mutex address. There are two other sites where |
| * perf_event_context::mutex nests and those are: |
| * |
| * - perf_event_exit_task_context() [ child , 0 ] |
| * perf_event_exit_event() |
| * put_event() [ parent, 1 ] |
| * |
| * - perf_event_init_context() [ parent, 0 ] |
| * inherit_task_group() |
| * inherit_group() |
| * inherit_event() |
| * perf_event_alloc() |
| * perf_init_event() |
| * perf_try_init_event() [ child , 1 ] |
| * |
| * While it appears there is an obvious deadlock here -- the parent and child |
| * nesting levels are inverted between the two -- this is in fact safe because |
| * lifetime rules separate them. That is, an exiting task cannot fork, and a |
| * spawning task cannot (yet) exit. |
| * |
| * But remember that these are parent<->child context relations, and |
| * migration does not affect children, therefore these two orderings should not |
| * interact. |
| * |
| * The change in perf_event::ctx does not affect children (as claimed above) |
| * because the sys_perf_event_open() case will install a new event and break |
| * the ctx parent<->child relation, and perf_pmu_migrate_context() is only |
| * concerned with cpuctx and that doesn't have children. |
| * |
| * The places that change perf_event::ctx will issue: |
| * |
| * perf_remove_from_context(); |
| * synchronize_rcu(); |
| * perf_install_in_context(); |
| * |
| * to effect the change. The remove_from_context() + synchronize_rcu() should |
| * quiesce the event, after which we can install it in the new location. This |
| * means that only external vectors (perf_fops, prctl) can perturb the event |
| * while in transit. Therefore all such accessors should also acquire |
| * perf_event_context::mutex to serialize against this. |
| * |
| * However; because event->ctx can change while we're waiting to acquire |
| * ctx->mutex we must be careful and use the below perf_event_ctx_lock() |
| * function. |
| * |
| * Lock order: |
| * exec_update_lock |
| * task_struct::perf_event_mutex |
| * perf_event_context::mutex |
| * perf_event::child_mutex; |
| * perf_event_context::lock |
| * perf_event::mmap_mutex |
| * mmap_lock |
| * perf_addr_filters_head::lock |
| * |
| * cpu_hotplug_lock |
| * pmus_lock |
| * cpuctx->mutex / perf_event_context::mutex |
| */ |
| static struct perf_event_context * |
| perf_event_ctx_lock_nested(struct perf_event *event, int nesting) |
| { |
| struct perf_event_context *ctx; |
| |
| again: |
| rcu_read_lock(); |
| ctx = READ_ONCE(event->ctx); |
| if (!refcount_inc_not_zero(&ctx->refcount)) { |
| rcu_read_unlock(); |
| goto again; |
| } |
| rcu_read_unlock(); |
| |
| mutex_lock_nested(&ctx->mutex, nesting); |
| if (event->ctx != ctx) { |
| mutex_unlock(&ctx->mutex); |
| put_ctx(ctx); |
| goto again; |
| } |
| |
| return ctx; |
| } |
| |
| static inline struct perf_event_context * |
| perf_event_ctx_lock(struct perf_event *event) |
| { |
| return perf_event_ctx_lock_nested(event, 0); |
| } |
| |
| static void perf_event_ctx_unlock(struct perf_event *event, |
| struct perf_event_context *ctx) |
| { |
| mutex_unlock(&ctx->mutex); |
| put_ctx(ctx); |
| } |
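| |
| /* |
| * Illustrative usage sketch: external entry points (e.g. the perf_fops |
| * paths) stabilize event->ctx like so: |
| * |
| *	ctx = perf_event_ctx_lock(event); |
| *	... operate on the event with ctx->mutex held ... |
| *	perf_event_ctx_unlock(event, ctx); |
| * |
| * The retry loop in perf_event_ctx_lock_nested() handles the window where |
| * event->ctx is swapped out from under us before the mutex is acquired. |
| */ |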
| |
| /* |
| * This must be done under the ctx->lock, such as to serialize against |
| * context_equiv(), therefore we cannot call put_ctx() since that might end up |
| * calling scheduler related locks and ctx->lock nests inside those. |
| */ |
| static __must_check struct perf_event_context * |
| unclone_ctx(struct perf_event_context *ctx) |
| { |
| struct perf_event_context *parent_ctx = ctx->parent_ctx; |
| |
| lockdep_assert_held(&ctx->lock); |
| |
| if (parent_ctx) |
| ctx->parent_ctx = NULL; |
| ctx->generation++; |
| |
| return parent_ctx; |
| } |
| |
| static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p, |
| enum pid_type type) |
| { |
| u32 nr; |
| /* |
| * only top level events have the pid namespace they were created in |
| */ |
| if (event->parent) |
| event = event->parent; |
| |
| nr = __task_pid_nr_ns(p, type, event->ns); |
| /* avoid -1 if it is the idle thread or runs in another ns */ |
| if (!nr && !pid_alive(p)) |
| nr = -1; |
| return nr; |
| } |
| |
| static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) |
| { |
| return perf_event_pid_type(event, p, PIDTYPE_TGID); |
| } |
| |
| static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) |
| { |
| return perf_event_pid_type(event, p, PIDTYPE_PID); |
| } |
| |
| /* |
| * If we inherit events we want to return the parent event id |
| * to userspace. |
| */ |
| static u64 primary_event_id(struct perf_event *event) |
| { |
| u64 id = event->id; |
| |
| if (event->parent) |
| id = event->parent->id; |
| |
| return id; |
| } |
| |
| /* |
| * Get the perf_event_context for a task and lock it. |
| * |
| * This has to cope with the fact that until it is locked, |
| * the context could get moved to another task. |
| */ |
| static struct perf_event_context * |
| perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags) |
| { |
| struct perf_event_context *ctx; |
| |
| retry: |
| /* |
| * One of the few rules of preemptible RCU is that one cannot do |
| * rcu_read_unlock() while holding a scheduler (or nested) lock when |
| * part of the read side critical section was irqs-enabled -- see |
| * rcu_read_unlock_special(). |
| * |
| * Since ctx->lock nests under rq->lock we must ensure the entire read |
| * side critical section has interrupts disabled. |
| */ |
| local_irq_save(*flags); |
| rcu_read_lock(); |
| ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); |
| if (ctx) { |
| /* |
| * If this context is a clone of another, it might |
| * get swapped for another underneath us by |
| * perf_event_task_sched_out, though the |
| * rcu_read_lock() protects us from any context |
| * getting freed. Lock the context and check if it |
| * got swapped before we could get the lock, and retry |
| * if so. If we locked the right context, then it |
| * can't get swapped on us any more. |
| */ |
| raw_spin_lock(&ctx->lock); |
| if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { |
| raw_spin_unlock(&ctx->lock); |
| rcu_read_unlock(); |
| local_irq_restore(*flags); |
| goto retry; |
| } |
| |
| if (ctx->task == TASK_TOMBSTONE || |
| !refcount_inc_not_zero(&ctx->refcount)) { |
| raw_spin_unlock(&ctx->lock); |
| ctx = NULL; |
| } else { |
| WARN_ON_ONCE(ctx->task != task); |
| } |
| } |
| rcu_read_unlock(); |
| if (!ctx) |
| local_irq_restore(*flags); |
| return ctx; |
| } |
| |
| /* |
| * Get the context for a task and increment its pin_count so it |
| * can't get swapped to another task. This also increments its |
| * reference count so that the context can't get freed. |
| */ |
| static struct perf_event_context * |
| perf_pin_task_context(struct task_struct *task, int ctxn) |
| { |
| struct perf_event_context *ctx; |
| unsigned long flags; |
| |
| ctx = perf_lock_task_context(task, ctxn, &flags); |
| if (ctx) { |
| ++ctx->pin_count; |
| raw_spin_unlock_irqrestore(&ctx->lock, flags); |
| } |
| return ctx; |
| } |
| |
| static void perf_unpin_context(struct perf_event_context *ctx) |
| { |
| unsigned long flags; |
| |
| raw_spin_lock_irqsave(&ctx->lock, flags); |
| --ctx->pin_count; |
| raw_spin_unlock_irqrestore(&ctx->lock, flags); |
| } |
| |
| /* |
| * Update the record of the current time in a context. |
| */ |
| static void update_context_time(struct perf_event_context *ctx) |
| { |
| u64 now = perf_clock(); |
| |
| ctx->time += now - ctx->timestamp; |
| ctx->timestamp = now; |
| } |
| |
| static u64 perf_event_time(struct perf_event *event) |
| { |
| struct perf_event_context *ctx = event->ctx; |
| |
| if (is_cgroup_event(event)) |
| return perf_cgroup_event_time(event); |
| |
| return ctx ? ctx->time : 0; |
| } |
| |
| static enum event_type_t get_event_type(struct perf_event *event) |
| { |
| struct perf_event_context *ctx = event->ctx; |
| enum event_type_t event_type; |
| |
| lockdep_assert_held(&ctx->lock); |
| |
| /* |
| * It's 'group type', really, because if our group leader is |
| * pinned, so are we. |
| */ |
| if (event->group_leader != event) |
| event = event->group_leader; |
| |
| event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE; |
| if (!ctx->task) |
| event_type |= EVENT_CPU; |
| |
| return event_type; |
| } |
| |
| /* |
| * Helper function to initialize event group nodes. |
| */ |
| static void init_event_group(struct perf_event *event) |
| { |
| RB_CLEAR_NODE(&event->group_node); |
| event->group_index = 0; |
| } |
| |
| /* |
| * Extract pinned or flexible groups from the context |
| * based on event attrs bits. |
| */ |
| static struct perf_event_groups * |
| get_event_groups(struct perf_event *event, struct perf_event_context *ctx) |
| { |
| if (event->attr.pinned) |
| return &ctx->pinned_groups; |
| else |
| return &ctx->flexible_groups; |
| } |
| |
| /* |
| * Helper function to initialize perf_event_groups trees. |
| */ |
| static void perf_event_groups_init(struct perf_event_groups *groups) |
| { |
| groups->tree = RB_ROOT; |
| groups->index = 0; |
| } |
| |
| static inline struct cgroup *event_cgroup(const struct perf_event *event) |
| { |
| struct cgroup *cgroup = NULL; |
| |
| #ifdef CONFIG_CGROUP_PERF |
| if (event->cgrp) |
| cgroup = event->cgrp->css.cgroup; |
| #endif |
| |
| return cgroup; |
| } |
| |
| /* |
| * Compare function for event groups; |
| * |
| * Implements a composite key that sorts by CPU, then by cgroup (under |
| * CONFIG_CGROUP_PERF) and finally by a virtual index which provides |
| * ordering when rotating groups for the same CPU. |
| */ |
| static __always_inline int |
| perf_event_groups_cmp(const int left_cpu, const struct cgroup *left_cgroup, |
| const u64 left_group_index, const struct perf_event *right) |
| { |
| if (left_cpu < right->cpu) |
| return -1; |
| if (left_cpu > right->cpu) |
| return 1; |
| |
| #ifdef CONFIG_CGROUP_PERF |
| { |
| const struct cgroup *right_cgroup = event_cgroup(right); |
| |
| if (left_cgroup != right_cgroup) { |
| if (!left_cgroup) { |
| /* |
| * Left has no cgroup but right does; events |
| * with no cgroup sort first. |
| */ |
| return -1; |
| } |
| if (!right_cgroup) { |
| /* |
| * Right has no cgroup but left does; events |
| * with no cgroup sort first. |
| */ |
| return 1; |
| } |
| /* Two dissimilar cgroups, order by id. */ |
| if (cgroup_id(left_cgroup) < cgroup_id(right_cgroup)) |
| return -1; |
| |
| return 1; |
| } |
| } |
| #endif |
| |
| if (left_group_index < right->group_index) |
| return -1; |
| if (left_group_index > right->group_index) |
| return 1; |
| |
| return 0; |
| } |
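| |
| /* |
| * Illustrative ordering example: given events A {cpu=0, no cgroup, idx=1}, |
| * B {cpu=0, no cgroup, idx=7} and C {cpu=1, idx=2}, the tree order is |
| * A < B < C. CPU is the primary key, cgroup the secondary key (no cgroup |
| * first, then by cgroup id) and group_index the final tie breaker, which |
| * is what lets rotation pick up "the next" event within one CPU subtree. |
| */ |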
| |
| #define __node_2_pe(node) \ |
| rb_entry((node), struct perf_event, group_node) |
| |
| static inline bool __group_less(struct rb_node *a, const struct rb_node *b) |
| { |
| struct perf_event *e = __node_2_pe(a); |
| return perf_event_groups_cmp(e->cpu, event_cgroup(e), e->group_index, |
| __node_2_pe(b)) < 0; |
| } |
| |
| struct __group_key { |
| int cpu; |
| struct cgroup *cgroup; |
| }; |
| |
| static inline int __group_cmp(const void *key, const struct rb_node *node) |
| { |
| const struct __group_key *a = key; |
| const struct perf_event *b = __node_2_pe(node); |
| |
| /* partial/subtree match: @cpu, @cgroup; ignore: @group_index */ |
| return perf_event_groups_cmp(a->cpu, a->cgroup, b->group_index, b); |
| } |
| |
| /* |
| * Insert @event into @groups' tree; using {@event->cpu, ++@groups->index} for |
| * the key (see __group_less()). This places it last inside the CPU |
| * subtree. |
| */ |
| static void |
| perf_event_groups_insert(struct perf_event_groups *groups, |
| struct perf_event *event) |
| { |
| event->group_index = ++groups->index; |
| |
| rb_add(&event->group_node, &groups->tree, __group_less); |
| } |
| |
| /* |
| * Helper function to insert event into the pinned or flexible groups. |
| */ |
| static void |
| add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) |
| { |
| struct perf_event_groups *groups; |
| |
| groups = get_event_groups(event, ctx); |
| perf_event_groups_insert(groups, event); |
| } |
| |
| /* |
| * Delete a group from a tree. |
| */ |
| static void |
| perf_event_groups_delete(struct perf_event_groups *groups, |
| struct perf_event *event) |
| { |
| WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) || |
| RB_EMPTY_ROOT(&groups->tree)); |
| |
| rb_erase(&event->group_node, &groups->tree); |
| init_event_group(event); |
| } |
| |
| /* |
| * Helper function to delete event from its groups. |
| */ |
| static void |
| del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) |
| { |
| struct perf_event_groups *groups; |
| |
| groups = get_event_groups(event, ctx); |
| perf_event_groups_delete(groups, event); |
| } |
| |
| /* |
| * Get the leftmost event in the cpu/cgroup subtree. |
| */ |
| static struct perf_event * |
| perf_event_groups_first(struct perf_event_groups *groups, int cpu, |
| struct cgroup *cgrp) |
| { |
| struct __group_key key = { |
| .cpu = cpu, |
| .cgroup = cgrp, |
| }; |
| struct rb_node *node; |
| |
| node = rb_find_first(&key, &groups->tree, __group_cmp); |
| if (node) |
| return __node_2_pe(node); |
| |
| return NULL; |
| } |
| |
| /* |
| * Like rb_entry_next_safe() for the @cpu subtree. |
| */ |
| static struct perf_event * |
| perf_event_groups_next(struct perf_event *event) |
| { |
| struct __group_key key = { |
| .cpu = event->cpu, |
| .cgroup = event_cgroup(event), |
| }; |
| struct rb_node *next; |
| |
| next = rb_next_match(&key, &event->group_node, __group_cmp); |
| if (next) |
| return __node_2_pe(next); |
| |
| return NULL; |
| } |
| |
| /* |
| * Iterate through the whole groups tree. |
| */ |
| #define perf_event_groups_for_each(event, groups) \ |
| for (event = rb_entry_safe(rb_first(&((groups)->tree)), \ |
| typeof(*event), group_node); event; \ |
| event = rb_entry_safe(rb_next(&event->group_node), \ |
| typeof(*event), group_node)) |
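| |
| /* |
| * Illustrative usage sketch: unlike perf_event_groups_first()/_next(), |
| * which stay within one {cpu, cgroup} subtree, this walks every group |
| * leader in the tree: |
| * |
| *	perf_event_groups_for_each(event, &ctx->pinned_groups) |
| *		... visit each pinned group leader ... |
| */ |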
| |
| /* |
| * Add an event to the lists for its context. |
| * Must be called with ctx->mutex and ctx->lock held. |
| */ |
| static void |
| list_add_event(struct perf_event *event, struct perf_event_context *ctx) |
| { |
| lockdep_assert_held(&ctx->lock); |
| |
| WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); |
| event->attach_state |= PERF_ATTACH_CONTEXT; |
| |
| event->tstamp = perf_event_time(event); |
| |
| /* |
| * If we're a standalone event or group leader, we go to the context |
| * list; group events are kept attached to the group so that |
| * perf_group_detach() can, at all times, locate all siblings. |
| */ |
| if (event->group_leader == event) { |
| event->group_caps = event->event_caps; |
| add_event_to_groups(event, ctx); |
| } |
| |
| list_add_rcu(&event->event_entry, &ctx->event_list); |
| ctx->nr_events++; |
| if (event->attr.inherit_stat) |
| ctx->nr_stat++; |
| |
| if (event->state > PERF_EVENT_STATE_OFF) |
| perf_cgroup_event_enable(event, ctx); |
| |
| ctx->generation++; |
| } |
| |
| /* |
| * Initialize event state based on the perf_event_attr::disabled. |
| */ |
| static inline void perf_event__state_init(struct perf_event *event) |
| { |
| event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : |
| PERF_EVENT_STATE_INACTIVE; |
| } |
| |
| static void __perf_event_read_size(struct perf_event *event, int nr_siblings) |
| { |
| int entry = sizeof(u64); /* value */ |
| int size = 0; |
| int nr = 1; |
| |
| if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) |
| size += sizeof(u64); |
| |
| if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) |
| size += sizeof(u64); |
| |
| if (event->attr.read_format & PERF_FORMAT_ID) |
| entry += sizeof(u64); |
| |
| if (event->attr.read_format & PERF_FORMAT_GROUP) { |
| nr += nr_siblings; |
| size += sizeof(u64); |
| } |
| |
| size += entry * nr; |
| event->read_size = size; |
| } |
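| |
| /* |
| * Worked example (illustrative): for a group leader with two siblings and |
| * read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID, each entry is |
| * value + id = 16 bytes and nr = 3 entries plus the leading u64 nr field, |
| * so event->read_size = 8 + 3 * 16 = 56 bytes. |
| */ |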
| |
| static void __perf_event_header_size(struct perf_event *event, u64 sample_type) |
| { |
| struct perf_sample_data *data; |
| u16 size = 0; |
| |
| if (sample_type & PERF_SAMPLE_IP) |
| size += sizeof(data->ip); |
| |
| if (sample_type & PERF_SAMPLE_ADDR) |
| size += sizeof(data->addr); |
| |
| if (sample_type & PERF_SAMPLE_PERIOD) |
| size += sizeof(data->period); |
| |
| if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) |
| size += sizeof(data->weight.full); |
| |
| if (sample_type & PERF_SAMPLE_READ) |
| size += event->read_size; |
| |
| if (sample_type & PERF_SAMPLE_DATA_SRC) |
| size += sizeof(data->data_src.val); |
| |
| if (sample_type & PERF_SAMPLE_TRANSACTION) |
| size += sizeof(data->txn); |
| |
| if (sample_type & PERF_SAMPLE_PHYS_ADDR) |
| size += sizeof(data->phys_addr); |
| |
| if (sample_type & PERF_SAMPLE_CGROUP) |
| size += sizeof(data->cgroup); |
| |
| if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE) |
| size += sizeof(data->data_page_size); |
| |
| if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE) |
| size += sizeof(data->code_page_size); |
| |
| event->header_size = size; |
| } |
| |
| /* |
| * Called at perf_event creation and when events are attached/detached from a |
| * group. |
| */ |
| static void perf_event__header_size(struct perf_event *event) |
| { |
| __perf_event_read_size(event, |
| event->group_leader->nr_siblings); |
| __perf_event_header_size(event, event->attr.sample_type); |
| } |
| |
| static void perf_event__id_header_size(struct perf_event *event) |
| { |
| struct perf_sample_data *data; |
| u64 sample_type = event->attr.sample_type; |
| u16 size = 0; |
| |
| if (sample_type & PERF_SAMPLE_TID) |
| size += sizeof(data->tid_entry); |
| |
| if (sample_type & PERF_SAMPLE_TIME) |
| size += sizeof(data->time); |
| |
| if (sample_type & PERF_SAMPLE_IDENTIFIER) |
| size += sizeof(data->id); |
| |
| if (sample_type & PERF_SAMPLE_ID) |
| size += sizeof(data->id); |
| |
| if (sample_type & PERF_SAMPLE_STREAM_ID) |
| size += sizeof(data->stream_id); |
| |
| if (sample_type & PERF_SAMPLE_CPU) |
| size += sizeof(data->cpu_entry); |
| |
| event->id_header_size = size; |
| } |
| |
| static bool perf_event_validate_size(struct perf_event *event) |
| { |
| /* |
| * The values computed here will be over-written when we actually |
| * attach the event. |
| */ |
| __perf_event_read_size(event, event->group_leader->nr_siblings + 1); |
| __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); |
| perf_event__id_header_size(event); |
| |
| /* |
| * Sum the lot; should not exceed the 64k limit we have on records. |
| * Conservative limit to allow for callchains and other variable fields. |
| */ |
| if (event->read_size + event->header_size + |
| event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) |
| return false; |
| |
| return true; |
| } |
| |
| static void perf_group_attach(struct perf_event *event) |
| { |
| struct perf_event *group_leader = event->group_leader, *pos; |
| |
| lockdep_assert_held(&event->ctx->lock); |
| |
| /* |
| * We can have double attach due to group movement in perf_event_open. |
| */ |
| if (event->attach_state & PERF_ATTACH_GROUP) |
| return; |
| |
| event->attach_state |= PERF_ATTACH_GROUP; |
| |
| if (group_leader == event) |
| return; |
| |
| WARN_ON_ONCE(group_leader->ctx != event->ctx); |
| |
| group_leader->group_caps &= event->event_caps; |
| |
| list_add_tail(&event->sibling_list, &group_leader->sibling_list); |
| group_leader->nr_siblings++; |
| |
| perf_event__header_size(group_leader); |
| |
| for_each_sibling_event(pos, group_leader) |
| perf_event__header_size(pos); |
| } |
| |
| /* |
| * Remove an event from the lists for its context. |
| * Must be called with ctx->mutex and ctx->lock held. |
| */ |
| static void |
| list_del_event(struct perf_event *event, struct perf_event_context *ctx) |
| { |
| WARN_ON_ONCE(event->ctx != ctx); |
| lockdep_assert_held(&ctx->lock); |
| |
| /* |
| * We can have double detach due to exit/hot-unplug + close. |
| */ |
| if (!(event->attach_state & PERF_ATTACH_CONTEXT)) |
| return; |
| |
| event->attach_state &= ~PERF_ATTACH_CONTEXT; |
| |
| ctx->nr_events--; |
| if (event->attr.inherit_stat) |
| ctx->nr_stat--; |
| |
| list_del_rcu(&event->event_entry); |
| |
| if (event->group_leader == event) |
| del_event_from_groups(event, ctx); |
| |
| /* |
| * If event was in error state, then keep it |
| * that way, otherwise bogus counts will be |
| * returned on read(). The only way to get out |
| * of error state is by explicit re-enabling |
| * of the event |
| */ |
| if (event->state > PERF_EVENT_STATE_OFF) { |
| perf_cgroup_event_disable(event, ctx); |
| perf_event_set_state(event, PERF_EVENT_STATE_OFF); |
| } |
| |
| ctx->generation++; |
| } |
| |
| static int |
| perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) |
| { |
| if (!has_aux(aux_event)) |
| return 0; |
| |
| if (!event->pmu->aux_output_match) |
| return 0; |
| |
| return event->pmu->aux_output_match(aux_event); |
| } |
| |
| static void put_event(struct perf_event *event); |
| static void event_sched_out(struct perf_event *event, |
| struct perf_cpu_context *cpuctx, |
| struct perf_event_context *ctx); |
| |
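| /* |
| * Undo the linkage set up by perf_get_aux_event(): either drop this |
| * event's reference on its aux event, or, if this event is itself an |
| * aux event, sever the links from all of its aux_output siblings so |
| * they cannot be scheduled any more. |
| */ |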
| static void perf_put_aux_event(struct perf_event *event) |
| { |
| struct perf_event_context *ctx = event->ctx; |
| struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
| struct perf_event *iter; |
| |
| /* |
| * If event uses aux_event tear down the link |
| */ |
| if (event->aux_event) { |
| iter = event->aux_event; |
| event->aux_event = NULL; |
| put_event(iter); |
| return; |
| } |
| |
| /* |
| * If the event is an aux_event, tear down all links to |
| * it from other events. |
| */ |
| for_each_sibling_event(iter, event->group_leader) { |
| if (iter->aux_event != event) |
| continue; |
| |
| iter->aux_event = NULL; |
| put_event(event); |
| |
| /* |
| * If it's ACTIVE, schedule it out and put it into ERROR |
| * state so that we don't try to schedule it again. Note |
| * that perf_event_enable() will clear the ERROR status. |
| */ |
| event_sched_out(iter, cpuctx, ctx); |
| perf_event_set_state(event, PERF_EVENT_STATE_ERROR); |
| } |
| } |
| |
| static bool perf_need_aux_event(struct perf_event *event) |
| { |
| return !!event->attr.aux_output || !!event->attr.aux_sample_size; |
| } |
| |
| static int perf_get_aux_event(struct perf_event *event, |
| struct perf_event *group_leader) |
| { |
| /* |
| * Our group leader must be an aux event if we want to be |
| * an aux_output. This way, the aux event will precede its |
| * aux_output events in the group, and therefore will always |
| * schedule first. |
| */ |
| if (!group_leader) |
| return 0; |
| |
| /* |
| * aux_output and aux_sample_size are mutually exclusive. |
| */ |
| if (event->attr.aux_output && event->attr.aux_sample_size) |
| return 0; |
| |
| if (event->attr.aux_output && |
| !perf_aux_output_match(event, group_leader)) |
| return 0; |
| |
| if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) |
| return 0; |
| |
| if (!atomic_long_inc_not_zero(&group_leader->refcount)) |
| return 0; |
| |
| /* |
| * Link aux_outputs to their aux event; this is undone in |
| * perf_group_detach() by perf_put_aux_event(). When the |
| * group is torn down, the aux_output events lose their |
| * link to the aux_event and can't schedule any more. |
| */ |
| event->aux_event = group_leader; |
| |
| return 1; |
| } |
| |
| static inline struct list_head *get_event_list(struct perf_event *event) |
| { |
| struct perf_event_context *ctx = event->ctx; |
| return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active; |
| } |
| |
| /* |
| * Events that have PERF_EV_CAP_SIBLING require being part of a group and |
| * cannot exist on their own, schedule them out and move them into the ERROR |
| * state. Also see _perf_event_enable(); it will not be able to recover |
| * this ERROR state. |
| */ |
| static inline void perf_remove_sibling_event(struct perf_event *event) |
| { |
| struct perf_event_context *ctx = event->ctx; |
| struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
| |
| event_sched_out(event, cpuctx, ctx); |
| perf_event_set_state(event, PERF_EVENT_STATE_ERROR); |
| } |
| |
| static void perf_group_detach(struct perf_event *event) |
| { |
| struct perf_event *leader = event->group_leader; |
| struct perf_event *sibling, *tmp; |
| struct perf_event_context *ctx = event->ctx; |
| |
| lockdep_assert_held(&ctx->lock); |
| |
| /* |
| * We can have double detach due to exit/hot-unplug + close. |
| */ |
| if (!(event->attach_state & PERF_ATTACH_GROUP)) |
| return; |
| |
| event->attach_state &= ~PERF_ATTACH_GROUP; |
| |
| perf_put_aux_event(event); |
| |
| /* |
| * If this is a sibling, remove it from its group. |
| */ |
| if (leader != event) { |
| list_del_init(&event->sibling_list); |
| event->group_leader->nr_siblings--; |
| goto out; |
| } |
| |
| /* |
| * If this was a group event with sibling events then |
| * upgrade the siblings to singleton events by adding them |
| * to whatever list we are on. |
| */ |
| list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { |
| |
| if (sibling->event_caps & PERF_EV_CAP_SIBLING) |
| perf_remove_sibling_event(sibling); |
| |
| sibling->group_leader = sibling; |
| list_del_init(&sibling->sibling_list); |
| |
| /* Inherit group flags from the previous leader */ |
| sibling->group_caps = event->group_caps; |
| |
| if (!RB_EMPTY_NODE(&event->group_node)) { |
| add_event_to_groups(sibling, event->ctx); |
| |
| if (sibling->state == PERF_EVENT_STATE_ACTIVE) |
| list_add_tail(&sibling->active_list, get_event_list(sibling)); |
| } |
| |
| WARN_ON_ONCE(sibling->ctx != event->ctx); |
| } |
| |
| out: |
| for_each_sibling_event(tmp, leader) |
| perf_event__header_size(tmp); |
| |
| perf_event__header_size(leader); |
| } |
| |
| static void sync_child_event(struct perf_event *child_event); |
| |
| static void perf_child_detach(struct perf_event *event) |
| { |
| struct perf_event *parent_event = event->parent; |
| |
| if (!(event->attach_state & PERF_ATTACH_CHILD)) |
| return; |
| |
| event->attach_state &= ~PERF_ATTACH_CHILD; |
| |
| if (WARN_ON_ONCE(!parent_event)) |
| return; |
| |
| lockdep_assert_held(&parent_event->child_mutex); |
| |
| sync_child_event(event); |
| list_del_init(&event->child_list); |
| } |
| |
| static bool is_orphaned_event(struct perf_event *event) |
| { |
| return event->state == PERF_EVENT_STATE_DEAD; |
| } |
| |
| static inline int __pmu_filter_match(struct perf_event *event) |
| { |
| struct pmu *pmu = event->pmu; |
| return pmu->filter_match ? pmu->filter_match(event) : 1; |
| } |
| |
| /* |
| * Check whether we should attempt to schedule an event group based on |
| * PMU-specific filtering. An event group can consist of HW and SW events, |
| * potentially with a SW leader, so we must check all the filters to |
| * determine whether a group is schedulable: |
| */ |
| static inline int pmu_filter_match(struct perf_event *event) |
| { |
| struct perf_event *sibling; |
| |
| if (!__pmu_filter_match(event)) |
| return 0; |
| |
| for_each_sibling_event(sibling, event) { |
| if (!__pmu_filter_match(sibling)) |
| return 0; |
| } |
| |
| return 1; |
| } |
| |
| static inline int |
| event_filter_match(struct perf_event *event) |
| { |
| return (event->cpu == -1 || event->cpu == smp_processor_id()) && |
| perf_cgroup_match(event) && pmu_filter_match(event); |
| } |
| |
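| /* |
| * Stop a single ACTIVE event on the PMU and update the context's |
| * accounting of active events. The event normally drops to INACTIVE, |
| * or to OFF if a disable was pending. |
| */ |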
| static void |
| event_sched_out(struct perf_event *event, |
| struct perf_cpu_context *cpuctx, |
| struct perf_event_context *ctx) |
| { |
| enum perf_event_state state = PERF_EVENT_STATE_INACTIVE; |
| |
| WARN_ON_ONCE(event->ctx != ctx); |
| lockdep_assert_held(&ctx->lock); |
| |
| if (event->state != PERF_EVENT_STATE_ACTIVE) |
| return; |
| |
| /* |
| * Asymmetry; we only schedule events _IN_ through ctx_sched_in(), but |
| * we can schedule events _OUT_ individually through things like |
| * __perf_remove_from_context(). |
| */ |
| list_del_init(&event->active_list); |
| |
| perf_pmu_disable(event->pmu); |
| |
| event->pmu->del(event, 0); |
| event->oncpu = -1; |
| |
| if (READ_ONCE(event->pending_disable) >= 0) { |
| WRITE_ONCE(event->pending_disable, -1); |
| perf_cgroup_event_disable(event, ctx); |
| state = PERF_EVENT_STATE_OFF; |
| } |
| perf_event_set_state(event, state); |
| |
| if (!is_software_event(event)) |
| cpuctx->active_oncpu--; |
| if (!--ctx->nr_active) |
| perf_event_ctx_deactivate(ctx); |
| if (event->attr.freq && event->attr.sample_freq) |
| ctx->nr_freq--; |
| if (event->attr.exclusive || !cpuctx->active_oncpu) |
| cpuctx->exclusive = 0; |
| |
| perf_pmu_enable(event->pmu); |
| } |
| |
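| /* |
| * Schedule out an entire event group: the group leader first, then all |
| * of its siblings, with the PMU disabled around the whole operation. |
| */ |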
| static void |
| group_sched_out(struct perf_event *group_event, |
| struct perf_cpu_context *cpuctx, |
| struct perf_event_context *ctx) |
| { |
| struct perf_event *event; |
| |
| if (group_event->state != PERF_EVENT_STATE_ACTIVE) |
| return; |
| |
| perf_pmu_disable(ctx->pmu); |
| |
| event_sched_out(group_event, cpuctx, ctx); |
| |
| /* |
| * Schedule out siblings (if any): |
| */ |
| for_each_sibling_event(event, group_event) |
| event_sched_out(event, cpuctx, ctx); |
| |
| perf_pmu_enable(ctx->pmu); |
| } |
| |
| #define DETACH_GROUP 0x01UL |
| #define DETACH_CHILD 0x02UL |
| |
| /* |
| * Cross CPU call to remove a performance event |
| * |
| * We disable the event on the hardware level first. After that we |
| * remove it from the context list. |
| */ |
| static void |
| __perf_remove_from_context(struct perf_event *event, |
| struct perf_cpu_context *cpuctx, |
| struct perf_event_context *ctx, |
| void *info) |
| { |
| unsigned long flags = (unsigned long)info; |
| |
| if (ctx->is_active & EVENT_TIME) { |
| update_context_time(ctx); |
| update_cgrp_time_from_cpuctx(cpuctx); |
| } |
| |
| event_sched_out(event, cpuctx, ctx); |
| if (flags & DETACH_GROUP) |
| perf_group_detach(event); |
| if (flags & DETACH_CHILD) |
| perf_child_detach(event); |
| list_del_event(event, ctx); |
| |
| if (!ctx->nr_events && ctx->is_active) { |
| ctx->is_active = 0; |
| ctx->rotate_necessary = 0; |
| if (ctx->task) { |
| WARN_ON_ONCE(cpuctx->task_ctx != ctx); |
| cpuctx->task_ctx = NULL; |
| } |
| } |
| } |
| |
| /* |
| * Remove the event from a task's (or a CPU's) list of events. |
| * |
| * If event->ctx is a cloned context, callers must make sure that |
| * every task struct that event->ctx->task could possibly point to |
| * remains valid. This is OK when called from perf_release since |
| * that only calls us on the top-level context, which can't be a clone. |
| * When called from perf_event_exit_task, it's OK because the |
| * context has been detached from its task. |
| */ |
| static void perf_remove_from_context(struct perf_event *event, unsigned long flags) |
| { |
| struct perf_event_context *ctx = event->ctx; |
| |
| lockdep_assert_held(&ctx->mutex); |
| |
| /* |
| * Because of perf_event_exit_task(), perf_remove_from_context() ought |
| * to work in the face of TASK_TOMBSTONE, unlike every other |
| * event_function_call() user. |
| */ |
| raw_spin_lock_irq(&ctx->lock); |
| if (!ctx->is_active) { |
| __perf_remove_from_context(event, __get_cpu_context(ctx), |
| ctx, (void *)flags); |
| raw_spin_unlock_irq(&ctx->lock); |
| return; |
| } |
| raw_spin_unlock_irq(&ctx->lock); |
| |
| event_function_call(event, __perf_remove_from_context, (void *)flags); |
| } |
| |
| /* |
| * Cross CPU call to disable a performance event |
| */ |
| static void __perf_event_disable(struct perf_event *event, |
| struct perf_cpu_context *cpuctx, |
| struct perf_event_context *ctx, |
| void *info) |
| { |
| if (event->state < PERF_EVENT_STATE_INACTIVE) |
| return; |
| |
| if (ctx->is_active & EVENT_TIME) { |
| update_context_time(ctx); |
| update_cgrp_time_from_event(event); |
| } |
| |
| if (event == event->group_leader) |
| group_sched_out(event, cpuctx, ctx); |
| else |
| event_sched_out(event, cpuctx, ctx); |
| |
| perf_event_set_state(event, PERF_EVENT_STATE_OFF); |
| perf_cgroup_event_disable(event, ctx); |
| } |
| |
| /* |
| * Disable an event. |
| * |
| * If event->ctx is a cloned context, callers must make sure that |
| * every task struct that event->ctx->task could possibly point to |
| * remains valid. This condition is satisfied when called through |
| * perf_event_for_each_child or perf_event_for_each because they |
| * hold the top-level event's child_mutex, so any descendant that |
| * goes to exit will block in perf_event_exit_event(). |
| * |
| * When called from perf_pending_event it's OK because event->ctx |
| * is the current context on this CPU and preemption is disabled, |
| * hence we can't get into perf_event_task_sched_out for this context. |
| */ |
| static void _perf_event_disable(struct perf_event *event) |
| { |
| struct perf_event_context *ctx = event->ctx; |
| |
| raw_spin_lock_irq(&ctx->lock); |
| if (event->state <= PERF_EVENT_STATE_OFF) { |
| raw_spin_unlock_irq(&ctx->lock); |
| return; |
| } |
| raw_spin_unlock_irq(&ctx->lock); |
| |
| event_function_call(event, __perf_event_disable, NULL); |
| } |
| |
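| /* |
| * Variant of perf_event_disable() for callers already running on the |
| * CPU the event's context is active on; avoids the cross-CPU call. |
| */ |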
| void perf_event_disable_local(struct perf_event *event) |
| { |
| event_function_local(event, __perf_event_disable, NULL); |
| } |
| |
| /* |
| * Strictly speaking kernel users cannot create groups and therefore this |
| * interface does not need the perf_event_ctx_lock() magic. |
| */ |
| void perf_event_disable(struct perf_event *event) |
| { |
| struct perf_event_context *ctx; |
| |
| ctx = perf_event_ctx_lock(event); |
| _perf_event_disable(event); |
| perf_event_ctx_unlock(event, ctx); |
| } |
| EXPORT_SYMBOL_GPL(perf_event_disable); |
| |
| void perf_event_disable_inatomic(struct perf_event *event) |
| { |
| WRITE_ONCE(event->pending_disable, smp_processor_id()); |
| /* can fail, see perf_pending_event_disable() */ |
| irq_work_queue(&event->pending); |
| } |
| |
| static void perf_set_shadow_time(struct perf_event *event, |
| struct perf_event_context *ctx) |
| { |
| /* |
| * use the correct time source for the time snapshot |
| * |
| * We could get by without this by leveraging the |
| * fact that to get to this function, the caller |
| * has most likely already called update_context_time() |
| * and update_cgrp_time_xx() and thus both timestamps |
| * are identical (or very close). Given that tstamp is |
| * already adjusted for cgroup, we could say that: |
| * tstamp - ctx->timestamp |
| * is equivalent to |
| * tstamp - cgrp->timestamp. |
| * |
| * Then, in perf_output_read(), the calculation would |
| * work with no changes because: |
| * - event is guaranteed scheduled in |
| * - not scheduled out in between |
| * - thus the timestamp would be the same |
| * |
| * But this is a bit hairy. |
| * |
| * So instead, we have an explicit cgroup call to remain |
| * within the time source all along. We believe it |
| * is cleaner and simpler to understand. |
| */ |
| if (is_cgroup_event(event)) |
| perf_cgroup_set_shadow_time(event, event->tstamp); |
| else |
| event->shadow_ctx_time = event->tstamp - ctx->timestamp; |
| } |
| |
| #define MAX_INTERRUPTS (~0ULL) |
| |
| static void perf_log_throttle(struct perf_event *event, int enable); |
| static void perf_log_itrace_start(struct perf_event *event); |
| |
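| /* |
| * Schedule a single event onto the PMU. On success the event becomes |
| * ACTIVE and the context's active counts are bumped; if the PMU refuses |
| * the event, it is flipped back to INACTIVE and -EAGAIN is returned. |
| */ |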
| static int |
| event_sched_in(struct perf_event *event, |
| struct perf_cpu_context *cpuctx, |
| struct perf_event_context *ctx) |
| { |
| int ret = 0; |
| |
| WARN_ON_ONCE(event->ctx != ctx); |
| |
| lockdep_assert_held(&ctx->lock); |
| |
| if (event->state <= PERF_EVENT_STATE_OFF) |
| return 0; |
| |
| WRITE_ONCE(event->oncpu, smp_processor_id()); |
| /* |
| * Order event::oncpu write to happen before the ACTIVE state is |
| * visible. This allows perf_event_{stop,read}() to observe the correct |
| * ->oncpu if it sees ACTIVE. |
| */ |
| smp_wmb(); |
| perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); |
| |
| /* |
| * Unthrottle events: we might have missed several ticks while we were |
| * scheduled out, and for a heavily scheduling task there is little |
| * guarantee it'll get a tick in a timely manner. |
| */ |
| if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { |
| perf_log_throttle(event, 1); |
| event->hw.interrupts = 0; |
| } |
| |
| perf_pmu_disable(event->pmu); |
| |
| perf_set_shadow_time(event, ctx); |
| |
| perf_log_itrace_start(event); |
| |
| if (event->pmu->add(event, PERF_EF_START)) { |
| perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); |
| event->oncpu = -1; |
| ret = -EAGAIN; |
| goto out; |
| } |
| |
| if (!is_software_event(event)) |
| cpuctx->active_oncpu++; |
| if (!ctx->nr_active++) |
| perf_event_ctx_activate(ctx); |
| if (event->attr.freq && event->attr.sample_freq) |
| ctx->nr_freq++; |
| |
| if (event->attr.exclusive) |
| cpuctx->exclusive = 1; |
| |
| out: |
| perf_pmu_enable(event->pmu); |
| |
| return ret; |
| } |
| |
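| /* |
| * Schedule in an event group as a single PMU transaction: the leader |
| * first, then its siblings. If any member fails, the partial group is |
| * unwound and the whole transaction is cancelled. |
| */ |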
| static int |
| group_sched_in(struct perf_event *group_event, |
| struct perf_cpu_context *cpuctx, |
| struct perf_event_context *ctx) |
| { |
| struct perf_event *event, *partial_group = NULL; |
| struct pmu *pmu = ctx->pmu; |
| |
| if (group_event->state == PERF_EVENT_STATE_OFF) |
| return 0; |
| |
| pmu->start_txn(pmu, PERF_PMU_TXN_ADD); |
| |
| if (event_sched_in(group_event, cpuctx, ctx)) |
| goto error; |
| |
| /* |
| * Schedule in siblings as one group (if any): |
| */ |
| for_each_sibling_event(event, group_event) { |
| if (event_sched_in(event, cpuctx, ctx)) { |
| partial_group = event; |
| goto group_error; |
| } |
| } |
| |
| if (!pmu->commit_txn(pmu)) |
| return 0; |
| |
| group_error: |
| /* |
| * Groups can be scheduled in as one unit only, so undo any |
| * partial group before returning: |
| * The events up to the failed event are scheduled out normally. |
| */ |
| for_each_sibling_event(event, group_event) { |
| if (event == partial_group) |
| break; |
| |
| event_sched_out(event, cpuctx, ctx); |
| } |
| event_sched_out(group_event, cpuctx, ctx); |
| |
| error: |
| pmu->cancel_txn(pmu); |
| return -EAGAIN; |
| } |
| |
| /* |
| * Work out whether we can put this event group on the CPU now. |
| */ |
| static int group_can_go_on(struct perf_event *event, |
| struct perf_cpu_context *cpuctx, |
| int can_add_hw) |
| { |
| /* |
| * Groups consisting entirely of software events can always go on. |
| */ |
| if (event->group_caps & PERF_EV_CAP_SOFTWARE) |
| return 1; |
| /* |
| * If an exclusive group is already on, no other hardware |
| * events can go on. |
| */ |
| if (cpuctx->exclusive) |
| return 0; |
| /* |
| * If this group is exclusive and there are already |
| * events on the CPU, it can't go on. |
| */ |
| if (event->attr.exclusive && !list_empty(get_event_list(event))) |
| return 0; |
| /* |
| * Otherwise, try to add it if all previous groups were able |
| * to go on. |
| */ |
| return can_add_hw; |
| } |
| |
| static void add_event_to_ctx(struct perf_event *event, |
| struct perf_event_context *ctx) |
| { |
| list_add_event(event, ctx); |
| perf_group_attach(event); |
| } |
| |
| static void ctx_sched_out(struct perf_event_context *ctx, |
| struct perf_cpu_context *cpuctx, |
| enum event_type_t event_type); |
| static void |
| ctx_sched_in(struct perf_event_context *ctx, |
| struct perf_cpu_context *cpuctx, |
| enum event_type_t event_type, |
| struct task_struct *task); |
| |
| static void task_ctx_sched_out(struct perf_cpu_context *cpuctx, |
| struct perf_event_context *ctx, |
| enum event_type_t event_type) |
| { |
| if (!cpuctx->task_ctx) |
| return; |
| |
| if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) |
| return; |
| |
| ctx_sched_out(ctx, cpuctx, event_type); |
| } |
| |
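| /* |
| * Schedule in the CPU context and, if given, the task context, with |
| * pinned groups before flexible ones so that pinned events get first |
| * pick of the hardware counters. |
| */ |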
| static void perf_event_sched_in(struct perf_cpu_context *cpuctx, |
| struct perf_event_context *ctx, |
| struct task_struct *task) |
| { |
| cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); |
| if (ctx) |
| ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); |
| cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); |
| if (ctx) |
| ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); |
| } |
| |
| /* |
| * We want to maintain the following priority of scheduling: |
| * - CPU pinned (EVENT_CPU | EVENT_PINNED) |
| * - task pinned (EVENT_PINNED) |
| * - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE) |
| * - task flexible (EVENT_FLEXIBLE). |
| * |
| * In order to avoid unscheduling and scheduling back in everything every |
| * time an event is added, only do it for the groups of equal priority and |
| * below. |
| * |
| * This can be called after a batch operation on task events, in which case |
| * event_type is a bit mask of the types of events involved. For CPU events, |
| * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE. |
| */ |
| static void ctx_resched(struct perf_cpu_context *cpuctx, |
| struct perf_event_context *task_ctx, |
| enum event_type_t event_type) |
| { |
| enum event_type_t ctx_event_type; |
| bool cpu_event = !!(event_type & EVENT_CPU); |
| |
| /* |
| * If pinned groups are involved, flexible groups also need to be |
| * scheduled out. |
| */ |
| if (event_type & EVENT_PINNED) |
| event_type |= EVENT_FLEXIBLE; |
| |
| ctx_event_type = event_type & EVENT_ALL; |
| |
| perf_pmu_disable(cpuctx->ctx.pmu); |
| if (task_ctx) |
| task_ctx_sched_out(cpuctx, task_ctx, event_type); |
| |
| /* |
| * Decide which cpu ctx groups to schedule out based on the types |
| * of events that caused rescheduling: |
| * - EVENT_CPU: schedule out corresponding groups; |
| * - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups; |
| * - otherwise, do nothing more. |
| */ |
| if (cpu_event) |
| cpu_ctx_sched_out(cpuctx, ctx_event_type); |
| else if (ctx_event_type & EVENT_PINNED) |
| cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); |
| |
| perf_event_sched_in(cpuctx, task_ctx, current); |
| perf_pmu_enable(cpuctx->ctx.pmu); |
| } |
| |
| void perf_pmu_resched(struct pmu *pmu) |
| { |
| struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); |
| struct perf_event_context *task_ctx = cpuctx->task_ctx; |
| |
| perf_ctx_lock(cpuctx, task_ctx); |
| ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU); |
| perf_ctx_unlock(cpuctx, task_ctx); |
| } |
| |
| /* |
| * Cross CPU call to install and enable a performance event |
| * |
| * Very similar to remote_function() + event_function() but cannot assume that |
| * things like ctx->is_active and cpuctx->task_ctx are set. |
| */ |
| static int __perf_install_in_context(void *info) |
| { |
| struct perf_event *event = info; |
| struct perf_event_context *ctx = event->ctx; |
| struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
| struct perf_event_context *task_ctx = cpuctx->task_ctx; |
| bool reprogram = true; |
| int ret = 0; |
| |
| raw_spin_lock(&cpuctx->ctx.lock); |
| if (ctx->task) { |
| raw_spin_lock(&ctx->lock); |
| task_ctx = ctx; |
| |
| reprogram = (ctx->task == current); |
| |
| /* |
| * If the task is running, it must be running on this CPU, |
| * otherwise we cannot reprogram things. |
| * |
| * If it's not running, we don't care; ctx->lock will |
| * serialize against it becoming runnable. |
| */ |
| if (task_curr(ctx->task) && !reprogram) { |
| ret = -ESRCH; |
| goto unlock; |
| } |
| |
| WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx); |
| } else if (task_ctx) { |
| raw_spin_lock(&task_ctx->lock); |
| } |
| |
| #ifdef CONFIG_CGROUP_PERF |
| if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) { |
| /* |
| * If the current cgroup doesn't match the event's |
| * cgroup, we should not try to schedule it. |
| */ |
| struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx); |
| reprogram = cgroup_is_descendant(cgrp->css.cgroup, |
| event->cgrp->css.cgroup); |
| } |
| #endif |
| |
| if (reprogram) { |
| ctx_sched_out(ctx, cpuctx, EVENT_TIME); |
| add_event_to_ctx(event, ctx); |
| ctx_resched(cpuctx, task_ctx, get_event_type(event)); |
| } else { |
| add_event_to_ctx(event, ctx); |
| } |
| |
| unlock: |
| perf_ctx_unlock(cpuctx, task_ctx); |
| |
| return ret; |
| } |
| |
| static bool exclusive_event_installable(struct perf_event *event, |
| struct perf_event_context *ctx); |
| |
| /* |
| * Attach a performance event to a context. |
| * |
| * Very similar to event_function_call, see comment there. |
| */ |
| static void |
| perf_install_in_context(struct perf_event_context *ctx, |
| struct perf_event *event, |
| int cpu) |
| { |
| struct task_struct *task = READ_ONCE(ctx->task); |
| |
| lockdep_assert_held(&ctx->mutex); |
| |
| WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); |
| |
| if (event->cpu != -1) |
| event->cpu = cpu; |
| |
| /* |
| * Ensures that if we can observe event->ctx, both the event and ctx |
| * will be 'complete'. See perf_iterate_sb_cpu(). |
| */ |
| smp_store_release(&event->ctx, ctx); |
| |
| /* |
| * perf_event_attr::disabled events will not run and can be initialized |
| * without IPI. Except when this is the first event for the context, in |
| * that case we need the magic of the IPI to set ctx->is_active. |
| * |
| * The IOC_ENABLE that is sure to follow the creation of a disabled |
| * event will issue the IPI and reprogram the hardware. |
| */ |
| if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && ctx->nr_events) { |
| raw_spin_lock_irq(&ctx->lock); |
| if (ctx->task == TASK_TOMBSTONE) { |
| raw_spin_unlock_irq(&ctx->lock); |
| return; |
| } |
| add_event_to_ctx(event, ctx); |
| raw_spin_unlock_irq(&ctx->lock); |
| return; |
| } |
| |
| if (!task) { |
| cpu_function_call(cpu, __perf_install_in_context, event); |
| return; |
| } |
| |
| /* |
| * Should not happen, we validate the ctx is still alive before calling. |
| */ |
| if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) |
| return; |
| |
| /* |
| * Installing events is tricky because we cannot rely on ctx->is_active |
| * to be set in case this is the nr_events 0 -> 1 transition. |
| * |
| * Instead we use task_curr(), which tells us if the task is running. |
| * However, since we use task_curr() outside of rq::lock, we can race |
| * against the actual state. This means the result can be wrong. |
| * |
| * If we get a false positive, we retry; this is harmless. |
| * |
| * If we get a false negative, things are complicated. If we are after |
| * perf_event_context_sched_in() ctx::lock will serialize us, and the |
| * value must be correct. If we're before, it doesn't matter since |
| * perf_event_context_sched_in() will program the counter. |
| * |
| * However, this hinges on the remote context switch having observed |
| * our task->perf_event_ctxp[] store, such that it will in fact take |
| * ctx::lock in perf_event_context_sched_in(). |
| * |
| * We do this by task_function_call(); if the IPI fails to hit the task, |
| * we know any future context switch of the task must see the |
| * perf_event_ctxp[] store. |
| */ |
| |
| /* |
| * This smp_mb() orders the task->perf_event_ctxp[] store with the |
| * task_cpu() load, such that if the IPI then does not find the task |
| * running, a future context switch of that task must observe the |
| * store. |
| */ |
| smp_mb(); |
| again: |
| if (!task_function_call(task, __perf_install_in_context, event)) |
| return; |
| |
| raw_spin_lock_irq(&ctx->lock); |
| task = ctx->task; |
| if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) { |
| /* |
| * Cannot happen because we already checked above (which also |
| * cannot happen), and we hold ctx->mutex, which serializes us |
| * against perf_event_exit_task_context(). |
| */ |
| raw_spin_unlock_irq(&ctx->lock); |
| return; |
| } |
| /* |
| * If the task is not running, ctx->lock will avoid it becoming so, |
| * thus we can safely install the event. |
| */ |
| if (task_curr(task)) { |
| raw_spin_unlock_irq(&ctx->lock); |
| goto again; |
| } |
| add_event_to_ctx(event, ctx); |
| raw_spin_unlock_irq(&ctx->lock); |
| } |
| |
| /* |
| * Cross CPU call to enable a performance event |
| */ |
| static void __perf_event_enable(struct perf_event *event, |
| struct perf_cpu_context *cpuctx, |
| struct perf_event_context *ctx, |
| void *info) |
| { |
| struct perf_event *leader = event->group_leader; |
| struct perf_event_context *task_ctx; |
| |
| if (event->state >= PERF_EVENT_STATE_INACTIVE || |
| event->state <= PERF_EVENT_STATE_ERROR) |
| return; |
| |
| if (ctx->is_active) |
| ctx_sched_out(ctx, cpuctx, EVENT_TIME); |
| |
| perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); |
| perf_cgroup_event_enable(event, ctx); |
| |
| if (!ctx->is_active) |
| return; |
| |
| if (!event_filter_match(event)) { |
| ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); |
| return; |
| } |
| |
| /* |
| * If the event is in a group and isn't the group leader, |
| * then don't put it on unless the group is on. |
| */ |
| if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) { |
| ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); |
| return; |
| } |
| |
| task_ctx = cpuctx->task_ctx; |
| if (ctx->task) |
| WARN_ON_ONCE(task_ctx != ctx); |
| |
| ctx_resched(cpuctx, task_ctx, get_event_type(event)); |
| } |
| |
| /* |
| * Enable an event. |
| * |
| * If event->ctx is a cloned context, callers must make sure that |
| * every task struct that event->ctx->task could possibly point to |
| * remains valid. This condition is satisfied when called through |
| * perf_event_for_each_child or perf_event_for_each as described |
| * for perf_event_disable. |
| */ |
| static void _perf_event_enable(struct perf_event *event) |
| { |
| struct perf_event_context *ctx = event->ctx; |
| |
| raw_spin_lock_irq(&ctx->lock); |
| if (event->state >= PERF_EVENT_STATE_INACTIVE || |
| event->state < PERF_EVENT_STATE_ERROR) { |
| out: |
| raw_spin_unlock_irq(&ctx->lock); |
| return; |
| } |
| |
| /* |
| * If the event is in error state, clear that first. |
| * |
| * That way, if we see the event in error state below, we know that it |
| * has gone back into error state, as distinct from the task having |
| * been scheduled away before the cross-call arrived. |
| */ |
| if (event->state == PERF_EVENT_STATE_ERROR) { |
| /* |
| * Detached SIBLING events cannot leave ERROR state. |
| */ |
| if (event->event_caps & PERF_EV_CAP_SIBLING && |
| event->group_leader == event) |
| goto out; |
| |
| event->state = PERF_EVENT_STATE_OFF; |
| } |
| raw_spin_unlock_irq(&ctx->lock); |
| |
| event_function_call(event, __perf_event_enable, NULL); |
| } |
| |
| /* |
| * See perf_event_disable(); |
| */ |
| void perf_event_enable(struct perf_event *event) |
| { |
| struct perf_event_context *ctx; |
| |
| ctx = perf_event_ctx_lock(event); |
| _perf_event_enable(event); |
| perf_event_ctx_unlock(event, ctx); |
| } |
| EXPORT_SYMBOL_GPL(perf_event_enable); |
| |
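| /* |
| * Argument block for the __perf_event_stop() cross-CPU call; @restart |
| * requests that the event be started again right after stopping it. |
| */ |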
| struct stop_event_data { |
| struct perf_event *event; |
| unsigned int restart; |
| }; |
| |
| static int __perf_event_stop(void *info) |
| { |
| struct stop_event_data *sd = info; |
| struct perf_event *event = sd->event; |
| |
| /* if it's already INACTIVE, do nothing */ |
| if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) |
| return 0; |
| |
| /* matches smp_wmb() in event_sched_in() */ |
| smp_rmb(); |
| |
| /* |
| * There is a window with interrupts enabled before we get here, |
| * so we need to check again lest we try to stop another CPU's event. |
| */ |
| if (READ_ONCE(event->oncpu) != smp_processor_id()) |
| return -EAGAIN; |
| |
| event->pmu->stop(event, PERF_EF_UPDATE); |
| |
| /* |
| * May race with the actual stop (through perf_pmu_output_stop()), |
| * but it is only used for events with AUX ring buffer, and such |
| * events will refuse to restart because of rb::aux_mmap_count==0, |
| * see comments in perf_aux_output_begin(). |
| * |
| * Since this is happening on an event-local CPU, no trace is lost |
| * while restarting. |
| */ |
| if (sd->restart) |
| event->pmu->start(event, 0); |
| |
| return 0; |
| } |
| |
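| /* |
| * Stop (and optionally restart) an ACTIVE event on whatever CPU it is |
| * currently running on, retrying if the event migrates under us. |
| */ |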
| static int perf_event_stop(struct perf_event *event, int restart) |
| { |
| struct stop_event_data sd = { |
| .event = event, |
| .restart = restart, |
| }; |
| int ret = 0; |
| |
| do { |
| if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) |
| return 0; |
| |
| /* matches smp_wmb() in event_sched_in() */ |
| smp_rmb(); |
| |
| /* |
| * We only want to restart ACTIVE events, so if the event goes |
| * inactive here (event->oncpu==-1), there's nothing more to do; |
| * fall through with ret==-ENXIO. |
| */ |
| ret = cpu_function_call(READ_ONCE(event->oncpu), |
| __perf_event_stop, &sd); |
| } while (ret == -EAGAIN); |
| |
| return ret; |
| } |
| |
| /* |
| * In order to contain the amount of raciness and trickiness in the |
| * address filter configuration management, it is a two-part process: |
| * |
| * (p1) when userspace mappings change as a result of (1) or (2) or (3) below, |
| * we update the addresses of corresponding vmas in |
| * event::addr_filter_ranges array and bump the event::addr_filters_gen; |
| * (p2) when an event is scheduled in (pmu::add), it calls |
| * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync() |
| * if the generation has changed since the previous call. |
| * |
| * If (p1) happens while the event is active, we restart it to force (p2). |
| * |
| * (1) perf_addr_filters_apply(): adjusting filters' offsets based on |
| * pre-existing mappings, called once when new filters arrive via SET_FILTER |
| * ioctl; |
| * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly |
| * registered mapping, called for every new mmap(), with mm::mmap_lock down |
| * for reading; |
| * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process |
| * of exec. |
| */ |
| void perf_event_addr_filters_sync(struct perf_event *event) |
| { |
| struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); |
| |
| if (!has_addr_filter(event)) |
| return; |
| |
| raw_spin_lock(&ifh->lock); |
| if (event->addr_filters_gen != event->hw.addr_filters_gen) { |
| event->pmu->addr_filters_sync(event); |
| event->hw.addr_filters_gen = event->addr_filters_gen; |
| } |
| raw_spin_unlock(&ifh->lock); |
| } |
| EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync); |
| |
| static int _perf_event_refresh(struct perf_event *event, int refresh) |
| { |
| /* |
| * not supported on inherited events |
| */ |
| if (event->attr.inherit || !is_sampling_event(event)) |
| return -EINVAL; |
| |
| atomic_add(refresh, &event->event_limit); |
| _perf_event_enable(event); |
| |
| return 0; |
| } |
| |
| /* |
| * See perf_event_disable() |
| */ |
| int perf_event_refresh(struct perf_event *event, int refresh) |
| { |
| struct perf_event_context *ctx; |
| int ret; |
| |
| ctx = perf_event_ctx_lock(event); |
| ret = _perf_event_refresh(event, refresh); |
| perf_event_ctx_unlock(event, ctx); |
| |
| return ret; |
| } |
| EXPORT_SYMBOL_GPL(perf_event_refresh); |
| |
| static int perf_event_modify_breakpoint(struct perf_event *bp, |
| struct perf_event_attr *attr) |
| { |
| int err; |
| |
| _perf_event_disable(bp); |
| |
| err = modify_user_hw_breakpoint_check(bp, attr, true); |
| |
| if (!bp->attr.disabled) |
| _perf_event_enable(bp); |
| |
| return err; |
| } |
| |
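| /* |
| * Apply an attribute update to an event and to all of its inherited |
| * child events; currently only breakpoint events support this. |
| */ |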
| static int perf_event_modify_attr(struct perf_event *event, |
| struct perf_event_attr *attr) |
| { |
| int (*func)(struct perf_event *, struct perf_event_attr *); |
| struct perf_event *child; |
| int err; |
| |
| if (event->attr.type != attr->type) |
| return -EINVAL; |
| |
| switch (event->attr.type) { |
| case PERF_TYPE_BREAKPOINT: |
| func = perf_event_modify_breakpoint; |
| break; |
| default: |
| /* Place holder for future additions. */ |
| return -EOPNOTSUPP; |
| } |
| |
| WARN_ON_ONCE(event->ctx->parent_ctx); |
| |
| mutex_lock(&event->child_mutex); |
| err = func(event, attr); |
| if (err) |
| goto out; |
| list_for_each_entry(child, &event->child_list, child_list) { |
| err = func(child, attr); |
| if (err) |
| goto out; |
| } |
| out: |
| mutex_unlock(&event->child_mutex); |
| return err; |
| } |
| |
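| /* |
| * Schedule out all active groups of the requested types (pinned and/or |
| * flexible) from a context, updating the context and cgroup times along |
| * the way. |
| */ |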
| static void ctx_sched_out(struct perf_event_context *ctx, |
| struct perf_cpu_context *cpuctx, |
| enum event_type_t event_type) |
| { |
| struct perf_event *event, *tmp; |
| int is_active = ctx->is_active; |
| |
| lockdep_assert_held(&ctx->lock); |
| |
| if (likely(!ctx->nr_events)) { |
| /* |
| * See __perf_remove_from_context(). |
| */ |
| WARN_ON_ONCE(ctx->is_active); |
| if (ctx->task) |
| WARN_ON_ONCE(cpuctx->task_ctx); |
| return; |
| } |
| |
| ctx->is_active &= ~event_type; |
| if (!(ctx->is_active & EVENT_ALL)) |
| ctx->is_active = 0; |
| |
| if (ctx->task) { |
| WARN_ON_ONCE(cpuctx->task_ctx != ctx); |
| if (!ctx->is_active) |
| cpuctx->task_ctx = NULL; |
| } |
| |
| /* |
| * Always update time if it was set; not only when it changes. |
| * Otherwise we can 'forget' to update time for any but the last |
| * context we sched out. For example: |
| * |
| * ctx_sched_out(.event_type = EVENT_FLEXIBLE) |
| * ctx_sched_out(.event_type = EVENT_PINNED) |
| * |
| * would only update time for the pinned events. |
| */ |
| if (is_active & EVENT_TIME) { |
| /* update (and stop) ctx time */ |
| update_context_time(ctx); |
| update_cgrp_time_from_cpuctx(cpuctx); |
| } |
| |
| is_active ^= ctx->is_active; /* changed bits */ |
| |
| if (!ctx->nr_active || !(is_active & EVENT_ALL)) |
| return; |
| |
| perf_pmu_disable(ctx->pmu); |
| if (is_active & EVENT_PINNED) { |
| list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list) |
| group_sched_out(event, cpuctx, ctx); |
| } |
| |
| if (is_active & EVENT_FLEXIBLE) { |
| list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list) |
| group_sched_out(event, cpuctx, ctx); |
| |
| /* |
| * Since we cleared EVENT_FLEXIBLE, also clear |
| * rotate_necessary; it will be reset by |
| * ctx_flexible_sched_in() when needed. |
| */ |
| ctx->rotate_necessary = 0; |
| } |
| perf_pmu_enable(ctx->pmu); |
| } |
| |
| /* |
| * Test whether two contexts are equivalent, i.e. whether they have both been |
| * cloned from the same version of the same context. |
| * |
| * Equivalence is measured using a generation number in the context that is |
| * incremented on each modification to it; see unclone_ctx(), list_add_event() |
| * and list_del_event(). |
| */ |
| static int context_equiv(struct perf_event_context *ctx1, |
| struct perf_event_context *ctx2) |
| { |
| lockdep_assert_held(&ctx1->lock); |
| lockdep_assert_held(&ctx2->lock); |
| |
| /* Pinning disables the swap optimization */ |
| if (ctx1->pin_count || ctx2->pin_count) |
| return 0; |
| |
| /* If ctx1 is the parent of ctx2 */ |
| if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen) |
| return 1; |
| |
| /* If ctx2 is the parent of ctx1 */ |
| if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation) |
| return 1; |
| |
| /* |
| * If ctx1 and ctx2 have the same parent; we flatten the parent |
| * hierarchy, see perf_event_init_context(). |
| */ |
| if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx && |
| ctx1->parent_gen == ctx2->parent_gen) |
| return 1; |
| |
| /* Unmatched */ |
| return 0; |
| } |
| |
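| /* |
| * When two equivalent contexts are swapped at context switch, exchange |
| * the counter values and accumulated times of matching inherit_stat |
| * events so that per-task statistics stay with the task. |
| */ |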
| static void __perf_event_sync_stat(struct perf_event *event, |
| struct perf_event *next_event) |
| { |
| u64 value; |
| |
| if (!event->attr.inherit_stat) |
| return; |
| |
| /* |
| * Update the event value, we cannot use perf_event_read() |
| * because we're in the middle of a context switch and have IRQs |
| * disabled, which upsets smp_call_function_single(), however |
| * we know the event must be on the current CPU, therefore we |
| * don't need to use it. |
| */ |
| if (event->state == PERF_EVENT_STATE_ACTIVE) |
| event->pmu->read(event); |
| |
| perf_event_update_time(event); |
| |
| /* |
| * In order to keep per-task stats reliable we need to flip the event |
| * values when we flip the contexts. |
| */ |
| value = local64_read(&next_event->count); |
| value = local64_xchg(&event->count, value); |
| local64_set(&next_event->count, value); |
| |
| swap(event->total_time_enabled, next_event->total_time_enabled); |
| swap(event->total_time_running, next_event->total_time_running); |
| |