/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);

static int nr_slots[TYPE_MAX];

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}

static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
	if (bp->attr.bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}
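
/*
 * Example: HW_BREAKPOINT_RW is HW_BREAKPOINT_R | HW_BREAKPOINT_W, so a
 * read and/or write data watchpoint consumes a TYPE_DATA slot, while an
 * execution breakpoint (HW_BREAKPOINT_X) consumes a TYPE_INST slot.
 */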

/*
 * Report the maximum number of pinned breakpoints any single
 * task has on this cpu
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);

	for (i = nr_slots[type] - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}
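
/*
 * Worked example (illustrative): if one task has two pinned breakpoints
 * of this type on this cpu and every other task has at most one, then
 * tsk_pinned[1] > 0 and the downward scan returns 1 + 1 == 2.
 */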

/*
 * Count the weighted number of breakpoints of the same type and same
 * task as @bp. The given event itself must not be on the list.
 */
static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
{
	struct task_struct *tsk = bp->hw.bp_target;
	struct perf_event *iter;
	int count = 0;

	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type)
			count += hw_breakpoint_weight(iter);
	}

	return count;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->hw.bp_target;

	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			slots->pinned += max_task_bp_pinned(cpu, type);
		else
			slots->pinned += task_bp_pinned(bp, type);
		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);

		return;
	}

	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(bp, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible[type], cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
	slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
				enum bp_type_idx type, int weight)
{
	unsigned int *tsk_pinned;
	int old_count = 0;
	int old_idx = 0;
	int idx = 0;

	old_count = task_bp_pinned(bp, type);
	old_idx = old_count - 1;
	idx = old_idx + weight;

	/* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
	if (enable) {
		tsk_pinned[idx]++;
		if (old_count > 0)
			tsk_pinned[old_idx]--;
	} else {
		tsk_pinned[idx]--;
		if (old_count > 0)
			tsk_pinned[old_idx]++;
	}
}
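
/*
 * Worked example (illustrative): a task already has one pinned breakpoint
 * of this type counted on this cpu, and a second one of weight 1 is being
 * enabled. Then old_count == 1, old_idx == 0 and idx == 1, so the task
 * moves from the "one breakpoint" bucket to the "two breakpoints" bucket:
 * tsk_pinned[1]++ and tsk_pinned[0]--.
 */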

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->hw.bp_target;

	/* Pinned counter cpu profiling */
	if (!tsk) {
		if (enable)
			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
		else
			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
		return;
	}

	/* Pinned counter task profiling */

	if (!enable)
		list_del(&bp->hw.bp_list);

	if (cpu >= 0) {
		toggle_bp_task_slot(bp, cpu, enable, type, weight);
	} else {
		for_each_online_cpu(cpu)
			toggle_bp_task_slot(bp, cpu, enable, type, weight);
	}

	if (enable)
		list_add_tail(&bp->hw.bp_list, &bp_task_head);
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must
 *          keep at least one register (or the flexible counters will never
 *          be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
static int __reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;
	int weight;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
	    bp->attr.bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);

	fetch_bp_busy_slots(&slots, bp, type);
	/*
	 * Simulate the addition of this breakpoint to the constraints
	 * and see the result.
	 */
	fetch_this_slot(&slots, weight);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	toggle_bp_slot(bp, true, type, weight);

	return 0;
}
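
/*
 * Worked example (illustrative numbers): assume nr_slots[TYPE_DATA] == 4
 * and the busiest cpu already accounts three pinned data breakpoints and
 * one flexible one. fetch_bp_busy_slots() yields slots.pinned == 3 and
 * slots.flexible == 1; fetch_this_slot() raises slots.pinned to 4 for a
 * new breakpoint of weight 1, and since 4 + !!1 == 5 > nr_slots[TYPE_DATA]
 * the reservation fails with -ENOSPC: the flexible counter must keep at
 * least one slot to ever be scheduled.
 */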

int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
	enum bp_type_idx type;
	int weight;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger (e.g. kgdb, which may run in a context where
 * sleeping on a mutex is not allowed) to reserve and release breakpoint
 * slots without taking the lock, using the dbg_* variants below. They
 * back off with an error if someone else holds nr_bp_mutex.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp);

	return 0;
}

static int validate_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = arch_validate_hwbkpt_settings(bp);
	if (ret)
		return ret;

	if (arch_check_bp_in_kernelspace(bp)) {
		if (bp->attr.exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	ret = validate_hw_breakpoint(bp);

	/* if validate_hw_breakpoint() fails then release bp slot */
	if (ret)
		release_bp_slot(bp);

	return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
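
/*
 * Usage sketch (illustrative): watch 4 bytes at a user address for write
 * access on behalf of a task, roughly as the ptrace code would. Here
 * "addr", "tsk" and "my_handler" are placeholders, and the handler must
 * match this kernel version's perf_overflow_handler_t signature:
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, my_handler, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */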

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;

	err = validate_hw_breakpoint(bp);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered)
{
	struct perf_event * __percpu *cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, NULL, triggered);

		*pevent = bp;

		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}
	put_online_cpus();

	return cpu_events;

fail:
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent))
			break;
		unregister_hw_breakpoint(*pevent);
	}
	put_online_cpus();

	free_percpu(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
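
/*
 * Usage sketch (illustrative, in the spirit of the hw_breakpoint sample
 * module; "my_handler" is a placeholder and must match this kernel
 * version's perf_overflow_handler_t signature): place a write watchpoint
 * on a kernel symbol on every online cpu.
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wbp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("pid_max");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	wbp = register_wide_hw_breakpoint(&attr, my_handler);
 *	if (IS_ERR((void __force *)wbp))
 *		return PTR_ERR((void __force *)wbp);
 */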

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}

static int hw_breakpoint_event_init(struct perf_event *bp)
{
	int err;

	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
		return -ENOENT;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return err;

	bp->destroy = bp_perf_event_destroy;

	return 0;
}

static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
	if (!(flags & PERF_EF_START))
		bp->hw.state = PERF_HES_STOPPED;

	return arch_install_hw_breakpoint(bp);
}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
	arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
	bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
	bp->hw.state = PERF_HES_STOPPED;
}

static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,
};

int __init init_hw_breakpoint(void)
{
	unsigned int **task_bp_pinned;
	int cpu, err_cpu;
	int i;

	for (i = 0; i < TYPE_MAX; i++)
		nr_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
			*task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
						  GFP_KERNEL);
			if (!*task_bp_pinned)
				goto err_alloc;
		}
	}

	constraints_initialized = 1;

	perf_pmu_register(&perf_breakpoint);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);

err_alloc:
	for_each_possible_cpu(err_cpu) {
		/*
		 * Free each cpu's arrays up to and including the cpu that
		 * failed; freeing that cpu's partial allocations is safe
		 * since kfree(NULL) is a no-op.
		 */
		for (i = 0; i < TYPE_MAX; i++)
			kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
		if (err_cpu == cpu)
			break;
	}

	return -ENOMEM;
}