#include <linux/perf_event.h>
#include <linux/types.h>

#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/insn.h>

#include "perf_event.h"

enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
};
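
/*
 * The LBR record format in use is reported by the CPU in
 * MSR_IA32_PERF_CAPABILITIES and cached in x86_pmu.intel_cap.lbr_format.
 */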

/*
 * Intel LBR_SELECT bits
 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
 *
 * Hardware branch filter (not available on all CPUs)
 */
#define LBR_KERNEL_BIT		0 /* do not capture at ring0 */
#define LBR_USER_BIT		1 /* do not capture at ring > 0 */
#define LBR_JCC_BIT		2 /* do not capture conditional branches */
#define LBR_REL_CALL_BIT	3 /* do not capture relative calls */
#define LBR_IND_CALL_BIT	4 /* do not capture indirect calls */
#define LBR_RETURN_BIT		5 /* do not capture near returns */
#define LBR_IND_JMP_BIT		6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT		7 /* do not capture relative jumps */
#define LBR_FAR_BIT		8 /* do not capture far branches */

#define LBR_KERNEL	(1 << LBR_KERNEL_BIT)
#define LBR_USER	(1 << LBR_USER_BIT)
#define LBR_JCC		(1 << LBR_JCC_BIT)
#define LBR_REL_CALL	(1 << LBR_REL_CALL_BIT)
#define LBR_IND_CALL	(1 << LBR_IND_CALL_BIT)
#define LBR_RETURN	(1 << LBR_RETURN_BIT)
#define LBR_REL_JMP	(1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP	(1 << LBR_IND_JMP_BIT)
#define LBR_FAR		(1 << LBR_FAR_BIT)

#define LBR_PLM (LBR_KERNEL | LBR_USER)

#define LBR_SEL_MASK	0x1ff	/* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP	-1	/* LBR filter not supported */
#define LBR_IGN		0	/* ignored */
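/*
 * LBR_NOT_SUPP and LBR_IGN are software-only values used in the
 * lbr_sel_map tables further down; they are never written to the MSR.
 */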

#define LBR_ANY		 \
	(LBR_JCC	|\
	 LBR_REL_CALL	|\
	 LBR_IND_CALL	|\
	 LBR_RETURN	|\
	 LBR_REL_JMP	|\
	 LBR_IND_JMP	|\
	 LBR_FAR)

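/*
 * On LBR_FORMAT_EIP_FLAGS, bit 63 of the LBR_FROM MSR flags the branch
 * as mispredicted; it is stripped from the address when the stack is read.
 */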
#define LBR_FROM_FLAG_MISPRED  (1ULL << 63)

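/* iterate over each PERF_SAMPLE_BRANCH_* bit, from lowest to highest */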
#define for_each_branch_sample_type(x) \
	for ((x) = PERF_SAMPLE_BRANCH_USER; \
	     (x) < PERF_SAMPLE_BRANCH_MAX; (x) <<= 1)

/*
 * x86 control flow change classification
 * x86 control flow changes include branches, interrupts, traps, faults
 */
enum {
	X86_BR_NONE     = 0,      /* unknown */

	X86_BR_USER     = 1 << 0, /* branch target is user */
	X86_BR_KERNEL   = 1 << 1, /* branch target is kernel */

	X86_BR_CALL     = 1 << 2, /* call */
	X86_BR_RET      = 1 << 3, /* return */
	X86_BR_SYSCALL  = 1 << 4, /* syscall */
	X86_BR_SYSRET   = 1 << 5, /* syscall return */
	X86_BR_INT      = 1 << 6, /* sw interrupt */
	X86_BR_IRET     = 1 << 7, /* return from interrupt */
	X86_BR_JCC      = 1 << 8, /* conditional */
	X86_BR_JMP      = 1 << 9, /* jump */
	X86_BR_IRQ      = 1 << 10,/* hw interrupt or trap or fault */
	X86_BR_IND_CALL = 1 << 11,/* indirect calls */
};

#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)

#define X86_BR_ANY       \
	(X86_BR_CALL    |\
	 X86_BR_RET     |\
	 X86_BR_SYSCALL |\
	 X86_BR_SYSRET  |\
	 X86_BR_INT     |\
	 X86_BR_IRET    |\
	 X86_BR_JCC     |\
	 X86_BR_JMP     |\
	 X86_BR_IRQ     |\
	 X86_BR_IND_CALL)

#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)

#define X86_BR_ANY_CALL		 \
	(X86_BR_CALL		|\
	 X86_BR_IND_CALL	|\
	 X86_BR_SYSCALL		|\
	 X86_BR_IRQ		|\
	 X86_BR_INT)

static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI;
 * otherwise it becomes nearly impossible to get a reliable stack.
 */

static void __intel_pmu_lbr_enable(void)
{
	u64 debugctl;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->lbr_sel)
		wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config);

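	/* read-modify-write to preserve the other DEBUGCTL bits (BTF, BTS, ...) */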
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void intel_pmu_lbr_reset_32(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++)
		wrmsrl(x86_pmu.lbr_from + i, 0);
}

static void intel_pmu_lbr_reset_64(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		wrmsrl(x86_pmu.lbr_from + i, 0);
		wrmsrl(x86_pmu.lbr_to + i, 0);
	}
}

void intel_pmu_lbr_reset(void)
{
	if (!x86_pmu.lbr_nr)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_reset_32();
	else
		intel_pmu_lbr_reset_64();
}

void intel_pmu_lbr_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu.lbr_nr)
		return;

	/*
	 * Reset the LBR stack if we changed task context to
	 * avoid data leaks.
	 */
	if (event->ctx->task && cpuc->lbr_context != event->ctx) {
		intel_pmu_lbr_reset();
		cpuc->lbr_context = event->ctx;
	}
	cpuc->br_sel = event->hw.branch_reg.reg;

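	/*
	 * No MSRs are touched here; the LBR is actually switched on
	 * from intel_pmu_lbr_enable_all() when the PMU itself is enabled.
	 */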
	cpuc->lbr_users++;
}

void intel_pmu_lbr_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu.lbr_nr)
		return;

	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);

	if (cpuc->enabled && !cpuc->lbr_users) {
		__intel_pmu_lbr_disable();
		/* avoid stale pointer */
		cpuc->lbr_context = NULL;
	}
}

void intel_pmu_lbr_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_enable();
}

void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}

/*
 * TOS = most recently recorded branch
 */
static inline u64 intel_pmu_lbr_tos(void)
{
	u64 tos;

	rdmsrl(x86_pmu.lbr_tos, tos);

	return tos;
}

static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	u64 tos = intel_pmu_lbr_tos();
	int i;

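	/*
	 * The LBR MSRs form a ring buffer of lbr_nr (a power of two)
	 * entries; walk it backwards from the most recent entry (TOS),
	 * wrapping around via the mask.
	 */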
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		union {
			struct {
				u32 from;
				u32 to;
			};
			u64     lbr;
		} msr_lastbranch;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

		cpuc->lbr_entries[i].from	= msr_lastbranch.from;
		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
		cpuc->lbr_entries[i].mispred	= 0;
		cpuc->lbr_entries[i].predicted	= 0;
		cpuc->lbr_entries[i].reserved	= 0;
	}
	cpuc->lbr_stack.nr = i;
}

/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		u64 from, to, mis = 0, pred = 0;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
		rdmsrl(x86_pmu.lbr_to + lbr_idx, to);

		if (lbr_format == LBR_FORMAT_EIP_FLAGS) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
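			/*
			 * strip the flag bit: shifting left then
			 * arithmetically right by one sign-extends
			 * bit 62, keeping the address canonical
			 */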
			from = (u64)((((s64)from) << 1) >> 1);
		}

		cpuc->lbr_entries[i].from	= from;
		cpuc->lbr_entries[i].to		= to;
		cpuc->lbr_entries[i].mispred	= mis;
		cpuc->lbr_entries[i].predicted	= pred;
		cpuc->lbr_entries[i].reserved	= 0;
	}
	cpuc->lbr_stack.nr = i;
}

void intel_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->lbr_users)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_read_32(cpuc);
	else
		intel_pmu_lbr_read_64(cpuc);

	intel_pmu_lbr_filter(cpuc);
}

/*
 * SW filter is used:
 * - in case there is no HW filter
 * - in case the HW filter has errata or limitations
 */
static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
	u64 br_type = event->attr.branch_sample_type;
	int mask = 0;

	if (br_type & PERF_SAMPLE_BRANCH_USER)
		mask |= X86_BR_USER;

	if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
		mask |= X86_BR_KERNEL;

	/* we ignore BRANCH_HV here */

	if (br_type & PERF_SAMPLE_BRANCH_ANY)
		mask |= X86_BR_ANY;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
		mask |= X86_BR_ANY_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;

	if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
		mask |= X86_BR_IND_CALL;
	/*
	 * stash actual user request into reg, it may
	 * be used by fixup code for some CPU
	 */
	event->hw.branch_reg.reg = mask;
}

/*
 * Set up the HW LBR filter.
 * Used only when available; it may not be enough to disambiguate
 * all branches and may need the help of the SW filter.
 */
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	u64 br_type = event->attr.branch_sample_type;
	u64 mask = 0, m;
	u64 v;

	for_each_branch_sample_type(m) {
		if (!(br_type & m))
			continue;

		v = x86_pmu.lbr_sel_map[m];
		if (v == LBR_NOT_SUPP)
			return -EOPNOTSUPP;

		if (v != LBR_IGN)
			mask |= v;
	}
	reg = &event->hw.branch_reg;
	reg->idx = EXTRA_REG_LBR;

	/* LBR_SELECT operates in suppress mode so invert mask */
	reg->config = ~mask & x86_pmu.lbr_sel_mask;

	return 0;
}
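
/*
 * Example (using the SNB map below): PERF_SAMPLE_BRANCH_ANY_CALL selects
 * LBR_REL_CALL | LBR_IND_CALL | LBR_FAR, so LBR_SELECT is programmed with
 * the complement, suppressing every other branch type.
 */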

int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
	int ret = 0;

	/*
	 * no LBR on this PMU
	 */
	if (!x86_pmu.lbr_nr)
		return -EOPNOTSUPP;

	/*
	 * setup SW LBR filter
	 */
	intel_pmu_setup_sw_lbr_filter(event);

	/*
	 * setup HW LBR filter, if any
	 */
	if (x86_pmu.lbr_sel_map)
		ret = intel_pmu_setup_hw_lbr_filter(event);

	return ret;
}
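
/*
 * Illustrative only: this path is reached when a userspace event asks
 * for a branch stack, e.g. (hypothetical snippet):
 *
 *	attr.sample_type        |= PERF_SAMPLE_BRANCH_STACK;
 *	attr.branch_sample_type  = PERF_SAMPLE_BRANCH_USER |
 *				   PERF_SAMPLE_BRANCH_ANY_CALL;
 */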

/*
 * Return the type of control flow change at address "from".
 * The instruction is not necessarily a branch (in case of interrupt).
 *
 * The branch type returned also includes the priv level of the
 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
 *
 * If a branch type is unknown OR the instruction cannot be
 * decoded (e.g., text page not present), then X86_BR_NONE is
 * returned.
 */
static int branch_type(unsigned long from, unsigned long to)
{
	struct insn insn;
	void *addr;
	int bytes, size = MAX_INSN_SIZE;
	int ret = X86_BR_NONE;
	int ext, to_plm, from_plm;
	u8 buf[MAX_INSN_SIZE];
	int is64 = 0;

	to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
	from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;

	/*
	 * maybe zero if lbr did not fill up after a reset by the time
	 * we get a PMU interrupt
	 */
	if (from == 0 || to == 0)
		return X86_BR_NONE;

	if (from_plm == X86_BR_USER) {
		/*
		 * can happen if measuring at the user level only
		 * and we interrupt in a kernel thread, e.g., idle.
		 */
		if (!current->mm)
			return X86_BR_NONE;

		/* may fail if text not present */
		bytes = copy_from_user_nmi(buf, (void __user *)from, size);
		if (bytes != size)
			return X86_BR_NONE;

		addr = buf;
	} else
		addr = (void *)from;

	/*
	 * decoder needs to know the ABI especially
	 * on 64-bit systems running 32-bit apps
	 */
#ifdef CONFIG_X86_64
	is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
#endif
	insn_init(&insn, addr, is64);
	insn_get_opcode(&insn);

	switch (insn.opcode.bytes[0]) {
	case 0xf:
		switch (insn.opcode.bytes[1]) {
		case 0x05: /* syscall */
		case 0x34: /* sysenter */
			ret = X86_BR_SYSCALL;
			break;
		case 0x07: /* sysret */
		case 0x35: /* sysexit */
			ret = X86_BR_SYSRET;
			break;
		case 0x80 ... 0x8f: /* conditional */
			ret = X86_BR_JCC;
			break;
		default:
			ret = X86_BR_NONE;
		}
		break;
	case 0x70 ... 0x7f: /* conditional */
		ret = X86_BR_JCC;
		break;
	case 0xc2: /* near ret */
	case 0xc3: /* near ret */
	case 0xca: /* far ret */
	case 0xcb: /* far ret */
		ret = X86_BR_RET;
		break;
	case 0xcf: /* iret */
		ret = X86_BR_IRET;
		break;
	case 0xcc ... 0xce: /* int */
		ret = X86_BR_INT;
		break;
	case 0xe8: /* call near rel */
	case 0x9a: /* call far absolute */
		ret = X86_BR_CALL;
		break;
	case 0xe0 ... 0xe3: /* loop jmp */
		ret = X86_BR_JCC;
		break;
	case 0xe9 ... 0xeb: /* jmp */
		ret = X86_BR_JMP;
		break;
	case 0xff: /* call near absolute, call far absolute ind */
		insn_get_modrm(&insn);
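		/* reg/opcode-extension field (ModRM bits 5:3) selects the op */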
		ext = (insn.modrm.bytes[0] >> 3) & 0x7;
		switch (ext) {
		case 2: /* near ind call */
		case 3: /* far ind call */
			ret = X86_BR_IND_CALL;
			break;
		case 4:
		case 5:
			ret = X86_BR_JMP;
			break;
		}
		break;
	default:
		ret = X86_BR_NONE;
	}
	/*
	 * interrupts, traps, faults (and thus ring transitions) may
	 * occur on any instruction. Thus, to classify them correctly,
	 * we need to first look at the from and to priv levels. If they
	 * are different and to is in the kernel, then it indicates
	 * a ring transition. If the from instruction is not a ring
	 * transition instr (syscall, sysenter, int), then it means
	 * it was an irq, trap or fault.
	 *
	 * we have no way of detecting kernel to kernel faults.
	 */
	if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
	    && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
		ret = X86_BR_IRQ;

	/*
	 * branch priv level determined by target as
	 * is done by HW when LBR_SELECT is implemented
	 */
	if (ret != X86_BR_NONE)
		ret |= to_plm;

	return ret;
}

/*
 * implement actual branch filter based on user demand.
 * Hardware may not exactly satisfy that request, thus
 * we need to inspect opcodes. Mismatched branches are
 * discarded. Therefore, the number of branches returned
 * in PERF_SAMPLE_BRANCH_STACK sample may vary.
 */
static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{
	u64 from, to;
	int br_sel = cpuc->br_sel;
	int i, j, type;
	bool compress = false;

	/* if sampling all branches, then nothing to filter */
	if ((br_sel & X86_BR_ALL) == X86_BR_ALL)
		return;

	for (i = 0; i < cpuc->lbr_stack.nr; i++) {

		from = cpuc->lbr_entries[i].from;
		to = cpuc->lbr_entries[i].to;

		type = branch_type(from, to);

		/* if type does not correspond, then discard */
		if (type == X86_BR_NONE || (br_sel & type) != type) {
			cpuc->lbr_entries[i].from = 0;
			compress = true;
		}
	}

	if (!compress)
		return;

	/* remove all entries with from=0 */
	for (i = 0; i < cpuc->lbr_stack.nr; ) {
		if (!cpuc->lbr_entries[i].from) {
			j = i;
			while (++j < cpuc->lbr_stack.nr)
				cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
			cpuc->lbr_stack.nr--;
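			/*
			 * the entry shifted into slot i may itself be
			 * empty; re-check it before advancing
			 */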
			if (!cpuc->lbr_entries[i].from)
				continue;
		}
		i++;
	}
}

/*
 * Map interface branch filters onto LBR filters
 */
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
	[PERF_SAMPLE_BRANCH_ANY]	= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER]	= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN]	= LBR_RETURN | LBR_REL_JMP
					| LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
	 */
	[PERF_SAMPLE_BRANCH_ANY_CALL] =
	 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
	 */
	[PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL | LBR_IND_JMP,
};

static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
	[PERF_SAMPLE_BRANCH_ANY]	= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER]	= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL]	= LBR_REL_CALL | LBR_IND_CALL
					| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL]	= LBR_IND_CALL,
};

/* core */
void intel_pmu_lbr_init_core(void)
{
	x86_pmu.lbr_nr	   = 4;
	x86_pmu.lbr_tos	   = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
	pr_cont("4-deep LBR, ");
}

/* nehalem/westmere */
void intel_pmu_lbr_init_nhm(void)
{
	x86_pmu.lbr_nr	   = 16;
	x86_pmu.lbr_tos	   = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - workaround LBR_SEL errata (see above)
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmp need to be filtered out
	 */
	pr_cont("16-deep LBR, ");
}

/* sandy bridge */
void intel_pmu_lbr_init_snb(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	 = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmp need to be filtered out
	 */
	pr_cont("16-deep LBR, ");
}

/* atom */
void intel_pmu_lbr_init_atom(void)
{
	/*
	 * only models starting at stepping 10 seem
	 * to have an operational LBR which can freeze
	 * on PMU interrupt
	 */
	if (boot_cpu_data.x86_mask < 10) {
		pr_cont("LBR disabled due to erratum");
		return;
	}

	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos	   = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
	pr_cont("8-deep LBR, ");
}