// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2006 Mike Kravetz IBM Corporation
 *
 * Hypervisor Call Instrumentation
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/hvcall.h>
#include <asm/firmware.h>
#include <asm/cputable.h>
#include <asm/trace.h>
#include <asm/machdep.h>

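/*
 * Probes attached to the hcall entry/exit tracepoints accumulate
 * per-opcode, per-CPU call counts and timings, exported through
 * debugfs: reading hcall_inst/cpu<N> yields one line per opcode used
 * on that CPU, "<opcode> <num_calls> <tb_total> [<purr_total>]".
 */
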
/* For hcall instrumentation. One structure per-hcall, per-CPU */
struct hcall_stats {
	unsigned long	num_calls;	/* number of calls (on this CPU) */
	unsigned long	tb_total;	/* total wall time (mftb) of calls */
	unsigned long	purr_total;	/* total cpu time (PURR) of calls */
	unsigned long	tb_start;	/* timebase at entry to current call */
	unsigned long	purr_start;	/* PURR at entry to current call */
};
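/*
 * pSeries hcall opcodes are all multiples of 4, so an opcode maps to
 * its statistics slot via opcode >> 2 (equivalently opcode / 4, as
 * used in the probes below).
 */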
#define HCALL_STAT_ARRAY_SIZE	((MAX_HCALL_OPCODE >> 2) + 1)

static DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats);

/*
 * Routines for displaying the statistics in debugfs
 */
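/*
 * The seq_file iterator token is *pos + 1 rather than *pos, since
 * returning NULL (i.e. 0) from ->start would end the sequence and
 * position 0 could not be represented directly.  hc_show() uses the
 * token as the array index, so slot 0 (opcode 0, not a valid hcall
 * number) is never printed.
 */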
static void *hc_start(struct seq_file *m, loff_t *pos)
{
	if ((int)*pos < (HCALL_STAT_ARRAY_SIZE-1))
		return (void *)(unsigned long)(*pos + 1);

	return NULL;
}

static void *hc_next(struct seq_file *m, void *p, loff_t *pos)
{
	++*pos;

	return hc_start(m, pos);
}

static void hc_stop(struct seq_file *m, void *p)
{
}

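/*
 * One output line per opcode that has been seen on this CPU:
 * "<opcode> <num_calls> <tb_total>", with a trailing <purr_total>
 * column when the CPU implements the PURR register.
 */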
static int hc_show(struct seq_file *m, void *p)
{
	unsigned long h_num = (unsigned long)p;
	struct hcall_stats *hs = m->private;

	if (hs[h_num].num_calls) {
		if (cpu_has_feature(CPU_FTR_PURR))
			seq_printf(m, "%lu %lu %lu %lu\n", h_num<<2,
				   hs[h_num].num_calls,
				   hs[h_num].tb_total,
				   hs[h_num].purr_total);
		else
			seq_printf(m, "%lu %lu %lu\n", h_num<<2,
				   hs[h_num].num_calls,
				   hs[h_num].tb_total);
	}

	return 0;
}

static const struct seq_operations hcall_inst_sops = {
	.start = hc_start,
	.next  = hc_next,
	.stop  = hc_stop,
	.show  = hc_show
};

DEFINE_SEQ_ATTRIBUTE(hcall_inst);
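/*
 * DEFINE_SEQ_ATTRIBUTE(hcall_inst) generates hcall_inst_open() and the
 * hcall_inst_fops file_operations from hcall_inst_sops above; the open
 * routine forwards the debugfs file's private data to seq_file::private
 * for hc_show().
 */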

#define	HCALL_ROOT_DIR		"hcall_inst"
#define CPU_NAME_BUF_SIZE	32

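/*
 * Tracepoint probe: runs on entry to every hcall; records the starting
 * timebase and PURR values in this CPU's slot for the opcode.
 */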
static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long *args)
{
	struct hcall_stats *h;

	if (opcode > MAX_HCALL_OPCODE)
		return;

	h = this_cpu_ptr(&hcall_stats[opcode / 4]);
	h->tb_start = mftb();
	h->purr_start = mfspr(SPRN_PURR);
}

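/*
 * Tracepoint probe: runs on hcall return; bumps the call count and
 * accumulates the timebase and PURR deltas against the start values
 * recorded by probe_hcall_entry() on this CPU.
 */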
static void probe_hcall_exit(void *ignored, unsigned long opcode, long retval,
			     unsigned long *retbuf)
{
	struct hcall_stats *h;

	if (opcode > MAX_HCALL_OPCODE)
		return;

	h = this_cpu_ptr(&hcall_stats[opcode / 4]);
	h->num_calls++;
	h->tb_total += mftb() - h->tb_start;
	h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
}

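/*
 * Set-up: only meaningful on an LPAR, where hcalls actually reach the
 * hypervisor.  Register the entry/exit probes first, then create one
 * debugfs file per possible CPU under "hcall_inst".
 */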
static int __init hcall_inst_init(void)
{
	struct dentry *hcall_root;
	char cpu_name_buf[CPU_NAME_BUF_SIZE];
	int cpu;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	if (register_trace_hcall_entry(probe_hcall_entry, NULL))
		return -EINVAL;

	if (register_trace_hcall_exit(probe_hcall_exit, NULL)) {
		unregister_trace_hcall_entry(probe_hcall_entry, NULL);
		return -EINVAL;
	}

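	/*
	 * Each per-CPU stats array is handed to debugfs_create_file()
	 * as the file's private data, which the generated open routine
	 * passes through to seq_file::private for hc_show().
	 */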
	hcall_root = debugfs_create_dir(HCALL_ROOT_DIR, NULL);

	for_each_possible_cpu(cpu) {
		snprintf(cpu_name_buf, CPU_NAME_BUF_SIZE, "cpu%d", cpu);
		debugfs_create_file(cpu_name_buf, 0444, hcall_root,
				    per_cpu(hcall_stats, cpu),
				    &hcall_inst_fops);
	}

	return 0;
}
machine_device_initcall(pseries, hcall_inst_init);