// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2006 Mike Kravetz IBM Corporation
 *
 * Hypervisor Call Instrumentation
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/hvcall.h>
#include <asm/firmware.h>
#include <asm/cputable.h>
#include <asm/trace.h>
#include <asm/machdep.h>

/* For hcall instrumentation. One structure per-hcall, per-CPU */
struct hcall_stats {
	unsigned long	num_calls;	/* number of calls (on this CPU) */
	unsigned long	tb_total;	/* total wall time (mftb) of calls. */
	unsigned long	purr_total;	/* total cpu time (PURR) of calls. */
	unsigned long	tb_start;
	unsigned long	purr_start;
};
#define HCALL_STAT_ARRAY_SIZE	((MAX_HCALL_OPCODE >> 2) + 1)

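/*
 * hcall opcodes are multiples of four, so the per-CPU stats array is
 * indexed by opcode >> 2 (opcode / 4 in the probes below), giving one
 * hcall_stats slot per opcode per CPU.
 */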
static DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats);

/*
 * Routines for displaying the statistics in debugfs
 */
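/*
 * The seq_file iterator cookie is the hcall-stats index, offset by one so
 * that the first position does not map to a NULL pointer (which seq_file
 * would treat as end of sequence). hc_show() uses it to index the stats
 * array and prints the corresponding opcode as h_num << 2.
 */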
static void *hc_start(struct seq_file *m, loff_t *pos)
{
	if ((int)*pos < (HCALL_STAT_ARRAY_SIZE-1))
		return (void *)(unsigned long)(*pos + 1);

	return NULL;
}

static void *hc_next(struct seq_file *m, void *p, loff_t * pos)
{
	++*pos;

	return hc_start(m, pos);
}

static void hc_stop(struct seq_file *m, void *p)
{
}

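/*
 * Emit one line per opcode that has been seen on this CPU: opcode, call
 * count, total timebase ticks, and total PURR ticks when the CPU has a
 * PURR register.
 */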
static int hc_show(struct seq_file *m, void *p)
{
	unsigned long h_num = (unsigned long)p;
	struct hcall_stats *hs = m->private;

	if (hs[h_num].num_calls) {
		if (cpu_has_feature(CPU_FTR_PURR))
			seq_printf(m, "%lu %lu %lu %lu\n", h_num<<2,
				   hs[h_num].num_calls,
				   hs[h_num].tb_total,
				   hs[h_num].purr_total);
		else
			seq_printf(m, "%lu %lu %lu\n", h_num<<2,
				   hs[h_num].num_calls,
				   hs[h_num].tb_total);
	}

	return 0;
}

static const struct seq_operations hcall_inst_sops = {
	.start = hc_start,
	.next  = hc_next,
	.stop  = hc_stop,
	.show  = hc_show
};

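/*
 * DEFINE_SEQ_ATTRIBUTE() generates hcall_inst_open() and hcall_inst_fops
 * from hcall_inst_sops; the generated open routine copies the debugfs
 * inode's i_private (a CPU's hcall_stats array, set below) into
 * seq_file->private, which hc_show() reads back as "hs".
 */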
DEFINE_SEQ_ATTRIBUTE(hcall_inst);

#define HCALL_ROOT_DIR		"hcall_inst"
#define CPU_NAME_BUF_SIZE	32


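/*
 * Tracepoint probe run on hcall entry: snapshot the timebase and PURR so
 * the exit probe can compute per-call deltas.
 */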
static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long *args)
{
	struct hcall_stats *h;

	if (opcode > MAX_HCALL_OPCODE)
		return;

	h = this_cpu_ptr(&hcall_stats[opcode / 4]);
	h->tb_start = mftb();
	h->purr_start = mfspr(SPRN_PURR);
}

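/*
 * Tracepoint probe run on hcall exit: bump the call count and accumulate
 * the elapsed timebase and PURR ticks for this opcode on this CPU.
 */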
static void probe_hcall_exit(void *ignored, unsigned long opcode, long retval,
			     unsigned long *retbuf)
{
	struct hcall_stats *h;

	if (opcode > MAX_HCALL_OPCODE)
		return;

	h = this_cpu_ptr(&hcall_stats[opcode / 4]);
	h->num_calls++;
	h->tb_total += mftb() - h->tb_start;
	h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
}

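/*
 * Register the entry/exit probes on the hcall tracepoints and create one
 * read-only debugfs file per possible CPU (typically visible as
 * /sys/kernel/debug/hcall_inst/cpu<n>), each backed by that CPU's
 * hcall_stats array.
 */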
static int __init hcall_inst_init(void)
{
	struct dentry *hcall_root;
	char cpu_name_buf[CPU_NAME_BUF_SIZE];
	int cpu;

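	/* Hypercalls are only made when running under a hypervisor (LPAR). */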
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	if (register_trace_hcall_entry(probe_hcall_entry, NULL))
		return -EINVAL;

	if (register_trace_hcall_exit(probe_hcall_exit, NULL)) {
		unregister_trace_hcall_entry(probe_hcall_entry, NULL);
		return -EINVAL;
	}

	hcall_root = debugfs_create_dir(HCALL_ROOT_DIR, NULL);

	for_each_possible_cpu(cpu) {
		snprintf(cpu_name_buf, CPU_NAME_BUF_SIZE, "cpu%d", cpu);
		debugfs_create_file(cpu_name_buf, 0444, hcall_root,
				    per_cpu(hcall_stats, cpu),
				    &hcall_inst_fops);
	}

	return 0;
}
machine_device_initcall(pseries, hcall_inst_init);