/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
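
/*
 * Layout sketch (illustrative, not from the original source): for a
 * return probe, SIZEOF_TRACE_ENTRY(true) covers the trace_entry header
 * plus vaddr[0] (the probed function's address) and vaddr[1] (the
 * return address); the fetched argument data then starts at
 * DATAOF_TRACE_ENTRY(entry, true). An entry probe stores only
 * vaddr[0], the instruction pointer at the breakpoint.
 */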

struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
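
/*
 * This helper backs the "$stackN" fetch argument (see
 * Documentation/trace/uprobetracer.txt): "$stack2", for example, reads
 * the word 2 * sizeof(long) away from the user stack pointer, in the
 * direction opposite to stack growth. A faulting access silently
 * yields 0.
 */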

/*
 * Uprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					 void *offset, void *dest)	\
{									\
	*(type *)dest = (type)get_user_stack_nth(regs,			\
				((unsigned long)offset));		\
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	void __user *vaddr = (void __force __user *) addr;		\
									\
	if (copy_from_user(&retval, vaddr, sizeof(type)))		\
		*(type *)dest = 0;					\
	else								\
		*(type *) dest = retval;				\
}
DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	long ret;
	u32 rloc = *(u32 *)dest;
	int maxlen = get_rloc_len(rloc);
	u8 *dst = get_rloc_data(dest);
	void __user *src = (void __force __user *) addr;

	if (!maxlen)
		return;

	ret = strncpy_from_user(dst, src, maxlen);
	if (ret == maxlen)
		dst[--ret] = '\0';

	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
	}
}
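
/*
 * For reference (per the helpers in trace_probe.h): the "data relative
 * location" written above is a u32 packing the string length into the
 * upper 16 bits and the offset of the string data, relative to the
 * data-location field itself, into the lower 16 bits.
 */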

static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	if (len == 0 || len > MAX_STRING_SIZE)	/* Failed to check length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}

static unsigned long translate_user_vaddr(void *file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + (unsigned long)file_offset;
}

#define DEFINE_FETCH_file_offset(type)					\
static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,	\
					       void *offset, void *dest)\
{									\
	void *vaddr = (void *)translate_user_vaddr(offset);		\
									\
	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest);		\
}
DEFINE_BASIC_FETCH_FUNCS(file_offset)
DEFINE_FETCH_file_offset(string)
DEFINE_FETCH_file_offset(string_size)
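
/*
 * These variants back the "@+OFFSET" fetch argument: subtracting the
 * probe's file offset from the trapped address recovers the load base
 * of the executable, so an argument such as "@+0x601000" (an offset
 * made up for illustration) reads the same file-relative location no
 * matter where the binary happens to be mapped.
 */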

/* Fetch type information table */
static const struct fetch_type uprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	iput(tu->inode);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event; must be called with uprobe_lock held */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_probe_event(trace_event_name(&tu->tp.call),
				  tu->tp.call.class->system);
	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
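/*
 * For example (event names, path and offset below are illustrative,
 * following Documentation/trace/uprobetracer.txt):
 *
 *   echo 'p:myprobe /bin/bash:0x4245c0 arg1=%ax' > uprobe_events
 *   echo 'r:myretprobe /bin/bash:0x4245c0 $retval' > uprobe_events
 *   echo '-:myprobe' > uprobe_events
 */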
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	char *arg, *event, *group, *filename;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset;
	bool is_delete, is_return;
	int i, ret;

	inode = NULL;
	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		int ret;

		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(argv[1], ':');
	if (!arg) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		goto fail_address_parse;

	inode = igrab(d_real_inode(path.dentry));
	path_put(&path);

	if (!inode || !S_ISREG(inode->i_mode)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->inode = inode;
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false,
						 uprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	iput(inode);

	pr_info("Failed to parse address or file.\n");

	return ret;
}

static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}
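
/*
 * With the format above, a registered probe lists as, e.g. (values
 * hypothetical):
 *
 *   p:uprobes/p_bash_0x4245c0 /bin/bash:0x00000000004245c0 arg1=%ax
 */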

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}
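
/*
 * So uprobe_profile shows the filename, the event name and the hit
 * count, one probe per line, roughly (values hypothetical):
 *
 *   /bin/bash p_bash_0x4245c0                                  12
 */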

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		if (!parg->type->print(s, parg->name, data + parg->offset, entry))
			goto out;
	}

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret)
		goto err_buffer;

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}
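
/*
 * Note: a probe is either in ftrace mode (TP_FLAG_TRACE, one
 * event_file_link per trace instance) or in perf mode (TP_FLAG_PROFILE),
 * never both at once; enabling the second mode while the first is
 * active fails above with -EINTR.
 */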

static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_sched();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name, size + parg->offset,
					 parg->type->size, parg->type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}
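
/*
 * In other words: absent a system-wide perf event, the probe is
 * relevant only to tasks whose mm is targeted by an attached event.
 * uprobe_perf_func() below returns UPROBE_HANDLER_REMOVE for any other
 * task, letting the uprobe core pull the breakpoint out of
 * uninterested processes.
 */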

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}

static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}

static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
#endif /* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}
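
/*
 * Both dispatchers share one shape: decode the probe's arguments once
 * into the per-cpu buffer, then fan the hit out to the ftrace and/or
 * perf paths according to TP_FLAG_TRACE / TP_FLAG_PROFILE.
 */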

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static inline void init_trace_event_call(struct trace_uprobe *tu,
					 struct trace_event_call *call)
{
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	call->flags = TRACE_EVENT_FL_UPROBE;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
}

static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret = 0;

	init_trace_event_call(tu, call);

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	inode = igrab(d_inode(path.dentry));
	path_put(&path);

	if (!inode || !S_ISREG(inode->i_mode)) {
		iput(inode);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to uprobe_list, so they are
	 * never searched in find_probe_event(). Therefore, there is no
	 * concern of duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->inode = inode;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu, &tu->tp.call);

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return &tu->tp.call;
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = container_of(event_call, struct trace_uprobe, tp.call);

	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;

	free_trace_uprobe(tu);
}
#endif /* CONFIG_PERF_EVENTS */

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
			  NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
			  NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);