// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Dynamic Ftrace based Kprobes Optimization
 *
 * Copyright (C) Hitachi Ltd., 2012
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/ftrace.h>

/* Ftrace callback handler for kprobes */
void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	int bit;

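	/*
	 * Bail out if ftrace reports that this handler is being entered
	 * recursively on this CPU.
	 */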
	bit = ftrace_test_recursion_trylock(nip, parent_nip);
	if (bit < 0)
		return;

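	/*
	 * Retrieve the pt_regs saved by ftrace and look up the kprobe
	 * registered at this address, if any.
	 */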
	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)nip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

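	/*
	 * If another kprobe is already being handled on this CPU, just
	 * record the missed hit; otherwise run the handlers.
	 */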
	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
	} else {
		/*
		 * On powerpc, NIP is *before* this instruction for the
		 * pre handler
		 */
		regs_add_return_ip(regs, -MCOUNT_INSN_SIZE);

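		/* Record the active kprobe before invoking its handlers */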
		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			/*
			 * Emulate singlestep (and also recover regs->nip)
			 * as if there is a nop
			 */
			regs_add_return_ip(regs, MCOUNT_INSN_SIZE);
			if (unlikely(p->post_handler)) {
				kcb->kprobe_status = KPROBE_HIT_SSDONE;
				p->post_handler(p, regs, 0);
			}
		}
		/*
		 * If pre_handler returns !0, it changes regs->nip. We have to
		 * skip emulating post_handler.
		 */
		__this_cpu_write(current_kprobe, NULL);
	}
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

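/*
 * A probe on an ftrace location needs no out-of-line instruction slot and
 * no single-step emulation: the handler above is invoked directly by
 * ftrace, so mark the probe as having no copied instruction and as not
 * boostable.
 */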
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	p->ainsn.boostable = -1;
	return 0;
}