/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *              Probes initial implementation (includes contributions from
 *              Rusty Russell).
 * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *              interface to access function arguments.
 * 2004-Oct     Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *              <prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar     Roland McGrath <roland@redhat.com>
 *              Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May     Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *              <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *              <prasanna@in.ibm.com> added function-return probes.
 * 2005-May     Rusty Lynch <rusty.lynch@intel.com>
 *              Added function return probes functionality
 * 2006-Feb     Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *              kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec     Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *              and kretprobe-booster for x86-64
 * 2007-Dec     Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *              <arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *              unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/sched/debug.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/frame.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>

#include "common.h"

void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
        (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
          (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
          (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
          (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
         << (row % 32))
/*
 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
 * Groups, and some special opcodes cannot be boosted.
 * This is non-const and volatile to keep gcc from statically
 * optimizing it out, as variable_test_bit makes gcc think only
 * *(unsigned long*) is used.
 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
        /*      ----------------------------------------------          */
        W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
        W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
        W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
        W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
        W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
        W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
        W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
        W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
        W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
        W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
        W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
        W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
        W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
        W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
        W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
        W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
        /*      -----------------------------------------------         */
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W

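/*
 * Illustrative note added by the editor (not part of the original table
 * definition): can_boost() below consults this map for two-byte
 * (0x0f-prefixed) opcodes with
 *
 *      test_bit(insn->opcode.bytes[1], (unsigned long *)twobyte_is_boostable);
 *
 * e.g. for SETO (0x0f 0x90) it tests bit 0x90 of the 256-bit map, which the
 * W(0x90, 1, ...) row above sets, so that instruction is considered boostable.
 */
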
struct kretprobe_blackpoint kretprobe_blacklist[] = {
        {"__switch_to", }, /* This function only switches the current task,
                              but does not switch the kernel stack. */
        {NULL, NULL}    /* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

static nokprobe_inline void
__synthesize_relative_insn(void *from, void *to, u8 op)
{
        struct __arch_relative_insn {
                u8 op;
                s32 raddr;
        } __packed *insn;

        insn = (struct __arch_relative_insn *)from;
        insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
        insn->op = op;
}

/* Insert a jump instruction at address 'from', which jumps to address 'to'. */
void synthesize_reljump(void *from, void *to)
{
        __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);

/* Insert a call instruction at address 'from', which calls address 'to'. */
void synthesize_relcall(void *from, void *to)
{
        __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);

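/*
 * Worked example added by the editor (illustrative only, not from the
 * original source): a synthesized reljump is the 5-byte "jmp rel32" form,
 * an opcode byte followed by a 32-bit displacement relative to the end of
 * the instruction.  For instance, synthesize_reljump(from, from + 0x10)
 * stores
 *
 *      raddr = (from + 0x10) - (from + 5) = 0x0b
 *
 * so execution resumes 0x10 bytes past 'from' once the jump is taken.
 */
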
/*
 * Skip the prefixes of the instruction.
 */
static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
{
        insn_attr_t attr;

        attr = inat_get_opcode_attribute((insn_byte_t)*insn);
        while (inat_is_legacy_prefix(attr)) {
                insn++;
                attr = inat_get_opcode_attribute((insn_byte_t)*insn);
        }
#ifdef CONFIG_X86_64
        if (inat_is_rex_prefix(attr))
                insn++;
#endif
        return insn;
}
NOKPROBE_SYMBOL(skip_prefixes);

/*
 * Returns non-zero if INSN is boostable.
 * RIP-relative instructions are adjusted at copying time in 64-bit mode.
 */
int can_boost(struct insn *insn, void *addr)
{
        kprobe_opcode_t opcode;

        if (search_exception_tables((unsigned long)addr))
                return 0;       /* Page fault may occur on this address. */

        /* 2nd-byte opcode */
        if (insn->opcode.nbytes == 2)
                return test_bit(insn->opcode.bytes[1],
                                (unsigned long *)twobyte_is_boostable);

        if (insn->opcode.nbytes != 1)
                return 0;

        /* Can't boost Address-size override prefix */
        if (unlikely(inat_is_address_size_prefix(insn->attr)))
                return 0;

        opcode = insn->opcode.bytes[0];

        switch (opcode & 0xf0) {
        case 0x60:
                /* can't boost "bound" */
                return (opcode != 0x62);
        case 0x70:
                return 0; /* can't boost conditional jump */
        case 0x90:
                return opcode != 0x9a;  /* can't boost call far */
        case 0xc0:
                /* can't boost software-interruptions */
                return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
        case 0xd0:
                /* can boost AA* and XLAT */
                return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
        case 0xe0:
                /* can boost in/out and absolute jmps */
                return ((opcode & 0x04) || opcode == 0xea);
        case 0xf0:
                /* clear and set flags are boostable */
                return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
        default:
                /* CS override prefix and call are not boostable */
                return (opcode != 0x2e && opcode != 0x9a);
        }
}

static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
        struct kprobe *kp;
        unsigned long faddr;

        kp = get_kprobe((void *)addr);
        faddr = ftrace_location(addr);
        /*
         * Addresses inside the ftrace location are refused by
         * arch_check_ftrace_location(). Something went terribly wrong
         * if such an address is checked here.
         */
        if (WARN_ON(faddr && faddr != addr))
                return 0UL;
        /*
         * Use the current code if it is not modified by Kprobes
         * and it cannot be modified by ftrace.
         */
        if (!kp && !faddr)
                return addr;

        /*
         * Basically, kp->ainsn.insn has the original instruction.
         * However, a RIP-relative instruction cannot be single-stepped at a
         * different place, so __copy_instruction() tweaks the displacement of
         * that instruction. In that case, we can't recover the instruction
         * from kp->ainsn.insn.
         *
         * On the other hand, in the case of a normal kprobe, kp->opcode has a
         * copy of the first byte of the probed instruction, which is
         * overwritten by int3. Since the instruction at kp->addr is not
         * modified by kprobes except for the first byte, we can recover the
         * original instruction from it and kp->opcode.
         *
         * In the case of kprobes using ftrace, we do not have a copy of
         * the original instruction. In fact, the ftrace location might
         * be modified at any time and could even be in an inconsistent state.
         * Fortunately, we know that the original code is the ideal 5-byte
         * long NOP.
         */
        if (probe_kernel_read(buf, (void *)addr,
                MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
                return 0UL;

        if (faddr)
                memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
        else
                buf[0] = kp->opcode;
        return (unsigned long)buf;
}

/*
 * Recover the probed instruction at addr for further analysis.
 * The caller must lock kprobes by kprobe_mutex, or disable preemption,
 * to prevent the kprobes it references from being released.
 * Returns zero if the instruction could not be recovered (or the access
 * failed).
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
        unsigned long __addr;

        __addr = __recover_optprobed_insn(buf, addr);
        if (__addr != addr)
                return __addr;

        return __recover_probed_insn(buf, addr);
}

/* Check if paddr is at an instruction boundary */
static int can_probe(unsigned long paddr)
{
        unsigned long addr, __addr, offset = 0;
        struct insn insn;
        kprobe_opcode_t buf[MAX_INSN_SIZE];

        if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
                return 0;

        /* Decode instructions */
        addr = paddr - offset;
        while (addr < paddr) {
                /*
                 * Check if the instruction has been modified by another
                 * kprobe, in which case we replace the breakpoint by the
                 * original instruction in our buffer.
                 * Also, jump optimization will change the breakpoint to
                 * relative-jump. Since the relative-jump itself is
                 * normally used, we just go through if there is no kprobe.
                 */
                __addr = recover_probed_instruction(buf, addr);
                if (!__addr)
                        return 0;
                kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
                insn_get_length(&insn);

                /*
                 * Another debugging subsystem might insert this breakpoint.
                 * In that case, we can't recover it.
                 */
                if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
                        return 0;
                addr += insn.length;
        }

        return (addr == paddr);
}

/*
 * Returns non-zero if the opcode modifies the interrupt flag.
 */
static int is_IF_modifier(kprobe_opcode_t *insn)
{
        /* Skip prefixes */
        insn = skip_prefixes(insn);

        switch (*insn) {
        case 0xfa:              /* cli */
        case 0xfb:              /* sti */
        case 0xcf:              /* iret/iretd */
        case 0x9d:              /* popf/popfd */
                return 1;
        }

        return 0;
}

/*
 * Copy an instruction, recovering it if it has been modified by kprobes,
 * and adjust the displacement if the instruction uses the %rip-relative
 * addressing mode.
 * This returns the length of the copied instruction, or 0 on error.
 */
int __copy_instruction(u8 *dest, u8 *src, struct insn *insn)
{
        kprobe_opcode_t buf[MAX_INSN_SIZE];
        unsigned long recovered_insn =
                recover_probed_instruction(buf, (unsigned long)src);

        if (!recovered_insn || !insn)
                return 0;

        /* This can access kernel text if the given address is not recovered */
        if (probe_kernel_read(dest, (void *)recovered_insn, MAX_INSN_SIZE))
                return 0;

        kernel_insn_init(insn, dest, MAX_INSN_SIZE);
        insn_get_length(insn);

        /* Another subsystem has put a breakpoint there; we failed to recover it */
        if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
                return 0;

#ifdef CONFIG_X86_64
        /* Only x86_64 has RIP-relative instructions */
        if (insn_rip_relative(insn)) {
                s64 newdisp;
                u8 *disp;
                /*
                 * The copied instruction uses the %rip-relative addressing
                 * mode.  Adjust the displacement for the difference between
                 * the original location of this instruction and the location
                 * of the copy that will actually be run.  The tricky bit here
                 * is making sure that the sign extension happens correctly in
                 * this calculation, since we need a signed 32-bit result to
                 * be sign-extended to 64 bits when it's added to the %rip
                 * value and yield the same 64-bit result that the sign-
                 * extension of the original signed 32-bit displacement would
                 * have given.
                 */
                newdisp = (u8 *) src + (s64) insn->displacement.value
                          - (u8 *) dest;
                if ((s64) (s32) newdisp != newdisp) {
                        pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
                        pr_err("\tSrc: %p, Dest: %p, old disp: %x\n",
                                src, dest, insn->displacement.value);
                        return 0;
                }
                disp = (u8 *) dest + insn_offset_displacement(insn);
                *(s32 *) disp = (s32) newdisp;
        }
#endif
        return insn->length;
}

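/*
 * Worked example added by the editor (illustrative, not part of the original
 * file): a RIP-relative operand addresses "next-RIP + disp".  The original
 * instruction at 'src' with displacement d reaches src + length + d; after
 * copying to 'dest', __copy_instruction() stores
 *
 *      newdisp = src + d - dest
 *
 * so the copy reaches dest + length + newdisp = src + length + d, i.e. the
 * very same target, as long as newdisp still fits in an s32.
 */
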
/* Prepare a reljump right after the instruction to boost */
static void prepare_boost(struct kprobe *p, struct insn *insn)
{
        if (can_boost(insn, p->addr) &&
            MAX_INSN_SIZE - insn->length >= RELATIVEJUMP_SIZE) {
                /*
                 * This instruction can be executed directly if it
                 * jumps back to the correct address.
                 */
                synthesize_reljump(p->ainsn.insn + insn->length,
                                   p->addr + insn->length);
                p->ainsn.boostable = true;
        } else {
                p->ainsn.boostable = false;
        }
}

/* Recover the page to RW mode before releasing it */
void free_insn_page(void *page)
{
        set_memory_nx((unsigned long)page & PAGE_MASK, 1);
        set_memory_rw((unsigned long)page & PAGE_MASK, 1);
        module_memfree(page);
}

static int arch_copy_kprobe(struct kprobe *p)
{
        struct insn insn;
        int len;

        set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1);

        /* Copy the instruction, recovering it if another optprobe has modified it. */
        len = __copy_instruction(p->ainsn.insn, p->addr, &insn);
        if (!len)
                return -EINVAL;

        /*
         * __copy_instruction can modify the displacement of the instruction,
         * but that doesn't affect the boostability check.
         */
        prepare_boost(p, &insn);

        set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1);

        /* Check whether the instruction modifies the Interrupt Flag or not */
        p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);

        /* Also, the displacement change doesn't affect the first byte */
        p->opcode = p->ainsn.insn[0];

        return 0;
}

int arch_prepare_kprobe(struct kprobe *p)
{
        int ret;

        if (alternatives_text_reserved(p->addr, p->addr))
                return -EINVAL;

        if (!can_probe((unsigned long)p->addr))
                return -EILSEQ;
        /* insn: must be on a special executable page on x86. */
        p->ainsn.insn = get_insn_slot();
        if (!p->ainsn.insn)
                return -ENOMEM;

        ret = arch_copy_kprobe(p);
        if (ret) {
                free_insn_slot(p->ainsn.insn, 0);
                p->ainsn.insn = NULL;
        }

        return ret;
}

void arch_arm_kprobe(struct kprobe *p)
{
        text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void arch_disarm_kprobe(struct kprobe *p)
{
        text_poke(p->addr, &p->opcode, 1);
}

void arch_remove_kprobe(struct kprobe *p)
{
        if (p->ainsn.insn) {
                free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
                p->ainsn.insn = NULL;
        }
}

static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
        kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
        kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
        kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                   struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, p);
        kcb->kprobe_saved_flags = kcb->kprobe_old_flags
                = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
        if (p->ainsn.if_modifier)
                kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}

static nokprobe_inline void clear_btf(void)
{
        if (test_thread_flag(TIF_BLOCKSTEP)) {
                unsigned long debugctl = get_debugctlmsr();

                debugctl &= ~DEBUGCTLMSR_BTF;
                update_debugctlmsr(debugctl);
        }
}

static nokprobe_inline void restore_btf(void)
{
        if (test_thread_flag(TIF_BLOCKSTEP)) {
                unsigned long debugctl = get_debugctlmsr();

                debugctl |= DEBUGCTLMSR_BTF;
                update_debugctlmsr(debugctl);
        }
}

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        unsigned long *sara = stack_addr(regs);

        ri->ret_addr = (kprobe_opcode_t *) *sara;

        /* Replace the return addr with the trampoline addr */
        *sara = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

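/*
 * Illustrative sketch added by the editor (not in the original source): on
 * entry to the probed function the top of its stack looks like
 *
 *      [*sara] = return address in the caller
 *
 * and after arch_prepare_kretprobe() it is
 *
 *      [*sara] = &kretprobe_trampoline,  ri->ret_addr = original address
 *
 * so the probed function's "ret" lands in the trampoline further below, and
 * trampoline_handler() later restores the real return address from ri.
 */
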
static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
                             struct kprobe_ctlblk *kcb, int reenter)
{
        if (setup_detour_execution(p, regs, reenter))
                return;

#if !defined(CONFIG_PREEMPT)
        if (p->ainsn.boostable && !p->post_handler) {
                /* Boost up -- we can execute copied instructions directly */
                if (!reenter)
                        reset_current_kprobe();
                /*
                 * Reentering a boosted probe doesn't reset current_kprobe,
                 * nor set current_kprobe, because it doesn't use single
                 * stepping.
                 */
                regs->ip = (unsigned long)p->ainsn.insn;
                preempt_enable_no_resched();
                return;
        }
#endif
        if (reenter) {
                save_previous_kprobe(kcb);
                set_current_kprobe(p, regs, kcb);
                kcb->kprobe_status = KPROBE_REENTER;
        } else
                kcb->kprobe_status = KPROBE_HIT_SS;
        /* Prepare real single stepping */
        clear_btf();
        regs->flags |= X86_EFLAGS_TF;
        regs->flags &= ~X86_EFLAGS_IF;
        /* single step inline if the instruction is an int3 */
        if (p->opcode == BREAKPOINT_INSTRUCTION)
                regs->ip = (unsigned long)p->addr;
        else
                regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);

/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
                          struct kprobe_ctlblk *kcb)
{
        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SSDONE:
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SS:
                kprobes_inc_nmissed_count(p);
                setup_singlestep(p, regs, kcb, 1);
                break;
        case KPROBE_REENTER:
                /* A probe has been hit in the codepath leading up to, or just
                 * after, single-stepping of a probed instruction. This entire
                 * codepath should strictly reside in the .kprobes.text section.
                 * Raise a BUG or we'll continue in an endless reentering loop
                 * and eventually a stack overflow.
                 */
                printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
                       p->addr);
                dump_kprobe(p);
                BUG();
        default:
                /* impossible cases */
                WARN_ON(1);
                return 0;
        }

        return 1;
}
NOKPROBE_SYMBOL(reenter_kprobe);

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_int3_handler(struct pt_regs *regs)
{
        kprobe_opcode_t *addr;
        struct kprobe *p;
        struct kprobe_ctlblk *kcb;

        if (user_mode(regs))
                return 0;

        addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing. We conditionally
         * re-enable preemption at the end of this function,
         * and also in reenter_kprobe() and setup_singlestep().
         */
        preempt_disable();

        kcb = get_kprobe_ctlblk();
        p = get_kprobe(addr);

        if (p) {
                if (kprobe_running()) {
                        if (reenter_kprobe(p, regs, kcb))
                                return 1;
                } else {
                        set_current_kprobe(p, regs, kcb);
                        kcb->kprobe_status = KPROBE_HIT_ACTIVE;

                        /*
                         * If we have no pre-handler or it returned 0, we
                         * continue with normal processing.  If we have a
                         * pre-handler and it returned non-zero, it prepped
                         * for calling the break_handler below on re-entry
                         * for jprobe processing, so get out doing nothing
                         * more here.
                         */
                        if (!p->pre_handler || !p->pre_handler(p, regs))
                                setup_singlestep(p, regs, kcb, 0);
                        return 1;
                }
        } else if (*addr != BREAKPOINT_INSTRUCTION) {
                /*
                 * The breakpoint instruction was removed right
                 * after we hit it.  Another cpu has removed
                 * either a probepoint or a debugger breakpoint
                 * at this address.  In either case, no further
                 * handling of this interrupt is appropriate.
                 * Back up over the (now missing) int3 and run
                 * the original instruction.
                 */
                regs->ip = (unsigned long)addr;
                preempt_enable_no_resched();
                return 1;
        } else if (kprobe_running()) {
                p = __this_cpu_read(current_kprobe);
                if (p->break_handler && p->break_handler(p, regs)) {
                        if (!skip_singlestep(p, regs, kcb))
                                setup_singlestep(p, regs, kcb, 0);
                        return 1;
                }
        } /* else: not a kprobe fault; let the kernel handle it */

        preempt_enable_no_resched();
        return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);

/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
asm(
        ".global kretprobe_trampoline\n"
        ".type kretprobe_trampoline, @function\n"
        "kretprobe_trampoline:\n"
#ifdef CONFIG_X86_64
        /* We don't bother saving the ss register */
        "       pushq %rsp\n"
        "       pushfq\n"
        SAVE_REGS_STRING
        "       movq %rsp, %rdi\n"
        "       call trampoline_handler\n"
        /* Replace saved sp with true return address. */
        "       movq %rax, 152(%rsp)\n"
        RESTORE_REGS_STRING
        "       popfq\n"
#else
        "       pushf\n"
        SAVE_REGS_STRING
        "       movl %esp, %eax\n"
        "       call trampoline_handler\n"
        /* Move flags to cs */
        "       movl 56(%esp), %edx\n"
        "       movl %edx, 52(%esp)\n"
        /* Replace saved flags with true return address. */
        "       movl %eax, 56(%esp)\n"
        RESTORE_REGS_STRING
        "       popf\n"
#endif
        "       ret\n"
        ".size kretprobe_trampoline, .-kretprobe_trampoline\n"
);
NOKPROBE_SYMBOL(kretprobe_trampoline);
STACK_FRAME_NON_STANDARD(kretprobe_trampoline);

| 744 | /* |
Masami Hiramatsu | da07ab0 | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 745 | * Called from kretprobe_trampoline |
Rusty Lynch | 73649da | 2005-06-23 00:09:23 -0700 | [diff] [blame] | 746 | */ |
Masami Hiramatsu | 9326638 | 2014-04-17 17:18:14 +0900 | [diff] [blame] | 747 | __visible __used void *trampoline_handler(struct pt_regs *regs) |
Rusty Lynch | 73649da | 2005-06-23 00:09:23 -0700 | [diff] [blame] | 748 | { |
bibo,mao | 62c27be | 2006-10-02 02:17:33 -0700 | [diff] [blame] | 749 | struct kretprobe_instance *ri = NULL; |
bibo,mao | 99219a3 | 2006-10-02 02:17:35 -0700 | [diff] [blame] | 750 | struct hlist_head *head, empty_rp; |
Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 751 | struct hlist_node *tmp; |
Ananth N Mavinakayanahalli | 991a51d | 2005-11-07 01:00:14 -0800 | [diff] [blame] | 752 | unsigned long flags, orig_ret_address = 0; |
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 753 | unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; |
KUMANO Syuhei | 737480a | 2010-08-15 15:18:04 +0900 | [diff] [blame] | 754 | kprobe_opcode_t *correct_ret_addr = NULL; |
Rusty Lynch | 73649da | 2005-06-23 00:09:23 -0700 | [diff] [blame] | 755 | |
bibo,mao | 99219a3 | 2006-10-02 02:17:35 -0700 | [diff] [blame] | 756 | INIT_HLIST_HEAD(&empty_rp); |
Srinivasa D S | ef53d9c | 2008-07-25 01:46:04 -0700 | [diff] [blame] | 757 | kretprobe_hash_lock(current, &head, &flags); |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 758 | /* fixup registers */ |
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 759 | #ifdef CONFIG_X86_64 |
Masami Hiramatsu | da07ab0 | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 760 | regs->cs = __KERNEL_CS; |
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 761 | #else |
| 762 | regs->cs = __KERNEL_CS | get_kernel_rpl(); |
Masami Hiramatsu | fee039a | 2009-03-23 10:14:52 -0400 | [diff] [blame] | 763 | regs->gs = 0; |
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 764 | #endif |
Masami Hiramatsu | da07ab0 | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 765 | regs->ip = trampoline_address; |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 766 | regs->orig_ax = ~0UL; |
Rusty Lynch | 73649da | 2005-06-23 00:09:23 -0700 | [diff] [blame] | 767 | |
Rusty Lynch | ba8af12 | 2005-06-27 15:17:10 -0700 | [diff] [blame] | 768 | /* |
| 769 | * It is possible to have multiple instances associated with a given
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 770 | * task, either because multiple functions in the call path have
Frederik Schwarzer | 025dfda | 2008-10-16 19:02:37 +0200 | [diff] [blame] | 771 | * return probes installed on them, or because more than one
Rusty Lynch | ba8af12 | 2005-06-27 15:17:10 -0700 | [diff] [blame] | 772 | * return probe was registered for a target function.
| 773 | * |
| 774 | * We can handle this because: |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 775 | * - instances are always pushed into the head of the list |
Rusty Lynch | ba8af12 | 2005-06-27 15:17:10 -0700 | [diff] [blame] | 776 | * - when multiple return probes are registered for the same |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 777 | * function, the (chronologically) first instance's ret_addr |
| 778 | * will be the real return address, and all the rest will |
| 779 | * point to kretprobe_trampoline. |
Rusty Lynch | ba8af12 | 2005-06-27 15:17:10 -0700 | [diff] [blame] | 780 | */ |
Masami Hiramatsu | b626317 | 2017-02-06 18:55:43 +0900 | [diff] [blame] | 781 | hlist_for_each_entry(ri, head, hlist) { |
bibo,mao | 62c27be | 2006-10-02 02:17:33 -0700 | [diff] [blame] | 782 | if (ri->task != current) |
Rusty Lynch | ba8af12 | 2005-06-27 15:17:10 -0700 | [diff] [blame] | 783 | /* another task is sharing our hash bucket */ |
bibo,mao | 62c27be | 2006-10-02 02:17:33 -0700 | [diff] [blame] | 784 | continue; |
Rusty Lynch | 73649da | 2005-06-23 00:09:23 -0700 | [diff] [blame] | 785 | |
Rusty Lynch | ba8af12 | 2005-06-27 15:17:10 -0700 | [diff] [blame] | 786 | orig_ret_address = (unsigned long)ri->ret_addr; |
Rusty Lynch | ba8af12 | 2005-06-27 15:17:10 -0700 | [diff] [blame] | 787 | |
| 788 | if (orig_ret_address != trampoline_address) |
| 789 | /* |
| 790 | * This is the real return address. Any other |
| 791 | * instances associated with this task are for |
| 792 | * other calls deeper on the call stack |
| 793 | */ |
| 794 | break; |
Rusty Lynch | 73649da | 2005-06-23 00:09:23 -0700 | [diff] [blame] | 795 | } |
Rusty Lynch | ba8af12 | 2005-06-27 15:17:10 -0700 | [diff] [blame] | 796 | |
Ananth N Mavinakayanahalli | 0f95b7f | 2007-05-08 00:28:27 -0700 | [diff] [blame] | 797 | kretprobe_assert(ri, orig_ret_address, trampoline_address); |
Rusty Lynch | ba8af12 | 2005-06-27 15:17:10 -0700 | [diff] [blame] | 798 | |
KUMANO Syuhei | 737480a | 2010-08-15 15:18:04 +0900 | [diff] [blame] | 799 | correct_ret_addr = ri->ret_addr; |
Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 800 | hlist_for_each_entry_safe(ri, tmp, head, hlist) { |
KUMANO Syuhei | 737480a | 2010-08-15 15:18:04 +0900 | [diff] [blame] | 801 | if (ri->task != current) |
| 802 | /* another task is sharing our hash bucket */ |
| 803 | continue; |
| 804 | |
| 805 | orig_ret_address = (unsigned long)ri->ret_addr; |
| 806 | if (ri->rp && ri->rp->handler) { |
Christoph Lameter | b76834b | 2010-12-06 11:16:25 -0600 | [diff] [blame] | 807 | __this_cpu_write(current_kprobe, &ri->rp->kp); |
KUMANO Syuhei | 737480a | 2010-08-15 15:18:04 +0900 | [diff] [blame] | 808 | get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; |
| 809 | ri->ret_addr = correct_ret_addr; |
| 810 | ri->rp->handler(ri, regs); |
Christoph Lameter | b76834b | 2010-12-06 11:16:25 -0600 | [diff] [blame] | 811 | __this_cpu_write(current_kprobe, NULL); |
KUMANO Syuhei | 737480a | 2010-08-15 15:18:04 +0900 | [diff] [blame] | 812 | } |
| 813 | |
| 814 | recycle_rp_inst(ri, &empty_rp); |
| 815 | |
| 816 | if (orig_ret_address != trampoline_address) |
| 817 | /* |
| 818 | * This is the real return address. Any other |
| 819 | * instances associated with this task are for |
| 820 | * other calls deeper on the call stack |
| 821 | */ |
| 822 | break; |
| 823 | } |
| 824 | |
Srinivasa D S | ef53d9c | 2008-07-25 01:46:04 -0700 | [diff] [blame] | 825 | kretprobe_hash_unlock(current, &flags); |
Rusty Lynch | ba8af12 | 2005-06-27 15:17:10 -0700 | [diff] [blame] | 826 | |
Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 827 | hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { |
bibo,mao | 99219a3 | 2006-10-02 02:17:35 -0700 | [diff] [blame] | 828 | hlist_del(&ri->hlist); |
| 829 | kfree(ri); |
| 830 | } |
Masami Hiramatsu | da07ab0 | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 831 | return (void *)orig_ret_address; |
Rusty Lynch | 73649da | 2005-06-23 00:09:23 -0700 | [diff] [blame] | 832 | } |
Masami Hiramatsu | 9326638 | 2014-04-17 17:18:14 +0900 | [diff] [blame] | 833 | NOKPROBE_SYMBOL(trampoline_handler); |
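/*
 * Illustrative sketch (not part of this file): a minimal kretprobe whose
 * handler is invoked from trampoline_handler() above once the real return
 * address has been recovered.  The function and probed symbol names below
 * are only examples.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("%s returned 0x%lx\n", ri->rp->kp.symbol_name,
 *			regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= my_ret_handler,
 *		.kp.symbol_name	= "do_fork",
 *		.maxactive	= 20,
 *	};
 *
 * register_kretprobe(&my_kretprobe) is then called from module init and
 * unregister_kretprobe(&my_kretprobe) from module exit.
 */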
Rusty Lynch | 73649da | 2005-06-23 00:09:23 -0700 | [diff] [blame] | 834 | |
| 835 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 836 | * Called after single-stepping. p->addr is the address of the |
| 837 | * instruction whose first byte has been replaced by the "int 3" |
| 838 | * instruction. To avoid the SMP problems that can occur when we |
| 839 | * temporarily put back the original opcode to single-step, we |
| 840 | * single-stepped a copy of the instruction. The address of this |
| 841 | * copy is p->ainsn.insn. |
| 842 | * |
| 843 | * This function prepares to return from the post-single-step |
| 844 | * interrupt. We have to fix up the stack as follows: |
| 845 | * |
| 846 | * 0) Except in the case of absolute or indirect jump or call instructions, |
H. Peter Anvin | 65ea5b0 | 2008-01-30 13:30:56 +0100 | [diff] [blame] | 847 | * the new ip is relative to the copied instruction. We need to make |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 848 | * it relative to the original instruction. |
| 849 | * |
| 850 | * 1) If the single-stepped instruction was pushfl, then the TF and IF |
H. Peter Anvin | 65ea5b0 | 2008-01-30 13:30:56 +0100 | [diff] [blame] | 851 | * flags are set in the just-pushed flags, and may need to be cleared. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 852 | * |
| 853 | * 2) If the single-stepped instruction was a call, the return address |
| 854 | * that is atop the stack is the address following the copied instruction. |
| 855 | * We need to make it the address following the original instruction. |
Masami Hiramatsu | aa47014 | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 856 | * |
| 857 | * If this is the first time we've single-stepped the instruction at |
| 858 | * this probepoint, and the instruction is boostable, boost it: add a |
| 859 | * jump instruction after the copied instruction that jumps to the next
| 860 | * instruction after the probepoint. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 861 | */ |
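/*
 * A concrete example of the fixups below (addresses made up): if the probed
 * instruction lives at orig_ip = 0xffffffff81000100 and its copy at
 * copy_ip = 0xffffffffa0002000, then after single-stepping a 5-byte call
 * from the copy the pushed return address is copy_ip + 5.  The fixup
 * *tos = orig_ip + (*tos - copy_ip) rewrites it to orig_ip + 5, and
 * regs->ip += orig_ip - copy_ip moves ip back into the original code.
 */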
Masami Hiramatsu | 9326638 | 2014-04-17 17:18:14 +0900 | [diff] [blame] | 862 | static void resume_execution(struct kprobe *p, struct pt_regs *regs, |
| 863 | struct kprobe_ctlblk *kcb) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 864 | { |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 865 | unsigned long *tos = stack_addr(regs); |
| 866 | unsigned long copy_ip = (unsigned long)p->ainsn.insn; |
| 867 | unsigned long orig_ip = (unsigned long)p->addr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 868 | kprobe_opcode_t *insn = p->ainsn.insn; |
| 869 | |
Masami Hiramatsu | 567a9fd | 2010-06-29 14:53:50 +0900 | [diff] [blame] | 870 | /* Skip prefixes */ |
| 871 | insn = skip_prefixes(insn); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 872 | |
Glauber de Oliveira Costa | 053de04 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 873 | regs->flags &= ~X86_EFLAGS_TF; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 874 | switch (*insn) { |
Masami Hiramatsu | 0b0122f | 2007-12-18 18:05:58 +0100 | [diff] [blame] | 875 | case 0x9c: /* pushfl */ |
Glauber de Oliveira Costa | 053de04 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 876 | *tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF); |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 877 | *tos |= kcb->kprobe_old_flags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 878 | break; |
Masami Hiramatsu | 0b0122f | 2007-12-18 18:05:58 +0100 | [diff] [blame] | 879 | case 0xc2: /* iret/ret/lret */ |
| 880 | case 0xc3: |
Prasanna S Panchamukhi | 0b9e2ca | 2005-05-05 16:15:40 -0700 | [diff] [blame] | 881 | case 0xca: |
Masami Hiramatsu | 0b0122f | 2007-12-18 18:05:58 +0100 | [diff] [blame] | 882 | case 0xcb: |
| 883 | case 0xcf: |
| 884 | case 0xea: /* jmp absolute -- ip is correct */ |
| 885 | /* ip is already adjusted, no more changes required */ |
Masami Hiramatsu | 490154b | 2017-03-29 14:01:35 +0900 | [diff] [blame] | 886 | p->ainsn.boostable = true; |
Masami Hiramatsu | 0b0122f | 2007-12-18 18:05:58 +0100 | [diff] [blame] | 887 | goto no_change; |
| 888 | case 0xe8: /* call relative - Fix return addr */ |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 889 | *tos = orig_ip + (*tos - copy_ip); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 890 | break; |
Harvey Harrison | e7b5e11 | 2008-01-30 13:31:43 +0100 | [diff] [blame] | 891 | #ifdef CONFIG_X86_32 |
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 892 | case 0x9a: /* call absolute (far) -- fix return addr, like call absolute, indirect */
| 893 | *tos = orig_ip + (*tos - copy_ip); |
| 894 | goto no_change; |
| 895 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 896 | case 0xff: |
Satoshi Oshima | dc49e34 | 2006-05-20 15:00:21 -0700 | [diff] [blame] | 897 | if ((insn[1] & 0x30) == 0x10) { |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 898 | /* |
| 899 | * call absolute, indirect |
| 900 | * Fix return addr; ip is correct. |
| 901 | * But this is not boostable |
| 902 | */ |
| 903 | *tos = orig_ip + (*tos - copy_ip); |
Masami Hiramatsu | 0b0122f | 2007-12-18 18:05:58 +0100 | [diff] [blame] | 904 | goto no_change; |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 905 | } else if (((insn[1] & 0x31) == 0x20) || |
| 906 | ((insn[1] & 0x31) == 0x21)) { |
| 907 | /* |
| 908 | * jmp near and far, absolute indirect |
| 909 | * ip is correct. And this is boostable |
| 910 | */ |
Masami Hiramatsu | 490154b | 2017-03-29 14:01:35 +0900 | [diff] [blame] | 911 | p->ainsn.boostable = true; |
Masami Hiramatsu | 0b0122f | 2007-12-18 18:05:58 +0100 | [diff] [blame] | 912 | goto no_change; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 913 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 914 | default: |
| 915 | break; |
| 916 | } |
| 917 | |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 918 | regs->ip += orig_ip - copy_ip; |
H. Peter Anvin | 65ea5b0 | 2008-01-30 13:30:56 +0100 | [diff] [blame] | 919 | |
Masami Hiramatsu | 0b0122f | 2007-12-18 18:05:58 +0100 | [diff] [blame] | 920 | no_change: |
Roland McGrath | 1ecc798 | 2008-01-30 13:30:54 +0100 | [diff] [blame] | 921 | restore_btf(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 922 | } |
Masami Hiramatsu | 9326638 | 2014-04-17 17:18:14 +0900 | [diff] [blame] | 923 | NOKPROBE_SYMBOL(resume_execution); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 924 | |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 925 | /* |
| 926 | * Interrupts are disabled on entry, as the debug trap (vector 1) uses an
André Goddard Rosa | af901ca | 2009-11-14 13:09:05 -0200 | [diff] [blame] | 927 | * interrupt gate, and they remain disabled throughout this function.
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 928 | */ |
Masami Hiramatsu | 9326638 | 2014-04-17 17:18:14 +0900 | [diff] [blame] | 929 | int kprobe_debug_handler(struct pt_regs *regs) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 930 | { |
Ananth N Mavinakayanahalli | e7a510f | 2005-11-07 01:00:12 -0800 | [diff] [blame] | 931 | struct kprobe *cur = kprobe_running(); |
| 932 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
| 933 | |
| 934 | if (!cur) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 935 | return 0; |
| 936 | |
Yakov Lerner | acb5b8a | 2008-03-16 03:21:21 -0500 | [diff] [blame] | 937 | resume_execution(cur, regs, kcb); |
| 938 | regs->flags |= kcb->kprobe_saved_flags; |
Yakov Lerner | acb5b8a | 2008-03-16 03:21:21 -0500 | [diff] [blame] | 939 | |
Ananth N Mavinakayanahalli | e7a510f | 2005-11-07 01:00:12 -0800 | [diff] [blame] | 940 | if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { |
| 941 | kcb->kprobe_status = KPROBE_HIT_SSDONE; |
| 942 | cur->post_handler(cur, regs, 0); |
Prasanna S Panchamukhi | aa3d7e3 | 2005-06-23 00:09:37 -0700 | [diff] [blame] | 943 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 944 | |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 945 | /* Restore back the original saved kprobes variables and continue. */ |
Ananth N Mavinakayanahalli | e7a510f | 2005-11-07 01:00:12 -0800 | [diff] [blame] | 946 | if (kcb->kprobe_status == KPROBE_REENTER) { |
| 947 | restore_previous_kprobe(kcb); |
Prasanna S Panchamukhi | aa3d7e3 | 2005-06-23 00:09:37 -0700 | [diff] [blame] | 948 | goto out; |
Prasanna S Panchamukhi | aa3d7e3 | 2005-06-23 00:09:37 -0700 | [diff] [blame] | 949 | } |
Ananth N Mavinakayanahalli | e7a510f | 2005-11-07 01:00:12 -0800 | [diff] [blame] | 950 | reset_current_kprobe(); |
Prasanna S Panchamukhi | aa3d7e3 | 2005-06-23 00:09:37 -0700 | [diff] [blame] | 951 | out: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 952 | preempt_enable_no_resched(); |
| 953 | |
| 954 | /* |
H. Peter Anvin | 65ea5b0 | 2008-01-30 13:30:56 +0100 | [diff] [blame] | 955 | * If somebody else is single-stepping across a probe point, flags
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 956 | * will have TF set; in that case, continue the remaining processing
| 957 | * of do_debug, as if this were not a probe hit.
| 958 | */ |
Glauber de Oliveira Costa | 053de04 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 959 | if (regs->flags & X86_EFLAGS_TF) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 960 | return 0; |
| 961 | |
| 962 | return 1; |
| 963 | } |
Masami Hiramatsu | 9326638 | 2014-04-17 17:18:14 +0900 | [diff] [blame] | 964 | NOKPROBE_SYMBOL(kprobe_debug_handler); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 965 | |
Masami Hiramatsu | 9326638 | 2014-04-17 17:18:14 +0900 | [diff] [blame] | 966 | int kprobe_fault_handler(struct pt_regs *regs, int trapnr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 967 | { |
Ananth N Mavinakayanahalli | e7a510f | 2005-11-07 01:00:12 -0800 | [diff] [blame] | 968 | struct kprobe *cur = kprobe_running(); |
| 969 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
| 970 | |
Masami Hiramatsu | 6381c24 | 2014-04-17 17:16:44 +0900 | [diff] [blame] | 971 | if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) { |
| 972 | /* This must happen on single-stepping */ |
| 973 | WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS && |
| 974 | kcb->kprobe_status != KPROBE_REENTER); |
Prasanna S Panchamukhi | c28f896 | 2006-03-26 01:38:23 -0800 | [diff] [blame] | 975 | /* |
| 976 | * We are here because the instruction being single-
| 977 | * stepped caused a page fault. We reset the current
H. Peter Anvin | 65ea5b0 | 2008-01-30 13:30:56 +0100 | [diff] [blame] | 978 | * kprobe, point the ip back to the probe address,
Prasanna S Panchamukhi | c28f896 | 2006-03-26 01:38:23 -0800 | [diff] [blame] | 979 | * and allow the page fault handler to continue as a
| 980 | * normal page fault.
| 981 | */ |
H. Peter Anvin | 65ea5b0 | 2008-01-30 13:30:56 +0100 | [diff] [blame] | 982 | regs->ip = (unsigned long)cur->addr; |
Masami Hiramatsu | dcfc472 | 2016-06-11 23:06:53 +0900 | [diff] [blame] | 983 | /* |
| 984 | * The trap flag (TF) has been set because this fault
| 985 | * happened while single-stepping the copied
| 986 | * instruction, so clear it here:
| 987 | */ |
| 988 | regs->flags &= ~X86_EFLAGS_TF; |
| 989 | |
| 990 | /* |
| 991 | * If the TF flag was set before the kprobe hit, |
| 992 | * don't touch it: |
| 993 | */ |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 994 | regs->flags |= kcb->kprobe_old_flags; |
Masami Hiramatsu | dcfc472 | 2016-06-11 23:06:53 +0900 | [diff] [blame] | 995 | |
Prasanna S Panchamukhi | c28f896 | 2006-03-26 01:38:23 -0800 | [diff] [blame] | 996 | if (kcb->kprobe_status == KPROBE_REENTER) |
| 997 | restore_previous_kprobe(kcb); |
| 998 | else |
| 999 | reset_current_kprobe(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1000 | preempt_enable_no_resched(); |
Masami Hiramatsu | 6381c24 | 2014-04-17 17:16:44 +0900 | [diff] [blame] | 1001 | } else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE || |
| 1002 | kcb->kprobe_status == KPROBE_HIT_SSDONE) { |
Prasanna S Panchamukhi | c28f896 | 2006-03-26 01:38:23 -0800 | [diff] [blame] | 1003 | /* |
| 1004 | * We increment the nmissed count for accounting;
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 1005 | * the npre/npostfault counts could also be used to
Prasanna S Panchamukhi | c28f896 | 2006-03-26 01:38:23 -0800 | [diff] [blame] | 1006 | * account for these specific fault cases.
| 1007 | */ |
| 1008 | kprobes_inc_nmissed_count(cur); |
| 1009 | |
| 1010 | /* |
| 1011 | * We come here because an instruction in the pre/post
| 1012 | * handler caused the page fault. This could happen
| 1013 | * if the handler tries to access user space via
| 1014 | * copy_from_user(), get_user(), etc. Let the
| 1015 | * user-specified fault handler try to fix it first.
| 1016 | */ |
| 1017 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) |
| 1018 | return 1; |
| 1019 | |
| 1020 | /* |
| 1021 | * In case the user-specified fault handler returned |
| 1022 | * zero, try to fix up. |
| 1023 | */ |
Tony Luck | 548acf1 | 2016-02-17 10:20:12 -0800 | [diff] [blame] | 1024 | if (fixup_exception(regs, trapnr)) |
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 1025 | return 1; |
Harvey Harrison | 6d48583 | 2008-01-30 13:31:41 +0100 | [diff] [blame] | 1026 | |
Prasanna S Panchamukhi | c28f896 | 2006-03-26 01:38:23 -0800 | [diff] [blame] | 1027 | /* |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 1028 | * The fixup routine could not handle it;
Prasanna S Panchamukhi | c28f896 | 2006-03-26 01:38:23 -0800 | [diff] [blame] | 1029 | * let do_page_fault() fix it.
| 1030 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1031 | } |
Masami Hiramatsu | 6381c24 | 2014-04-17 17:16:44 +0900 | [diff] [blame] | 1032 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1033 | return 0; |
| 1034 | } |
Masami Hiramatsu | 9326638 | 2014-04-17 17:18:14 +0900 | [diff] [blame] | 1035 | NOKPROBE_SYMBOL(kprobe_fault_handler); |
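/*
 * Illustrative sketch (not part of this file): a kprobe user supplying the
 * optional fault callback consulted by kprobe_fault_handler() above (the
 * same struct also takes .pre_handler and .post_handler; the latter is what
 * kprobe_debug_handler() invokes).  All names below are made up.
 *
 *	static int my_fault_handler(struct kprobe *p, struct pt_regs *regs,
 *				    int trapnr)
 *	{
 *		pr_info("fault %d while handling probe at %p\n",
 *			trapnr, p->addr);
 *		return 0;	returning 0 falls back to the fixup above
 *	}
 *
 *	static struct kprobe my_kprobe = {
 *		.symbol_name	= "do_fork",
 *		.fault_handler	= my_fault_handler,
 *	};
 *
 * register_kprobe(&my_kprobe) is then called from module init and
 * unregister_kprobe(&my_kprobe) from module exit.
 */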
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1036 | |
| 1037 | /* |
| 1038 | * Wrapper routine for handling exceptions. |
| 1039 | */ |
Masami Hiramatsu | 9326638 | 2014-04-17 17:18:14 +0900 | [diff] [blame] | 1040 | int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, |
| 1041 | void *data) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1042 | { |
Jan Engelhardt | ade1af7 | 2008-01-30 13:33:23 +0100 | [diff] [blame] | 1043 | struct die_args *args = data; |
Ananth N Mavinakayanahalli | 66ff2d06 | 2005-11-07 01:00:07 -0800 | [diff] [blame] | 1044 | int ret = NOTIFY_DONE; |
| 1045 | |
Andy Lutomirski | f39b6f0 | 2015-03-18 18:33:33 -0700 | [diff] [blame] | 1046 | if (args->regs && user_mode(args->regs)) |
bibo,mao | 2326c77 | 2006-03-26 01:38:21 -0800 | [diff] [blame] | 1047 | return ret; |
| 1048 | |
Masami Hiramatsu | 6f6343f | 2014-04-17 17:17:33 +0900 | [diff] [blame] | 1049 | if (val == DIE_GPF) { |
Quentin Barnes | b506a9d | 2008-01-30 13:32:32 +0100 | [diff] [blame] | 1050 | /* |
| 1051 | * To be potentially processing a kprobe fault and to |
| 1052 | * trust the result from kprobe_running(), we have to
| 1053 | * be non-preemptible. |
| 1054 | */ |
| 1055 | if (!preemptible() && kprobe_running() && |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1056 | kprobe_fault_handler(args->regs, args->trapnr)) |
Ananth N Mavinakayanahalli | 66ff2d06 | 2005-11-07 01:00:07 -0800 | [diff] [blame] | 1057 | ret = NOTIFY_STOP; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1058 | } |
Ananth N Mavinakayanahalli | 66ff2d06 | 2005-11-07 01:00:07 -0800 | [diff] [blame] | 1059 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1060 | } |
Masami Hiramatsu | 9326638 | 2014-04-17 17:18:14 +0900 | [diff] [blame] | 1061 | NOKPROBE_SYMBOL(kprobe_exceptions_notify); |
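/*
 * Sketch of how this notifier is expected to be wired up by the generic
 * kprobes core in kernel/kprobes.c (simplified, not this file's code):
 *
 *	static struct notifier_block kprobe_exceptions_nb = {
 *		.notifier_call	= kprobe_exceptions_notify,
 *		.priority	= 0x7fffffff,	highest priority: notified first
 *	};
 *
 *	register_die_notifier(&kprobe_exceptions_nb);
 */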
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1062 | |
Masami Hiramatsu | 9326638 | 2014-04-17 17:18:14 +0900 | [diff] [blame] | 1063 | int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1064 | { |
| 1065 | struct jprobe *jp = container_of(p, struct jprobe, kp); |
| 1066 | unsigned long addr; |
Ananth N Mavinakayanahalli | e7a510f | 2005-11-07 01:00:12 -0800 | [diff] [blame] | 1067 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1068 | |
Ananth N Mavinakayanahalli | e7a510f | 2005-11-07 01:00:12 -0800 | [diff] [blame] | 1069 | kcb->jprobe_saved_regs = *regs; |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 1070 | kcb->jprobe_saved_sp = stack_addr(regs); |
| 1071 | addr = (unsigned long)(kcb->jprobe_saved_sp); |
| 1072 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1073 | /* |
| 1074 | * As Linus pointed out, gcc assumes that the callee |
| 1075 | * owns the argument space and could overwrite it, e.g. |
| 1076 | * tailcall optimization. So, to be absolutely safe |
| 1077 | * we also save and restore enough stack bytes to cover |
| 1078 | * the argument area. |
Dmitry Vyukov | 9254139 | 2016-10-11 14:13:38 +0200 | [diff] [blame] | 1079 | * Use __memcpy() to avoid KASAN stack out-of-bounds reports, as we copy
| 1080 | * a raw stack chunk together with its redzones:
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1081 | */ |
Dmitry Vyukov | 9254139 | 2016-10-11 14:13:38 +0200 | [diff] [blame] | 1082 | __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr)); |
Glauber de Oliveira Costa | 053de04 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 1083 | regs->flags &= ~X86_EFLAGS_IF; |
Peter Zijlstra | 58dfe88 | 2007-10-11 22:25:25 +0200 | [diff] [blame] | 1084 | trace_hardirqs_off(); |
H. Peter Anvin | 65ea5b0 | 2008-01-30 13:30:56 +0100 | [diff] [blame] | 1085 | regs->ip = (unsigned long)(jp->entry); |
Steven Rostedt (Red Hat) | 237d28d | 2015-01-12 12:12:03 -0500 | [diff] [blame] | 1086 | |
| 1087 | /* |
| 1088 | * jprobes use jprobe_return(), which skips the normal return
| 1089 | * path of the function, and this messes up the accounting
| 1090 | * of the function graph tracer.
| 1091 | * |
| 1092 | * Pause function graph tracing while performing the jprobe function. |
| 1093 | */ |
| 1094 | pause_graph_tracing(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1095 | return 1; |
| 1096 | } |
Masami Hiramatsu | 9326638 | 2014-04-17 17:18:14 +0900 | [diff] [blame] | 1097 | NOKPROBE_SYMBOL(setjmp_pre_handler); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1098 | |
Masami Hiramatsu | 9326638 | 2014-04-17 17:18:14 +0900 | [diff] [blame] | 1099 | void jprobe_return(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1100 | { |
Ananth N Mavinakayanahalli | e7a510f | 2005-11-07 01:00:12 -0800 | [diff] [blame] | 1101 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
| 1102 | |
Dmitry Vyukov | 9f7d416 | 2016-10-14 16:07:23 +0200 | [diff] [blame] | 1103 | /* Unpoison stack redzones in the frames we are going to jump over. */ |
| 1104 | kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp); |
| 1105 | |
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 1106 | asm volatile ( |
| 1107 | #ifdef CONFIG_X86_64 |
| 1108 | " xchg %%rbx,%%rsp \n" |
| 1109 | #else |
| 1110 | " xchgl %%ebx,%%esp \n" |
| 1111 | #endif |
| 1112 | " int3 \n" |
| 1113 | " .globl jprobe_return_end\n" |
| 1114 | " jprobe_return_end: \n" |
| 1115 | " nop \n"::"b" |
| 1116 | (kcb->jprobe_saved_sp):"memory"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1117 | } |
Masami Hiramatsu | 9326638 | 2014-04-17 17:18:14 +0900 | [diff] [blame] | 1118 | NOKPROBE_SYMBOL(jprobe_return); |
| 1119 | NOKPROBE_SYMBOL(jprobe_return_end); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1120 | |
Masami Hiramatsu | 9326638 | 2014-04-17 17:18:14 +0900 | [diff] [blame] | 1121 | int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1122 | { |
Ananth N Mavinakayanahalli | e7a510f | 2005-11-07 01:00:12 -0800 | [diff] [blame] | 1123 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
H. Peter Anvin | 65ea5b0 | 2008-01-30 13:30:56 +0100 | [diff] [blame] | 1124 | u8 *addr = (u8 *) (regs->ip - 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1125 | struct jprobe *jp = container_of(p, struct jprobe, kp); |
Steven Rostedt (Red Hat) | 237d28d | 2015-01-12 12:12:03 -0500 | [diff] [blame] | 1126 | void *saved_sp = kcb->jprobe_saved_sp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1127 | |
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 1128 | if ((addr > (u8 *) jprobe_return) && |
| 1129 | (addr < (u8 *) jprobe_return_end)) { |
Steven Rostedt (Red Hat) | 237d28d | 2015-01-12 12:12:03 -0500 | [diff] [blame] | 1130 | if (stack_addr(regs) != saved_sp) { |
Masami Hiramatsu | 29b6cd7 | 2007-12-18 18:05:58 +0100 | [diff] [blame] | 1131 | struct pt_regs *saved_regs = &kcb->jprobe_saved_regs; |
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 1132 | printk(KERN_ERR |
| 1133 | "current sp %p does not match saved sp %p\n", |
Steven Rostedt (Red Hat) | 237d28d | 2015-01-12 12:12:03 -0500 | [diff] [blame] | 1134 | stack_addr(regs), saved_sp); |
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 1135 | printk(KERN_ERR "Saved registers for jprobe %p\n", jp); |
Jan Beulich | 57da8b9 | 2012-05-09 08:47:37 +0100 | [diff] [blame] | 1136 | show_regs(saved_regs); |
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 1137 | printk(KERN_ERR "Current registers\n"); |
Jan Beulich | 57da8b9 | 2012-05-09 08:47:37 +0100 | [diff] [blame] | 1138 | show_regs(regs); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1139 | BUG(); |
| 1140 | } |
Steven Rostedt (Red Hat) | 237d28d | 2015-01-12 12:12:03 -0500 | [diff] [blame] | 1141 | /* It's OK to start function graph tracing again */ |
| 1142 | unpause_graph_tracing(); |
Ananth N Mavinakayanahalli | e7a510f | 2005-11-07 01:00:12 -0800 | [diff] [blame] | 1143 | *regs = kcb->jprobe_saved_regs; |
Dmitry Vyukov | 9254139 | 2016-10-11 14:13:38 +0200 | [diff] [blame] | 1144 | __memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp)); |
Ananth N Mavinakayanahalli | d217d54 | 2005-11-07 01:00:14 -0800 | [diff] [blame] | 1145 | preempt_enable_no_resched(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1146 | return 1; |
| 1147 | } |
| 1148 | return 0; |
| 1149 | } |
Masami Hiramatsu | 9326638 | 2014-04-17 17:18:14 +0900 | [diff] [blame] | 1150 | NOKPROBE_SYMBOL(longjmp_break_handler); |
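/*
 * Illustrative sketch (not part of this file): how the jprobe entry/exit
 * path implemented above is used.  The entry handler mirrors the probed
 * function's signature and must end in jprobe_return(); all names and the
 * probed function's signature are made up for the example.
 *
 *	Suppose the probed function is:  long my_target(int a, int b);
 *
 *	static long my_jprobe_entry(int a, int b)
 *	{
 *		pr_info("my_target(%d, %d) called\n", a, b);
 *		jprobe_return();	control never falls past this call
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry		= my_jprobe_entry,
 *		.kp.symbol_name	= "my_target",
 *	};
 *
 * register_jprobe(&my_jprobe) is then called from module init and
 * unregister_jprobe(&my_jprobe) from module exit.
 */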
Rusty Lynch | ba8af12 | 2005-06-27 15:17:10 -0700 | [diff] [blame] | 1151 | |
Masami Hiramatsu | be8f274 | 2014-04-17 17:16:58 +0900 | [diff] [blame] | 1152 | bool arch_within_kprobe_blacklist(unsigned long addr) |
| 1153 | { |
| 1154 | return (addr >= (unsigned long)__kprobes_text_start && |
| 1155 | addr < (unsigned long)__kprobes_text_end) || |
| 1156 | (addr >= (unsigned long)__entry_text_start && |
| 1157 | addr < (unsigned long)__entry_text_end); |
| 1158 | } |
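/*
 * The first range checked above is the .kprobes.text section; a function is
 * placed there (and thus made unprobeable) with the __kprobes annotation,
 * e.g. (illustrative):
 *
 *	static int __kprobes my_helper(void)
 *	{
 *		return 0;
 *	}
 *
 * NOKPROBE_SYMBOL(), used throughout this file, achieves the same effect
 * through the generic blacklist maintained by kernel/kprobes.c.
 */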
| 1159 | |
Rusty Lynch | 6772926 | 2005-07-05 18:54:50 -0700 | [diff] [blame] | 1160 | int __init arch_init_kprobes(void) |
Rusty Lynch | ba8af12 | 2005-06-27 15:17:10 -0700 | [diff] [blame] | 1161 | { |
Masami Hiramatsu | a7b0133 | 2013-07-18 20:47:50 +0900 | [diff] [blame] | 1162 | return 0; |
Rusty Lynch | ba8af12 | 2005-06-27 15:17:10 -0700 | [diff] [blame] | 1163 | } |
Ananth N Mavinakayanahalli | bf8f6e5b | 2007-05-08 00:34:16 -0700 | [diff] [blame] | 1164 | |
Masami Hiramatsu | 7ec8a97 | 2014-04-17 17:17:47 +0900 | [diff] [blame] | 1165 | int arch_trampoline_kprobe(struct kprobe *p) |
Ananth N Mavinakayanahalli | bf8f6e5b | 2007-05-08 00:34:16 -0700 | [diff] [blame] | 1166 | { |
Ananth N Mavinakayanahalli | bf8f6e5b | 2007-05-08 00:34:16 -0700 | [diff] [blame] | 1167 | return 0; |
| 1168 | } |