// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>
#include <linux/swap.h>		/* folio_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
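
/*
 * For example, on an arch with 4 KiB pages and 128-byte XOL slots (the
 * value x86 uses for UPROBE_XOL_SLOT_BYTES), this works out to
 * 4096 / 128 = 32 single-step slots per xol_area page.
 */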

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * Allows us to skip uprobe_mmap() if there are no uprobe events active
 * at this time.  Probably a fine-grained per-inode count would be better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_RWLOCK(uprobes_treelock);	/* serialize rbtree access */
static seqcount_rwlock_t uprobes_seqcount = SEQCNT_RWLOCK_ZERO(uprobes_seqcount, &uprobes_treelock);

DEFINE_STATIC_SRCU(uprobes_srcu);

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

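/*
 * Serializes breakpoint installation/removal against dup_mmap(): fork
 * takes this side for reading around its page-table copy, while
 * register_for_each_vma() takes it for writing, so a child cannot
 * inherit a half-updated set of breakpoints.
 */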
DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	refcount_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct list_head	consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	struct rcu_head		rcu;
	loff_t			offset;
	loff_t			ref_ctr_offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 *	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

struct delayed_uprobe {
	struct list_head list;
	struct uprobe *uprobe;
	struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, the thread contends for a slot.  It frees the
 * slot after singlestepping.  Currently a fixed number of slots is
 * allocated.
 */
struct xol_area {
	wait_queue_head_t	wq;		/* if all slots are busy */
	atomic_t		slot_count;	/* number of in-use slots */
	unsigned long		*bitmap;	/* 0 = free slot */

	struct page		*page;
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long		vaddr;		/* Page(s) of instruction slots */
};

static void uprobe_warn(struct task_struct *t, const char *msg)
{
	pr_warn("uprobe: %s:%d failed to %s\n", current->comm, current->pid, msg);
}

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after the breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return true if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}

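/*
 * Worked example for the two helpers above: a vma with vm_pgoff == 0x10
 * (file offset 0x10000 with 4 KiB pages) mapped at vm_start ==
 * 0x7f0000010000 puts file offset 0x10123 at vaddr 0x7f0000010000 +
 * 0x10123 - 0x10000 == 0x7f0000010123; vaddr_to_offset() is the exact
 * inverse.
 */
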
/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @old_page: the page we are replacing with @new_page
 * @new_page: the modified page that replaces @old_page
 *
 * If @new_page is NULL, only unmap @old_page.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct folio *old_folio = page_folio(old_page);
	struct folio *new_folio;
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
	int err;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + PAGE_SIZE);

	if (new_page) {
		new_folio = page_folio(new_page);
		err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
		if (err)
			return err;
	}

	/* For folio_free_swap() below */
	folio_lock(old_folio);

	mmu_notifier_invalidate_range_start(&range);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw))
		goto unlock;
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	if (new_page) {
		folio_get(new_folio);
		folio_add_new_anon_rmap(new_folio, vma, addr, RMAP_EXCLUSIVE);
		folio_add_lru_vma(new_folio, vma);
	} else
		/* no new page, just dec_mm_counter for old_page */
		dec_mm_counter(mm, MM_ANONPAGES);

	if (!folio_test_anon(old_folio)) {
		dec_mm_counter(mm, mm_counter_file(old_folio));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
	ptep_clear_flush(vma, addr, pvmw.pte);
	if (new_page)
		set_pte_at(mm, addr, pvmw.pte,
			   mk_pte(new_page, vma->vm_page_prot));

	folio_remove_rmap_pte(old_folio, old_page, vma);
	if (!folio_mapped(old_folio))
		folio_free_swap(old_folio);
	page_vma_mapped_walk_done(&pvmw);
	folio_put(old_folio);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(&range);
	folio_unlock(old_folio);
	return err;
}

/**
 * is_swbp_insn - check if the instruction is a breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if the instruction is a trap variant.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a trap instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

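/*
 * Answers "does the byte at @vaddr still need to change?": returns 1 when
 * uprobe_write_opcode() should go ahead and write @new_opcode, and 0 when
 * the page already holds the desired state (breakpoint already installed
 * on register, or already removed on unregister).
 */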
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be a conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	list_for_each_entry(du, &delayed_uprobe_list, list)
		if (du->uprobe == uprobe && du->mm == mm)
			return du;
	return NULL;
}

static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	if (delayed_uprobe_check(uprobe, mm))
		return 0;

	du = kzalloc(sizeof(*du), GFP_KERNEL);
	if (!du)
		return -ENOMEM;

	du->uprobe = uprobe;
	du->mm = mm;
	list_add(&du->list, &delayed_uprobe_list);
	return 0;
}

static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
	if (WARN_ON(!du))
		return;
	list_del(&du->list);
	kfree(du);
}

static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;

	if (!uprobe && !mm)
		return;

	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (uprobe && du->uprobe != uprobe)
			continue;
		if (mm && du->mm != mm)
			continue;

		delayed_uprobe_delete(du);
	}
}

static bool valid_ref_ctr_vma(struct uprobe *uprobe,
			      struct vm_area_struct *vma)
{
	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);

	return uprobe->ref_ctr_offset &&
		vma->vm_file &&
		file_inode(vma->vm_file) == uprobe->inode &&
		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
		vma->vm_start <= vaddr &&
		vma->vm_end > vaddr;
}

static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *tmp;

	for_each_vma(vmi, tmp)
		if (valid_ref_ctr_vma(uprobe, tmp))
			return tmp;

	return NULL;
}

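/*
 * The short at @vaddr typically backs an SDT semaphore in the probed
 * process: user code tests it before setting up tracepoint arguments, so
 * it is incremented (d == 1) when a probe attaches and decremented
 * (d == -1) on detach.
 */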
static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
	void *kaddr;
	struct page *page;
	int ret;
	short *ptr;

	if (!vaddr || !d)
		return -EINVAL;

	ret = get_user_pages_remote(mm, vaddr, 1,
				    FOLL_WRITE, &page, NULL);
	if (unlikely(ret <= 0)) {
		/*
		 * We are asking for 1 page. If get_user_pages_remote() fails,
		 * it may return 0, in which case we have to return an error.
		 */
		return ret == 0 ? -EBUSY : ret;
	}

	kaddr = kmap_atomic(page);
	ptr = kaddr + (vaddr & ~PAGE_MASK);

	if (unlikely(*ptr + d < 0)) {
		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
		ret = -EINVAL;
		goto out;
	}

	*ptr += d;
	ret = 0;
out:
	kunmap_atomic(kaddr);
	put_page(page);
	return ret;
}

static void update_ref_ctr_warn(struct uprobe *uprobe,
				struct mm_struct *mm, short d)
{
	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
		(unsigned long long) uprobe->offset,
		(unsigned long long) uprobe->ref_ctr_offset, mm);
}

static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
			  short d)
{
	struct vm_area_struct *rc_vma;
	unsigned long rc_vaddr;
	int ret = 0;

	rc_vma = find_ref_ctr_vma(uprobe, mm);

	if (rc_vma) {
		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(mm, rc_vaddr, d);
		if (ret)
			update_ref_ctr_warn(uprobe, mm, d);

		if (d > 0)
			return ret;
	}

	mutex_lock(&delayed_uprobe_lock);
	if (d > 0)
		ret = delayed_uprobe_add(uprobe, mm);
	else
		delayed_uprobe_remove(uprobe, mm);
	mutex_unlock(&delayed_uprobe_lock);

	return ret;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instructions and the
 * breakpoint instruction is not the smallest length instruction
 * supported by that architecture, then we need to modify is_trap_at_addr and
 * uprobe_write_opcode accordingly. This would never be a problem for archs
 * that have fixed length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_lock held for read or write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct uprobe *uprobe;
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret, is_register, ref_ctr_updated = 0;
	bool orig_page_huge = false;
	unsigned int gup_flags = FOLL_FORCE;

	is_register = is_swbp_insn(&opcode);
	uprobe = container_of(auprobe, struct uprobe, arch);

retry:
	if (is_register)
		gup_flags |= FOLL_SPLIT_PMD;
	/* Read the page with vaddr into memory */
	old_page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma);
	if (IS_ERR(old_page))
		return PTR_ERR(old_page);

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	if (WARN(!is_register && PageCompound(old_page),
		 "uprobe unregister should never work on compound page\n")) {
		ret = -EINVAL;
		goto put_old;
	}

	/* We are going to replace the instruction, update ref_ctr. */
	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
		if (ret)
			goto put_old;

		ref_ctr_updated = 1;
	}

	ret = 0;
	if (!is_register && !PageAnon(old_page))
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	if (!is_register) {
		struct page *orig_page;
		pgoff_t index;

		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);

		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
					  index);

		if (orig_page) {
			if (PageUptodate(orig_page) &&
			    pages_identical(new_page, orig_page)) {
				/* let go of new_page */
				put_page(new_page);
				new_page = NULL;

				if (PageCompound(orig_page))
					orig_page_huge = true;
			}
			put_page(orig_page);
		}
	}

	ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page);
	if (new_page)
		put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;

	/* Roll back the reference counter if the instruction update failed. */
	if (ret && is_register && ref_ctr_updated)
		update_ref_ctr(uprobe, mm, -1);

	/* try collapse pmd for compound page */
	if (!ret && orig_page_huge)
		collapse_pte_mapped_thp(mm, vaddr, false);

	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr,
			*(uprobe_opcode_t *)&auprobe->insn);
}

/* uprobe should have guaranteed positive refcount */
static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	refcount_inc(&uprobe->ref);
	return uprobe;
}

/*
 * uprobe should have guaranteed lifetime, which can be either of:
 *   - caller already has refcount taken (and wants an extra one);
 *   - uprobe is RCU protected and won't be freed until after grace period;
 *   - we are holding uprobes_treelock (for read or write, doesn't matter).
 */
static struct uprobe *try_get_uprobe(struct uprobe *uprobe)
{
	if (refcount_inc_not_zero(&uprobe->ref))
		return uprobe;
	return NULL;
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}

622
Andrii Nakryiko86174082024-09-03 10:45:57 -0700623static void uprobe_free_rcu(struct rcu_head *rcu)
624{
625 struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);
626
627 kfree(uprobe);
628}
629
Oleg Nesterovf2317222015-07-21 15:40:03 +0200630static void put_uprobe(struct uprobe *uprobe)
631{
Andrii Nakryiko3f7f1a62024-09-03 10:45:56 -0700632 if (!refcount_dec_and_test(&uprobe->ref))
633 return;
634
635 write_lock(&uprobes_treelock);
636
Andrii Nakryikocd7bdd92024-09-03 10:46:02 -0700637 if (uprobe_is_active(uprobe)) {
638 write_seqcount_begin(&uprobes_seqcount);
Andrii Nakryiko3f7f1a62024-09-03 10:45:56 -0700639 rb_erase(&uprobe->rb_node, &uprobes_tree);
Andrii Nakryikocd7bdd92024-09-03 10:46:02 -0700640 write_seqcount_end(&uprobes_seqcount);
641 }
Andrii Nakryiko3f7f1a62024-09-03 10:45:56 -0700642
643 write_unlock(&uprobes_treelock);
644
645 /*
646 * If application munmap(exec_vma) before uprobe_unregister()
647 * gets called, we don't get a chance to remove uprobe from
648 * delayed_uprobe_list from remove_breakpoint(). Do it here.
649 */
650 mutex_lock(&delayed_uprobe_lock);
651 delayed_uprobe_remove(uprobe, NULL);
652 mutex_unlock(&delayed_uprobe_lock);
653
Andrii Nakryiko86174082024-09-03 10:45:57 -0700654 call_srcu(&uprobes_srcu, &uprobe->rcu, uprobe_free_rcu);
Oleg Nesterovf2317222015-07-21 15:40:03 +0200655}
656
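/*
 * Uprobes are ordered by (inode, offset); the helpers below provide this
 * lexicographic comparison for both key-based lookups and rb-tree
 * insertion.
 */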
static __always_inline
int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
	       const struct uprobe *r)
{
	if (l_inode < r->inode)
		return -1;

	if (l_inode > r->inode)
		return 1;

	if (l_offset < r->offset)
		return -1;

	if (l_offset > r->offset)
		return 1;

	return 0;
}

#define __node_2_uprobe(node) \
	rb_entry((node), struct uprobe, rb_node)

struct __uprobe_key {
	struct inode *inode;
	loff_t offset;
};

static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
{
	const struct __uprobe_key *a = key;
	return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
}

static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
{
	struct uprobe *u = __node_2_uprobe(a);
	return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}

/*
 * Assumes being inside RCU protected region.
 * No refcount is taken on returned uprobe.
 */
static struct uprobe *find_uprobe_rcu(struct inode *inode, loff_t offset)
{
	struct __uprobe_key key = {
		.inode = inode,
		.offset = offset,
	};
	struct rb_node *node;
	unsigned int seq;

	lockdep_assert(srcu_read_lock_held(&uprobes_srcu));

	do {
		seq = read_seqcount_begin(&uprobes_seqcount);
		node = rb_find_rcu(&key, &uprobes_tree, __uprobe_cmp_key);
		/*
		 * Lockless RB-tree lookups can result only in false negatives.
		 * If the element is found, it is correct and can be returned
		 * under RCU protection. If we find nothing, we need to
		 * validate that seqcount didn't change. If it did, we have to
		 * try again as we might have missed the element (false
		 * negative). If seqcount is unchanged, search truly failed.
		 */
		if (node)
			return __node_2_uprobe(node);
	} while (read_seqcount_retry(&uprobes_seqcount, seq));

	return NULL;
}

/*
 * Attempt to insert a new uprobe into uprobes_tree.
 *
 * If uprobe already exists (for given inode+offset), we just increment
 * refcount of previously existing uprobe.
 *
 * If not, a provided new instance of uprobe is inserted into the tree (with
 * assumed initial refcount == 1).
 *
 * In any case, we return a uprobe instance that ends up being in uprobes_tree.
 * Caller has to clean up new uprobe instance, if it ended up not being
 * inserted into the tree.
 *
 * We assume that uprobes_treelock is held for writing.
 */
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node *node;
again:
	node = rb_find_add_rcu(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
	if (node) {
		struct uprobe *u = __node_2_uprobe(node);

		if (!try_get_uprobe(u)) {
			rb_erase(node, &uprobes_tree);
			RB_CLEAR_NODE(&u->rb_node);
			goto again;
		}

		return u;
	}

	return uprobe;
}

/*
 * Acquire uprobes_treelock and insert uprobe into uprobes_tree
 * (or reuse existing one, see __insert_uprobe() comments above).
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	write_lock(&uprobes_treelock);
	write_seqcount_begin(&uprobes_seqcount);
	u = __insert_uprobe(uprobe);
	write_seqcount_end(&uprobes_seqcount);
	write_unlock(&uprobes_treelock);

	return u;
}

static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
		(unsigned long long) cur_uprobe->ref_ctr_offset,
		(unsigned long long) uprobe->ref_ctr_offset);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
				   loff_t ref_ctr_offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return ERR_PTR(-ENOMEM);

	uprobe->inode = inode;
	uprobe->offset = offset;
	uprobe->ref_ctr_offset = ref_ctr_offset;
	INIT_LIST_HEAD(&uprobe->consumers);
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);
	RB_CLEAR_NODE(&uprobe->rb_node);
	refcount_set(&uprobe->ref, 1);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe != uprobe) {
		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
			put_uprobe(cur_uprobe);
			kfree(uprobe);
			return ERR_PTR(-EINVAL);
		}
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	list_add_rcu(&uc->cons_node, &uprobe->consumers);
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Should never be called with a consumer that's not part of @uprobe->consumers.
 */
static void consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	list_del_rcu(&uc->cons_node);
	up_write(&uprobe->consumer_rwsem);
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->read_folio)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, mm);
}

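/* Returns true if at least one of @uprobe's consumers is interested in @mm. */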
static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
				 srcu_read_lock_held(&uprobes_srcu)) {
		ret = consumer_filter(uc, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

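/*
 * One (mm, vaddr) pair per mm that currently maps the probed page; the
 * entries are chained into a singly-linked list by build_map_info().
 */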
struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

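/*
 * Two-pass allocation: under i_mmap_rwsem only GFP_NOWAIT allocations are
 * safe (reclaim could recurse into i_mmap_rwsem), so vmas we could not
 * allocate a map_info for are merely counted in "more"; the lock is then
 * dropped, the shortfall is allocated with GFP_KERNEL, and the walk is
 * restarted.
 */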
static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!mmget_not_zero(vma->vm_mm))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}

static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;
		/*
		 * We take mmap_lock for writing to avoid the race with
		 * find_active_uprobe_rcu() which takes mmap_lock for reading.
		 * Thus this install_breakpoint() can not make
		 * is_trap_at_addr() true right after find_uprobe_rcu()
		 * returns NULL in find_active_uprobe_rcu().
		 */
		mmap_write_lock(mm);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

 unlock:
		mmap_write_unlock(mm);
 free:
		mmput(mm);
		info = free_map_info(info);
	}
 out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

/**
 * uprobe_unregister_nosync - unregister an already registered probe.
 * @uprobe: uprobe to remove
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	down_write(&uprobe->register_rwsem);
	consumer_del(uprobe, uc);
	err = register_for_each_vma(uprobe, NULL);
	up_write(&uprobe->register_rwsem);

	/* TODO: can't unregister? schedule a worker thread */
	if (unlikely(err)) {
		uprobe_warn(current, "unregister, leaking uprobe");
		return;
	}

	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_nosync);

void uprobe_unregister_sync(void)
{
	/*
	 * Now that handler_chain() and handle_uretprobe_chain() iterate over
	 * uprobe->consumers list under RCU protection without holding
	 * uprobe->register_rwsem, we need to wait for RCU grace period to
	 * make sure that we can't call into just unregistered
	 * uprobe_consumer's callbacks anymore. If we don't do that, a fast
	 * and unlucky enough caller can free the consumer's memory and cause
	 * handler_chain() or handle_uretprobe_chain() to do a use-after-free.
	 */
	synchronize_srcu(&uprobes_srcu);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_sync);

Oleg Nesterov3c83a9a2024-08-01 15:27:34 +02001163/**
Oleg Nesterove04332e2024-08-01 15:27:28 +02001164 * uprobe_register - register a probe
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301165 * @inode: the file in which the probe has to be placed.
1166 * @offset: offset from the start of the file.
Oleg Nesterove04332e2024-08-01 15:27:28 +02001167 * @ref_ctr_offset: offset of SDT marker / reference counter
Srikar Dronamrajue3343e62012-03-12 14:55:30 +05301168 * @uc: information on howto handle the probe..
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301169 *
Oleg Nesterove04332e2024-08-01 15:27:28 +02001170 * Apart from the access refcount, uprobe_register() takes a creation
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301171 * refcount (thro alloc_uprobe) if and only if this @uprobe is getting
1172 * inserted into the rbtree (i.e first consumer for a @inode:@offset
Ingo Molnar7b2d81d2012-02-17 09:27:41 +01001173 * tuple). Creation refcount stops uprobe_unregister from freeing the
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301174 * @uprobe even before the register operation is complete. Creation
Srikar Dronamrajue3343e62012-03-12 14:55:30 +05301175 * refcount is released when the last @uc for the @uprobe
Oleg Nesterove04332e2024-08-01 15:27:28 +02001176 * unregisters. Caller of uprobe_register() is required to keep @inode
Song Liu61f94202018-04-23 10:21:35 -07001177 * (and the containing mount) referenced.
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301178 *
Oleg Nesterov3c83a9a2024-08-01 15:27:34 +02001179 * Return: pointer to the new uprobe on success or an ERR_PTR on failure.
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301180 */
Oleg Nesterov3c83a9a2024-08-01 15:27:34 +02001181struct uprobe *uprobe_register(struct inode *inode,
1182 loff_t offset, loff_t ref_ctr_offset,
1183 struct uprobe_consumer *uc)
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301184{
1185 struct uprobe *uprobe;
Ingo Molnar7b2d81d2012-02-17 09:27:41 +01001186 int ret;
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301187
Anton Arapovea024872013-04-03 18:00:31 +02001188 /* Uprobe must have at least one handler (handler or ret_handler) set */
1189 if (!uc->handler && !uc->ret_handler)
Oleg Nesterov3c83a9a2024-08-01 15:27:34 +02001190 return ERR_PTR(-EINVAL);
Anton Arapovea024872013-04-03 18:00:31 +02001191
Oleg Nesterov40814f62014-05-19 20:41:36 +02001192 /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
Matthew Wilcox (Oracle)5efe7442022-04-29 08:43:23 -04001193 if (!inode->i_mapping->a_ops->read_folio &&
Matthew Wilcox (Oracle)5efe7442022-04-29 08:43:23 -04001194 !shmem_mapping(inode->i_mapping))
Oleg Nesterov3c83a9a2024-08-01 15:27:34 +02001195 return ERR_PTR(-EIO);
Oleg Nesterovf0744af2012-11-21 18:01:43 +01001196 /* Racy, just to catch the obvious mistakes */
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301197 if (offset > i_size_read(inode))
Oleg Nesterov3c83a9a2024-08-01 15:27:34 +02001198 return ERR_PTR(-EINVAL);
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301199
Oleg Nesterov013b2de2020-05-04 18:47:25 +02001200 /*
1201 * This ensures that copy_from_page(), copy_to_page() and
1202 * __update_ref_ctr() can't cross page boundary.
1203 */
1204 if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
Oleg Nesterov3c83a9a2024-08-01 15:27:34 +02001205 return ERR_PTR(-EINVAL);
Oleg Nesterov013b2de2020-05-04 18:47:25 +02001206 if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
Oleg Nesterov3c83a9a2024-08-01 15:27:34 +02001207 return ERR_PTR(-EINVAL);
Oleg Nesterov013b2de2020-05-04 18:47:25 +02001208
Ravi Bangoria1cc33162018-08-20 10:12:47 +05301209 uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
Ravi Bangoria22bad382018-08-20 10:12:48 +05301210 if (IS_ERR(uprobe))
Oleg Nesterov3c83a9a2024-08-01 15:27:34 +02001211 return uprobe;
Ravi Bangoria22bad382018-08-20 10:12:48 +05301212
Oleg Nesterov66d06df2012-11-25 22:48:37 +01001213 down_write(&uprobe->register_rwsem);
Andrii Nakryiko3f7f1a62024-09-03 10:45:56 -07001214 consumer_add(uprobe, uc);
1215 ret = register_for_each_vma(uprobe, uc);
Oleg Nesterov66d06df2012-11-25 22:48:37 +01001216 up_write(&uprobe->register_rwsem);
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301217
Oleg Nesterovbb18c5d2024-08-01 15:27:39 +02001218 if (ret) {
Peter Zijlstra04b01622024-09-03 10:46:00 -07001219 uprobe_unregister_nosync(uprobe, uc);
1220 /*
1221 * Registration might have partially succeeded, so we can have
1222 * this consumer being called right at this time. We need to
1223 * sync here. It's ok, it's unlikely slow path.
1224 */
1225 uprobe_unregister_sync();
Oleg Nesterovbb18c5d2024-08-01 15:27:39 +02001226 return ERR_PTR(ret);
1227 }
Oleg Nesterov3c83a9a2024-08-01 15:27:34 +02001228
Oleg Nesterovbb18c5d2024-08-01 15:27:39 +02001229 return uprobe;
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301230}
Josh Stonee8440c12013-01-13 19:03:34 +01001231EXPORT_SYMBOL_GPL(uprobe_register);
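
/*
 * Example (a minimal sketch; all names are hypothetical): registering a
 * consumer whose handler keeps the breakpoint installed. The caller must
 * already hold references on @inode and its containing mount:
 *
 *	static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		return 0;	// no UPROBE_HANDLER_REMOVE, keep the probe
 *	}
 *
 *	static struct uprobe_consumer my_consumer = { .handler = my_handler };
 *
 *	struct uprobe *u = uprobe_register(inode, offset, 0, &my_consumer);
 *	if (IS_ERR(u))
 *		return PTR_ERR(u);
 */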
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301232
Oleg Nesterov3c83a9a2024-08-01 15:27:34 +02001233/**
1234 * uprobe_apply - add or remove the breakpoints according to @uc->filter
1235 * @uprobe: uprobe which "owns" the breakpoint
Oleg Nesterovbdf86472013-02-03 19:21:12 +01001236 * @uc: consumer which wants to add more or remove some breakpoints
1237 * @add: add or remove the breakpoints
Oleg Nesterov3c83a9a2024-08-01 15:27:34 +02001238 * Return: 0 on success or negative error code.
Oleg Nesterovbdf86472013-02-03 19:21:12 +01001239 */
Oleg Nesterov3c83a9a2024-08-01 15:27:34 +02001240int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add)
Oleg Nesterovbdf86472013-02-03 19:21:12 +01001241{
Oleg Nesterovbdf86472013-02-03 19:21:12 +01001242 struct uprobe_consumer *con;
Andrii Nakryikocc01bd02024-09-03 10:45:59 -07001243 int ret = -ENOENT, srcu_idx;
Oleg Nesterovbdf86472013-02-03 19:21:12 +01001244
Oleg Nesterovbdf86472013-02-03 19:21:12 +01001245 down_write(&uprobe->register_rwsem);
Andrii Nakryikocc01bd02024-09-03 10:45:59 -07001246
1247 srcu_idx = srcu_read_lock(&uprobes_srcu);
1248 list_for_each_entry_srcu(con, &uprobe->consumers, cons_node,
1249 srcu_read_lock_held(&uprobes_srcu)) {
1250 if (con == uc) {
1251 ret = register_for_each_vma(uprobe, add ? uc : NULL);
1252 break;
1253 }
1254 }
1255 srcu_read_unlock(&uprobes_srcu, srcu_idx);
1256
Oleg Nesterovbdf86472013-02-03 19:21:12 +01001257 up_write(&uprobe->register_rwsem);
Oleg Nesterovbdf86472013-02-03 19:21:12 +01001258
1259 return ret;
1260}
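
/*
 * Example (a sketch): after @uc->filter has changed, a caller such as the
 * trace/perf layer re-evaluates which mms should carry the breakpoint:
 *
 *	err = uprobe_apply(uprobe, uc, true);	// install where @uc now matches
 */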
1261
Oleg Nesterovda1816b2012-12-29 17:49:11 +01001262static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
1263{
Matthew Wilcox (Oracle)fcb72a52022-09-06 19:48:58 +00001264 VMA_ITERATOR(vmi, mm, 0);
Oleg Nesterovda1816b2012-12-29 17:49:11 +01001265 struct vm_area_struct *vma;
1266 int err = 0;
1267
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001268 mmap_read_lock(mm);
Matthew Wilcox (Oracle)fcb72a52022-09-06 19:48:58 +00001269 for_each_vma(vmi, vma) {
Oleg Nesterovda1816b2012-12-29 17:49:11 +01001270 unsigned long vaddr;
1271 loff_t offset;
1272
1273 if (!valid_vma(vma, false) ||
Oleg Nesterovf2817692013-03-17 18:54:44 +01001274 file_inode(vma->vm_file) != uprobe->inode)
Oleg Nesterovda1816b2012-12-29 17:49:11 +01001275 continue;
1276
1277 offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1278 if (uprobe->offset < offset ||
1279 uprobe->offset >= offset + vma->vm_end - vma->vm_start)
1280 continue;
1281
1282 vaddr = offset_to_vaddr(vma, uprobe->offset);
1283 err |= remove_breakpoint(uprobe, mm, vaddr);
1284 }
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001285 mmap_read_unlock(mm);
Oleg Nesterovda1816b2012-12-29 17:49:11 +01001286
1287 return err;
1288}
1289
Oleg Nesterov891c3972012-07-29 20:22:40 +02001290static struct rb_node *
1291find_node_in_range(struct inode *inode, loff_t min, loff_t max)
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301292{
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301293 struct rb_node *n = uprobes_tree.rb_node;
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301294
1295 while (n) {
Oleg Nesterov891c3972012-07-29 20:22:40 +02001296 struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
Ingo Molnar7b2d81d2012-02-17 09:27:41 +01001297
Oleg Nesterov891c3972012-07-29 20:22:40 +02001298 if (inode < u->inode) {
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301299 n = n->rb_left;
Oleg Nesterov891c3972012-07-29 20:22:40 +02001300 } else if (inode > u->inode) {
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301301 n = n->rb_right;
Oleg Nesterov891c3972012-07-29 20:22:40 +02001302 } else {
1303 if (max < u->offset)
1304 n = n->rb_left;
1305 else if (min > u->offset)
1306 n = n->rb_right;
1307 else
1308 break;
1309 }
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301310 }
Ingo Molnar7b2d81d2012-02-17 09:27:41 +01001311
Oleg Nesterov891c3972012-07-29 20:22:40 +02001312 return n;
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301313}
1314
1315/*
Oleg Nesterov891c3972012-07-29 20:22:40 +02001316 * For a given range in vma, build a list of probes that need to be inserted.
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301317 */
Oleg Nesterov891c3972012-07-29 20:22:40 +02001318static void build_probe_list(struct inode *inode,
1319 struct vm_area_struct *vma,
1320 unsigned long start, unsigned long end,
1321 struct list_head *head)
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301322{
Oleg Nesterov891c3972012-07-29 20:22:40 +02001323 loff_t min, max;
Oleg Nesterov891c3972012-07-29 20:22:40 +02001324 struct rb_node *n, *t;
1325 struct uprobe *u;
1326
1327 INIT_LIST_HEAD(head);
Oleg Nesterovcb113b42012-07-29 20:22:42 +02001328 min = vaddr_to_offset(vma, start);
Oleg Nesterov891c3972012-07-29 20:22:40 +02001329 max = min + (end - start) - 1;
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301330
Jonathan Haslam0dc71522024-04-22 03:23:05 -07001331 read_lock(&uprobes_treelock);
Oleg Nesterov891c3972012-07-29 20:22:40 +02001332 n = find_node_in_range(inode, min, max);
1333 if (n) {
1334 for (t = n; t; t = rb_prev(t)) {
1335 u = rb_entry(t, struct uprobe, rb_node);
1336 if (u->inode != inode || u->offset < min)
1337 break;
Andrii Nakryiko3f7f1a62024-09-03 10:45:56 -07001338 /* if uprobe went away, it's safe to ignore it */
1339 if (try_get_uprobe(u))
1340 list_add(&u->pending_list, head);
Oleg Nesterov891c3972012-07-29 20:22:40 +02001341 }
1342 for (t = n; (t = rb_next(t)); ) {
1343 u = rb_entry(t, struct uprobe, rb_node);
1344 if (u->inode != inode || u->offset > max)
1345 break;
Andrii Nakryiko3f7f1a62024-09-03 10:45:56 -07001346 /* if uprobe went away, it's safe to ignore it */
1347 if (try_get_uprobe(u))
1348 list_add(&u->pending_list, head);
Oleg Nesterov891c3972012-07-29 20:22:40 +02001349 }
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301350 }
Jonathan Haslam0dc71522024-04-22 03:23:05 -07001351 read_unlock(&uprobes_treelock);
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301352}
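
/*
 * Worked example (illustrative numbers): with probes at file offsets
 * 0x100, 0x200 and 0x300 of @inode, and a mapped range translating to
 * offsets [0x180, 0x2ff], find_node_in_range() lands on a node inside
 * the range (0x200 here) and the rb_prev()/rb_next() walks above
 * collect exactly that one probe into @head.
 */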
1353
Ravi Bangoria1cc33162018-08-20 10:12:47 +05301354/* @vma contains reference counter, not the probed instruction. */
1355static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
1356{
1357 struct list_head *pos, *q;
1358 struct delayed_uprobe *du;
1359 unsigned long vaddr;
1360 int ret = 0, err = 0;
1361
1362 mutex_lock(&delayed_uprobe_lock);
1363 list_for_each_safe(pos, q, &delayed_uprobe_list) {
1364 du = list_entry(pos, struct delayed_uprobe, list);
1365
1366 if (du->mm != vma->vm_mm ||
1367 !valid_ref_ctr_vma(du->uprobe, vma))
1368 continue;
1369
1370 vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
1371 ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
1372 if (ret) {
1373 update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
1374 if (!err)
1375 err = ret;
1376 }
1377 delayed_uprobe_delete(du);
1378 }
1379 mutex_unlock(&delayed_uprobe_lock);
1380 return err;
1381}
1382
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301383/*
Liam R. Howlett0503ea82023-01-20 11:26:49 -05001384 * Called from mmap_region/vma_merge with mm->mmap_lock acquired.
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301385 *
Oleg Nesterov5e5be712012-08-06 14:49:56 +02001386 * Currently we ignore all errors and always return 0; the callers
1387 * can't handle the failure anyway.
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301388 */
Ingo Molnar7b2d81d2012-02-17 09:27:41 +01001389int uprobe_mmap(struct vm_area_struct *vma)
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301390{
1391 struct list_head tmp_list;
Oleg Nesterov665605a2012-07-29 20:22:29 +02001392 struct uprobe *uprobe, *u;
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301393 struct inode *inode;
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301394
Ravi Bangoria1cc33162018-08-20 10:12:47 +05301395 if (no_uprobe_events())
1396 return 0;
1397
1398 if (vma->vm_file &&
1399 (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
1400 test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
1401 delayed_ref_ctr_inc(vma);
1402
1403 if (!valid_vma(vma, true))
Ingo Molnar7b2d81d2012-02-17 09:27:41 +01001404 return 0;
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301405
Oleg Nesterovf2817692013-03-17 18:54:44 +01001406 inode = file_inode(vma->vm_file);
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301407 if (!inode)
Ingo Molnar7b2d81d2012-02-17 09:27:41 +01001408 return 0;
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301409
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301410 mutex_lock(uprobes_mmap_hash(inode));
Oleg Nesterov891c3972012-07-29 20:22:40 +02001411 build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
Oleg Nesterov806a98b2012-12-27 18:21:11 +01001412 /*
1413 * We can race with uprobe_unregister(), so this uprobe can already be
1414 * removed. But in this case filter_chain() must return false; all
1415 * consumers have gone away.
1416 */
Oleg Nesterov665605a2012-07-29 20:22:29 +02001417 list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
Oleg Nesterov806a98b2012-12-27 18:21:11 +01001418 if (!fatal_signal_pending(current) &&
Andrii Nakryiko59da8802024-09-03 10:45:58 -07001419 filter_chain(uprobe, vma->vm_mm)) {
Oleg Nesterov57683f72012-07-29 20:22:47 +02001420 unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
Oleg Nesterov5e5be712012-08-06 14:49:56 +02001421 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301422 }
1423 put_uprobe(uprobe);
1424 }
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301425 mutex_unlock(uprobes_mmap_hash(inode));
1426
Oleg Nesterov5e5be712012-08-06 14:49:56 +02001427 return 0;
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301428}
1429
Oleg Nesterov9f68f6722012-08-19 16:15:09 +02001430static bool
1431vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1432{
1433 loff_t min, max;
1434 struct inode *inode;
1435 struct rb_node *n;
1436
Oleg Nesterovf2817692013-03-17 18:54:44 +01001437 inode = file_inode(vma->vm_file);
Oleg Nesterov9f68f6722012-08-19 16:15:09 +02001438
1439 min = vaddr_to_offset(vma, start);
1440 max = min + (end - start) - 1;
1441
Jonathan Haslam0dc71522024-04-22 03:23:05 -07001442 read_lock(&uprobes_treelock);
Oleg Nesterov9f68f6722012-08-19 16:15:09 +02001443 n = find_node_in_range(inode, min, max);
Jonathan Haslam0dc71522024-04-22 03:23:05 -07001444 read_unlock(&uprobes_treelock);
Oleg Nesterov9f68f6722012-08-19 16:15:09 +02001445
1446 return !!n;
1447}
1448
Srikar Dronamraju682968e2012-03-30 23:56:46 +05301449/*
1450 * Called in context of a munmap of a vma.
1451 */
Srikar Dronamrajucbc91f72012-04-11 16:05:27 +05301452void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
Srikar Dronamraju682968e2012-03-30 23:56:46 +05301453{
Oleg Nesterov441f1eb72012-11-25 19:54:29 +01001454 if (no_uprobe_events() || !valid_vma(vma, false))
Srikar Dronamraju682968e2012-03-30 23:56:46 +05301455 return;
1456
Oleg Nesterov2fd611a2012-07-29 20:22:31 +02001457 if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
1458 return;
1459
Oleg Nesterov9f68f6722012-08-19 16:15:09 +02001460 if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
1461 test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
Oleg Nesterovf8ac4ec2012-08-08 17:11:42 +02001462 return;
1463
Oleg Nesterov9f68f6722012-08-19 16:15:09 +02001464 if (vma_has_uprobes(vma, start, end))
1465 set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
Srikar Dronamraju682968e2012-03-30 23:56:46 +05301466}
1467
Oleg Nesterov6d27a312024-09-11 15:14:07 +02001468static vm_fault_t xol_fault(const struct vm_special_mapping *sm,
1469 struct vm_area_struct *vma, struct vm_fault *vmf)
1470{
1471 struct xol_area *area = vma->vm_mm->uprobes_state.xol_area;
1472
Oleg Nesterov2abbcc02024-09-11 15:14:37 +02001473 vmf->page = area->page;
Oleg Nesterov6d27a312024-09-11 15:14:07 +02001474 get_page(vmf->page);
1475 return 0;
1476}
1477
1478static const struct vm_special_mapping xol_mapping = {
1479 .name = "[uprobes]",
1480 .fault = xol_fault,
1481};
1482
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301483/* Slot allocation for XOL */
Oleg Nesterov6441ec82013-10-13 21:18:35 +02001484static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301485{
Oleg Nesterov704bde32015-07-21 15:40:33 +02001486 struct vm_area_struct *vma;
1487 int ret;
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301488
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001489 if (mmap_write_lock_killable(mm))
Michal Hocko598fdc12016-05-23 16:26:08 -07001490 return -EINTR;
1491
Oleg Nesterov704bde32015-07-21 15:40:33 +02001492 if (mm->uprobes_state.xol_area) {
1493 ret = -EALREADY;
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301494 goto fail;
Oleg Nesterov704bde32015-07-21 15:40:33 +02001495 }
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301496
Oleg Nesterovaf0d95a2013-10-13 21:18:38 +02001497 if (!area->vaddr) {
1498 /* Try to map as high as possible; this is only a hint. */
1499 area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
1500 PAGE_SIZE, 0, 0);
Gaowei Puff68dac2019-11-30 17:51:03 -08001501 if (IS_ERR_VALUE(area->vaddr)) {
Oleg Nesterovaf0d95a2013-10-13 21:18:38 +02001502 ret = area->vaddr;
1503 goto fail;
1504 }
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301505 }
1506
Oleg Nesterov704bde32015-07-21 15:40:33 +02001507 vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1508 VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
Oleg Nesterov6d27a312024-09-11 15:14:07 +02001509 &xol_mapping);
Oleg Nesterov704bde32015-07-21 15:40:33 +02001510 if (IS_ERR(vma)) {
1511 ret = PTR_ERR(vma);
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301512 goto fail;
Oleg Nesterov704bde32015-07-21 15:40:33 +02001513 }
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301514
Oleg Nesterov704bde32015-07-21 15:40:33 +02001515 ret = 0;
Paul E. McKenney5c6338b2017-10-09 11:08:53 -07001516 /* pairs with get_xol_area() */
1517 smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
Oleg Nesterovc8a82532012-12-30 17:40:39 +01001518 fail:
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001519 mmap_write_unlock(mm);
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301520
1521 return ret;
1522}
1523
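/*
 * The default trampoline below is a single breakpoint instruction: when
 * a uretprobed function returns into it, the trap re-enters handle_swbp(),
 * which recognizes the trampoline address and calls
 * uprobe_handle_trampoline(). Architectures can override this to provide
 * a real trampoline.
 */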
Jiri Olsaff474a782024-06-12 08:44:28 +09001524void * __weak arch_uprobe_trampoline(unsigned long *psize)
1525{
1526 static uprobe_opcode_t insn = UPROBE_SWBP_INSN;
1527
1528 *psize = UPROBE_SWBP_INSN_SIZE;
1529 return &insn;
1530}
1531
Oleg Nesterovaf0d95a2013-10-13 21:18:38 +02001532static struct xol_area *__create_xol_area(unsigned long vaddr)
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301533{
Oleg Nesterov9b545df2012-12-31 16:39:49 +01001534 struct mm_struct *mm = current->mm;
Jiri Olsaff474a782024-06-12 08:44:28 +09001535 unsigned long insns_size;
Oleg Nesterov6441ec82013-10-13 21:18:35 +02001536 struct xol_area *area;
Jiri Olsaff474a782024-06-12 08:44:28 +09001537 void *insns;
Oleg Nesterov9b545df2012-12-31 16:39:49 +01001538
Sven Schnellee240b0f2024-09-03 12:23:12 +02001539 area = kzalloc(sizeof(*area), GFP_KERNEL);
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301540 if (unlikely(!area))
Oleg Nesterovc8a82532012-12-30 17:40:39 +01001541 goto out;
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301542
Kees Cook6396bb22018-06-12 14:03:40 -07001543 area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
1544 GFP_KERNEL);
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301545 if (!area->bitmap)
Oleg Nesterovc8a82532012-12-30 17:40:39 +01001546 goto free_area;
1547
Oleg Nesterov34820302024-09-29 18:20:47 +02001548 area->page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
Oleg Nesterov2abbcc02024-09-11 15:14:37 +02001549 if (!area->page)
Oleg Nesterovc8a82532012-12-30 17:40:39 +01001550 goto free_bitmap;
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301551
Oleg Nesterovaf0d95a2013-10-13 21:18:38 +02001552 area->vaddr = vaddr;
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301553 init_waitqueue_head(&area->wq);
Oleg Nesterov6441ec82013-10-13 21:18:35 +02001554 /* Reserve the 1st slot for uprobe_get_trampoline_vaddr() */
1555 set_bit(0, area->bitmap);
1556 atomic_set(&area->slot_count, 1);
Jiri Olsaff474a782024-06-12 08:44:28 +09001557 insns = arch_uprobe_trampoline(&insns_size);
Oleg Nesterov2abbcc02024-09-11 15:14:37 +02001558 arch_uprobe_copy_ixol(area->page, 0, insns, insns_size);
Anton Arapove78aebf2013-04-03 18:00:32 +02001559
Oleg Nesterov6441ec82013-10-13 21:18:35 +02001560 if (!xol_add_vma(mm, area))
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301561 return area;
1562
Oleg Nesterov2abbcc02024-09-11 15:14:37 +02001563 __free_page(area->page);
Oleg Nesterovc8a82532012-12-30 17:40:39 +01001564 free_bitmap:
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301565 kfree(area->bitmap);
Oleg Nesterovc8a82532012-12-30 17:40:39 +01001566 free_area:
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301567 kfree(area);
Oleg Nesterovc8a82532012-12-30 17:40:39 +01001568 out:
Oleg Nesterov6441ec82013-10-13 21:18:35 +02001569 return NULL;
1570}
1571
1572/*
1573 * get_xol_area - Allocate process's xol_area if necessary.
1574 * This area will be used for storing instructions for execution out of line.
1575 *
1576 * Returns the allocated area or NULL.
1577 */
1578static struct xol_area *get_xol_area(void)
1579{
1580 struct mm_struct *mm = current->mm;
1581 struct xol_area *area;
1582
1583 if (!mm->uprobes_state.xol_area)
Oleg Nesterovaf0d95a2013-10-13 21:18:38 +02001584 __create_xol_area(0);
Oleg Nesterov6441ec82013-10-13 21:18:35 +02001585
Paul E. McKenney5c6338b2017-10-09 11:08:53 -07001586 /* Pairs with xol_add_vma() smp_store_release() */
1587 area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
Oleg Nesterov9b545df2012-12-31 16:39:49 +01001588 return area;
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301589}
1590
1591/*
1592 * uprobe_clear_state - Free the area allocated for slots.
1593 */
1594void uprobe_clear_state(struct mm_struct *mm)
1595{
1596 struct xol_area *area = mm->uprobes_state.xol_area;
1597
Ravi Bangoria1cc33162018-08-20 10:12:47 +05301598 mutex_lock(&delayed_uprobe_lock);
1599 delayed_uprobe_remove(NULL, mm);
1600 mutex_unlock(&delayed_uprobe_lock);
1601
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301602 if (!area)
1603 return;
1604
Oleg Nesterov2abbcc02024-09-11 15:14:37 +02001605 put_page(area->page);
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301606 kfree(area->bitmap);
1607 kfree(area);
1608}
1609
Oleg Nesterov32cdba12012-11-14 19:03:42 +01001610void uprobe_start_dup_mmap(void)
1611{
1612 percpu_down_read(&dup_mmap_sem);
1613}
1614
1615void uprobe_end_dup_mmap(void)
1616{
1617 percpu_up_read(&dup_mmap_sem);
1618}
1619
Oleg Nesterovf8ac4ec2012-08-08 17:11:42 +02001620void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
1621{
Oleg Nesterov9f68f6722012-08-19 16:15:09 +02001622 if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
Oleg Nesterovf8ac4ec2012-08-08 17:11:42 +02001623 set_bit(MMF_HAS_UPROBES, &newmm->flags);
Oleg Nesterov9f68f6722012-08-19 16:15:09 +02001624 /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
1625 set_bit(MMF_RECALC_UPROBES, &newmm->flags);
1626 }
Oleg Nesterovf8ac4ec2012-08-08 17:11:42 +02001627}
1628
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301629/*
1630 * Search area->bitmap for a free slot; sleep on area->wq if the page is full.
1631 */
1632static unsigned long xol_take_insn_slot(struct xol_area *area)
1633{
1634 unsigned long slot_addr;
1635 int slot_nr;
1636
1637 do {
1638 slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1639 if (slot_nr < UINSNS_PER_PAGE) {
1640 if (!test_and_set_bit(slot_nr, area->bitmap))
1641 break;
1642
1643 slot_nr = UINSNS_PER_PAGE;
1644 continue;
1645 }
1646 wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1647 } while (slot_nr >= UINSNS_PER_PAGE);
1648
1649 slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1650 atomic_inc(&area->slot_count);
1651
1652 return slot_addr;
1653}
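
/*
 * Worked example (assuming x86, where UPROBE_XOL_SLOT_BYTES is 128 and
 * PAGE_SIZE is 4096): the XOL page holds 32 slots, and claiming slot_nr 3
 * returns area->vaddr + 3 * 128.
 */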
1654
1655/*
Oleg Nesterova6cb3f62012-12-31 18:00:06 +01001656 * xol_get_insn_slot - allocate a slot for xol.
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301657 * Returns the allocated slot address or 0.
1658 */
Oleg Nesterova6cb3f62012-12-31 18:00:06 +01001659static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301660{
1661 struct xol_area *area;
Oleg Nesterova6cb3f62012-12-31 18:00:06 +01001662 unsigned long xol_vaddr;
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301663
Oleg Nesterov9b545df2012-12-31 16:39:49 +01001664 area = get_xol_area();
1665 if (!area)
1666 return 0;
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301667
Oleg Nesterova6cb3f62012-12-31 18:00:06 +01001668 xol_vaddr = xol_take_insn_slot(area);
1669 if (unlikely(!xol_vaddr))
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301670 return 0;
1671
Oleg Nesterov2abbcc02024-09-11 15:14:37 +02001672 arch_uprobe_copy_ixol(area->page, xol_vaddr,
Victor Kamensky72e6ae22014-04-29 04:20:52 +01001673 &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301674
Oleg Nesterova6cb3f62012-12-31 18:00:06 +01001675 return xol_vaddr;
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301676}
1677
1678/*
1679 * xol_free_insn_slot - If slot was earlier allocated by
1680 * @xol_get_insn_slot(), make the slot available for
1681 * subsequent requests.
1682 */
1683static void xol_free_insn_slot(struct task_struct *tsk)
1684{
1685 struct xol_area *area;
1686 unsigned long vma_end;
1687 unsigned long slot_addr;
1688
1689 if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1690 return;
1691
1692 slot_addr = tsk->utask->xol_vaddr;
Oleg Nesterovaf4355e2012-12-31 18:37:11 +01001693 if (unlikely(!slot_addr))
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301694 return;
1695
1696 area = tsk->mm->uprobes_state.xol_area;
1697 vma_end = area->vaddr + PAGE_SIZE;
1698 if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1699 unsigned long offset;
1700 int slot_nr;
1701
1702 offset = slot_addr - area->vaddr;
1703 slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1704 if (slot_nr >= UINSNS_PER_PAGE)
1705 return;
1706
1707 clear_bit(slot_nr, area->bitmap);
1708 atomic_dec(&area->slot_count);
Oleg Nesterov2a742ce2015-07-21 15:40:36 +02001709 smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301710 if (waitqueue_active(&area->wq))
1711 wake_up(&area->wq);
1712
1713 tsk->utask->xol_vaddr = 0;
1714 }
1715}
1716
Victor Kamensky72e6ae22014-04-29 04:20:52 +01001717void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
1718 void *src, unsigned long len)
1719{
1720 /* Initialize the slot */
1721 copy_to_page(page, vaddr, src, len);
1722
1723 /*
Christoph Hellwig885f7f82020-06-07 21:42:22 -07001724 * We probably need flush_icache_user_page() but it needs vma.
Victor Kamensky72e6ae22014-04-29 04:20:52 +01001725 * This should work on most architectures by default. If an
1726 * architecture needs to do something different it can define
1727 * its own version of the function.
1728 */
1729 flush_dcache_page(page);
1730}
1731
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05301732/**
1733 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
1734 * @regs: Reflects the saved state of the task after it has hit a breakpoint
1735 * instruction.
1736 * Return the address of the breakpoint instruction.
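 * (On x86, for example, the breakpoint is the one-byte int3, so this is
 * instruction_pointer(regs) - 1.)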
1737 */
1738unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
1739{
1740 return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
1741}
1742
Oleg Nesterovb02ef202014-05-12 18:24:45 +02001743unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
1744{
1745 struct uprobe_task *utask = current->utask;
1746
1747 if (unlikely(utask && utask->active_uprobe))
1748 return utask->vaddr;
1749
1750 return instruction_pointer(regs);
1751}
1752
Oleg Nesterov2bb5e842015-07-21 15:40:06 +02001753static struct return_instance *free_ret_instance(struct return_instance *ri)
1754{
1755 struct return_instance *next = ri->next;
1756 put_uprobe(ri->uprobe);
1757 kfree(ri);
1758 return next;
1759}
1760
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05301761/*
1762 * Called with no locks held.
Tobias Tefke788faab2018-07-09 12:57:15 +02001763 * Called in context of an exiting or an exec-ing thread.
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05301764 */
1765void uprobe_free_utask(struct task_struct *t)
1766{
1767 struct uprobe_task *utask = t->utask;
Oleg Nesterov2bb5e842015-07-21 15:40:06 +02001768 struct return_instance *ri;
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05301769
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05301770 if (!utask)
1771 return;
1772
1773 if (utask->active_uprobe)
1774 put_uprobe(utask->active_uprobe);
1775
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02001776 ri = utask->return_instances;
Oleg Nesterov2bb5e842015-07-21 15:40:06 +02001777 while (ri)
1778 ri = free_ret_instance(ri);
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02001779
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301780 xol_free_insn_slot(t);
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05301781 kfree(utask);
1782 t->utask = NULL;
1783}
1784
1785/*
Randy Dunlapc034f482021-02-25 17:21:10 -08001786 * Allocate a uprobe_task object for the task if necessary.
Oleg Nesterov5a2df662012-12-31 17:03:32 +01001787 * Called when the thread hits a breakpoint.
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05301788 *
1789 * Returns:
1790 * - pointer to new uprobe_task on success
1791 * - NULL otherwise
1792 */
Oleg Nesterov5a2df662012-12-31 17:03:32 +01001793static struct uprobe_task *get_utask(void)
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05301794{
Oleg Nesterov5a2df662012-12-31 17:03:32 +01001795 if (!current->utask)
1796 current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1797 return current->utask;
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05301798}
1799
Oleg Nesterov248d3a72013-10-13 21:18:41 +02001800static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
1801{
1802 struct uprobe_task *n_utask;
1803 struct return_instance **p, *o, *n;
1804
1805 n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1806 if (!n_utask)
1807 return -ENOMEM;
1808 t->utask = n_utask;
1809
1810 p = &n_utask->return_instances;
1811 for (o = o_utask->return_instances; o; o = o->next) {
1812 n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1813 if (!n)
1814 return -ENOMEM;
1815
1816 *n = *o;
Andrii Nakryiko3f7f1a62024-09-03 10:45:56 -07001817 /*
1818 * uprobe's refcnt has to be positive at this point, kept by
1819 * utask->return_instances items; return_instances can't be
1820 * removed right now, as task is blocked due to duping; so
1821 * get_uprobe() is safe to use here.
1822 */
Oleg Nesterovf2317222015-07-21 15:40:03 +02001823 get_uprobe(n->uprobe);
Oleg Nesterov248d3a72013-10-13 21:18:41 +02001824 n->next = NULL;
1825
1826 *p = n;
1827 p = &n->next;
1828 n_utask->depth++;
1829 }
1830
1831 return 0;
1832}
1833
Oleg Nesterovaa59c532013-10-13 21:18:44 +02001834static void dup_xol_work(struct callback_head *work)
1835{
Oleg Nesterovaa59c532013-10-13 21:18:44 +02001836 if (current->flags & PF_EXITING)
1837 return;
1838
Michal Hocko598fdc12016-05-23 16:26:08 -07001839 if (!__create_xol_area(current->utask->dup_xol_addr) &&
1840 !fatal_signal_pending(current))
Oleg Nesterovaa59c532013-10-13 21:18:44 +02001841 uprobe_warn(current, "dup xol area");
1842}
1843
Anton Arapove78aebf2013-04-03 18:00:32 +02001844/*
Oleg Nesterovb68e0742013-10-13 21:18:31 +02001845 * Called in context of a new clone/fork from copy_process.
1846 */
Oleg Nesterov3ab67962013-10-16 19:39:37 +02001847void uprobe_copy_process(struct task_struct *t, unsigned long flags)
Oleg Nesterovb68e0742013-10-13 21:18:31 +02001848{
Oleg Nesterov248d3a72013-10-13 21:18:41 +02001849 struct uprobe_task *utask = current->utask;
1850 struct mm_struct *mm = current->mm;
Oleg Nesterovaa59c532013-10-13 21:18:44 +02001851 struct xol_area *area;
Oleg Nesterov248d3a72013-10-13 21:18:41 +02001852
Oleg Nesterovb68e0742013-10-13 21:18:31 +02001853 t->utask = NULL;
Oleg Nesterov248d3a72013-10-13 21:18:41 +02001854
Oleg Nesterov3ab67962013-10-16 19:39:37 +02001855 if (!utask || !utask->return_instances)
1856 return;
1857
1858 if (mm == t->mm && !(flags & CLONE_VFORK))
Oleg Nesterov248d3a72013-10-13 21:18:41 +02001859 return;
1860
1861 if (dup_utask(t, utask))
1862 return uprobe_warn(t, "dup ret instances");
Oleg Nesterovaa59c532013-10-13 21:18:44 +02001863
1864 /* The task can fork() after dup_xol_work() fails */
1865 area = mm->uprobes_state.xol_area;
1866 if (!area)
1867 return uprobe_warn(t, "dup xol area");
1868
Oleg Nesterov3ab67962013-10-16 19:39:37 +02001869 if (mm == t->mm)
1870 return;
1871
Oleg Nesterov32473432013-11-08 18:52:21 +01001872 t->utask->dup_xol_addr = area->vaddr;
1873 init_task_work(&t->utask->dup_xol_work, dup_xol_work);
Jens Axboe91989c72020-10-16 09:02:26 -06001874 task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
Oleg Nesterovb68e0742013-10-13 21:18:31 +02001875}
1876
1877/*
Anton Arapove78aebf2013-04-03 18:00:32 +02001878 * The current area->vaddr notion assumes the trampoline address is always
1879 * equal to area->vaddr.
1880 *
1881 * Returns -1 in case the xol_area is not allocated.
1882 */
Jiri Olsaff474a782024-06-12 08:44:28 +09001883unsigned long uprobe_get_trampoline_vaddr(void)
Anton Arapove78aebf2013-04-03 18:00:32 +02001884{
1885 struct xol_area *area;
1886 unsigned long trampoline_vaddr = -1;
1887
Paul E. McKenney5c6338b2017-10-09 11:08:53 -07001888 /* Pairs with xol_add_vma() smp_store_release() */
1889 area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
Anton Arapove78aebf2013-04-03 18:00:32 +02001890 if (area)
1891 trampoline_vaddr = area->vaddr;
1892
1893 return trampoline_vaddr;
1894}
1895
Oleg Nesterovdb087ef2015-07-21 15:40:28 +02001896static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
1897 struct pt_regs *regs)
Oleg Nesterova5b7e1a2015-07-21 15:40:23 +02001898{
1899 struct return_instance *ri = utask->return_instances;
Oleg Nesterovdb087ef2015-07-21 15:40:28 +02001900 enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
Oleg Nesterov86dcb702015-07-21 15:40:26 +02001901
1902 while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
Oleg Nesterova5b7e1a2015-07-21 15:40:23 +02001903 ri = free_ret_instance(ri);
1904 utask->depth--;
1905 }
1906 utask->return_instances = ri;
1907}
1908
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02001909static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
1910{
1911 struct return_instance *ri;
1912 struct uprobe_task *utask;
1913 unsigned long orig_ret_vaddr, trampoline_vaddr;
Oleg Nesterovdb087ef2015-07-21 15:40:28 +02001914 bool chained;
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02001915
1916 if (!get_xol_area())
1917 return;
1918
1919 utask = get_utask();
1920 if (!utask)
1921 return;
1922
Anton Arapovded49c52013-04-03 18:00:37 +02001923 if (utask->depth >= MAX_URETPROBE_DEPTH) {
1924 printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
1925 " nestedness limit pid/tgid=%d/%d\n",
1926 current->pid, current->tgid);
1927 return;
1928 }
1929
Andrii Nakryiko86174082024-09-03 10:45:57 -07001930 /* we need to bump refcount to store uprobe in utask */
1931 if (!try_get_uprobe(uprobe))
1932 return;
1933
Oleg Nesterov6c58d0e2015-07-21 15:40:10 +02001934 ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02001935 if (!ri)
Andrii Nakryiko86174082024-09-03 10:45:57 -07001936 goto fail;
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02001937
Jiri Olsaff474a782024-06-12 08:44:28 +09001938 trampoline_vaddr = uprobe_get_trampoline_vaddr();
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02001939 orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
1940 if (orig_ret_vaddr == -1)
1941 goto fail;
1942
Oleg Nesterova5b7e1a2015-07-21 15:40:23 +02001943 /* drop the entries invalidated by longjmp() */
Oleg Nesterovdb087ef2015-07-21 15:40:28 +02001944 chained = (orig_ret_vaddr == trampoline_vaddr);
1945 cleanup_return_instances(utask, chained, regs);
Oleg Nesterova5b7e1a2015-07-21 15:40:23 +02001946
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02001947 /*
1948 * We don't want to keep the trampoline address on the stack; rather, keep
1949 * the original return address of the first caller through all the
1950 * subsequent instances. This also makes breakpoint unwrapping easier.
1951 */
Oleg Nesterovdb087ef2015-07-21 15:40:28 +02001952 if (chained) {
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02001953 if (!utask->return_instances) {
1954 /*
1955 * This situation is not possible. Likely we have an
1956 * attack from user-space.
1957 */
Oleg Nesterov6c58d0e2015-07-21 15:40:10 +02001958 uprobe_warn(current, "handle tail call");
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02001959 goto fail;
1960 }
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02001961 orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
1962 }
Andrii Nakryiko86174082024-09-03 10:45:57 -07001963 ri->uprobe = uprobe;
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02001964 ri->func = instruction_pointer(regs);
Oleg Nesterov7b868e42015-07-21 15:40:18 +02001965 ri->stack = user_stack_pointer(regs);
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02001966 ri->orig_ret_vaddr = orig_ret_vaddr;
1967 ri->chained = chained;
1968
Anton Arapovded49c52013-04-03 18:00:37 +02001969 utask->depth++;
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02001970 ri->next = utask->return_instances;
1971 utask->return_instances = ri;
1972
1973 return;
Andrii Nakryiko86174082024-09-03 10:45:57 -07001974fail:
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02001975 kfree(ri);
Andrii Nakryiko86174082024-09-03 10:45:57 -07001976 put_uprobe(uprobe);
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02001977}
1978
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05301979/* Prepare to single-step probed instruction out of line. */
1980static int
Oleg Nesterova6cb3f62012-12-31 18:00:06 +01001981pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05301982{
Oleg Nesterova6cb3f62012-12-31 18:00:06 +01001983 struct uprobe_task *utask;
1984 unsigned long xol_vaddr;
Oleg Nesterovaba51022012-12-31 18:12:48 +01001985 int err;
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05301986
Oleg Nesterov608e7422012-12-31 18:20:42 +01001987 utask = get_utask();
1988 if (!utask)
1989 return -ENOMEM;
Oleg Nesterova6cb3f62012-12-31 18:00:06 +01001990
Andrii Nakryiko86174082024-09-03 10:45:57 -07001991 if (!try_get_uprobe(uprobe))
1992 return -EINVAL;
1993
Oleg Nesterova6cb3f62012-12-31 18:00:06 +01001994 xol_vaddr = xol_get_insn_slot(uprobe);
Andrii Nakryiko86174082024-09-03 10:45:57 -07001995 if (!xol_vaddr) {
1996 err = -ENOMEM;
1997 goto err_out;
1998 }
Oleg Nesterova6cb3f62012-12-31 18:00:06 +01001999
2000 utask->xol_vaddr = xol_vaddr;
2001 utask->vaddr = bp_vaddr;
2002
Oleg Nesterovaba51022012-12-31 18:12:48 +01002003 err = arch_uprobe_pre_xol(&uprobe->arch, regs);
2004 if (unlikely(err)) {
2005 xol_free_insn_slot(current);
Andrii Nakryiko86174082024-09-03 10:45:57 -07002006 goto err_out;
Oleg Nesterovaba51022012-12-31 18:12:48 +01002007 }
2008
Oleg Nesterov608e7422012-12-31 18:20:42 +01002009 utask->active_uprobe = uprobe;
2010 utask->state = UTASK_SSTEP;
Oleg Nesterovaba51022012-12-31 18:12:48 +01002011 return 0;
Andrii Nakryiko86174082024-09-03 10:45:57 -07002012err_out:
2013 put_uprobe(uprobe);
2014 return err;
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302015}
2016
2017/*
2018 * If we are singlestepping, then ensure this thread is not connected to
2019 * non-fatal signals until completion of singlestep. When xol insn itself
2020 * triggers the signal, restart the original insn even if the task is
2021 * already SIGKILL'ed (since coredump should report the correct ip). This
2022 * is even more important if the task has a handler for SIGSEGV/etc, The
2023 * _same_ instruction should be repeated again after return from the signal
2024 * handler, and SSTEP can never finish in this case.
2025 */
2026bool uprobe_deny_signal(void)
2027{
2028 struct task_struct *t = current;
2029 struct uprobe_task *utask = t->utask;
2030
2031 if (likely(!utask || !utask->active_uprobe))
2032 return false;
2033
2034 WARN_ON_ONCE(utask->state != UTASK_SSTEP);
2035
Jens Axboe5c251e92020-10-26 14:32:27 -06002036 if (task_sigpending(t)) {
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302037 spin_lock_irq(&t->sighand->siglock);
2038 clear_tsk_thread_flag(t, TIF_SIGPENDING);
2039 spin_unlock_irq(&t->sighand->siglock);
2040
2041 if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
2042 utask->state = UTASK_SSTEP_TRAPPED;
2043 set_tsk_thread_flag(t, TIF_UPROBE);
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302044 }
2045 }
2046
2047 return true;
2048}
2049
Oleg Nesterov499a4f32012-08-19 17:41:34 +02002050static void mmf_recalc_uprobes(struct mm_struct *mm)
2051{
Matthew Wilcox (Oracle)fcb72a52022-09-06 19:48:58 +00002052 VMA_ITERATOR(vmi, mm, 0);
Oleg Nesterov499a4f32012-08-19 17:41:34 +02002053 struct vm_area_struct *vma;
2054
Matthew Wilcox (Oracle)fcb72a52022-09-06 19:48:58 +00002055 for_each_vma(vmi, vma) {
Oleg Nesterov499a4f32012-08-19 17:41:34 +02002056 if (!valid_vma(vma, false))
2057 continue;
2058 /*
2059 * This is not strictly accurate; we can race with
2060 * uprobe_unregister() and see the already removed
2061 * uprobe if delete_uprobe() was not yet called.
Oleg Nesterov63633cb2012-11-22 18:30:15 +01002062 * Or this uprobe can be filtered out.
Oleg Nesterov499a4f32012-08-19 17:41:34 +02002063 */
2064 if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
2065 return;
2066 }
2067
2068 clear_bit(MMF_HAS_UPROBES, &mm->flags);
2069}
2070
Ananth N Mavinakayanahalli0908ad62013-03-22 20:46:27 +05302071static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
Oleg Nesterovec75fba2012-09-23 21:55:19 +02002072{
2073 struct page *page;
2074 uprobe_opcode_t opcode;
2075 int result;
2076
Oleg Nesterov013b2de2020-05-04 18:47:25 +02002077 if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
2078 return -EINVAL;
2079
Oleg Nesterovec75fba2012-09-23 21:55:19 +02002080 pagefault_disable();
Linus Torvaldsbd28b142016-05-22 17:21:27 -07002081 result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
Oleg Nesterovec75fba2012-09-23 21:55:19 +02002082 pagefault_enable();
2083
2084 if (likely(result == 0))
2085 goto out;
2086
Oleg Nesterov300b0562024-08-01 15:27:14 +02002087 result = get_user_pages(vaddr, 1, FOLL_FORCE, &page);
Oleg Nesterovec75fba2012-09-23 21:55:19 +02002088 if (result < 0)
2089 return result;
2090
Oleg Nesterovab0d8052013-03-24 18:24:37 +01002091 copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
Oleg Nesterovec75fba2012-09-23 21:55:19 +02002092 put_page(page);
2093 out:
Ananth N Mavinakayanahalli0908ad62013-03-22 20:46:27 +05302094 /* This needs to return true for any variant of the trap insn */
2095 return is_trap_insn(&opcode);
Oleg Nesterovec75fba2012-09-23 21:55:19 +02002096}
2097
Andrii Nakryiko86174082024-09-03 10:45:57 -07002098/* assumes we are called inside an RCU-protected region */
2099static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swbp)
Oleg Nesterov3a9ea052012-05-29 21:28:57 +02002100{
2101 struct mm_struct *mm = current->mm;
2102 struct uprobe *uprobe = NULL;
2103 struct vm_area_struct *vma;
2104
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002105 mmap_read_lock(mm);
Liam Howlett9016dde2021-06-28 19:39:35 -07002106 vma = vma_lookup(mm, bp_vaddr);
2107 if (vma) {
Oleg Nesterov3a9ea052012-05-29 21:28:57 +02002108 if (valid_vma(vma, false)) {
Oleg Nesterovf2817692013-03-17 18:54:44 +01002109 struct inode *inode = file_inode(vma->vm_file);
Oleg Nesterovcb113b42012-07-29 20:22:42 +02002110 loff_t offset = vaddr_to_offset(vma, bp_vaddr);
Oleg Nesterov3a9ea052012-05-29 21:28:57 +02002111
Andrii Nakryiko86174082024-09-03 10:45:57 -07002112 uprobe = find_uprobe_rcu(inode, offset);
Oleg Nesterov3a9ea052012-05-29 21:28:57 +02002113 }
Oleg Nesterovd790d342012-05-29 21:29:14 +02002114
2115 if (!uprobe)
Ananth N Mavinakayanahalli0908ad62013-03-22 20:46:27 +05302116 *is_swbp = is_trap_at_addr(mm, bp_vaddr);
Oleg Nesterovd790d342012-05-29 21:29:14 +02002117 } else {
2118 *is_swbp = -EFAULT;
Oleg Nesterov3a9ea052012-05-29 21:28:57 +02002119 }
Oleg Nesterov499a4f32012-08-19 17:41:34 +02002120
2121 if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
2122 mmf_recalc_uprobes(mm);
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002123 mmap_read_unlock(mm);
Oleg Nesterov3a9ea052012-05-29 21:28:57 +02002124
2125 return uprobe;
2126}
2127
Oleg Nesterovda1816b2012-12-29 17:49:11 +01002128static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
2129{
2130 struct uprobe_consumer *uc;
2131 int remove = UPROBE_HANDLER_REMOVE;
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02002132 bool need_prep = false; /* prepare return uprobe, when needed */
Andrii Nakryikocc01bd02024-09-03 10:45:59 -07002133 bool has_consumers = false;
Oleg Nesterovda1816b2012-12-29 17:49:11 +01002134
Andrii Nakryikocfa7f3d2024-07-29 10:52:23 -07002135 current->utask->auprobe = &uprobe->arch;
Andrii Nakryikocc01bd02024-09-03 10:45:59 -07002136
2137 list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
2138 srcu_read_lock_held(&uprobes_srcu)) {
Anton Arapovea024872013-04-03 18:00:31 +02002139 int rc = 0;
Oleg Nesterovda1816b2012-12-29 17:49:11 +01002140
Anton Arapovea024872013-04-03 18:00:31 +02002141 if (uc->handler) {
2142 rc = uc->handler(uc, regs);
2143 WARN(rc & ~UPROBE_HANDLER_MASK,
Sakari Ailusd75f7732019-03-25 21:32:28 +02002144 "bad rc=0x%x from %ps()\n", rc, uc->handler);
Anton Arapovea024872013-04-03 18:00:31 +02002145 }
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02002146
2147 if (uc->ret_handler)
2148 need_prep = true;
2149
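		/* remove stays set only while every consumer returns UPROBE_HANDLER_REMOVE */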
Oleg Nesterovda1816b2012-12-29 17:49:11 +01002150 remove &= rc;
Andrii Nakryikocc01bd02024-09-03 10:45:59 -07002151 has_consumers = true;
Oleg Nesterovda1816b2012-12-29 17:49:11 +01002152 }
Andrii Nakryikocfa7f3d2024-07-29 10:52:23 -07002153 current->utask->auprobe = NULL;
Oleg Nesterovda1816b2012-12-29 17:49:11 +01002154
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02002155 if (need_prep && !remove)
2156 prepare_uretprobe(uprobe, regs); /* put bp at return */
2157
Andrii Nakryikocc01bd02024-09-03 10:45:59 -07002158 if (remove && has_consumers) {
2159 down_read(&uprobe->register_rwsem);
2160
2161 /* re-check that removal is still required, this time under lock */
2162 if (!filter_chain(uprobe, current->mm)) {
2163 WARN_ON(!uprobe_is_active(uprobe));
2164 unapply_uprobe(uprobe, current->mm);
2165 }
2166
2167 up_read(&uprobe->register_rwsem);
Oleg Nesterovda1816b2012-12-29 17:49:11 +01002168 }
Oleg Nesterovda1816b2012-12-29 17:49:11 +01002169}
2170
Anton Arapovfec88982013-04-03 18:00:36 +02002171static void
2172handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
2173{
2174 struct uprobe *uprobe = ri->uprobe;
2175 struct uprobe_consumer *uc;
Andrii Nakryikocc01bd02024-09-03 10:45:59 -07002176 int srcu_idx;
Anton Arapovfec88982013-04-03 18:00:36 +02002177
Andrii Nakryikocc01bd02024-09-03 10:45:59 -07002178 srcu_idx = srcu_read_lock(&uprobes_srcu);
2179 list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
2180 srcu_read_lock_held(&uprobes_srcu)) {
Anton Arapovfec88982013-04-03 18:00:36 +02002181 if (uc->ret_handler)
2182 uc->ret_handler(uc, ri->func, regs);
2183 }
Andrii Nakryikocc01bd02024-09-03 10:45:59 -07002184 srcu_read_unlock(&uprobes_srcu, srcu_idx);
Anton Arapovfec88982013-04-03 18:00:36 +02002185}
2186
Oleg Nesterova83cfeb2015-07-21 15:40:13 +02002187static struct return_instance *find_next_ret_chain(struct return_instance *ri)
2188{
2189 bool chained;
2190
2191 do {
2192 chained = ri->chained;
2193 ri = ri->next; /* can't be NULL if chained */
2194 } while (chained);
2195
2196 return ri;
2197}
2198
Jiri Olsaff474a782024-06-12 08:44:28 +09002199void uprobe_handle_trampoline(struct pt_regs *regs)
Anton Arapovfec88982013-04-03 18:00:36 +02002200{
2201 struct uprobe_task *utask;
Oleg Nesterova83cfeb2015-07-21 15:40:13 +02002202 struct return_instance *ri, *next;
Oleg Nesterov5eeb50d2015-07-21 15:40:21 +02002203 bool valid;
Anton Arapovfec88982013-04-03 18:00:36 +02002204
2205 utask = current->utask;
2206 if (!utask)
Oleg Nesterov0b5256c2015-07-21 15:40:08 +02002207 goto sigill;
Anton Arapovfec88982013-04-03 18:00:36 +02002208
2209 ri = utask->return_instances;
2210 if (!ri)
Oleg Nesterov0b5256c2015-07-21 15:40:08 +02002211 goto sigill;
Anton Arapovfec88982013-04-03 18:00:36 +02002212
Oleg Nesterova83cfeb2015-07-21 15:40:13 +02002213 do {
Oleg Nesterov5eeb50d2015-07-21 15:40:21 +02002214 /*
2215 * We should throw out the frames invalidated by longjmp().
2216 * If this chain is valid, then the next one should be alive
2217 * or NULL; the latter case means that nobody but ri->func
2218 * could hit this trampoline on return. TODO: sigaltstack().
2219 */
2220 next = find_next_ret_chain(ri);
Oleg Nesterov86dcb702015-07-21 15:40:26 +02002221 valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);
Oleg Nesterov5eeb50d2015-07-21 15:40:21 +02002222
2223 instruction_pointer_set(regs, ri->orig_ret_vaddr);
2224 do {
Andrii Nakryiko4a365eb82024-05-21 18:38:43 -07002225 /* pop current instance from the stack of pending return instances,
2226 * as it's not pending anymore: we just fixed up original
2227 * instruction pointer in regs and are about to call handlers;
2228 * this allows fixup_uretprobe_trampoline_entries() to properly fix up
2229 * captured stack traces from uretprobe handlers, in which pending
2230 * trampoline addresses on the stack are replaced with correct
2231 * original return addresses
2232 */
2233 utask->return_instances = ri->next;
Oleg Nesterov5eeb50d2015-07-21 15:40:21 +02002234 if (valid)
2235 handle_uretprobe_chain(ri, regs);
2236 ri = free_ret_instance(ri);
2237 utask->depth--;
2238 } while (ri != next);
2239 } while (!valid);
Anton Arapovfec88982013-04-03 18:00:36 +02002240
2241 utask->return_instances = ri;
Oleg Nesterov0b5256c2015-07-21 15:40:08 +02002242 return;
Anton Arapovfec88982013-04-03 18:00:36 +02002243
Oleg Nesterov0b5256c2015-07-21 15:40:08 +02002244 sigill:
2245 uprobe_warn(current, "handle uretprobe, sending SIGILL.");
Eric W. Biederman3cf5d072019-05-23 10:17:27 -05002246 force_sig(SIGILL);
Oleg Nesterov0b5256c2015-07-21 15:40:08 +02002247
Anton Arapovfec88982013-04-03 18:00:36 +02002248}
2249
David A. Long6fe50a22014-02-03 14:25:49 -05002250bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
2251{
2252 return false;
2253}
2254
Oleg Nesterov86dcb702015-07-21 15:40:26 +02002255bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
2256 struct pt_regs *regs)
Oleg Nesterov97da8972015-07-21 15:40:16 +02002257{
2258 return true;
2259}
2260
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302261/*
2262 * Run handler and ask thread to singlestep.
2263 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
2264 */
2265static void handle_swbp(struct pt_regs *regs)
2266{
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302267 struct uprobe *uprobe;
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302268 unsigned long bp_vaddr;
Andrii Nakryiko86174082024-09-03 10:45:57 -07002269 int is_swbp, srcu_idx;
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302270
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302271 bp_vaddr = uprobe_get_swbp_addr(regs);
Jiri Olsaff474a782024-06-12 08:44:28 +09002272 if (bp_vaddr == uprobe_get_trampoline_vaddr())
2273 return uprobe_handle_trampoline(regs);
Anton Arapovfec88982013-04-03 18:00:36 +02002274
Andrii Nakryiko86174082024-09-03 10:45:57 -07002275 srcu_idx = srcu_read_lock(&uprobes_srcu);
2276
2277 uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp);
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302278 if (!uprobe) {
Oleg Nesterov56bb4cf2012-05-29 21:29:47 +02002279 if (is_swbp > 0) {
2280 /* No matching uprobe; signal SIGTRAP. */
Oleg Nesterovfe5ed7a2020-07-23 17:44:20 +02002281 force_sig(SIGTRAP);
Oleg Nesterov56bb4cf2012-05-29 21:29:47 +02002282 } else {
2283 /*
2284 * Either we raced with uprobe_unregister() or we can't
2285 * access this memory. The latter is only possible if
2286 * another thread plays with our ->mm. In both cases
2287 * we can simply restart. If this vma was unmapped we
2288 * can pretend this insn was not executed yet and get
2289 * the (correct) SIGSEGV after restart.
2290 */
2291 instruction_pointer_set(regs, bp_vaddr);
2292 }
Andrii Nakryiko86174082024-09-03 10:45:57 -07002293 goto out;
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302294 }
Oleg Nesterov74e59df2012-12-30 15:54:08 +01002295
2296 /* change it in advance for ->handler() and restart */
2297 instruction_pointer_set(regs, bp_vaddr);
2298
Oleg Nesterov142b18d2012-09-29 21:56:57 +02002299 /*
2300 * TODO: move copy_insn/etc into _register and remove this hack.
2301 * After we hit the bp, _unregister + _register can install the
2302 * new and not-yet-analyzed uprobe at the same address, restart.
2303 */
Oleg Nesterov71434f22012-09-30 21:12:44 +02002304 if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
Oleg Nesterov74e59df2012-12-30 15:54:08 +01002305 goto out;
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302306
Andrea Parri09d3f012018-11-22 17:10:31 +01002307 /*
2308 * Pairs with the smp_wmb() in prepare_uprobe().
2309 *
2310 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
2311 * we must also see the stores to &uprobe->arch performed by the
2312 * prepare_uprobe() call.
2313 */
2314 smp_rmb();
2315
Oleg Nesterov72fd2932013-11-26 09:35:25 +09002316 /* Tracing handlers use ->utask to communicate with fetch methods */
2317 if (!get_utask())
2318 goto out;
2319
David A. Long6fe50a22014-02-03 14:25:49 -05002320 if (arch_uprobe_ignore(&uprobe->arch, regs))
2321 goto out;
2322
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302323 handler_chain(uprobe, regs);
David A. Long6fe50a22014-02-03 14:25:49 -05002324
Oleg Nesterov8a6b1732014-03-30 18:56:22 +02002325 if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
Oleg Nesterov0578a972012-09-14 18:31:23 +02002326 goto out;
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302327
Andrii Nakryiko86174082024-09-03 10:45:57 -07002328 if (pre_ssout(uprobe, regs, bp_vaddr))
2329 goto out;
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302330
Oleg Nesterov0578a972012-09-14 18:31:23 +02002331out:
Andrii Nakryiko86174082024-09-03 10:45:57 -07002332 /* arch_uprobe_skip_sstep() succeeded, or restart if we can't singlestep */
2333 srcu_read_unlock(&uprobes_srcu, srcu_idx);
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302334}
2335
2336/*
2337 * Perform required fix-ups and disable singlestep.
2338 * Allow pending signals to take effect.
2339 */
2340static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
2341{
2342 struct uprobe *uprobe;
Oleg Nesterov014940b2014-04-03 20:20:10 +02002343 int err = 0;
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302344
2345 uprobe = utask->active_uprobe;
2346 if (utask->state == UTASK_SSTEP_ACK)
Oleg Nesterov014940b2014-04-03 20:20:10 +02002347 err = arch_uprobe_post_xol(&uprobe->arch, regs);
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302348 else if (utask->state == UTASK_SSTEP_TRAPPED)
2349 arch_uprobe_abort_xol(&uprobe->arch, regs);
2350 else
2351 WARN_ON_ONCE(1);
2352
2353 put_uprobe(uprobe);
2354 utask->active_uprobe = NULL;
2355 utask->state = UTASK_RUNNING;
Srikar Dronamrajud4b3b6382012-03-30 23:56:31 +05302356 xol_free_insn_slot(current);
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302357
2358 spin_lock_irq(&current->sighand->siglock);
2359 recalc_sigpending(); /* see uprobe_deny_signal() */
2360 spin_unlock_irq(&current->sighand->siglock);
Oleg Nesterov014940b2014-04-03 20:20:10 +02002361
2362 if (unlikely(err)) {
2363 uprobe_warn(current, "execute the probed insn, sending SIGILL.");
Eric W. Biederman3cf5d072019-05-23 10:17:27 -05002364 force_sig(SIGILL);
Oleg Nesterov014940b2014-04-03 20:20:10 +02002365 }
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302366}
2367
2368/*
Oleg Nesterov1b08e9072012-09-14 18:52:10 +02002369 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
2370 * allows the thread to return from interrupt. After that handle_swbp()
2371 * sets utask->active_uprobe.
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302372 *
Oleg Nesterov1b08e9072012-09-14 18:52:10 +02002373 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
2374 * and allows the thread to return from interrupt.
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302375 *
2376 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
2377 * uprobe_notify_resume().
2378 */
2379void uprobe_notify_resume(struct pt_regs *regs)
2380{
2381 struct uprobe_task *utask;
2382
Oleg Nesterovdb023ea2012-09-14 19:05:46 +02002383 clear_thread_flag(TIF_UPROBE);
2384
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302385 utask = current->utask;
Oleg Nesterov1b08e9072012-09-14 18:52:10 +02002386 if (utask && utask->active_uprobe)
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302387 handle_singlestep(utask, regs);
Oleg Nesterov1b08e9072012-09-14 18:52:10 +02002388 else
2389 handle_swbp(regs);
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302390}
2391
2392/*
2393 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
2394 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
2395 */
2396int uprobe_pre_sstep_notifier(struct pt_regs *regs)
2397{
Anton Arapov0dfd0eb2013-04-03 18:00:35 +02002398 if (!current->mm)
2399 return 0;
2400
2401 if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
2402 (!current->utask || !current->utask->return_instances))
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302403 return 0;
2404
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302405 set_thread_flag(TIF_UPROBE);
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302406 return 1;
2407}
2408
2409/*
2410 * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
2411 * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
2412 */
2413int uprobe_post_sstep_notifier(struct pt_regs *regs)
2414{
2415 struct uprobe_task *utask = current->utask;
2416
2417 if (!current->mm || !utask || !utask->active_uprobe)
2418 /* task is currently not uprobed */
2419 return 0;
2420
2421 utask->state = UTASK_SSTEP_ACK;
2422 set_thread_flag(TIF_UPROBE);
2423 return 1;
2424}
2425
2426static struct notifier_block uprobe_exception_nb = {
2427 .notifier_call = arch_uprobe_exception_notify,
2428 .priority = INT_MAX-1, /* notified after kprobes, kgdb */
2429};
2430
Nadav Amitaad42dd2019-04-26 16:22:44 -07002431void __init uprobes_init(void)
Srikar Dronamraju2b144492012-02-09 14:56:42 +05302432{
2433 int i;
2434
Oleg Nesterov66d06df2012-11-25 22:48:37 +01002435 for (i = 0; i < UPROBES_HASH_SZ; i++)
Srikar Dronamraju2b144492012-02-09 14:56:42 +05302436 mutex_init(&uprobes_mmap_mutex[i]);
Srikar Dronamraju0326f5a92012-03-13 23:30:11 +05302437
Nadav Amitaad42dd2019-04-26 16:22:44 -07002438 BUG_ON(register_die_notifier(&uprobe_exception_nb));
Srikar Dronamraju2b144492012-02-09 14:56:42 +05302439}