// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>
#include <linux/swap.h>		/* folio_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
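
/*
 * The XOL area is a single page carved into UPROBE_XOL_SLOT_BYTES-sized
 * instruction slots, so up to UINSNS_PER_PAGE tasks can single-step out
 * of line concurrently per mm.
 */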

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * Allows us to skip uprobe_mmap() if there are no uprobe events active
 * at this time.  A fine-grained per-inode count would probably be better.
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_RWLOCK(uprobes_treelock);	/* serialize rbtree access */
static seqcount_rwlock_t uprobes_seqcount = SEQCNT_RWLOCK_ZERO(uprobes_seqcount, &uprobes_treelock);

DEFINE_STATIC_SRCU(uprobes_srcu);

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);
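/*
 * dup_mmap_sem serializes breakpoint (un)installation against dup_mmap():
 * a forking child copies its mm either before or after a given probe
 * update, never halfway through.
 */
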
/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	refcount_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct list_head	consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	struct rcu_head		rcu;
	loff_t			offset;
	loff_t			ref_ctr_offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 *	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

struct delayed_uprobe {
	struct list_head list;
	struct uprobe *uprobe;
	struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, the thread contends for a slot.  It frees the
 * slot after single-stepping.  Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
	wait_queue_head_t	wq;		/* if all slots are busy */
	atomic_t		slot_count;	/* number of in-use slots */
	unsigned long		*bitmap;	/* 0 = free slot */

	struct page		*page;
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long		vaddr;		/* Page(s) of instruction slots */
};

static void uprobe_warn(struct task_struct *t, const char *msg)
{
	pr_warn("uprobe: %s:%d failed to %s\n", current->comm, current->pid, msg);
}

/*
 * valid_vma: Verify if the specified vma is an executable vma.
 * Relax restrictions while unregistering: vm_flags might have
 * changed after the breakpoint was inserted.
 * - is_register: indicates if we are in register context.
 * - Return true if the specified virtual address is in an
 *   executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
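
/*
 * The two helpers above are exact inverses for any address inside the vma.
 * Worked example: with vm_start == 0x1000 and vm_pgoff == 2 (4K pages),
 * file offset 0x2abc maps to vaddr 0x1000 + 0x2abc - 0x2000 == 0x1abc,
 * and vaddr_to_offset() maps 0x1abc back to 0x2abc.
 */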

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @old_page: the page we are replacing by new_page
 * @new_page: the modified page that replaces @old_page
 *
 * If @new_page is NULL, only unmap @old_page.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct folio *old_folio = page_folio(old_page);
	struct folio *new_folio;
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
	int err;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + PAGE_SIZE);

	if (new_page) {
		new_folio = page_folio(new_page);
		err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
		if (err)
			return err;
	}

	/* For folio_free_swap() below */
	folio_lock(old_folio);

	mmu_notifier_invalidate_range_start(&range);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw))
		goto unlock;
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	if (new_page) {
		folio_get(new_folio);
		folio_add_new_anon_rmap(new_folio, vma, addr, RMAP_EXCLUSIVE);
		folio_add_lru_vma(new_folio, vma);
	} else
		/* no new page, just dec_mm_counter for old_page */
		dec_mm_counter(mm, MM_ANONPAGES);

	if (!folio_test_anon(old_folio)) {
		dec_mm_counter(mm, mm_counter_file(old_folio));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
	ptep_clear_flush(vma, addr, pvmw.pte);
	if (new_page)
		set_pte_at(mm, addr, pvmw.pte,
			   mk_pte(new_page, vma->vm_page_prot));

	folio_remove_rmap_pte(old_folio, old_page, vma);
	if (!folio_mapped(old_folio))
		folio_free_swap(old_folio);
	page_vma_mapped_walk_done(&pvmw);
	folio_put(old_folio);

	err = 0;
unlock:
	mmu_notifier_invalidate_range_end(&range);
	folio_unlock(old_folio);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

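/*
 * verify_opcode - check whether the opcode at @vaddr still needs changing.
 * Returns 1 if @vaddr must be updated, 0 if it is already in the desired
 * state: the breakpoint is already installed (register), or the
 * instruction was already restored or replaced behind our back (unregister).
 */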
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be a conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always win over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	list_for_each_entry(du, &delayed_uprobe_list, list)
		if (du->uprobe == uprobe && du->mm == mm)
			return du;
	return NULL;
}

static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	if (delayed_uprobe_check(uprobe, mm))
		return 0;

	du = kzalloc(sizeof(*du), GFP_KERNEL);
	if (!du)
		return -ENOMEM;

	du->uprobe = uprobe;
	du->mm = mm;
	list_add(&du->list, &delayed_uprobe_list);
	return 0;
}

static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
	if (WARN_ON(!du))
		return;
	list_del(&du->list);
	kfree(du);
}

static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;

	if (!uprobe && !mm)
		return;

	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (uprobe && du->uprobe != uprobe)
			continue;
		if (mm && du->mm != mm)
			continue;

		delayed_uprobe_delete(du);
	}
}

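/*
 * A reference counter vma (the "SDT semaphore" in USDT terms) must be a
 * private, writable file mapping of the probed inode that covers the
 * counter's address.
 */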
static bool valid_ref_ctr_vma(struct uprobe *uprobe,
			      struct vm_area_struct *vma)
{
	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);

	return uprobe->ref_ctr_offset &&
		vma->vm_file &&
		file_inode(vma->vm_file) == uprobe->inode &&
		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
		vma->vm_start <= vaddr &&
		vma->vm_end > vaddr;
}

static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *tmp;

	for_each_vma(vmi, tmp)
		if (valid_ref_ctr_vma(uprobe, tmp))
			return tmp;

	return NULL;
}

static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
	void *kaddr;
	struct page *page;
	int ret;
	short *ptr;

	if (!vaddr || !d)
		return -EINVAL;

	ret = get_user_pages_remote(mm, vaddr, 1,
				    FOLL_WRITE, &page, NULL);
	if (unlikely(ret <= 0)) {
		/*
		 * We are asking for 1 page. If get_user_pages_remote() fails,
		 * it may return 0; in that case we have to return an error.
		 */
		return ret == 0 ? -EBUSY : ret;
	}

	kaddr = kmap_atomic(page);
	ptr = kaddr + (vaddr & ~PAGE_MASK);

	if (unlikely(*ptr + d < 0)) {
		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
		ret = -EINVAL;
		goto out;
	}

	*ptr += d;
	ret = 0;
out:
	kunmap_atomic(kaddr);
	put_page(page);
	return ret;
}

static void update_ref_ctr_warn(struct uprobe *uprobe,
				struct mm_struct *mm, short d)
{
	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
		(unsigned long long) uprobe->offset,
		(unsigned long long) uprobe->ref_ctr_offset, mm);
}

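/*
 * Adjust the counter if its vma is currently mapped in @mm, and keep
 * delayed_uprobe_list in sync so that a counter whose vma only shows up
 * later still gets incremented then (see delayed_ref_ctr_inc()).
 */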
static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
			  short d)
{
	struct vm_area_struct *rc_vma;
	unsigned long rc_vaddr;
	int ret = 0;

	rc_vma = find_ref_ctr_vma(uprobe, mm);

	if (rc_vma) {
		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(mm, rc_vaddr, d);
		if (ret)
			update_ref_ctr_warn(uprobe, mm, d);

		if (d > 0)
			return ret;
	}

	mutex_lock(&delayed_uprobe_lock);
	if (d > 0)
		ret = delayed_uprobe_add(uprobe, mm);
	else
		delayed_uprobe_remove(uprobe, mm);
	mutex_unlock(&delayed_uprobe_lock);

	return ret;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instructions and the
 * breakpoint instruction is not of the smallest length supported by that
 * architecture, then we need to modify is_trap_at_addr() and
 * uprobe_write_opcode() accordingly. This would never be a problem for archs
 * that have fixed length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_lock held for read or write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct uprobe *uprobe;
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret, is_register, ref_ctr_updated = 0;
	bool orig_page_huge = false;
	unsigned int gup_flags = FOLL_FORCE;

	is_register = is_swbp_insn(&opcode);
	uprobe = container_of(auprobe, struct uprobe, arch);

retry:
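	/*
	 * When registering, split any huge PMD mapping first so that only
	 * the 4K page containing the probed instruction is COWed below.
	 */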
	if (is_register)
		gup_flags |= FOLL_SPLIT_PMD;
	/* Read the page with vaddr into memory */
	old_page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma);
	if (IS_ERR(old_page))
		return PTR_ERR(old_page);

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	if (WARN(!is_register && PageCompound(old_page),
		 "uprobe unregister should never work on compound page\n")) {
		ret = -EINVAL;
		goto put_old;
	}

	/* We are going to replace the instruction, so update the ref_ctr. */
	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
		if (ret)
			goto put_old;

		ref_ctr_updated = 1;
	}

	ret = 0;
	if (!is_register && !PageAnon(old_page))
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	if (!is_register) {
		struct page *orig_page;
		pgoff_t index;

		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);

		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
					  index);

		if (orig_page) {
			if (PageUptodate(orig_page) &&
			    pages_identical(new_page, orig_page)) {
				/* let go of new_page */
				put_page(new_page);
				new_page = NULL;

				if (PageCompound(orig_page))
					orig_page_huge = true;
			}
			put_page(orig_page);
		}
	}

	ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page);
	if (new_page)
		put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;

	/* Revert the reference counter if the instruction update failed. */
	if (ret && is_register && ref_ctr_updated)
		update_ref_ctr(uprobe, mm, -1);

	/* try to collapse the pmd for a compound page */
	if (!ret && orig_page_huge)
		collapse_pte_mapped_thp(mm, vaddr, false);

	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr,
			*(uprobe_opcode_t *)&auprobe->insn);
}

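/*
 * Both helpers above are __weak defaults; an architecture may override
 * set_swbp()/set_orig_insn() when installing a breakpoint takes more
 * than a single-opcode store.
 */
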
/* uprobe should have guaranteed positive refcount */
static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	refcount_inc(&uprobe->ref);
	return uprobe;
}

/*
 * uprobe should have guaranteed lifetime, which can be either of:
 * - caller already has refcount taken (and wants an extra one);
 * - uprobe is RCU protected and won't be freed until after grace period;
 * - we are holding uprobes_treelock (for read or write, doesn't matter).
 */
static struct uprobe *try_get_uprobe(struct uprobe *uprobe)
{
	if (refcount_inc_not_zero(&uprobe->ref))
		return uprobe;
	return NULL;
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}

static void uprobe_free_rcu(struct rcu_head *rcu)
{
	struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);

	kfree(uprobe);
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (!refcount_dec_and_test(&uprobe->ref))
		return;

	write_lock(&uprobes_treelock);

	if (uprobe_is_active(uprobe)) {
		write_seqcount_begin(&uprobes_seqcount);
		rb_erase(&uprobe->rb_node, &uprobes_tree);
		write_seqcount_end(&uprobes_seqcount);
	}

	write_unlock(&uprobes_treelock);

	/*
	 * If the application does munmap(exec_vma) before uprobe_unregister()
	 * gets called, we don't get a chance to remove the uprobe from
	 * delayed_uprobe_list from remove_breakpoint(). Do it here.
	 */
	mutex_lock(&delayed_uprobe_lock);
	delayed_uprobe_remove(uprobe, NULL);
	mutex_unlock(&delayed_uprobe_lock);

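	/*
	 * Defer the actual kfree() past an SRCU grace period:
	 * find_uprobe_rcu() hands out uprobes without taking a refcount,
	 * so a lookup racing with this release may still be using one.
	 */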
	call_srcu(&uprobes_srcu, &uprobe->rcu, uprobe_free_rcu);
}

static __always_inline
int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
	       const struct uprobe *r)
{
	if (l_inode < r->inode)
		return -1;

	if (l_inode > r->inode)
		return 1;

	if (l_offset < r->offset)
		return -1;

	if (l_offset > r->offset)
		return 1;

	return 0;
}

#define __node_2_uprobe(node) \
	rb_entry((node), struct uprobe, rb_node)

struct __uprobe_key {
	struct inode *inode;
	loff_t offset;
};

static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
{
	const struct __uprobe_key *a = key;
	return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
}

static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
{
	struct uprobe *u = __node_2_uprobe(a);
	return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}

/*
 * Assumes being inside an RCU protected region.
 * No refcount is taken on the returned uprobe.
 */
static struct uprobe *find_uprobe_rcu(struct inode *inode, loff_t offset)
{
	struct __uprobe_key key = {
		.inode = inode,
		.offset = offset,
	};
	struct rb_node *node;
	unsigned int seq;

	lockdep_assert(srcu_read_lock_held(&uprobes_srcu));

	do {
		seq = read_seqcount_begin(&uprobes_seqcount);
		node = rb_find_rcu(&key, &uprobes_tree, __uprobe_cmp_key);
		/*
		 * Lockless RB-tree lookups can result only in false negatives.
		 * If the element is found, it is correct and can be returned
		 * under RCU protection. If we find nothing, we need to
		 * validate that seqcount didn't change. If it did, we have to
		 * try again as we might have missed the element (false
		 * negative). If seqcount is unchanged, the search truly failed.
		 */
		if (node)
			return __node_2_uprobe(node);
	} while (read_seqcount_retry(&uprobes_seqcount, seq));

	return NULL;
}

/*
 * Attempt to insert a new uprobe into uprobes_tree.
 *
 * If a uprobe already exists (for the given inode+offset), we just increment
 * the refcount of the previously existing uprobe.
 *
 * If not, the provided new instance of uprobe is inserted into the tree (with
 * an assumed initial refcount == 1).
 *
 * In any case, we return a uprobe instance that ends up being in uprobes_tree.
 * The caller has to clean up the new uprobe instance, if it ended up not being
 * inserted into the tree.
 *
 * We assume that uprobes_treelock is held for writing.
 */
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node *node;
again:
	node = rb_find_add_rcu(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
	if (node) {
		struct uprobe *u = __node_2_uprobe(node);

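		/*
		 * A zero refcount means the uprobe is being released and
		 * its put_uprobe() has not erased the node yet (it is
		 * waiting on uprobes_treelock, which we hold for write).
		 * Evict the stale node ourselves and retry the insertion.
		 */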
		if (!try_get_uprobe(u)) {
			rb_erase(node, &uprobes_tree);
			RB_CLEAR_NODE(&u->rb_node);
			goto again;
		}

		return u;
	}

	return uprobe;
}

/*
 * Acquire uprobes_treelock and insert uprobe into uprobes_tree
 * (or reuse an existing one, see __insert_uprobe() comments above).
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	write_lock(&uprobes_treelock);
	write_seqcount_begin(&uprobes_seqcount);
	u = __insert_uprobe(uprobe);
	write_seqcount_end(&uprobes_seqcount);
	write_unlock(&uprobes_treelock);

	return u;
}

static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
		(unsigned long long) cur_uprobe->ref_ctr_offset,
		(unsigned long long) uprobe->ref_ctr_offset);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
				   loff_t ref_ctr_offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return ERR_PTR(-ENOMEM);

	uprobe->inode = inode;
	uprobe->offset = offset;
	uprobe->ref_ctr_offset = ref_ctr_offset;
	INIT_LIST_HEAD(&uprobe->consumers);
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);
	RB_CLEAR_NODE(&uprobe->rb_node);
	refcount_set(&uprobe->ref, 1);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe != uprobe) {
		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
			put_uprobe(cur_uprobe);
			kfree(uprobe);
			return ERR_PTR(-EINVAL);
		}
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	list_add_rcu(&uc->cons_node, &uprobe->consumers);
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Should never be called with a consumer that's not part of
 * @uprobe->consumers.
 */
static void consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	list_del_rcu(&uc->cons_node);
	up_write(&uprobe->consumer_rwsem);
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->read_folio)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, mm);
}

Andrii Nakryiko | 59da880 | 2024-09-03 10:45:58 -0700 | [diff] [blame] | 931 | static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm) |
Oleg Nesterov | 63633cb | 2012-11-22 18:30:15 +0100 | [diff] [blame] | 932 | { |
Oleg Nesterov | 1ff6fee | 2012-11-24 18:15:46 +0100 | [diff] [blame] | 933 | struct uprobe_consumer *uc; |
| 934 | bool ret = false; |
| 935 | |
| 936 | down_read(&uprobe->consumer_rwsem); |
Andrii Nakryiko | cc01bd0 | 2024-09-03 10:45:59 -0700 | [diff] [blame] | 937 | list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node, |
| 938 | srcu_read_lock_held(&uprobes_srcu)) { |
Andrii Nakryiko | 59da880 | 2024-09-03 10:45:58 -0700 | [diff] [blame] | 939 | ret = consumer_filter(uc, mm); |
Oleg Nesterov | 1ff6fee | 2012-11-24 18:15:46 +0100 | [diff] [blame] | 940 | if (ret) |
| 941 | break; |
| 942 | } |
| 943 | up_read(&uprobe->consumer_rwsem); |
| 944 | |
| 945 | return ret; |
Oleg Nesterov | 63633cb | 2012-11-22 18:30:15 +0100 | [diff] [blame] | 946 | } |
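
/*
 * Illustrative sketch (not part of this file): a consumer ->filter
 * callback shaped the way consumer_filter() invokes it above. It
 * receives the consumer and the candidate mm, and returns true iff the
 * breakpoint should be installed into (or kept in) that mm. The
 * my_consumer container and its target_mm field are hypothetical:
 *
 *	struct my_consumer {
 *		struct uprobe_consumer	uc;
 *		struct mm_struct	*target_mm;
 *	};
 *
 *	static bool my_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
 *	{
 *		struct my_consumer *mc = container_of(uc, struct my_consumer, uc);
 *
 *		return mm == mc->target_mm;	// probe a single address space
 *	}
 */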
| 947 | |
Srikar Dronamraju | e3343e6 | 2012-03-12 14:55:30 +0530 | [diff] [blame] | 948 | static int |
| 949 | install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, |
Oleg Nesterov | 816c03f | 2012-06-15 17:43:55 +0200 | [diff] [blame] | 950 | struct vm_area_struct *vma, unsigned long vaddr) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 951 | { |
Oleg Nesterov | f8ac4ec | 2012-08-08 17:11:42 +0200 | [diff] [blame] | 952 | bool first_uprobe; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 953 | int ret; |
| 954 | |
Oleg Nesterov | cb9a19f | 2012-09-30 20:11:45 +0200 | [diff] [blame] | 955 | ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); |
| 956 | if (ret) |
| 957 | return ret; |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 958 | |
Oleg Nesterov | f8ac4ec | 2012-08-08 17:11:42 +0200 | [diff] [blame] | 959 | /* |
| 960 | * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier();
| 961 | * the task can hit this breakpoint right after __replace_page(). |
| 962 | */ |
| 963 | first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags); |
| 964 | if (first_uprobe) |
| 965 | set_bit(MMF_HAS_UPROBES, &mm->flags); |
| 966 | |
Oleg Nesterov | 816c03f | 2012-06-15 17:43:55 +0200 | [diff] [blame] | 967 | ret = set_swbp(&uprobe->arch, mm, vaddr); |
Oleg Nesterov | 9f68f672 | 2012-08-19 16:15:09 +0200 | [diff] [blame] | 968 | if (!ret) |
| 969 | clear_bit(MMF_RECALC_UPROBES, &mm->flags); |
| 970 | else if (first_uprobe) |
Oleg Nesterov | f8ac4ec | 2012-08-08 17:11:42 +0200 | [diff] [blame] | 971 | clear_bit(MMF_HAS_UPROBES, &mm->flags); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 972 | |
| 973 | return ret; |
| 974 | } |
| 975 | |
Oleg Nesterov | 076a365 | 2012-09-30 18:54:53 +0200 | [diff] [blame] | 976 | static int |
Oleg Nesterov | 816c03f | 2012-06-15 17:43:55 +0200 | [diff] [blame] | 977 | remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 978 | { |
Oleg Nesterov | 9f68f672 | 2012-08-19 16:15:09 +0200 | [diff] [blame] | 979 | set_bit(MMF_RECALC_UPROBES, &mm->flags); |
Oleg Nesterov | 076a365 | 2012-09-30 18:54:53 +0200 | [diff] [blame] | 980 | return set_orig_insn(&uprobe->arch, mm, vaddr); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 981 | } |
| 982 | |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 983 | struct map_info { |
| 984 | struct map_info *next; |
| 985 | struct mm_struct *mm; |
Oleg Nesterov | 816c03f | 2012-06-15 17:43:55 +0200 | [diff] [blame] | 986 | unsigned long vaddr; |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 987 | }; |
| 988 | |
| 989 | static inline struct map_info *free_map_info(struct map_info *info) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 990 | { |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 991 | struct map_info *next = info->next; |
| 992 | kfree(info); |
| 993 | return next; |
| 994 | } |
| 995 | |
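/*
 * Collect a (mm, vaddr) pair for every mm that maps @mapping at @offset.
 * Allocation is two-phase: under i_mmap_lock_read() only GFP_NOWAIT is
 * safe, so vmas we could not allocate a map_info for are counted in
 * 'more'; the lock is then dropped, the missing entries are preallocated
 * with GFP_KERNEL, and the walk is retried ("goto again") until no vma
 * is missed.
 */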
| 996 | static struct map_info * |
| 997 | build_map_info(struct address_space *mapping, loff_t offset, bool is_register) |
| 998 | { |
| 999 | unsigned long pgoff = offset >> PAGE_SHIFT; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1000 | struct vm_area_struct *vma; |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1001 | struct map_info *curr = NULL; |
| 1002 | struct map_info *prev = NULL; |
| 1003 | struct map_info *info; |
| 1004 | int more = 0; |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1005 | |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1006 | again: |
Davidlohr Bueso | 4a23717a | 2014-12-12 16:54:30 -0800 | [diff] [blame] | 1007 | i_mmap_lock_read(mapping); |
Michel Lespinasse | 6b2dbba | 2012-10-08 16:31:25 -0700 | [diff] [blame] | 1008 | vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1009 | if (!valid_vma(vma, is_register)) |
| 1010 | continue; |
| 1011 | |
Oleg Nesterov | 7a5bfb6 | 2012-06-15 17:43:36 +0200 | [diff] [blame] | 1012 | if (!prev && !more) { |
| 1013 | /* |
Davidlohr Bueso | c8c06ef | 2014-12-12 16:54:24 -0800 | [diff] [blame] | 1014 | * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through |
Oleg Nesterov | 7a5bfb6 | 2012-06-15 17:43:36 +0200 | [diff] [blame] | 1015 | * reclaim. This is optimistic; no harm done if it fails.
| 1016 | */ |
| 1017 | prev = kmalloc(sizeof(struct map_info), |
| 1018 | GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN); |
| 1019 | if (prev) |
| 1020 | prev->next = NULL; |
| 1021 | } |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1022 | if (!prev) { |
| 1023 | more++; |
| 1024 | continue; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1025 | } |
| 1026 | |
Vegard Nossum | 388f793 | 2017-02-27 14:30:13 -0800 | [diff] [blame] | 1027 | if (!mmget_not_zero(vma->vm_mm)) |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1028 | continue; |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1029 | |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1030 | info = prev; |
| 1031 | prev = prev->next; |
| 1032 | info->next = curr; |
| 1033 | curr = info; |
| 1034 | |
| 1035 | info->mm = vma->vm_mm; |
Oleg Nesterov | 57683f7 | 2012-07-29 20:22:47 +0200 | [diff] [blame] | 1036 | info->vaddr = offset_to_vaddr(vma, offset); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1037 | } |
Davidlohr Bueso | 4a23717a | 2014-12-12 16:54:30 -0800 | [diff] [blame] | 1038 | i_mmap_unlock_read(mapping); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1039 | |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1040 | if (!more) |
| 1041 | goto out; |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1042 | |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1043 | prev = curr; |
| 1044 | while (curr) { |
| 1045 | mmput(curr->mm); |
| 1046 | curr = curr->next; |
| 1047 | } |
| 1048 | |
| 1049 | do { |
| 1050 | info = kmalloc(sizeof(struct map_info), GFP_KERNEL); |
| 1051 | if (!info) { |
| 1052 | curr = ERR_PTR(-ENOMEM); |
| 1053 | goto out; |
| 1054 | } |
| 1055 | info->next = prev; |
| 1056 | prev = info; |
| 1057 | } while (--more); |
| 1058 | |
| 1059 | goto again; |
| 1060 | out: |
| 1061 | while (prev) |
| 1062 | prev = free_map_info(prev); |
| 1063 | return curr; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1064 | } |
| 1065 | |
Oleg Nesterov | bdf8647 | 2013-02-03 19:21:12 +0100 | [diff] [blame] | 1066 | static int |
| 1067 | register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1068 | { |
Oleg Nesterov | bdf8647 | 2013-02-03 19:21:12 +0100 | [diff] [blame] | 1069 | bool is_register = !!new; |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1070 | struct map_info *info; |
| 1071 | int err = 0; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1072 | |
Oleg Nesterov | 32cdba1 | 2012-11-14 19:03:42 +0100 | [diff] [blame] | 1073 | percpu_down_write(&dup_mmap_sem); |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1074 | info = build_map_info(uprobe->inode->i_mapping, |
| 1075 | uprobe->offset, is_register); |
Oleg Nesterov | 32cdba1 | 2012-11-14 19:03:42 +0100 | [diff] [blame] | 1076 | if (IS_ERR(info)) { |
| 1077 | err = PTR_ERR(info); |
| 1078 | goto out; |
| 1079 | } |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1080 | |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1081 | while (info) { |
| 1082 | struct mm_struct *mm = info->mm; |
| 1083 | struct vm_area_struct *vma; |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1084 | |
Oleg Nesterov | 076a365 | 2012-09-30 18:54:53 +0200 | [diff] [blame] | 1085 | if (err && is_register) |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1086 | goto free; |
Oleg Nesterov | 84455e6 | 2024-08-01 15:27:09 +0200 | [diff] [blame] | 1087 | /* |
| 1088 | * We take mmap_lock for writing to avoid the race with |
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 1089 | * find_active_uprobe_rcu() which takes mmap_lock for reading. |
Oleg Nesterov | 84455e6 | 2024-08-01 15:27:09 +0200 | [diff] [blame] | 1090 | * Thus this install_breakpoint() cannot make
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 1091 | * is_trap_at_addr() true right after find_uprobe_rcu() |
| 1092 | * returns NULL in find_active_uprobe_rcu(). |
Oleg Nesterov | 84455e6 | 2024-08-01 15:27:09 +0200 | [diff] [blame] | 1093 | */ |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1094 | mmap_write_lock(mm); |
Oleg Nesterov | f4d6dfe | 2012-07-29 20:22:44 +0200 | [diff] [blame] | 1095 | vma = find_vma(mm, info->vaddr); |
| 1096 | if (!vma || !valid_vma(vma, is_register) || |
Oleg Nesterov | f281769 | 2013-03-17 18:54:44 +0100 | [diff] [blame] | 1097 | file_inode(vma->vm_file) != uprobe->inode) |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1098 | goto unlock; |
| 1099 | |
Oleg Nesterov | f4d6dfe | 2012-07-29 20:22:44 +0200 | [diff] [blame] | 1100 | if (vma->vm_start > info->vaddr || |
| 1101 | vaddr_to_offset(vma, info->vaddr) != uprobe->offset) |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1102 | goto unlock; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1103 | |
Oleg Nesterov | 806a98b | 2012-12-27 18:21:11 +0100 | [diff] [blame] | 1104 | if (is_register) { |
| 1105 | /* consult only the "caller", i.e. the new consumer. */
Andrii Nakryiko | 59da880 | 2024-09-03 10:45:58 -0700 | [diff] [blame] | 1106 | if (consumer_filter(new, mm)) |
Oleg Nesterov | 806a98b | 2012-12-27 18:21:11 +0100 | [diff] [blame] | 1107 | err = install_breakpoint(uprobe, mm, vma, info->vaddr); |
| 1108 | } else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) { |
Andrii Nakryiko | 59da880 | 2024-09-03 10:45:58 -0700 | [diff] [blame] | 1109 | if (!filter_chain(uprobe, mm)) |
Oleg Nesterov | 806a98b | 2012-12-27 18:21:11 +0100 | [diff] [blame] | 1110 | err |= remove_breakpoint(uprobe, mm, info->vaddr); |
| 1111 | } |
Oleg Nesterov | 78f7411 | 2012-08-08 17:35:08 +0200 | [diff] [blame] | 1112 | |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1113 | unlock: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1114 | mmap_write_unlock(mm); |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1115 | free: |
| 1116 | mmput(mm); |
| 1117 | info = free_map_info(info); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1118 | } |
Oleg Nesterov | 32cdba1 | 2012-11-14 19:03:42 +0100 | [diff] [blame] | 1119 | out: |
| 1120 | percpu_up_write(&dup_mmap_sem); |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1121 | return err; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1122 | } |
| 1123 | |
Oleg Nesterov | 3c83a9a | 2024-08-01 15:27:34 +0200 | [diff] [blame] | 1124 | /** |
Peter Zijlstra | 04b0162 | 2024-09-03 10:46:00 -0700 | [diff] [blame] | 1125 | * uprobe_unregister_nosync - unregister an already registered probe. |
Oleg Nesterov | 3c83a9a | 2024-08-01 15:27:34 +0200 | [diff] [blame] | 1126 | * @uprobe: uprobe to remove |
Ravi Bangoria | 38e967a | 2018-08-09 09:48:51 +0530 | [diff] [blame] | 1127 | * @uc: identify which consumer to remove if multiple consumers are colocated.
| 1128 | */ |
Peter Zijlstra | 04b0162 | 2024-09-03 10:46:00 -0700 | [diff] [blame] | 1129 | void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc) |
Ravi Bangoria | 38e967a | 2018-08-09 09:48:51 +0530 | [diff] [blame] | 1130 | { |
Oleg Nesterov | 70408be | 2024-08-01 15:27:44 +0200 | [diff] [blame] | 1131 | int err; |
| 1132 | |
Ravi Bangoria | 38e967a | 2018-08-09 09:48:51 +0530 | [diff] [blame] | 1133 | down_write(&uprobe->register_rwsem); |
Andrii Nakryiko | cc01bd0 | 2024-09-03 10:45:59 -0700 | [diff] [blame] | 1134 | consumer_del(uprobe, uc); |
| 1135 | err = register_for_each_vma(uprobe, NULL); |
Ravi Bangoria | 38e967a | 2018-08-09 09:48:51 +0530 | [diff] [blame] | 1136 | up_write(&uprobe->register_rwsem); |
Oleg Nesterov | 12026d20 | 2024-08-01 15:27:49 +0200 | [diff] [blame] | 1137 | |
Andrii Nakryiko | cc01bd0 | 2024-09-03 10:45:59 -0700 | [diff] [blame] | 1138 | /* TODO: can't unregister? schedule a worker thread */
| 1139 | if (unlikely(err)) { |
| 1140 | uprobe_warn(current, "unregister, leaking uprobe"); |
Peter Zijlstra | 04b0162 | 2024-09-03 10:46:00 -0700 | [diff] [blame] | 1141 | return; |
Andrii Nakryiko | cc01bd0 | 2024-09-03 10:45:59 -0700 | [diff] [blame] | 1142 | } |
| 1143 | |
| 1144 | put_uprobe(uprobe); |
Peter Zijlstra | 04b0162 | 2024-09-03 10:46:00 -0700 | [diff] [blame] | 1145 | } |
| 1146 | EXPORT_SYMBOL_GPL(uprobe_unregister_nosync); |
Andrii Nakryiko | cc01bd0 | 2024-09-03 10:45:59 -0700 | [diff] [blame] | 1147 | |
Peter Zijlstra | 04b0162 | 2024-09-03 10:46:00 -0700 | [diff] [blame] | 1148 | void uprobe_unregister_sync(void) |
| 1149 | { |
Andrii Nakryiko | cc01bd0 | 2024-09-03 10:45:59 -0700 | [diff] [blame] | 1150 | /* |
| 1151 | * Now that handler_chain() and handle_uretprobe_chain() iterate over |
| 1152 | * uprobe->consumers list under RCU protection without holding |
| 1153 | * uprobe->register_rwsem, we need to wait for RCU grace period to |
| 1154 | * make sure that we can't call into just unregistered |
| 1155 | * uprobe_consumer's callbacks anymore. If we don't do that, a fast and
| 1156 | * unlucky enough caller could free the consumer's memory and cause
| 1157 | * handler_chain() or handle_uretprobe_chain() to do a use-after-free.
| 1158 | */ |
| 1159 | synchronize_srcu(&uprobes_srcu); |
Ravi Bangoria | 38e967a | 2018-08-09 09:48:51 +0530 | [diff] [blame] | 1160 | } |
Peter Zijlstra | 04b0162 | 2024-09-03 10:46:00 -0700 | [diff] [blame] | 1161 | EXPORT_SYMBOL_GPL(uprobe_unregister_sync); |
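
/*
 * Usage sketch (hypothetical caller, not part of this file): tearing a
 * probe down is a two-step dance so the consumer's memory is freed only
 * after the SRCU grace period above has elapsed:
 *
 *	uprobe_unregister_nosync(u, &mc->uc);	// detach the consumer
 *	uprobe_unregister_sync();		// wait out in-flight handlers
 *	kfree(mc);				// now safe to free the consumer
 *
 * where 'u' is the struct uprobe returned by uprobe_register() and 'mc'
 * is a hypothetical container embedding the uprobe_consumer.
 */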
Ravi Bangoria | 38e967a | 2018-08-09 09:48:51 +0530 | [diff] [blame] | 1162 | |
Oleg Nesterov | 3c83a9a | 2024-08-01 15:27:34 +0200 | [diff] [blame] | 1163 | /** |
Oleg Nesterov | e04332e | 2024-08-01 15:27:28 +0200 | [diff] [blame] | 1164 | * uprobe_register - register a probe |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1165 | * @inode: the file in which the probe has to be placed. |
| 1166 | * @offset: offset from the start of the file. |
Oleg Nesterov | e04332e | 2024-08-01 15:27:28 +0200 | [diff] [blame] | 1167 | * @ref_ctr_offset: offset of SDT marker / reference counter |
Srikar Dronamraju | e3343e6 | 2012-03-12 14:55:30 +0530 | [diff] [blame] | 1168 | * @uc: information on how to handle the probe.
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1169 | * |
Oleg Nesterov | e04332e | 2024-08-01 15:27:28 +0200 | [diff] [blame] | 1170 | * Apart from the access refcount, uprobe_register() takes a creation |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1171 | * refcount (through alloc_uprobe) if and only if this @uprobe is getting
| 1172 | * inserted into the rbtree (i.e. first consumer for a @inode:@offset
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1173 | * tuple). Creation refcount stops uprobe_unregister from freeing the |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1174 | * @uprobe even before the register operation is complete. Creation |
Srikar Dronamraju | e3343e6 | 2012-03-12 14:55:30 +0530 | [diff] [blame] | 1175 | * refcount is released when the last @uc for the @uprobe |
Oleg Nesterov | e04332e | 2024-08-01 15:27:28 +0200 | [diff] [blame] | 1176 | * unregisters. Caller of uprobe_register() is required to keep @inode |
Song Liu | 61f9420 | 2018-04-23 10:21:35 -0700 | [diff] [blame] | 1177 | * (and the containing mount) referenced. |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1178 | * |
Oleg Nesterov | 3c83a9a | 2024-08-01 15:27:34 +0200 | [diff] [blame] | 1179 | * Return: pointer to the new uprobe on success or an ERR_PTR on failure. |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1180 | */ |
Oleg Nesterov | 3c83a9a | 2024-08-01 15:27:34 +0200 | [diff] [blame] | 1181 | struct uprobe *uprobe_register(struct inode *inode, |
| 1182 | loff_t offset, loff_t ref_ctr_offset, |
| 1183 | struct uprobe_consumer *uc) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1184 | { |
| 1185 | struct uprobe *uprobe; |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1186 | int ret; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1187 | |
Anton Arapov | ea02487 | 2013-04-03 18:00:31 +0200 | [diff] [blame] | 1188 | /* The consumer must have at least one handler set */
| 1189 | if (!uc->handler && !uc->ret_handler) |
Oleg Nesterov | 3c83a9a | 2024-08-01 15:27:34 +0200 | [diff] [blame] | 1190 | return ERR_PTR(-EINVAL); |
Anton Arapov | ea02487 | 2013-04-03 18:00:31 +0200 | [diff] [blame] | 1191 | |
Oleg Nesterov | 40814f6 | 2014-05-19 20:41:36 +0200 | [diff] [blame] | 1192 | /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */ |
Matthew Wilcox (Oracle) | 5efe744 | 2022-04-29 08:43:23 -0400 | [diff] [blame] | 1193 | if (!inode->i_mapping->a_ops->read_folio && |
Matthew Wilcox (Oracle) | 5efe744 | 2022-04-29 08:43:23 -0400 | [diff] [blame] | 1194 | !shmem_mapping(inode->i_mapping)) |
Oleg Nesterov | 3c83a9a | 2024-08-01 15:27:34 +0200 | [diff] [blame] | 1195 | return ERR_PTR(-EIO); |
Oleg Nesterov | f0744af | 2012-11-21 18:01:43 +0100 | [diff] [blame] | 1196 | /* Racy, just to catch the obvious mistakes */ |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1197 | if (offset > i_size_read(inode)) |
Oleg Nesterov | 3c83a9a | 2024-08-01 15:27:34 +0200 | [diff] [blame] | 1198 | return ERR_PTR(-EINVAL); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1199 | |
Oleg Nesterov | 013b2de | 2020-05-04 18:47:25 +0200 | [diff] [blame] | 1200 | /* |
| 1201 | * This ensures that copy_from_page(), copy_to_page() and |
| 1202 | * __update_ref_ctr() can't cross a page boundary.
| 1203 | */ |
| 1204 | if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE)) |
Oleg Nesterov | 3c83a9a | 2024-08-01 15:27:34 +0200 | [diff] [blame] | 1205 | return ERR_PTR(-EINVAL); |
Oleg Nesterov | 013b2de | 2020-05-04 18:47:25 +0200 | [diff] [blame] | 1206 | if (!IS_ALIGNED(ref_ctr_offset, sizeof(short))) |
Oleg Nesterov | 3c83a9a | 2024-08-01 15:27:34 +0200 | [diff] [blame] | 1207 | return ERR_PTR(-EINVAL); |
Oleg Nesterov | 013b2de | 2020-05-04 18:47:25 +0200 | [diff] [blame] | 1208 | |
Ravi Bangoria | 1cc3316 | 2018-08-20 10:12:47 +0530 | [diff] [blame] | 1209 | uprobe = alloc_uprobe(inode, offset, ref_ctr_offset); |
Ravi Bangoria | 22bad38 | 2018-08-20 10:12:48 +0530 | [diff] [blame] | 1210 | if (IS_ERR(uprobe)) |
Oleg Nesterov | 3c83a9a | 2024-08-01 15:27:34 +0200 | [diff] [blame] | 1211 | return uprobe; |
Ravi Bangoria | 22bad38 | 2018-08-20 10:12:48 +0530 | [diff] [blame] | 1212 | |
Oleg Nesterov | 66d06df | 2012-11-25 22:48:37 +0100 | [diff] [blame] | 1213 | down_write(&uprobe->register_rwsem); |
Andrii Nakryiko | 3f7f1a6 | 2024-09-03 10:45:56 -0700 | [diff] [blame] | 1214 | consumer_add(uprobe, uc); |
| 1215 | ret = register_for_each_vma(uprobe, uc); |
Oleg Nesterov | 66d06df | 2012-11-25 22:48:37 +0100 | [diff] [blame] | 1216 | up_write(&uprobe->register_rwsem); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1217 | |
Oleg Nesterov | bb18c5d | 2024-08-01 15:27:39 +0200 | [diff] [blame] | 1218 | if (ret) { |
Peter Zijlstra | 04b0162 | 2024-09-03 10:46:00 -0700 | [diff] [blame] | 1219 | uprobe_unregister_nosync(uprobe, uc); |
| 1220 | /* |
| 1221 | * Registration might have partially succeeded, so this
| 1222 | * consumer can be called right at this moment. We need to
| 1223 | * sync here. That's OK; this is an unlikely slow path.
| 1224 | */ |
| 1225 | uprobe_unregister_sync(); |
Oleg Nesterov | bb18c5d | 2024-08-01 15:27:39 +0200 | [diff] [blame] | 1226 | return ERR_PTR(ret); |
| 1227 | } |
Oleg Nesterov | 3c83a9a | 2024-08-01 15:27:34 +0200 | [diff] [blame] | 1228 | |
Oleg Nesterov | bb18c5d | 2024-08-01 15:27:39 +0200 | [diff] [blame] | 1229 | return uprobe; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1230 | } |
Josh Stone | e8440c1 | 2013-01-13 19:03:34 +0100 | [diff] [blame] | 1231 | EXPORT_SYMBOL_GPL(uprobe_register); |
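
/*
 * Usage sketch (hypothetical caller, not part of this file): registering
 * a probe on an inode the caller keeps referenced, as required above.
 * At least one handler must be set (see the -EINVAL check); the exact
 * handler prototypes live in include/linux/uprobes.h and are assumed
 * here:
 *
 *	static struct my_consumer mc = {
 *		.uc.handler	= my_handler,	// hypothetical callback
 *		.uc.filter	= my_filter,	// optional, may be NULL
 *	};
 *
 *	struct uprobe *u = uprobe_register(inode, offset, 0, &mc.uc);
 *	if (IS_ERR(u))
 *		return PTR_ERR(u);
 *
 * A ref_ctr_offset of 0 means the probe site has no SDT reference
 * counter (semaphore) to flip.
 */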
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1232 | |
Oleg Nesterov | 3c83a9a | 2024-08-01 15:27:34 +0200 | [diff] [blame] | 1233 | /** |
| 1234 | * uprobe_apply - add or remove the breakpoints according to @uc->filter |
| 1235 | * @uprobe: uprobe which "owns" the breakpoint |
Oleg Nesterov | bdf8647 | 2013-02-03 19:21:12 +0100 | [diff] [blame] | 1236 | * @uc: consumer which wants to add more or remove some breakpoints |
| 1237 | * @add: add or remove the breakpoints |
Oleg Nesterov | 3c83a9a | 2024-08-01 15:27:34 +0200 | [diff] [blame] | 1238 | * Return: 0 on success or negative error code. |
Oleg Nesterov | bdf8647 | 2013-02-03 19:21:12 +0100 | [diff] [blame] | 1239 | */ |
Oleg Nesterov | 3c83a9a | 2024-08-01 15:27:34 +0200 | [diff] [blame] | 1240 | int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add) |
Oleg Nesterov | bdf8647 | 2013-02-03 19:21:12 +0100 | [diff] [blame] | 1241 | { |
Oleg Nesterov | bdf8647 | 2013-02-03 19:21:12 +0100 | [diff] [blame] | 1242 | struct uprobe_consumer *con; |
Andrii Nakryiko | cc01bd0 | 2024-09-03 10:45:59 -0700 | [diff] [blame] | 1243 | int ret = -ENOENT, srcu_idx; |
Oleg Nesterov | bdf8647 | 2013-02-03 19:21:12 +0100 | [diff] [blame] | 1244 | |
Oleg Nesterov | bdf8647 | 2013-02-03 19:21:12 +0100 | [diff] [blame] | 1245 | down_write(&uprobe->register_rwsem); |
Andrii Nakryiko | cc01bd0 | 2024-09-03 10:45:59 -0700 | [diff] [blame] | 1246 | |
| 1247 | srcu_idx = srcu_read_lock(&uprobes_srcu); |
| 1248 | list_for_each_entry_srcu(con, &uprobe->consumers, cons_node, |
| 1249 | srcu_read_lock_held(&uprobes_srcu)) { |
| 1250 | if (con == uc) { |
| 1251 | ret = register_for_each_vma(uprobe, add ? uc : NULL); |
| 1252 | break; |
| 1253 | } |
| 1254 | } |
| 1255 | srcu_read_unlock(&uprobes_srcu, srcu_idx); |
| 1256 | |
Oleg Nesterov | bdf8647 | 2013-02-03 19:21:12 +0100 | [diff] [blame] | 1257 | up_write(&uprobe->register_rwsem); |
Oleg Nesterov | bdf8647 | 2013-02-03 19:21:12 +0100 | [diff] [blame] | 1258 | |
| 1259 | return ret; |
| 1260 | } |
| 1261 | |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 1262 | static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm) |
| 1263 | { |
Matthew Wilcox (Oracle) | fcb72a5 | 2022-09-06 19:48:58 +0000 | [diff] [blame] | 1264 | VMA_ITERATOR(vmi, mm, 0); |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 1265 | struct vm_area_struct *vma; |
| 1266 | int err = 0; |
| 1267 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1268 | mmap_read_lock(mm); |
Matthew Wilcox (Oracle) | fcb72a5 | 2022-09-06 19:48:58 +0000 | [diff] [blame] | 1269 | for_each_vma(vmi, vma) { |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 1270 | unsigned long vaddr; |
| 1271 | loff_t offset; |
| 1272 | |
| 1273 | if (!valid_vma(vma, false) || |
Oleg Nesterov | f281769 | 2013-03-17 18:54:44 +0100 | [diff] [blame] | 1274 | file_inode(vma->vm_file) != uprobe->inode) |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 1275 | continue; |
| 1276 | |
| 1277 | offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT; |
| 1278 | if (uprobe->offset < offset || |
| 1279 | uprobe->offset >= offset + vma->vm_end - vma->vm_start) |
| 1280 | continue; |
| 1281 | |
| 1282 | vaddr = offset_to_vaddr(vma, uprobe->offset); |
| 1283 | err |= remove_breakpoint(uprobe, mm, vaddr); |
| 1284 | } |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1285 | mmap_read_unlock(mm); |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 1286 | |
| 1287 | return err; |
| 1288 | } |
| 1289 | |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1290 | static struct rb_node * |
| 1291 | find_node_in_range(struct inode *inode, loff_t min, loff_t max) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1292 | { |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1293 | struct rb_node *n = uprobes_tree.rb_node; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1294 | |
| 1295 | while (n) { |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1296 | struct uprobe *u = rb_entry(n, struct uprobe, rb_node); |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1297 | |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1298 | if (inode < u->inode) { |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1299 | n = n->rb_left; |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1300 | } else if (inode > u->inode) { |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1301 | n = n->rb_right; |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1302 | } else { |
| 1303 | if (max < u->offset) |
| 1304 | n = n->rb_left; |
| 1305 | else if (min > u->offset) |
| 1306 | n = n->rb_right; |
| 1307 | else |
| 1308 | break; |
| 1309 | } |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1310 | } |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1311 | |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1312 | return n; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1313 | } |
| 1314 | |
| 1315 | /* |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1316 | * For a given range in vma, build a list of probes that need to be inserted. |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1317 | */ |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1318 | static void build_probe_list(struct inode *inode, |
| 1319 | struct vm_area_struct *vma, |
| 1320 | unsigned long start, unsigned long end, |
| 1321 | struct list_head *head) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1322 | { |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1323 | loff_t min, max; |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1324 | struct rb_node *n, *t; |
| 1325 | struct uprobe *u; |
| 1326 | |
| 1327 | INIT_LIST_HEAD(head); |
Oleg Nesterov | cb113b4 | 2012-07-29 20:22:42 +0200 | [diff] [blame] | 1328 | min = vaddr_to_offset(vma, start); |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1329 | max = min + (end - start) - 1; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1330 | |
Jonathan Haslam | 0dc7152 | 2024-04-22 03:23:05 -0700 | [diff] [blame] | 1331 | read_lock(&uprobes_treelock); |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1332 | n = find_node_in_range(inode, min, max); |
| 1333 | if (n) { |
| 1334 | for (t = n; t; t = rb_prev(t)) { |
| 1335 | u = rb_entry(t, struct uprobe, rb_node); |
| 1336 | if (u->inode != inode || u->offset < min) |
| 1337 | break; |
Andrii Nakryiko | 3f7f1a6 | 2024-09-03 10:45:56 -0700 | [diff] [blame] | 1338 | /* if uprobe went away, it's safe to ignore it */ |
| 1339 | if (try_get_uprobe(u)) |
| 1340 | list_add(&u->pending_list, head); |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1341 | } |
| 1342 | for (t = n; (t = rb_next(t)); ) { |
| 1343 | u = rb_entry(t, struct uprobe, rb_node); |
| 1344 | if (u->inode != inode || u->offset > max) |
| 1345 | break; |
Andrii Nakryiko | 3f7f1a6 | 2024-09-03 10:45:56 -0700 | [diff] [blame] | 1346 | /* if uprobe went away, it's safe to ignore it */ |
| 1347 | if (try_get_uprobe(u)) |
| 1348 | list_add(&u->pending_list, head); |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1349 | } |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1350 | } |
Jonathan Haslam | 0dc7152 | 2024-04-22 03:23:05 -0700 | [diff] [blame] | 1351 | read_unlock(&uprobes_treelock); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1352 | } |
| 1353 | |
Ravi Bangoria | 1cc3316 | 2018-08-20 10:12:47 +0530 | [diff] [blame] | 1354 | /* @vma contains the reference counter, not the probed instruction. */
| 1355 | static int delayed_ref_ctr_inc(struct vm_area_struct *vma) |
| 1356 | { |
| 1357 | struct list_head *pos, *q; |
| 1358 | struct delayed_uprobe *du; |
| 1359 | unsigned long vaddr; |
| 1360 | int ret = 0, err = 0; |
| 1361 | |
| 1362 | mutex_lock(&delayed_uprobe_lock); |
| 1363 | list_for_each_safe(pos, q, &delayed_uprobe_list) { |
| 1364 | du = list_entry(pos, struct delayed_uprobe, list); |
| 1365 | |
| 1366 | if (du->mm != vma->vm_mm || |
| 1367 | !valid_ref_ctr_vma(du->uprobe, vma)) |
| 1368 | continue; |
| 1369 | |
| 1370 | vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset); |
| 1371 | ret = __update_ref_ctr(vma->vm_mm, vaddr, 1); |
| 1372 | if (ret) { |
| 1373 | update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1); |
| 1374 | if (!err) |
| 1375 | err = ret; |
| 1376 | } |
| 1377 | delayed_uprobe_delete(du); |
| 1378 | } |
| 1379 | mutex_unlock(&delayed_uprobe_lock); |
| 1380 | return err; |
| 1381 | } |
| 1382 | |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1383 | /* |
Liam R. Howlett | 0503ea8 | 2023-01-20 11:26:49 -0500 | [diff] [blame] | 1384 | * Called from mmap_region/vma_merge with mm->mmap_lock acquired. |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1385 | * |
Oleg Nesterov | 5e5be71 | 2012-08-06 14:49:56 +0200 | [diff] [blame] | 1386 | * Currently we ignore all errors and always return 0; the callers
| 1387 | * can't handle the failure anyway. |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1388 | */ |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1389 | int uprobe_mmap(struct vm_area_struct *vma) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1390 | { |
| 1391 | struct list_head tmp_list; |
Oleg Nesterov | 665605a | 2012-07-29 20:22:29 +0200 | [diff] [blame] | 1392 | struct uprobe *uprobe, *u; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1393 | struct inode *inode; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1394 | |
Ravi Bangoria | 1cc3316 | 2018-08-20 10:12:47 +0530 | [diff] [blame] | 1395 | if (no_uprobe_events()) |
| 1396 | return 0; |
| 1397 | |
| 1398 | if (vma->vm_file && |
| 1399 | (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE && |
| 1400 | test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags)) |
| 1401 | delayed_ref_ctr_inc(vma); |
| 1402 | |
| 1403 | if (!valid_vma(vma, true)) |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1404 | return 0; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1405 | |
Oleg Nesterov | f281769 | 2013-03-17 18:54:44 +0100 | [diff] [blame] | 1406 | inode = file_inode(vma->vm_file); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1407 | if (!inode) |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1408 | return 0; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1409 | |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1410 | mutex_lock(uprobes_mmap_hash(inode)); |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1411 | build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); |
Oleg Nesterov | 806a98b | 2012-12-27 18:21:11 +0100 | [diff] [blame] | 1412 | /* |
| 1413 | * We can race with uprobe_unregister(), so this uprobe may already be
| 1414 | * removed. But in that case filter_chain() must return false, as all
| 1415 | * consumers have gone away.
| 1416 | */ |
Oleg Nesterov | 665605a | 2012-07-29 20:22:29 +0200 | [diff] [blame] | 1417 | list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { |
Oleg Nesterov | 806a98b | 2012-12-27 18:21:11 +0100 | [diff] [blame] | 1418 | if (!fatal_signal_pending(current) && |
Andrii Nakryiko | 59da880 | 2024-09-03 10:45:58 -0700 | [diff] [blame] | 1419 | filter_chain(uprobe, vma->vm_mm)) { |
Oleg Nesterov | 57683f7 | 2012-07-29 20:22:47 +0200 | [diff] [blame] | 1420 | unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); |
Oleg Nesterov | 5e5be71 | 2012-08-06 14:49:56 +0200 | [diff] [blame] | 1421 | install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1422 | } |
| 1423 | put_uprobe(uprobe); |
| 1424 | } |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1425 | mutex_unlock(uprobes_mmap_hash(inode)); |
| 1426 | |
Oleg Nesterov | 5e5be71 | 2012-08-06 14:49:56 +0200 | [diff] [blame] | 1427 | return 0; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1428 | } |
| 1429 | |
Oleg Nesterov | 9f68f672 | 2012-08-19 16:15:09 +0200 | [diff] [blame] | 1430 | static bool |
| 1431 | vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end) |
| 1432 | { |
| 1433 | loff_t min, max; |
| 1434 | struct inode *inode; |
| 1435 | struct rb_node *n; |
| 1436 | |
Oleg Nesterov | f281769 | 2013-03-17 18:54:44 +0100 | [diff] [blame] | 1437 | inode = file_inode(vma->vm_file); |
Oleg Nesterov | 9f68f672 | 2012-08-19 16:15:09 +0200 | [diff] [blame] | 1438 | |
| 1439 | min = vaddr_to_offset(vma, start); |
| 1440 | max = min + (end - start) - 1; |
| 1441 | |
Jonathan Haslam | 0dc7152 | 2024-04-22 03:23:05 -0700 | [diff] [blame] | 1442 | read_lock(&uprobes_treelock); |
Oleg Nesterov | 9f68f672 | 2012-08-19 16:15:09 +0200 | [diff] [blame] | 1443 | n = find_node_in_range(inode, min, max); |
Jonathan Haslam | 0dc7152 | 2024-04-22 03:23:05 -0700 | [diff] [blame] | 1444 | read_unlock(&uprobes_treelock); |
Oleg Nesterov | 9f68f672 | 2012-08-19 16:15:09 +0200 | [diff] [blame] | 1445 | |
| 1446 | return !!n; |
| 1447 | } |
| 1448 | |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1449 | /* |
| 1450 | * Called in context of a munmap of a vma. |
| 1451 | */ |
Srikar Dronamraju | cbc91f7 | 2012-04-11 16:05:27 +0530 | [diff] [blame] | 1452 | void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1453 | { |
Oleg Nesterov | 441f1eb7 | 2012-11-25 19:54:29 +0100 | [diff] [blame] | 1454 | if (no_uprobe_events() || !valid_vma(vma, false)) |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1455 | return; |
| 1456 | |
Oleg Nesterov | 2fd611a | 2012-07-29 20:22:31 +0200 | [diff] [blame] | 1457 | if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ |
| 1458 | return; |
| 1459 | |
Oleg Nesterov | 9f68f672 | 2012-08-19 16:15:09 +0200 | [diff] [blame] | 1460 | if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) || |
| 1461 | test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags)) |
Oleg Nesterov | f8ac4ec | 2012-08-08 17:11:42 +0200 | [diff] [blame] | 1462 | return; |
| 1463 | |
Oleg Nesterov | 9f68f672 | 2012-08-19 16:15:09 +0200 | [diff] [blame] | 1464 | if (vma_has_uprobes(vma, start, end)) |
| 1465 | set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags); |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1466 | } |
| 1467 | |
Oleg Nesterov | 6d27a31 | 2024-09-11 15:14:07 +0200 | [diff] [blame] | 1468 | static vm_fault_t xol_fault(const struct vm_special_mapping *sm, |
| 1469 | struct vm_area_struct *vma, struct vm_fault *vmf) |
| 1470 | { |
| 1471 | struct xol_area *area = vma->vm_mm->uprobes_state.xol_area; |
| 1472 | |
Oleg Nesterov | 2abbcc0 | 2024-09-11 15:14:37 +0200 | [diff] [blame] | 1473 | vmf->page = area->page; |
Oleg Nesterov | 6d27a31 | 2024-09-11 15:14:07 +0200 | [diff] [blame] | 1474 | get_page(vmf->page); |
| 1475 | return 0; |
| 1476 | } |
| 1477 | |
| 1478 | static const struct vm_special_mapping xol_mapping = { |
| 1479 | .name = "[uprobes]", |
| 1480 | .fault = xol_fault, |
| 1481 | }; |
| 1482 | |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1483 | /* Slot allocation for XOL */ |
Oleg Nesterov | 6441ec8 | 2013-10-13 21:18:35 +0200 | [diff] [blame] | 1484 | static int xol_add_vma(struct mm_struct *mm, struct xol_area *area) |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1485 | { |
Oleg Nesterov | 704bde3 | 2015-07-21 15:40:33 +0200 | [diff] [blame] | 1486 | struct vm_area_struct *vma; |
| 1487 | int ret; |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1488 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1489 | if (mmap_write_lock_killable(mm)) |
Michal Hocko | 598fdc1 | 2016-05-23 16:26:08 -0700 | [diff] [blame] | 1490 | return -EINTR; |
| 1491 | |
Oleg Nesterov | 704bde3 | 2015-07-21 15:40:33 +0200 | [diff] [blame] | 1492 | if (mm->uprobes_state.xol_area) { |
| 1493 | ret = -EALREADY; |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1494 | goto fail; |
Oleg Nesterov | 704bde3 | 2015-07-21 15:40:33 +0200 | [diff] [blame] | 1495 | } |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1496 | |
Oleg Nesterov | af0d95a | 2013-10-13 21:18:38 +0200 | [diff] [blame] | 1497 | if (!area->vaddr) { |
| 1498 | /* Try to map as high as possible; this is only a hint. */
| 1499 | area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, |
| 1500 | PAGE_SIZE, 0, 0); |
Gaowei Pu | ff68dac | 2019-11-30 17:51:03 -0800 | [diff] [blame] | 1501 | if (IS_ERR_VALUE(area->vaddr)) { |
Oleg Nesterov | af0d95a | 2013-10-13 21:18:38 +0200 | [diff] [blame] | 1502 | ret = area->vaddr; |
| 1503 | goto fail; |
| 1504 | } |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1505 | } |
| 1506 | |
Oleg Nesterov | 704bde3 | 2015-07-21 15:40:33 +0200 | [diff] [blame] | 1507 | vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE, |
| 1508 | VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, |
Oleg Nesterov | 6d27a31 | 2024-09-11 15:14:07 +0200 | [diff] [blame] | 1509 | &xol_mapping); |
Oleg Nesterov | 704bde3 | 2015-07-21 15:40:33 +0200 | [diff] [blame] | 1510 | if (IS_ERR(vma)) { |
| 1511 | ret = PTR_ERR(vma); |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1512 | goto fail; |
Oleg Nesterov | 704bde3 | 2015-07-21 15:40:33 +0200 | [diff] [blame] | 1513 | } |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1514 | |
Oleg Nesterov | 704bde3 | 2015-07-21 15:40:33 +0200 | [diff] [blame] | 1515 | ret = 0; |
Paul E. McKenney | 5c6338b | 2017-10-09 11:08:53 -0700 | [diff] [blame] | 1516 | /* pairs with get_xol_area() */ |
| 1517 | smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */ |
Oleg Nesterov | c8a8253 | 2012-12-30 17:40:39 +0100 | [diff] [blame] | 1518 | fail: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1519 | mmap_write_unlock(mm); |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1520 | |
| 1521 | return ret; |
| 1522 | } |
| 1523 | |
Jiri Olsa | ff474a78 | 2024-06-12 08:44:28 +0900 | [diff] [blame] | 1524 | void * __weak arch_uprobe_trampoline(unsigned long *psize) |
| 1525 | { |
| 1526 | static uprobe_opcode_t insn = UPROBE_SWBP_INSN; |
| 1527 | |
| 1528 | *psize = UPROBE_SWBP_INSN_SIZE; |
| 1529 | return &insn; |
| 1530 | } |
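
/*
 * The __weak default above hands back a single breakpoint instruction as
 * the trampoline that __create_xol_area() copies into the reserved first
 * XOL slot. A hypothetical architecture override (sketch, for
 * illustration only) would return a longer arch-specific sequence:
 *
 *	void *arch_uprobe_trampoline(unsigned long *psize)
 *	{
 *		*psize = sizeof(my_arch_tramp);	// my_arch_tramp: hypothetical insn array
 *		return my_arch_tramp;
 *	}
 */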
| 1531 | |
Oleg Nesterov | af0d95a | 2013-10-13 21:18:38 +0200 | [diff] [blame] | 1532 | static struct xol_area *__create_xol_area(unsigned long vaddr) |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1533 | { |
Oleg Nesterov | 9b545df | 2012-12-31 16:39:49 +0100 | [diff] [blame] | 1534 | struct mm_struct *mm = current->mm; |
Jiri Olsa | ff474a78 | 2024-06-12 08:44:28 +0900 | [diff] [blame] | 1535 | unsigned long insns_size; |
Oleg Nesterov | 6441ec8 | 2013-10-13 21:18:35 +0200 | [diff] [blame] | 1536 | struct xol_area *area; |
Jiri Olsa | ff474a78 | 2024-06-12 08:44:28 +0900 | [diff] [blame] | 1537 | void *insns; |
Oleg Nesterov | 9b545df | 2012-12-31 16:39:49 +0100 | [diff] [blame] | 1538 | |
Sven Schnelle | e240b0f | 2024-09-03 12:23:12 +0200 | [diff] [blame] | 1539 | area = kzalloc(sizeof(*area), GFP_KERNEL); |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1540 | if (unlikely(!area)) |
Oleg Nesterov | c8a8253 | 2012-12-30 17:40:39 +0100 | [diff] [blame] | 1541 | goto out; |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1542 | |
Kees Cook | 6396bb2 | 2018-06-12 14:03:40 -0700 | [diff] [blame] | 1543 | area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long), |
| 1544 | GFP_KERNEL); |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1545 | if (!area->bitmap) |
Oleg Nesterov | c8a8253 | 2012-12-30 17:40:39 +0100 | [diff] [blame] | 1546 | goto free_area; |
| 1547 | |
Oleg Nesterov | 3482030 | 2024-09-29 18:20:47 +0200 | [diff] [blame] | 1548 | area->page = alloc_page(GFP_HIGHUSER | __GFP_ZERO); |
Oleg Nesterov | 2abbcc0 | 2024-09-11 15:14:37 +0200 | [diff] [blame] | 1549 | if (!area->page) |
Oleg Nesterov | c8a8253 | 2012-12-30 17:40:39 +0100 | [diff] [blame] | 1550 | goto free_bitmap; |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1551 | |
Oleg Nesterov | af0d95a | 2013-10-13 21:18:38 +0200 | [diff] [blame] | 1552 | area->vaddr = vaddr; |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1553 | init_waitqueue_head(&area->wq); |
Oleg Nesterov | 6441ec8 | 2013-10-13 21:18:35 +0200 | [diff] [blame] | 1554 | /* Reserve the 1st slot for get_trampoline_vaddr() */ |
| 1555 | set_bit(0, area->bitmap); |
| 1556 | atomic_set(&area->slot_count, 1); |
Jiri Olsa | ff474a78 | 2024-06-12 08:44:28 +0900 | [diff] [blame] | 1557 | insns = arch_uprobe_trampoline(&insns_size); |
Oleg Nesterov | 2abbcc0 | 2024-09-11 15:14:37 +0200 | [diff] [blame] | 1558 | arch_uprobe_copy_ixol(area->page, 0, insns, insns_size); |
Anton Arapov | e78aebf | 2013-04-03 18:00:32 +0200 | [diff] [blame] | 1559 | |
Oleg Nesterov | 6441ec8 | 2013-10-13 21:18:35 +0200 | [diff] [blame] | 1560 | if (!xol_add_vma(mm, area)) |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1561 | return area; |
| 1562 | |
Oleg Nesterov | 2abbcc0 | 2024-09-11 15:14:37 +0200 | [diff] [blame] | 1563 | __free_page(area->page); |
Oleg Nesterov | c8a8253 | 2012-12-30 17:40:39 +0100 | [diff] [blame] | 1564 | free_bitmap: |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1565 | kfree(area->bitmap); |
Oleg Nesterov | c8a8253 | 2012-12-30 17:40:39 +0100 | [diff] [blame] | 1566 | free_area: |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1567 | kfree(area); |
Oleg Nesterov | c8a8253 | 2012-12-30 17:40:39 +0100 | [diff] [blame] | 1568 | out: |
Oleg Nesterov | 6441ec8 | 2013-10-13 21:18:35 +0200 | [diff] [blame] | 1569 | return NULL; |
| 1570 | } |
| 1571 | |
| 1572 | /* |
| 1573 | * get_xol_area - Allocate the process's xol_area if necessary.
| 1574 | * This area will be used for storing instructions for execution out of line. |
| 1575 | * |
| 1576 | * Returns the allocated area or NULL. |
| 1577 | */ |
| 1578 | static struct xol_area *get_xol_area(void) |
| 1579 | { |
| 1580 | struct mm_struct *mm = current->mm; |
| 1581 | struct xol_area *area; |
| 1582 | |
| 1583 | if (!mm->uprobes_state.xol_area) |
Oleg Nesterov | af0d95a | 2013-10-13 21:18:38 +0200 | [diff] [blame] | 1584 | __create_xol_area(0); |
Oleg Nesterov | 6441ec8 | 2013-10-13 21:18:35 +0200 | [diff] [blame] | 1585 | |
Paul E. McKenney | 5c6338b | 2017-10-09 11:08:53 -0700 | [diff] [blame] | 1586 | /* Pairs with xol_add_vma() smp_store_release() */ |
| 1587 | area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */ |
Oleg Nesterov | 9b545df | 2012-12-31 16:39:49 +0100 | [diff] [blame] | 1588 | return area; |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1589 | } |
| 1590 | |
| 1591 | /* |
| 1592 | * uprobe_clear_state - Free the area allocated for slots. |
| 1593 | */ |
| 1594 | void uprobe_clear_state(struct mm_struct *mm) |
| 1595 | { |
| 1596 | struct xol_area *area = mm->uprobes_state.xol_area; |
| 1597 | |
Ravi Bangoria | 1cc3316 | 2018-08-20 10:12:47 +0530 | [diff] [blame] | 1598 | mutex_lock(&delayed_uprobe_lock); |
| 1599 | delayed_uprobe_remove(NULL, mm); |
| 1600 | mutex_unlock(&delayed_uprobe_lock); |
| 1601 | |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1602 | if (!area) |
| 1603 | return; |
| 1604 | |
Oleg Nesterov | 2abbcc0 | 2024-09-11 15:14:37 +0200 | [diff] [blame] | 1605 | put_page(area->page); |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1606 | kfree(area->bitmap); |
| 1607 | kfree(area); |
| 1608 | } |
| 1609 | |
Oleg Nesterov | 32cdba1 | 2012-11-14 19:03:42 +0100 | [diff] [blame] | 1610 | void uprobe_start_dup_mmap(void) |
| 1611 | { |
| 1612 | percpu_down_read(&dup_mmap_sem); |
| 1613 | } |
| 1614 | |
| 1615 | void uprobe_end_dup_mmap(void) |
| 1616 | { |
| 1617 | percpu_up_read(&dup_mmap_sem); |
| 1618 | } |
| 1619 | |
Oleg Nesterov | f8ac4ec | 2012-08-08 17:11:42 +0200 | [diff] [blame] | 1620 | void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm) |
| 1621 | { |
Oleg Nesterov | 9f68f672 | 2012-08-19 16:15:09 +0200 | [diff] [blame] | 1622 | if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) { |
Oleg Nesterov | f8ac4ec | 2012-08-08 17:11:42 +0200 | [diff] [blame] | 1623 | set_bit(MMF_HAS_UPROBES, &newmm->flags); |
Oleg Nesterov | 9f68f672 | 2012-08-19 16:15:09 +0200 | [diff] [blame] | 1624 | /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */ |
| 1625 | set_bit(MMF_RECALC_UPROBES, &newmm->flags); |
| 1626 | } |
Oleg Nesterov | f8ac4ec | 2012-08-08 17:11:42 +0200 | [diff] [blame] | 1627 | } |
| 1628 | |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1629 | /* |
| 1630 | * Search for a free XOL slot: claim a clear bit in area->bitmap, or sleep on area->wq until one frees up.
| 1631 | */ |
| 1632 | static unsigned long xol_take_insn_slot(struct xol_area *area) |
| 1633 | { |
| 1634 | unsigned long slot_addr; |
| 1635 | int slot_nr; |
| 1636 | |
| 1637 | do { |
| 1638 | slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE); |
| 1639 | if (slot_nr < UINSNS_PER_PAGE) { |
| 1640 | if (!test_and_set_bit(slot_nr, area->bitmap)) |
| 1641 | break; |
| 1642 | |
| 1643 | slot_nr = UINSNS_PER_PAGE; |
| 1644 | continue; |
| 1645 | } |
| 1646 | wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE)); |
| 1647 | } while (slot_nr >= UINSNS_PER_PAGE); |
| 1648 | |
| 1649 | slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES); |
| 1650 | atomic_inc(&area->slot_count); |
| 1651 | |
| 1652 | return slot_addr; |
| 1653 | } |
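
/*
 * e.g. assuming 4K pages and 128-byte XOL slots, UINSNS_PER_PAGE is 32;
 * slot 0 is reserved for the trampoline by __create_xol_area(), leaving
 * 31 slots for out-of-line execution.
 */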
| 1654 | |
| 1655 | /* |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1656 | * xol_get_insn_slot - allocate a slot for xol. |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1657 | * Returns the allocated slot address or 0. |
| 1658 | */ |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1659 | static unsigned long xol_get_insn_slot(struct uprobe *uprobe) |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1660 | { |
| 1661 | struct xol_area *area; |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1662 | unsigned long xol_vaddr; |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1663 | |
Oleg Nesterov | 9b545df | 2012-12-31 16:39:49 +0100 | [diff] [blame] | 1664 | area = get_xol_area(); |
| 1665 | if (!area) |
| 1666 | return 0; |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1667 | |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1668 | xol_vaddr = xol_take_insn_slot(area); |
| 1669 | if (unlikely(!xol_vaddr)) |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1670 | return 0; |
| 1671 | |
Oleg Nesterov | 2abbcc0 | 2024-09-11 15:14:37 +0200 | [diff] [blame] | 1672 | arch_uprobe_copy_ixol(area->page, xol_vaddr, |
Victor Kamensky | 72e6ae2 | 2014-04-29 04:20:52 +0100 | [diff] [blame] | 1673 | &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1674 | |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1675 | return xol_vaddr; |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1676 | } |
| 1677 | |
| 1678 | /* |
| 1679 | * xol_free_insn_slot - If slot was earlier allocated by |
| 1680 | * @xol_get_insn_slot(), make the slot available for |
| 1681 | * subsequent requests. |
| 1682 | */ |
| 1683 | static void xol_free_insn_slot(struct task_struct *tsk) |
| 1684 | { |
| 1685 | struct xol_area *area; |
| 1686 | unsigned long vma_end; |
| 1687 | unsigned long slot_addr; |
| 1688 | |
| 1689 | if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask) |
| 1690 | return; |
| 1691 | |
| 1692 | slot_addr = tsk->utask->xol_vaddr; |
Oleg Nesterov | af4355e | 2012-12-31 18:37:11 +0100 | [diff] [blame] | 1693 | if (unlikely(!slot_addr)) |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1694 | return; |
| 1695 | |
| 1696 | area = tsk->mm->uprobes_state.xol_area; |
| 1697 | vma_end = area->vaddr + PAGE_SIZE; |
| 1698 | if (area->vaddr <= slot_addr && slot_addr < vma_end) { |
| 1699 | unsigned long offset; |
| 1700 | int slot_nr; |
| 1701 | |
| 1702 | offset = slot_addr - area->vaddr; |
| 1703 | slot_nr = offset / UPROBE_XOL_SLOT_BYTES; |
| 1704 | if (slot_nr >= UINSNS_PER_PAGE) |
| 1705 | return; |
| 1706 | |
| 1707 | clear_bit(slot_nr, area->bitmap); |
| 1708 | atomic_dec(&area->slot_count); |
Oleg Nesterov | 2a742ce | 2015-07-21 15:40:36 +0200 | [diff] [blame] | 1709 | smp_mb__after_atomic(); /* pairs with prepare_to_wait() */ |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1710 | if (waitqueue_active(&area->wq)) |
| 1711 | wake_up(&area->wq); |
| 1712 | |
| 1713 | tsk->utask->xol_vaddr = 0; |
| 1714 | } |
| 1715 | } |
| 1716 | |
Victor Kamensky | 72e6ae2 | 2014-04-29 04:20:52 +0100 | [diff] [blame] | 1717 | void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, |
| 1718 | void *src, unsigned long len) |
| 1719 | { |
| 1720 | /* Initialize the slot */ |
| 1721 | copy_to_page(page, vaddr, src, len); |
| 1722 | |
| 1723 | /* |
Christoph Hellwig | 885f7f8 | 2020-06-07 21:42:22 -0700 | [diff] [blame] | 1724 | * We probably need flush_icache_user_page() but it needs vma. |
Victor Kamensky | 72e6ae2 | 2014-04-29 04:20:52 +0100 | [diff] [blame] | 1725 | * This should work on most architectures by default. If an
| 1726 | * architecture needs to do something different it can define
| 1727 | * its own version of the function.
| 1728 | */ |
| 1729 | flush_dcache_page(page); |
| 1730 | } |
| 1731 | |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1732 | /** |
| 1733 | * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs |
| 1734 | * @regs: Reflects the saved state of the task after it has hit a breakpoint |
| 1735 | * instruction. |
| 1736 | * Return the address of the breakpoint instruction. |
| 1737 | */ |
| 1738 | unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs) |
| 1739 | { |
| 1740 | return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE; |
| 1741 | } |
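
/*
 * e.g. on x86 the breakpoint instruction (int3) is a single byte, so on
 * entry here the saved ip points just past the 0xcc and the breakpoint
 * address is instruction_pointer(regs) - 1.
 */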
| 1742 | |
Oleg Nesterov | b02ef20 | 2014-05-12 18:24:45 +0200 | [diff] [blame] | 1743 | unsigned long uprobe_get_trap_addr(struct pt_regs *regs) |
| 1744 | { |
| 1745 | struct uprobe_task *utask = current->utask; |
| 1746 | |
| 1747 | if (unlikely(utask && utask->active_uprobe)) |
| 1748 | return utask->vaddr; |
| 1749 | |
| 1750 | return instruction_pointer(regs); |
| 1751 | } |
| 1752 | |
Oleg Nesterov | 2bb5e84 | 2015-07-21 15:40:06 +0200 | [diff] [blame] | 1753 | static struct return_instance *free_ret_instance(struct return_instance *ri) |
| 1754 | { |
| 1755 | struct return_instance *next = ri->next; |
| 1756 | put_uprobe(ri->uprobe); |
| 1757 | kfree(ri); |
| 1758 | return next; |
| 1759 | } |
| 1760 | |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1761 | /* |
| 1762 | * Called with no locks held. |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 1763 | * Called in context of an exiting or an exec-ing thread. |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1764 | */ |
| 1765 | void uprobe_free_utask(struct task_struct *t) |
| 1766 | { |
| 1767 | struct uprobe_task *utask = t->utask; |
Oleg Nesterov | 2bb5e84 | 2015-07-21 15:40:06 +0200 | [diff] [blame] | 1768 | struct return_instance *ri; |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1769 | |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1770 | if (!utask) |
| 1771 | return; |
| 1772 | |
| 1773 | if (utask->active_uprobe) |
| 1774 | put_uprobe(utask->active_uprobe); |
| 1775 | |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1776 | ri = utask->return_instances; |
Oleg Nesterov | 2bb5e84 | 2015-07-21 15:40:06 +0200 | [diff] [blame] | 1777 | while (ri) |
| 1778 | ri = free_ret_instance(ri); |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1779 | |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1780 | xol_free_insn_slot(t); |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1781 | kfree(utask); |
| 1782 | t->utask = NULL; |
| 1783 | } |
| 1784 | |
| 1785 | /* |
Randy Dunlap | c034f48 | 2021-02-25 17:21:10 -0800 | [diff] [blame] | 1786 | * Allocate a uprobe_task object for the task if necessary. |
Oleg Nesterov | 5a2df66 | 2012-12-31 17:03:32 +0100 | [diff] [blame] | 1787 | * Called when the thread hits a breakpoint. |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1788 | * |
| 1789 | * Returns: |
| 1790 | * - pointer to new uprobe_task on success |
| 1791 | * - NULL otherwise |
| 1792 | */ |
Oleg Nesterov | 5a2df66 | 2012-12-31 17:03:32 +0100 | [diff] [blame] | 1793 | static struct uprobe_task *get_utask(void) |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1794 | { |
Oleg Nesterov | 5a2df66 | 2012-12-31 17:03:32 +0100 | [diff] [blame] | 1795 | if (!current->utask) |
| 1796 | current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); |
| 1797 | return current->utask; |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1798 | } |
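/*
 * Note: the breakpoint is handled in process context on the way back to
 * userspace, so the GFP_KERNEL allocation above is allowed to sleep.
 */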
| 1799 | |
Oleg Nesterov | 248d3a7 | 2013-10-13 21:18:41 +0200 | [diff] [blame] | 1800 | static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask) |
| 1801 | { |
| 1802 | struct uprobe_task *n_utask; |
| 1803 | struct return_instance **p, *o, *n; |
| 1804 | |
| 1805 | n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); |
| 1806 | if (!n_utask) |
| 1807 | return -ENOMEM; |
| 1808 | t->utask = n_utask; |
| 1809 | |
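/*
 * Tail-link the copies so the duplicated list preserves the original
 * LIFO order; 'p' always points at the link field where the next copy
 * gets attached.
 */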
| 1810 | p = &n_utask->return_instances; |
| 1811 | for (o = o_utask->return_instances; o; o = o->next) { |
| 1812 | n = kmalloc(sizeof(struct return_instance), GFP_KERNEL); |
| 1813 | if (!n) |
| 1814 | return -ENOMEM; |
| 1815 | |
| 1816 | *n = *o; |
Andrii Nakryiko | 3f7f1a6 | 2024-09-03 10:45:56 -0700 | [diff] [blame] | 1817 | /* |
| 1818 | * The uprobe's refcount has to be positive at this point, held by the
| 1819 | * utask->return_instances items; those return_instances can't be
| 1820 | * removed right now, as the task is blocked while duping, so
| 1821 | * get_uprobe() is safe to use here.
| 1822 | */ |
Oleg Nesterov | f231722 | 2015-07-21 15:40:03 +0200 | [diff] [blame] | 1823 | get_uprobe(n->uprobe); |
Oleg Nesterov | 248d3a7 | 2013-10-13 21:18:41 +0200 | [diff] [blame] | 1824 | n->next = NULL; |
| 1825 | |
| 1826 | *p = n; |
| 1827 | p = &n->next; |
| 1828 | n_utask->depth++; |
| 1829 | } |
| 1830 | |
| 1831 | return 0; |
| 1832 | } |
| 1833 | |
Oleg Nesterov | aa59c53 | 2013-10-13 21:18:44 +0200 | [diff] [blame] | 1834 | static void dup_xol_work(struct callback_head *work) |
| 1835 | { |
Oleg Nesterov | aa59c53 | 2013-10-13 21:18:44 +0200 | [diff] [blame] | 1836 | if (current->flags & PF_EXITING) |
| 1837 | return; |
| 1838 | |
Michal Hocko | 598fdc1 | 2016-05-23 16:26:08 -0700 | [diff] [blame] | 1839 | if (!__create_xol_area(current->utask->dup_xol_addr) && |
| 1840 | !fatal_signal_pending(current)) |
Oleg Nesterov | aa59c53 | 2013-10-13 21:18:44 +0200 | [diff] [blame] | 1841 | uprobe_warn(current, "dup xol area"); |
| 1842 | } |
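/*
 * The XOL area must be mapped into the child's own mm, which cannot be
 * done from the parent's fork path; uprobe_copy_process() therefore
 * queues dup_xol_work() via task_work so it runs in the child's context
 * on its first return to userspace.
 */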
| 1843 | |
Anton Arapov | e78aebf | 2013-04-03 18:00:32 +0200 | [diff] [blame] | 1844 | /* |
Oleg Nesterov | b68e074 | 2013-10-13 21:18:31 +0200 | [diff] [blame] | 1845 | * Called in context of a new clone/fork from copy_process. |
| 1846 | */ |
Oleg Nesterov | 3ab6796 | 2013-10-16 19:39:37 +0200 | [diff] [blame] | 1847 | void uprobe_copy_process(struct task_struct *t, unsigned long flags) |
Oleg Nesterov | b68e074 | 2013-10-13 21:18:31 +0200 | [diff] [blame] | 1848 | { |
Oleg Nesterov | 248d3a7 | 2013-10-13 21:18:41 +0200 | [diff] [blame] | 1849 | struct uprobe_task *utask = current->utask; |
| 1850 | struct mm_struct *mm = current->mm; |
Oleg Nesterov | aa59c53 | 2013-10-13 21:18:44 +0200 | [diff] [blame] | 1851 | struct xol_area *area; |
Oleg Nesterov | 248d3a7 | 2013-10-13 21:18:41 +0200 | [diff] [blame] | 1852 | |
Oleg Nesterov | b68e074 | 2013-10-13 21:18:31 +0200 | [diff] [blame] | 1853 | t->utask = NULL; |
Oleg Nesterov | 248d3a7 | 2013-10-13 21:18:41 +0200 | [diff] [blame] | 1854 | |
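/*
 * Pending return_instances matter to the child only if it will run on
 * the parent's stack: an ordinary thread (CLONE_VM without CLONE_VFORK)
 * gets its own stack, so there is nothing to copy for it.
 */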
Oleg Nesterov | 3ab6796 | 2013-10-16 19:39:37 +0200 | [diff] [blame] | 1855 | if (!utask || !utask->return_instances) |
| 1856 | return; |
| 1857 | |
| 1858 | if (mm == t->mm && !(flags & CLONE_VFORK)) |
Oleg Nesterov | 248d3a7 | 2013-10-13 21:18:41 +0200 | [diff] [blame] | 1859 | return; |
| 1860 | |
| 1861 | if (dup_utask(t, utask)) |
| 1862 | return uprobe_warn(t, "dup ret instances"); |
Oleg Nesterov | aa59c53 | 2013-10-13 21:18:44 +0200 | [diff] [blame] | 1863 | |
| 1864 | /* The task can fork() after dup_xol_work() fails */ |
| 1865 | area = mm->uprobes_state.xol_area; |
| 1866 | if (!area) |
| 1867 | return uprobe_warn(t, "dup xol area"); |
| 1868 | |
Oleg Nesterov | 3ab6796 | 2013-10-16 19:39:37 +0200 | [diff] [blame] | 1869 | if (mm == t->mm) |
| 1870 | return; |
| 1871 | |
Oleg Nesterov | 3247343 | 2013-11-08 18:52:21 +0100 | [diff] [blame] | 1872 | t->utask->dup_xol_addr = area->vaddr; |
| 1873 | init_task_work(&t->utask->dup_xol_work, dup_xol_work); |
Jens Axboe | 91989c7 | 2020-10-16 09:02:26 -0600 | [diff] [blame] | 1874 | task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME); |
Oleg Nesterov | b68e074 | 2013-10-13 21:18:31 +0200 | [diff] [blame] | 1875 | } |
| 1876 | |
| 1877 | /* |
Anton Arapov | e78aebf | 2013-04-03 18:00:32 +0200 | [diff] [blame] | 1878 | * The current area->vaddr notion assumes the trampoline address is always
| 1879 | * equal to area->vaddr.
| 1880 | * |
| 1881 | * Returns -1 in case the xol_area is not allocated. |
| 1882 | */ |
Jiri Olsa | ff474a78 | 2024-06-12 08:44:28 +0900 | [diff] [blame] | 1883 | unsigned long uprobe_get_trampoline_vaddr(void) |
Anton Arapov | e78aebf | 2013-04-03 18:00:32 +0200 | [diff] [blame] | 1884 | { |
| 1885 | struct xol_area *area; |
| 1886 | unsigned long trampoline_vaddr = -1; |
| 1887 | |
Paul E. McKenney | 5c6338b | 2017-10-09 11:08:53 -0700 | [diff] [blame] | 1888 | /* Pairs with xol_add_vma() smp_store_release() */ |
| 1889 | area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */ |
Anton Arapov | e78aebf | 2013-04-03 18:00:32 +0200 | [diff] [blame] | 1890 | if (area) |
| 1891 | trampoline_vaddr = area->vaddr; |
| 1892 | |
| 1893 | return trampoline_vaddr; |
| 1894 | } |
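/*
 * The -1 sentinel cannot collide with a real trampoline: the XOL area is
 * a page-aligned mapping, so area->vaddr can never be (unsigned long)-1.
 */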
| 1895 | |
Oleg Nesterov | db087ef | 2015-07-21 15:40:28 +0200 | [diff] [blame] | 1896 | static void cleanup_return_instances(struct uprobe_task *utask, bool chained, |
| 1897 | struct pt_regs *regs) |
Oleg Nesterov | a5b7e1a | 2015-07-21 15:40:23 +0200 | [diff] [blame] | 1898 | { |
| 1899 | struct return_instance *ri = utask->return_instances; |
Oleg Nesterov | db087ef | 2015-07-21 15:40:28 +0200 | [diff] [blame] | 1900 | enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL; |
Oleg Nesterov | 86dcb70 | 2015-07-21 15:40:26 +0200 | [diff] [blame] | 1901 | |
| 1902 | while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) { |
Oleg Nesterov | a5b7e1a | 2015-07-21 15:40:23 +0200 | [diff] [blame] | 1903 | ri = free_ret_instance(ri); |
| 1904 | utask->depth--; |
| 1905 | } |
| 1906 | utask->return_instances = ri; |
| 1907 | } |
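/*
 * Dropping dead frames here also keeps utask->depth accurate: a longjmp()
 * past one or more uretprobed callers would otherwise leave stale
 * instances whose recorded stack frames are already gone.
 */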
| 1908 | |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1909 | static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) |
| 1910 | { |
| 1911 | struct return_instance *ri; |
| 1912 | struct uprobe_task *utask; |
| 1913 | unsigned long orig_ret_vaddr, trampoline_vaddr; |
Oleg Nesterov | db087ef | 2015-07-21 15:40:28 +0200 | [diff] [blame] | 1914 | bool chained; |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1915 | |
| 1916 | if (!get_xol_area()) |
| 1917 | return; |
| 1918 | |
| 1919 | utask = get_utask(); |
| 1920 | if (!utask) |
| 1921 | return; |
| 1922 | |
Anton Arapov | ded49c5 | 2013-04-03 18:00:37 +0200 | [diff] [blame] | 1923 | if (utask->depth >= MAX_URETPROBE_DEPTH) { |
| 1924 | printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to" |
| 1925 | " nestedness limit pid/tgid=%d/%d\n", |
| 1926 | current->pid, current->tgid); |
| 1927 | return; |
| 1928 | } |
| 1929 | |
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 1930 | /* we need to bump refcount to store uprobe in utask */ |
| 1931 | if (!try_get_uprobe(uprobe)) |
| 1932 | return; |
| 1933 | |
Oleg Nesterov | 6c58d0e | 2015-07-21 15:40:10 +0200 | [diff] [blame] | 1934 | ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL); |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1935 | if (!ri) |
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 1936 | goto fail; |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1937 | |
Jiri Olsa | ff474a78 | 2024-06-12 08:44:28 +0900 | [diff] [blame] | 1938 | trampoline_vaddr = uprobe_get_trampoline_vaddr(); |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1939 | orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs); |
| 1940 | if (orig_ret_vaddr == -1) |
| 1941 | goto fail; |
| 1942 | |
Oleg Nesterov | a5b7e1a | 2015-07-21 15:40:23 +0200 | [diff] [blame] | 1943 | /* drop the entries invalidated by longjmp() */ |
Oleg Nesterov | db087ef | 2015-07-21 15:40:28 +0200 | [diff] [blame] | 1944 | chained = (orig_ret_vaddr == trampoline_vaddr); |
| 1945 | cleanup_return_instances(utask, chained, regs); |
Oleg Nesterov | a5b7e1a | 2015-07-21 15:40:23 +0200 | [diff] [blame] | 1946 | |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1947 | /* |
| 1948 | * We don't want to keep the trampoline address on the stack; rather, keep
| 1949 | * the original return address of the first caller through all the
| 1950 | * subsequent instances. This also makes breakpoint unwinding easier.
| 1951 | */ |
Oleg Nesterov | db087ef | 2015-07-21 15:40:28 +0200 | [diff] [blame] | 1952 | if (chained) { |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1953 | if (!utask->return_instances) { |
| 1954 | /* |
| 1955 | * This situation is not possible. Likely we have an |
| 1956 | * attack from user-space. |
| 1957 | */ |
Oleg Nesterov | 6c58d0e | 2015-07-21 15:40:10 +0200 | [diff] [blame] | 1958 | uprobe_warn(current, "handle tail call"); |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1959 | goto fail; |
| 1960 | } |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1961 | orig_ret_vaddr = utask->return_instances->orig_ret_vaddr; |
| 1962 | } |
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 1963 | ri->uprobe = uprobe; |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1964 | ri->func = instruction_pointer(regs); |
Oleg Nesterov | 7b868e4 | 2015-07-21 15:40:18 +0200 | [diff] [blame] | 1965 | ri->stack = user_stack_pointer(regs); |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1966 | ri->orig_ret_vaddr = orig_ret_vaddr; |
| 1967 | ri->chained = chained; |
| 1968 | |
Anton Arapov | ded49c5 | 2013-04-03 18:00:37 +0200 | [diff] [blame] | 1969 | utask->depth++; |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1970 | ri->next = utask->return_instances; |
| 1971 | utask->return_instances = ri; |
| 1972 | |
| 1973 | return; |
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 1974 | fail: |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1975 | kfree(ri); |
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 1976 | put_uprobe(uprobe); |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1977 | } |
| 1978 | |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1979 | /* Prepare to single-step probed instruction out of line. */ |
| 1980 | static int |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1981 | pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1982 | { |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1983 | struct uprobe_task *utask; |
| 1984 | unsigned long xol_vaddr; |
Oleg Nesterov | aba5102 | 2012-12-31 18:12:48 +0100 | [diff] [blame] | 1985 | int err; |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1986 | |
Oleg Nesterov | 608e742 | 2012-12-31 18:20:42 +0100 | [diff] [blame] | 1987 | utask = get_utask(); |
| 1988 | if (!utask) |
| 1989 | return -ENOMEM; |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1990 | |
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 1991 | if (!try_get_uprobe(uprobe)) |
| 1992 | return -EINVAL; |
| 1993 | |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1994 | xol_vaddr = xol_get_insn_slot(uprobe); |
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 1995 | if (!xol_vaddr) { |
| 1996 | err = -ENOMEM; |
| 1997 | goto err_out; |
| 1998 | } |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1999 | |
| 2000 | utask->xol_vaddr = xol_vaddr; |
| 2001 | utask->vaddr = bp_vaddr; |
| 2002 | |
Oleg Nesterov | aba5102 | 2012-12-31 18:12:48 +0100 | [diff] [blame] | 2003 | err = arch_uprobe_pre_xol(&uprobe->arch, regs); |
| 2004 | if (unlikely(err)) { |
| 2005 | xol_free_insn_slot(current); |
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 2006 | goto err_out; |
Oleg Nesterov | aba5102 | 2012-12-31 18:12:48 +0100 | [diff] [blame] | 2007 | } |
| 2008 | |
Oleg Nesterov | 608e742 | 2012-12-31 18:20:42 +0100 | [diff] [blame] | 2009 | utask->active_uprobe = uprobe; |
| 2010 | utask->state = UTASK_SSTEP; |
Oleg Nesterov | aba5102 | 2012-12-31 18:12:48 +0100 | [diff] [blame] | 2011 | return 0; |
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 2012 | err_out: |
| 2013 | put_uprobe(uprobe); |
| 2014 | return err; |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2015 | } |
| 2016 | |
| 2017 | /* |
| 2018 | * If we are singlestepping, then ensure this thread is not connected to |
| 2019 | * non-fatal signals until completion of singlestep. When xol insn itself |
| 2020 | * triggers the signal, restart the original insn even if the task is |
| 2021 | * already SIGKILL'ed (since coredump should report the correct ip). This |
| 2022 | * is even more important if the task has a handler for SIGSEGV/etc. The
| 2023 | * _same_ instruction should be repeated again after return from the signal |
| 2024 | * handler, and SSTEP can never finish in this case. |
| 2025 | */ |
| 2026 | bool uprobe_deny_signal(void) |
| 2027 | { |
| 2028 | struct task_struct *t = current; |
| 2029 | struct uprobe_task *utask = t->utask; |
| 2030 | |
| 2031 | if (likely(!utask || !utask->active_uprobe)) |
| 2032 | return false; |
| 2033 | |
| 2034 | WARN_ON_ONCE(utask->state != UTASK_SSTEP); |
| 2035 | |
Jens Axboe | 5c251e9 | 2020-10-26 14:32:27 -0600 | [diff] [blame] | 2036 | if (task_sigpending(t)) { |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2037 | spin_lock_irq(&t->sighand->siglock); |
| 2038 | clear_tsk_thread_flag(t, TIF_SIGPENDING); |
| 2039 | spin_unlock_irq(&t->sighand->siglock); |
| 2040 | |
| 2041 | if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) { |
| 2042 | utask->state = UTASK_SSTEP_TRAPPED; |
| 2043 | set_tsk_thread_flag(t, TIF_UPROBE); |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2044 | } |
| 2045 | } |
| 2046 | |
| 2047 | return true; |
| 2048 | } |
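/*
 * uprobe_deny_signal() is called from get_signal(); returning true tells
 * the signal code to skip delivery for now. TIF_SIGPENDING is
 * re-established by handle_singlestep() via recalc_sigpending() once the
 * single-step has finished.
 */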
| 2049 | |
Oleg Nesterov | 499a4f3 | 2012-08-19 17:41:34 +0200 | [diff] [blame] | 2050 | static void mmf_recalc_uprobes(struct mm_struct *mm) |
| 2051 | { |
Matthew Wilcox (Oracle) | fcb72a5 | 2022-09-06 19:48:58 +0000 | [diff] [blame] | 2052 | VMA_ITERATOR(vmi, mm, 0); |
Oleg Nesterov | 499a4f3 | 2012-08-19 17:41:34 +0200 | [diff] [blame] | 2053 | struct vm_area_struct *vma; |
| 2054 | |
Matthew Wilcox (Oracle) | fcb72a5 | 2022-09-06 19:48:58 +0000 | [diff] [blame] | 2055 | for_each_vma(vmi, vma) { |
Oleg Nesterov | 499a4f3 | 2012-08-19 17:41:34 +0200 | [diff] [blame] | 2056 | if (!valid_vma(vma, false)) |
| 2057 | continue; |
| 2058 | /* |
| 2059 | * This is not strictly accurate: we can race with
| 2060 | * uprobe_unregister() and see an already removed
| 2061 | * uprobe if delete_uprobe() was not yet called. |
Oleg Nesterov | 63633cb | 2012-11-22 18:30:15 +0100 | [diff] [blame] | 2062 | * Or this uprobe can be filtered out. |
Oleg Nesterov | 499a4f3 | 2012-08-19 17:41:34 +0200 | [diff] [blame] | 2063 | */ |
| 2064 | if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) |
| 2065 | return; |
| 2066 | } |
| 2067 | |
| 2068 | clear_bit(MMF_HAS_UPROBES, &mm->flags); |
| 2069 | } |
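/*
 * MMF_RECALC_UPROBES is set on the unregister/unmap paths when the flag
 * state becomes uncertain; once the loop above verifies that no VMA still
 * contains uprobes, clearing MMF_HAS_UPROBES lets future breakpoint hits
 * in this mm skip the uprobe lookup entirely.
 */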
| 2070 | |
Ananth N Mavinakayanahalli | 0908ad6 | 2013-03-22 20:46:27 +0530 | [diff] [blame] | 2071 | static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) |
Oleg Nesterov | ec75fba | 2012-09-23 21:55:19 +0200 | [diff] [blame] | 2072 | { |
| 2073 | struct page *page; |
| 2074 | uprobe_opcode_t opcode; |
| 2075 | int result; |
| 2076 | |
Oleg Nesterov | 013b2de | 2020-05-04 18:47:25 +0200 | [diff] [blame] | 2077 | if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE))) |
| 2078 | return -EINVAL; |
| 2079 | |
Oleg Nesterov | ec75fba | 2012-09-23 21:55:19 +0200 | [diff] [blame] | 2080 | pagefault_disable(); |
Linus Torvalds | bd28b14 | 2016-05-22 17:21:27 -0700 | [diff] [blame] | 2081 | result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr); |
Oleg Nesterov | ec75fba | 2012-09-23 21:55:19 +0200 | [diff] [blame] | 2082 | pagefault_enable(); |
| 2083 | |
| 2084 | if (likely(result == 0)) |
| 2085 | goto out; |
| 2086 | |
Oleg Nesterov | 300b056 | 2024-08-01 15:27:14 +0200 | [diff] [blame] | 2087 | result = get_user_pages(vaddr, 1, FOLL_FORCE, &page); |
Oleg Nesterov | ec75fba | 2012-09-23 21:55:19 +0200 | [diff] [blame] | 2088 | if (result < 0) |
| 2089 | return result; |
| 2090 | |
Oleg Nesterov | ab0d805 | 2013-03-24 18:24:37 +0100 | [diff] [blame] | 2091 | copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); |
Oleg Nesterov | ec75fba | 2012-09-23 21:55:19 +0200 | [diff] [blame] | 2092 | put_page(page); |
| 2093 | out: |
Ananth N Mavinakayanahalli | 0908ad6 | 2013-03-22 20:46:27 +0530 | [diff] [blame] | 2094 | /* This needs to return true for any variant of the trap insn */ |
| 2095 | return is_trap_insn(&opcode); |
Oleg Nesterov | ec75fba | 2012-09-23 21:55:19 +0200 | [diff] [blame] | 2096 | } |
| 2097 | |
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 2098 | /* assumes we are inside an RCU-protected region */
| 2099 | static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swbp) |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 2100 | { |
| 2101 | struct mm_struct *mm = current->mm; |
| 2102 | struct uprobe *uprobe = NULL; |
| 2103 | struct vm_area_struct *vma; |
| 2104 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2105 | mmap_read_lock(mm); |
Liam Howlett | 9016dde | 2021-06-28 19:39:35 -0700 | [diff] [blame] | 2106 | vma = vma_lookup(mm, bp_vaddr); |
| 2107 | if (vma) { |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 2108 | if (valid_vma(vma, false)) { |
Oleg Nesterov | f281769 | 2013-03-17 18:54:44 +0100 | [diff] [blame] | 2109 | struct inode *inode = file_inode(vma->vm_file); |
Oleg Nesterov | cb113b4 | 2012-07-29 20:22:42 +0200 | [diff] [blame] | 2110 | loff_t offset = vaddr_to_offset(vma, bp_vaddr); |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 2111 | |
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 2112 | uprobe = find_uprobe_rcu(inode, offset); |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 2113 | } |
Oleg Nesterov | d790d34 | 2012-05-29 21:29:14 +0200 | [diff] [blame] | 2114 | |
| 2115 | if (!uprobe) |
Ananth N Mavinakayanahalli | 0908ad6 | 2013-03-22 20:46:27 +0530 | [diff] [blame] | 2116 | *is_swbp = is_trap_at_addr(mm, bp_vaddr); |
Oleg Nesterov | d790d34 | 2012-05-29 21:29:14 +0200 | [diff] [blame] | 2117 | } else { |
| 2118 | *is_swbp = -EFAULT; |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 2119 | } |
Oleg Nesterov | 499a4f3 | 2012-08-19 17:41:34 +0200 | [diff] [blame] | 2120 | |
| 2121 | if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags)) |
| 2122 | mmf_recalc_uprobes(mm); |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2123 | mmap_read_unlock(mm); |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 2124 | |
| 2125 | return uprobe; |
| 2126 | } |
| 2127 | |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 2128 | static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) |
| 2129 | { |
| 2130 | struct uprobe_consumer *uc; |
| 2131 | int remove = UPROBE_HANDLER_REMOVE; |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 2132 | bool need_prep = false; /* prepare return uprobe, when needed */ |
Andrii Nakryiko | cc01bd0 | 2024-09-03 10:45:59 -0700 | [diff] [blame] | 2133 | bool has_consumers = false; |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 2134 | |
Andrii Nakryiko | cfa7f3d | 2024-07-29 10:52:23 -0700 | [diff] [blame] | 2135 | current->utask->auprobe = &uprobe->arch; |
Andrii Nakryiko | cc01bd0 | 2024-09-03 10:45:59 -0700 | [diff] [blame] | 2136 | |
| 2137 | list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node, |
| 2138 | srcu_read_lock_held(&uprobes_srcu)) { |
Anton Arapov | ea02487 | 2013-04-03 18:00:31 +0200 | [diff] [blame] | 2139 | int rc = 0; |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 2140 | |
Anton Arapov | ea02487 | 2013-04-03 18:00:31 +0200 | [diff] [blame] | 2141 | if (uc->handler) { |
| 2142 | rc = uc->handler(uc, regs); |
| 2143 | WARN(rc & ~UPROBE_HANDLER_MASK, |
Sakari Ailus | d75f773 | 2019-03-25 21:32:28 +0200 | [diff] [blame] | 2144 | "bad rc=0x%x from %ps()\n", rc, uc->handler); |
Anton Arapov | ea02487 | 2013-04-03 18:00:31 +0200 | [diff] [blame] | 2145 | } |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 2146 | |
| 2147 | if (uc->ret_handler) |
| 2148 | need_prep = true; |
| 2149 | |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 2150 | remove &= rc; |
Andrii Nakryiko | cc01bd0 | 2024-09-03 10:45:59 -0700 | [diff] [blame] | 2151 | has_consumers = true; |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 2152 | } |
Andrii Nakryiko | cfa7f3d | 2024-07-29 10:52:23 -0700 | [diff] [blame] | 2153 | current->utask->auprobe = NULL; |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 2154 | |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 2155 | if (need_prep && !remove) |
| 2156 | prepare_uretprobe(uprobe, regs); /* put bp at return */ |
| 2157 | |
Andrii Nakryiko | cc01bd0 | 2024-09-03 10:45:59 -0700 | [diff] [blame] | 2158 | if (remove && has_consumers) { |
| 2159 | down_read(&uprobe->register_rwsem); |
| 2160 | |
| 2161 | /* re-check that removal is still required, this time under lock */ |
| 2162 | if (!filter_chain(uprobe, current->mm)) { |
| 2163 | WARN_ON(!uprobe_is_active(uprobe)); |
| 2164 | unapply_uprobe(uprobe, current->mm); |
| 2165 | } |
| 2166 | |
| 2167 | up_read(&uprobe->register_rwsem); |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 2168 | } |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 2169 | } |
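/*
 * A minimal consumer sketch (illustrative only; 'my_handler' and friends
 * are hypothetical names, not part of this file):
 *
 *	static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		return 0;		// 0 means: keep the probe installed
 *	}
 *
 *	static int my_ret_handler(struct uprobe_consumer *uc,
 *				  unsigned long func, struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_uc = {
 *		.handler	= my_handler,
 *		.ret_handler	= my_ret_handler,
 *	};
 *
 * A handler that returns UPROBE_HANDLER_REMOVE votes to unapply the
 * uprobe from current->mm; the vote only takes effect if every consumer
 * agrees (note the "remove &= rc" above).
 */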
| 2170 | |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2171 | static void |
| 2172 | handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs) |
| 2173 | { |
| 2174 | struct uprobe *uprobe = ri->uprobe; |
| 2175 | struct uprobe_consumer *uc; |
Andrii Nakryiko | cc01bd0 | 2024-09-03 10:45:59 -0700 | [diff] [blame] | 2176 | int srcu_idx; |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2177 | |
Andrii Nakryiko | cc01bd0 | 2024-09-03 10:45:59 -0700 | [diff] [blame] | 2178 | srcu_idx = srcu_read_lock(&uprobes_srcu); |
| 2179 | list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node, |
| 2180 | srcu_read_lock_held(&uprobes_srcu)) { |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2181 | if (uc->ret_handler) |
| 2182 | uc->ret_handler(uc, ri->func, regs); |
| 2183 | } |
Andrii Nakryiko | cc01bd0 | 2024-09-03 10:45:59 -0700 | [diff] [blame] | 2184 | srcu_read_unlock(&uprobes_srcu, srcu_idx); |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2185 | } |
| 2186 | |
Oleg Nesterov | a83cfeb | 2015-07-21 15:40:13 +0200 | [diff] [blame] | 2187 | static struct return_instance *find_next_ret_chain(struct return_instance *ri) |
| 2188 | { |
| 2189 | bool chained; |
| 2190 | |
| 2191 | do { |
| 2192 | chained = ri->chained; |
| 2193 | ri = ri->next; /* can't be NULL if chained */ |
| 2194 | } while (chained); |
| 2195 | |
| 2196 | return ri; |
| 2197 | } |
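/*
 * Chained instances reuse the first caller's return address, so the
 * instances belonging to one physical return site form an unbroken run
 * of ->chained entries; skipping past them yields the first frame of the
 * next real call level.
 */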
| 2198 | |
Jiri Olsa | ff474a78 | 2024-06-12 08:44:28 +0900 | [diff] [blame] | 2199 | void uprobe_handle_trampoline(struct pt_regs *regs) |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2200 | { |
| 2201 | struct uprobe_task *utask; |
Oleg Nesterov | a83cfeb | 2015-07-21 15:40:13 +0200 | [diff] [blame] | 2202 | struct return_instance *ri, *next; |
Oleg Nesterov | 5eeb50d | 2015-07-21 15:40:21 +0200 | [diff] [blame] | 2203 | bool valid; |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2204 | |
| 2205 | utask = current->utask; |
| 2206 | if (!utask) |
Oleg Nesterov | 0b5256c | 2015-07-21 15:40:08 +0200 | [diff] [blame] | 2207 | goto sigill; |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2208 | |
| 2209 | ri = utask->return_instances; |
| 2210 | if (!ri) |
Oleg Nesterov | 0b5256c | 2015-07-21 15:40:08 +0200 | [diff] [blame] | 2211 | goto sigill; |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2212 | |
Oleg Nesterov | a83cfeb | 2015-07-21 15:40:13 +0200 | [diff] [blame] | 2213 | do { |
Oleg Nesterov | 5eeb50d | 2015-07-21 15:40:21 +0200 | [diff] [blame] | 2214 | /* |
| 2215 | * We should throw out the frames invalidated by longjmp(). |
| 2216 | * If this chain is valid, then the next one should be alive |
| 2217 | * or NULL; the latter case means that nobody but ri->func |
| 2218 | * could hit this trampoline on return. TODO: sigaltstack(). |
| 2219 | */ |
| 2220 | next = find_next_ret_chain(ri); |
Oleg Nesterov | 86dcb70 | 2015-07-21 15:40:26 +0200 | [diff] [blame] | 2221 | valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs); |
Oleg Nesterov | 5eeb50d | 2015-07-21 15:40:21 +0200 | [diff] [blame] | 2222 | |
| 2223 | instruction_pointer_set(regs, ri->orig_ret_vaddr); |
| 2224 | do { |
Andrii Nakryiko | 4a365eb8 | 2024-05-21 18:38:43 -0700 | [diff] [blame] | 2225 | /*
| 2226 | * Pop the current instance from the stack of pending return instances:
| 2227 | * it is not pending anymore, since we just fixed up the original
| 2228 | * instruction pointer in regs and are about to call the handlers. This
| 2229 | * lets fixup_uretprobe_trampoline_entries() properly fix up captured
| 2230 | * stack traces from uretprobe handlers, replacing pending trampoline
| 2231 | * addresses on the stack with the correct original return addresses.
| 2232 | */
| 2233 | utask->return_instances = ri->next; |
Oleg Nesterov | 5eeb50d | 2015-07-21 15:40:21 +0200 | [diff] [blame] | 2234 | if (valid) |
| 2235 | handle_uretprobe_chain(ri, regs); |
| 2236 | ri = free_ret_instance(ri); |
| 2237 | utask->depth--; |
| 2238 | } while (ri != next); |
| 2239 | } while (!valid); |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2240 | |
| 2241 | utask->return_instances = ri; |
Oleg Nesterov | 0b5256c | 2015-07-21 15:40:08 +0200 | [diff] [blame] | 2242 | return; |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2243 | |
Oleg Nesterov | 0b5256c | 2015-07-21 15:40:08 +0200 | [diff] [blame] | 2244 | sigill: |
| 2245 | uprobe_warn(current, "handle uretprobe, sending SIGILL."); |
Eric W. Biederman | 3cf5d07 | 2019-05-23 10:17:27 -0500 | [diff] [blame] | 2246 | force_sig(SIGILL); |
Oleg Nesterov | 0b5256c | 2015-07-21 15:40:08 +0200 | [diff] [blame] | 2247 | |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2248 | } |
| 2249 | |
David A. Long | 6fe50a2 | 2014-02-03 14:25:49 -0500 | [diff] [blame] | 2250 | bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs) |
| 2251 | { |
| 2252 | return false; |
| 2253 | } |
| 2254 | |
Oleg Nesterov | 86dcb70 | 2015-07-21 15:40:26 +0200 | [diff] [blame] | 2255 | bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, |
| 2256 | struct pt_regs *regs) |
Oleg Nesterov | 97da897 | 2015-07-21 15:40:16 +0200 | [diff] [blame] | 2257 | { |
| 2258 | return true; |
| 2259 | } |
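/*
 * Architectures can do better than this "always alive" default: x86, for
 * instance, compares the recorded ->stack value against the current user
 * stack pointer to detect frames discarded by longjmp().
 */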
| 2260 | |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2261 | /* |
| 2262 | * Run handler and ask thread to singlestep. |
| 2263 | * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. |
| 2264 | */ |
| 2265 | static void handle_swbp(struct pt_regs *regs) |
| 2266 | { |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2267 | struct uprobe *uprobe; |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2268 | unsigned long bp_vaddr; |
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 2269 | int is_swbp, srcu_idx; |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2270 | |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2271 | bp_vaddr = uprobe_get_swbp_addr(regs); |
Jiri Olsa | ff474a78 | 2024-06-12 08:44:28 +0900 | [diff] [blame] | 2272 | if (bp_vaddr == uprobe_get_trampoline_vaddr()) |
| 2273 | return uprobe_handle_trampoline(regs); |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2274 | |
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 2275 | srcu_idx = srcu_read_lock(&uprobes_srcu); |
| 2276 | |
| 2277 | uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp); |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2278 | if (!uprobe) { |
Oleg Nesterov | 56bb4cf | 2012-05-29 21:29:47 +0200 | [diff] [blame] | 2279 | if (is_swbp > 0) { |
| 2280 | /* No matching uprobe; signal SIGTRAP. */ |
Oleg Nesterov | fe5ed7a | 2020-07-23 17:44:20 +0200 | [diff] [blame] | 2281 | force_sig(SIGTRAP); |
Oleg Nesterov | 56bb4cf | 2012-05-29 21:29:47 +0200 | [diff] [blame] | 2282 | } else { |
| 2283 | /* |
| 2284 | * Either we raced with uprobe_unregister() or we can't |
| 2285 | * access this memory. The latter is only possible if |
| 2286 | * another thread plays with our ->mm. In both cases |
| 2287 | * we can simply restart. If this vma was unmapped we |
| 2288 | * can pretend this insn was not executed yet and get |
| 2289 | * the (correct) SIGSEGV after restart. |
| 2290 | */ |
| 2291 | instruction_pointer_set(regs, bp_vaddr); |
| 2292 | } |
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 2293 | goto out; |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2294 | } |
Oleg Nesterov | 74e59df | 2012-12-30 15:54:08 +0100 | [diff] [blame] | 2295 | |
| 2296 | /* change it in advance for ->handler() and restart */ |
| 2297 | instruction_pointer_set(regs, bp_vaddr); |
| 2298 | |
Oleg Nesterov | 142b18d | 2012-09-29 21:56:57 +0200 | [diff] [blame] | 2299 | /* |
| 2300 | * TODO: move copy_insn/etc into _register and remove this hack. |
| 2301 | * After we hit the bp, _unregister + _register can install the |
| 2302 | * new and not-yet-analyzed uprobe at the same address, restart. |
| 2303 | */ |
Oleg Nesterov | 71434f2 | 2012-09-30 21:12:44 +0200 | [diff] [blame] | 2304 | if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags))) |
Oleg Nesterov | 74e59df | 2012-12-30 15:54:08 +0100 | [diff] [blame] | 2305 | goto out; |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2306 | |
Andrea Parri | 09d3f01 | 2018-11-22 17:10:31 +0100 | [diff] [blame] | 2307 | /* |
| 2308 | * Pairs with the smp_wmb() in prepare_uprobe(). |
| 2309 | * |
| 2310 | * Guarantees that if we see the UPROBE_COPY_INSN bit set, then |
| 2311 | * we must also see the stores to &uprobe->arch performed by the |
| 2312 | * prepare_uprobe() call. |
| 2313 | */ |
| 2314 | smp_rmb(); |
| 2315 | |
Oleg Nesterov | 72fd293 | 2013-11-26 09:35:25 +0900 | [diff] [blame] | 2316 | /* Tracing handlers use ->utask to communicate with fetch methods */ |
| 2317 | if (!get_utask()) |
| 2318 | goto out; |
| 2319 | |
David A. Long | 6fe50a2 | 2014-02-03 14:25:49 -0500 | [diff] [blame] | 2320 | if (arch_uprobe_ignore(&uprobe->arch, regs)) |
| 2321 | goto out; |
| 2322 | |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2323 | handler_chain(uprobe, regs); |
David A. Long | 6fe50a2 | 2014-02-03 14:25:49 -0500 | [diff] [blame] | 2324 | |
Oleg Nesterov | 8a6b173 | 2014-03-30 18:56:22 +0200 | [diff] [blame] | 2325 | if (arch_uprobe_skip_sstep(&uprobe->arch, regs)) |
Oleg Nesterov | 0578a97 | 2012-09-14 18:31:23 +0200 | [diff] [blame] | 2326 | goto out; |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2327 | |
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 2328 | if (pre_ssout(uprobe, regs, bp_vaddr)) |
| 2329 | goto out; |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2330 | |
Oleg Nesterov | 0578a97 | 2012-09-14 18:31:23 +0200 | [diff] [blame] | 2331 | out: |
Andrii Nakryiko | 8617408 | 2024-09-03 10:45:57 -0700 | [diff] [blame] | 2332 | /* arch_uprobe_skip_sstep() succeeded, or restart if we can't singlestep */
| 2333 | srcu_read_unlock(&uprobes_srcu, srcu_idx); |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2334 | } |
| 2335 | |
| 2336 | /* |
| 2337 | * Perform required fix-ups and disable singlestep. |
| 2338 | * Allow pending signals to take effect. |
| 2339 | */ |
| 2340 | static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs) |
| 2341 | { |
| 2342 | struct uprobe *uprobe; |
Oleg Nesterov | 014940b | 2014-04-03 20:20:10 +0200 | [diff] [blame] | 2343 | int err = 0; |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2344 | |
| 2345 | uprobe = utask->active_uprobe; |
| 2346 | if (utask->state == UTASK_SSTEP_ACK) |
Oleg Nesterov | 014940b | 2014-04-03 20:20:10 +0200 | [diff] [blame] | 2347 | err = arch_uprobe_post_xol(&uprobe->arch, regs); |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2348 | else if (utask->state == UTASK_SSTEP_TRAPPED) |
| 2349 | arch_uprobe_abort_xol(&uprobe->arch, regs); |
| 2350 | else |
| 2351 | WARN_ON_ONCE(1); |
| 2352 | |
| 2353 | put_uprobe(uprobe); |
| 2354 | utask->active_uprobe = NULL; |
| 2355 | utask->state = UTASK_RUNNING; |
Srikar Dronamraju | d4b3b638 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 2356 | xol_free_insn_slot(current); |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2357 | |
| 2358 | spin_lock_irq(¤t->sighand->siglock); |
| 2359 | recalc_sigpending(); /* see uprobe_deny_signal() */ |
| 2360 | spin_unlock_irq(¤t->sighand->siglock); |
Oleg Nesterov | 014940b | 2014-04-03 20:20:10 +0200 | [diff] [blame] | 2361 | |
| 2362 | if (unlikely(err)) { |
| 2363 | uprobe_warn(current, "execute the probed insn, sending SIGILL."); |
Eric W. Biederman | 3cf5d07 | 2019-05-23 10:17:27 -0500 | [diff] [blame] | 2364 | force_sig(SIGILL); |
Oleg Nesterov | 014940b | 2014-04-03 20:20:10 +0200 | [diff] [blame] | 2365 | } |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2366 | } |
| 2367 | |
| 2368 | /* |
Oleg Nesterov | 1b08e907 | 2012-09-14 18:52:10 +0200 | [diff] [blame] | 2369 | * On a breakpoint hit, the breakpoint notifier sets the TIF_UPROBE flag and
| 2370 | * allows the thread to return from interrupt. After that handle_swbp()
| 2371 | * sets utask->active_uprobe.
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2372 | *
Oleg Nesterov | 1b08e907 | 2012-09-14 18:52:10 +0200 | [diff] [blame] | 2373 | * On a singlestep exception, the singlestep notifier sets the TIF_UPROBE flag
| 2374 | * and allows the thread to return from interrupt.
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2375 | *
| 2376 | * While returning to userspace, the thread notices the TIF_UPROBE flag and
| 2377 | * calls uprobe_notify_resume().
| 2378 | */ |
| 2379 | void uprobe_notify_resume(struct pt_regs *regs) |
| 2380 | { |
| 2381 | struct uprobe_task *utask; |
| 2382 | |
Oleg Nesterov | db023ea | 2012-09-14 19:05:46 +0200 | [diff] [blame] | 2383 | clear_thread_flag(TIF_UPROBE); |
| 2384 | |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2385 | utask = current->utask; |
Oleg Nesterov | 1b08e907 | 2012-09-14 18:52:10 +0200 | [diff] [blame] | 2386 | if (utask && utask->active_uprobe) |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2387 | handle_singlestep(utask, regs); |
Oleg Nesterov | 1b08e907 | 2012-09-14 18:52:10 +0200 | [diff] [blame] | 2388 | else |
| 2389 | handle_swbp(regs); |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2390 | } |
| 2391 | |
| 2392 | /* |
| 2393 | * uprobe_pre_sstep_notifier gets called from interrupt context as part of the
| 2394 | * notifier mechanism. Set the TIF_UPROBE flag and indicate a breakpoint hit.
| 2395 | */ |
| 2396 | int uprobe_pre_sstep_notifier(struct pt_regs *regs) |
| 2397 | { |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 2398 | if (!current->mm) |
| 2399 | return 0; |
| 2400 | |
| 2401 | if (!test_bit(MMF_HAS_UPROBES, ¤t->mm->flags) && |
| 2402 | (!current->utask || !current->utask->return_instances)) |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2403 | return 0; |
| 2404 | |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2405 | set_thread_flag(TIF_UPROBE); |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2406 | return 1; |
| 2407 | } |
| 2408 | |
| 2409 | /* |
| 2410 | * uprobe_post_sstep_notifier gets called in interrupt context as part of the
| 2411 | * notifier mechanism. Set the TIF_UPROBE flag and indicate completion of singlestep.
| 2412 | */ |
| 2413 | int uprobe_post_sstep_notifier(struct pt_regs *regs) |
| 2414 | { |
| 2415 | struct uprobe_task *utask = current->utask; |
| 2416 | |
| 2417 | if (!current->mm || !utask || !utask->active_uprobe) |
| 2418 | /* task is currently not uprobed */ |
| 2419 | return 0; |
| 2420 | |
| 2421 | utask->state = UTASK_SSTEP_ACK; |
| 2422 | set_thread_flag(TIF_UPROBE); |
| 2423 | return 1; |
| 2424 | } |
| 2425 | |
| 2426 | static struct notifier_block uprobe_exception_nb = { |
| 2427 | .notifier_call = arch_uprobe_exception_notify, |
| 2428 | .priority = INT_MAX-1, /* notified after kprobes, kgdb */ |
| 2429 | }; |
| 2430 | |
Nadav Amit | aad42dd | 2019-04-26 16:22:44 -0700 | [diff] [blame] | 2431 | void __init uprobes_init(void) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 2432 | { |
| 2433 | int i; |
| 2434 | |
Oleg Nesterov | 66d06df | 2012-11-25 22:48:37 +0100 | [diff] [blame] | 2435 | for (i = 0; i < UPROBES_HASH_SZ; i++) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 2436 | mutex_init(&uprobes_mmap_mutex[i]); |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2437 | |
Nadav Amit | aad42dd | 2019-04-26 16:22:44 -0700 | [diff] [blame] | 2438 | BUG_ON(register_die_notifier(&uprobe_exception_nb)); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 2439 | } |