// SPDX-License-Identifier: GPL-2.0-or-later

#define pr_fmt(fmt) "ref_tracker: " fmt

#include <linux/export.h>
#include <linux/list_sort.h>
#include <linux/ref_tracker.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>

#define REF_TRACKER_STACK_ENTRIES 16
#define STACK_BUF_SIZE 1024

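/*
 * A single tracked reference: sits on dir->list while the reference is held
 * and on dir->quarantine once released, keeping the allocation and free
 * stack handles around for later reporting.
 */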
struct ref_tracker {
	struct list_head	head;   /* anchor into dir->list or dir->quarantine */
	bool			dead;
	depot_stack_handle_t	alloc_stack_handle;
	depot_stack_handle_t	free_stack_handle;
};

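/*
 * Snapshot of the outstanding references in a directory, grouped by
 * allocation stack: @total counts all live trackers, @count the number of
 * distinct stacks recorded in @stacks[].
 */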
struct ref_tracker_dir_stats {
	int total;
	int count;
	struct {
		depot_stack_handle_t stack_handle;
		unsigned int count;
	} stacks[];
};

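/*
 * Walk dir->list and bucket the live trackers by their allocation stack
 * handle.  At most @limit distinct stacks are recorded; references beyond
 * that still bump @total but are otherwise skipped.  Callers hold dir->lock
 * (see __ref_tracker_dir_pr_ostream()).
 */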
static struct ref_tracker_dir_stats *
ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
{
	struct ref_tracker_dir_stats *stats;
	struct ref_tracker *tracker;

	stats = kmalloc(struct_size(stats, stacks, limit),
			GFP_NOWAIT | __GFP_NOWARN);
	if (!stats)
		return ERR_PTR(-ENOMEM);
	stats->total = 0;
	stats->count = 0;

	list_for_each_entry(tracker, &dir->list, head) {
		depot_stack_handle_t stack = tracker->alloc_stack_handle;
		int i;

		++stats->total;
		for (i = 0; i < stats->count; ++i)
			if (stats->stacks[i].stack_handle == stack)
				break;
		if (i >= limit)
			continue;
		if (i >= stats->count) {
			stats->stacks[i].stack_handle = stack;
			stats->stacks[i].count = 0;
			++stats->count;
		}
		++stats->stacks[i].count;
	}

	return stats;
}

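/*
 * Minimal output abstraction for the report helpers: with a NULL buffer the
 * text goes to the kernel log via pr_err(), otherwise it is appended to the
 * caller-supplied buffer, tracking how many bytes have been used.
 */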
struct ostream {
	char *buf;
	int size, used;
};

#define pr_ostream(stream, fmt, args...) \
({ \
	struct ostream *_s = (stream); \
\
	if (!_s->buf) { \
		pr_err(fmt, ##args); \
	} else { \
		int ret, len = _s->size - _s->used; \
		ret = snprintf(_s->buf + _s->used, len, pr_fmt(fmt), ##args); \
		_s->used += min(ret, len); \
	} \
})

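/*
 * Emit the report for @dir to @s: one entry per distinct allocation stack
 * (up to @display_limit), followed by a summary line for any users whose
 * stacks did not fit.  Must be called with dir->lock held.
 */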
static void
__ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
			     unsigned int display_limit, struct ostream *s)
{
	struct ref_tracker_dir_stats *stats;
	unsigned int i = 0, skipped;
	depot_stack_handle_t stack;
	char *sbuf;

	lockdep_assert_held(&dir->lock);

	if (list_empty(&dir->list))
		return;

	stats = ref_tracker_get_stats(dir, display_limit);
	if (IS_ERR(stats)) {
		pr_ostream(s, "%s@%pK: couldn't get stats, error %pe\n",
			   dir->name, dir, stats);
		return;
	}

	sbuf = kmalloc(STACK_BUF_SIZE, GFP_NOWAIT | __GFP_NOWARN);

	for (i = 0, skipped = stats->total; i < stats->count; ++i) {
		stack = stats->stacks[i].stack_handle;
		if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4))
			sbuf[0] = 0;
		pr_ostream(s, "%s@%pK has %d/%d users at\n%s\n", dir->name, dir,
			   stats->stacks[i].count, stats->total, sbuf);
		skipped -= stats->stacks[i].count;
	}

	if (skipped)
		pr_ostream(s, "%s@%pK skipped reports about %d/%d users.\n",
			   dir->name, dir, skipped, stats->total);

	kfree(sbuf);

	kfree(stats);
}

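/*
 * Print the report to the kernel log (an empty ostream routes output to
 * pr_err()).  The caller must already hold dir->lock.
 */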
void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
				  unsigned int display_limit)
{
	struct ostream os = {};

	__ref_tracker_dir_pr_ostream(dir, display_limit, &os);
}
EXPORT_SYMBOL(ref_tracker_dir_print_locked);

void ref_tracker_dir_print(struct ref_tracker_dir *dir,
			   unsigned int display_limit)
{
	unsigned long flags;

	spin_lock_irqsave(&dir->lock, flags);
	ref_tracker_dir_print_locked(dir, display_limit);
	spin_unlock_irqrestore(&dir->lock, flags);
}
EXPORT_SYMBOL(ref_tracker_dir_print);

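/*
 * Write the report into @buf (at most @size bytes) instead of the kernel
 * log, and return the number of bytes used.
 */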
int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size)
{
	struct ostream os = { .buf = buf, .size = size };
	unsigned long flags;

	spin_lock_irqsave(&dir->lock, flags);
	__ref_tracker_dir_pr_ostream(dir, 16, &os);
	spin_unlock_irqrestore(&dir->lock, flags);

	return os.used;
}
EXPORT_SYMBOL(ref_tracker_dir_snprint);

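/*
 * Tear down a directory: drop everything still sitting on the quarantine
 * list, then report and free any references that were never released.
 * Outstanding references at this point are leaks and trigger a warning.
 */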
void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
{
	struct ref_tracker *tracker, *n;
	unsigned long flags;
	bool leak = false;

	dir->dead = true;
	spin_lock_irqsave(&dir->lock, flags);
	list_for_each_entry_safe(tracker, n, &dir->quarantine, head) {
		list_del(&tracker->head);
		kfree(tracker);
		dir->quarantine_avail++;
	}
	if (!list_empty(&dir->list)) {
		ref_tracker_dir_print_locked(dir, 16);
		leak = true;
		list_for_each_entry_safe(tracker, n, &dir->list, head) {
			list_del(&tracker->head);
			kfree(tracker);
		}
	}
	spin_unlock_irqrestore(&dir->lock, flags);
	WARN_ON_ONCE(leak);
	WARN_ON_ONCE(refcount_read(&dir->untracked) != 1);
	WARN_ON_ONCE(refcount_read(&dir->no_tracker) != 1);
}
EXPORT_SYMBOL(ref_tracker_dir_exit);

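/*
 * Take a tracked reference: allocate a struct ref_tracker, record the
 * caller's stack in the stack depot and link the tracker into dir->list.
 * A NULL @trackerp only bumps the no_tracker count; an allocation failure
 * falls back to the untracked count so ref_tracker_free() stays balanced.
 */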
int ref_tracker_alloc(struct ref_tracker_dir *dir,
		      struct ref_tracker **trackerp,
		      gfp_t gfp)
{
	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
	struct ref_tracker *tracker;
	unsigned int nr_entries;
	gfp_t gfp_mask = gfp | __GFP_NOWARN;
	unsigned long flags;

	WARN_ON_ONCE(dir->dead);

	if (!trackerp) {
		refcount_inc(&dir->no_tracker);
		return 0;
	}
	if (gfp & __GFP_DIRECT_RECLAIM)
		gfp_mask |= __GFP_NOFAIL;
	*trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask);
	if (unlikely(!tracker)) {
		pr_err_once("memory allocation failure, unreliable refcount tracker.\n");
		refcount_inc(&dir->untracked);
		return -ENOMEM;
	}
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp);

	spin_lock_irqsave(&dir->lock, flags);
	list_add(&tracker->head, &dir->list);
	spin_unlock_irqrestore(&dir->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_alloc);

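/*
 * Release a tracked reference: record the caller's stack as the free stack,
 * detect double frees (the tracker is already marked dead) and park the
 * tracker on the quarantine list so a later double free can still be
 * reported.  Once the quarantine is full, the oldest entry is freed.
 */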
int ref_tracker_free(struct ref_tracker_dir *dir,
		     struct ref_tracker **trackerp)
{
	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
	depot_stack_handle_t stack_handle;
	struct ref_tracker *tracker;
	unsigned int nr_entries;
	unsigned long flags;

	WARN_ON_ONCE(dir->dead);

	if (!trackerp) {
		refcount_dec(&dir->no_tracker);
		return 0;
	}
	tracker = *trackerp;
	if (!tracker) {
		refcount_dec(&dir->untracked);
		return -EEXIST;
	}
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	stack_handle = stack_depot_save(entries, nr_entries,
					GFP_NOWAIT | __GFP_NOWARN);

	spin_lock_irqsave(&dir->lock, flags);
	if (tracker->dead) {
		pr_err("reference already released.\n");
		if (tracker->alloc_stack_handle) {
			pr_err("allocated in:\n");
			stack_depot_print(tracker->alloc_stack_handle);
		}
		if (tracker->free_stack_handle) {
			pr_err("freed in:\n");
			stack_depot_print(tracker->free_stack_handle);
		}
		spin_unlock_irqrestore(&dir->lock, flags);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	tracker->dead = true;

	tracker->free_stack_handle = stack_handle;

	list_move_tail(&tracker->head, &dir->quarantine);
	if (!dir->quarantine_avail) {
		tracker = list_first_entry(&dir->quarantine, struct ref_tracker, head);
		list_del(&tracker->head);
	} else {
		dir->quarantine_avail--;
		tracker = NULL;
	}
	spin_unlock_irqrestore(&dir->lock, flags);

	kfree(tracker);
	return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_free);