// SPDX-License-Identifier: GPL-2.0
/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cpuhotplug.h>

#include "../slab.h"
#include "kasan.h"

/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a singly linked list, which also stores the total size of
 * objects inside of it.
 */
struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;
	bool offline;
};

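/* The trailing ->offline member is implicitly zero-initialized (false). */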
#define QLIST_INIT { NULL, NULL, 0 }

static bool qlist_empty(struct qlist_head *q)
{
	return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
	q->head = q->tail = NULL;
	q->bytes = 0;
}

static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		      size_t size)
{
	if (unlikely(qlist_empty(q)))
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}
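
/*
 * Splice all entries of @from onto the tail of @to in O(1) and
 * reinitialize @from.
 */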
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
	if (unlikely(qlist_empty(from)))
		return;

	if (qlist_empty(to)) {
		*to = *from;
		qlist_init(from);
		return;
	}

	to->tail->next = from->head;
	to->tail = from->tail;
	to->bytes += from->bytes;

	qlist_init(from);
}

#define QUARANTINE_PERCPU_SIZE (1 << 20)
#define QUARANTINE_BATCHES \
	(1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)

/*
 * The object quarantine consists of per-cpu queues and a global queue,
 * guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);

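/*
 * On PREEMPT_RT, objects filtered out of the per-cpu queues by a cache
 * shrink are parked on this per-cpu list and freed later from task
 * context (see kasan_quarantine_remove_cache()).
 */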
#ifdef CONFIG_PREEMPT_RT
struct cpu_shrink_qlist {
	raw_spinlock_t lock;
	struct qlist_head qlist;
};

static DEFINE_PER_CPU(struct cpu_shrink_qlist, shrink_qlist) = {
	.lock = __RAW_SPIN_LOCK_UNLOCKED(shrink_qlist.lock),
};
#endif

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * Quarantine doesn't support memory shrinker with SLAB allocator, so we keep
 * the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32

static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
	return virt_to_slab(qlink)->slab_cache;
}

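/*
 * A qlist_node is embedded in the object's kasan_free_meta; step back over
 * the container and the cache's free_meta_offset to recover the object.
 */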
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta,
			     quarantine_link);

	return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}

static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);
	unsigned long flags;

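	/* SLAB's ___cache_free() must be called with interrupts disabled. */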
	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_save(flags);

	/*
	 * If init_on_free is enabled and KASAN's free metadata is stored in
	 * the object, zero the metadata. Otherwise, the object's memory will
	 * not be properly zeroed, as KASAN saves the metadata after the slab
	 * allocator zeroes the object.
	 */
	if (slab_want_init_on_free(cache) &&
	    cache->kasan_info.free_meta_offset == 0)
		memzero_explicit(meta, sizeof(*meta));

	/*
	 * As the object now gets freed from the quarantine, assume that its
	 * free track is no longer valid.
	 */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;

	___cache_free(cache, object, _THIS_IP_);

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_restore(flags);
}

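/*
 * Free every object on @q. A NULL @cache means the list may hold objects
 * from several caches; look each object's cache up from its slab.
 */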
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
	struct qlist_node *qlink;

	if (unlikely(qlist_empty(q)))
		return;

	qlink = q->head;
	while (qlink) {
		struct kmem_cache *obj_cache =
			cache ? cache : qlink_to_cache(qlink);
		struct qlist_node *next = qlink->next;

		qlink_free(qlink, obj_cache);
		qlink = next;
	}
	qlist_init(q);
}
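/*
 * Called from the KASAN slab free path: instead of returning the object to
 * the allocator right away, stash it on this CPU's queue. Once the per-cpu
 * queue exceeds QUARANTINE_PERCPU_SIZE, splice it into the current global
 * batch. Returns true if the object was taken into the quarantine.
 */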
bool kasan_quarantine_put(struct kmem_cache *cache, void *object)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;
	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);

	/*
	 * If there's no metadata for this object, don't put it into
	 * quarantine.
	 */
	if (!meta)
		return false;

	/*
	 * Note: irq must be disabled until after we move the batch to the
	 * global quarantine. Otherwise kasan_quarantine_remove_cache() can
	 * miss some objects belonging to the cache if they are in our local
	 * temp list. kasan_quarantine_remove_cache() executes on_each_cpu()
	 * at the beginning which ensures that it either sees the objects in
	 * per-cpu lists or in the global quarantine.
	 */
	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	if (q->offline) {
		local_irq_restore(flags);
		return false;
	}
	qlist_put(q, &meta->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
		qlist_move_all(q, &temp);

		raw_spin_lock(&quarantine_lock);
		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
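		/*
		 * Advance the tail batch once it reaches its target size,
		 * unless the ring is full (new_tail would catch up with
		 * quarantine_head); in that case the current batch simply
		 * keeps growing rather than dropping objects.
		 */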
		if (global_quarantine[quarantine_tail].bytes >=
				READ_ONCE(quarantine_batch_size)) {
			int new_tail;

			new_tail = quarantine_tail + 1;
			if (new_tail == QUARANTINE_BATCHES)
				new_tail = 0;
			if (new_tail != quarantine_head)
				quarantine_tail = new_tail;
		}
		raw_spin_unlock(&quarantine_lock);
	}

	local_irq_restore(flags);

	return true;
}
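/*
 * Called from allocation paths: once the quarantine grows past its limit,
 * evict the oldest global batch and return its objects to the allocator.
 */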
void kasan_quarantine_reduce(void)
{
	size_t total_size, new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	int srcu_idx;
	struct qlist_head to_free = QLIST_INIT;

	if (likely(READ_ONCE(quarantine_size) <=
		   READ_ONCE(quarantine_max_size)))
		return;

	/*
	 * srcu critical section ensures that kasan_quarantine_remove_cache()
	 * will not miss objects belonging to the cache while they are in our
	 * local to_free list. srcu is chosen because (1) it gives us private
	 * grace period domain that does not interfere with anything else,
	 * and (2) it allows synchronize_srcu() to return without waiting
	 * if there are no pending read critical sections (which is the
	 * expected case).
	 */
	srcu_idx = srcu_read_lock(&remove_cache_srcu);
	raw_spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	total_size = (totalram_pages() << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (total_size < percpu_quarantines) ?
		0 : total_size - percpu_quarantines;
	WRITE_ONCE(quarantine_max_size, new_quarantine_size);
	/* Aim at consuming at most 1/2 of slots in quarantine. */
	WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
					      2 * total_size / QUARANTINE_BATCHES));

	if (likely(quarantine_size > quarantine_max_size)) {
		qlist_move_all(&global_quarantine[quarantine_head], &to_free);
		WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
		quarantine_head++;
		if (quarantine_head == QUARANTINE_BATCHES)
			quarantine_head = 0;
	}

	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
	srcu_read_unlock(&remove_cache_srcu, srcu_idx);
}
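/*
 * Single pass over @from: entries that belong to @cache are appended to
 * @to; all others are re-appended to @from, preserving relative order.
 */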
static void qlist_move_cache(struct qlist_head *from,
			     struct qlist_head *to,
			     struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	curr = from->head;
	qlist_init(from);
	while (curr) {
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}

#ifndef CONFIG_PREEMPT_RT
static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
{
	struct kmem_cache *cache = arg;
	struct qlist_head to_free = QLIST_INIT;

	qlist_move_cache(q, &to_free, cache);
	qlist_free_all(&to_free, cache);
}
#else
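/*
 * On PREEMPT_RT this runs in IPI context, where freeing could involve
 * taking locks that may sleep; park the matching objects instead and let
 * kasan_quarantine_remove_cache() free them from task context.
 */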
static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
{
	struct kmem_cache *cache = arg;
	unsigned long flags;
	struct cpu_shrink_qlist *sq;

	sq = this_cpu_ptr(&shrink_qlist);
	raw_spin_lock_irqsave(&sq->lock, flags);
	qlist_move_cache(q, &sq->qlist, cache);
	raw_spin_unlock_irqrestore(&sq->lock, flags);
}
#endif

static void per_cpu_remove_cache(void *arg)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * READ_ONCE() pairs with the WRITE_ONCE(q->offline, true) in
	 * kasan_cpu_offline(): a queue marked offline is being drained and
	 * must not be touched, or cpu_quarantine could be corrupted by an
	 * interrupt.
	 */
	if (READ_ONCE(q->offline))
		return;
	__per_cpu_remove_cache(q, arg);
}

/* Free all quarantined objects belonging to cache. */
void kasan_quarantine_remove_cache(struct kmem_cache *cache)
{
	unsigned long flags, i;
	struct qlist_head to_free = QLIST_INIT;

	/*
	 * Must be careful to not miss any objects that are being moved from
	 * per-cpu list to the global quarantine in kasan_quarantine_put(),
	 * nor objects being freed in kasan_quarantine_reduce(). on_each_cpu()
	 * achieves the first goal, while synchronize_srcu() achieves the
	 * second.
	 */
	on_each_cpu(per_cpu_remove_cache, cache, 1);

#ifdef CONFIG_PREEMPT_RT
	{
		int cpu;
		struct cpu_shrink_qlist *sq;

		for_each_online_cpu(cpu) {
			sq = per_cpu_ptr(&shrink_qlist, cpu);
			raw_spin_lock_irqsave(&sq->lock, flags);
			qlist_move_cache(&sq->qlist, &to_free, cache);
			raw_spin_unlock_irqrestore(&sq->lock, flags);
		}
		qlist_free_all(&to_free, cache);
	}
#endif

	raw_spin_lock_irqsave(&quarantine_lock, flags);
	for (i = 0; i < QUARANTINE_BATCHES; i++) {
		if (qlist_empty(&global_quarantine[i]))
			continue;
		qlist_move_cache(&global_quarantine[i], &to_free, cache);
		/* Scanning whole quarantine can take a while. */
		raw_spin_unlock_irqrestore(&quarantine_lock, flags);
		cond_resched();
		raw_spin_lock_irqsave(&quarantine_lock, flags);
	}
	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, cache);

	synchronize_srcu(&remove_cache_srcu);
}

static int kasan_cpu_online(unsigned int cpu)
{
	this_cpu_ptr(&cpu_quarantine)->offline = false;
	return 0;
}

static int kasan_cpu_offline(unsigned int cpu)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * Ensure the ordering between the writing to q->offline and
	 * qlist_free_all(). Otherwise, cpu_quarantine may be corrupted
	 * by an interrupt.
	 */
	WRITE_ONCE(q->offline, true);
	barrier();
	qlist_free_all(q, NULL);
	return 0;
}

static int __init kasan_cpu_quarantine_init(void)
{
	int ret = 0;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
				kasan_cpu_online, kasan_cpu_offline);
	if (ret < 0)
		pr_err("kasan cpu quarantine register failed [%d]\n", ret);
	return ret;
}
late_initcall(kasan_cpu_quarantine_init);