// SPDX-License-Identifier: GPL-2.0
/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cpuhotplug.h>

#include "../slab.h"
#include "kasan.h"

/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a singly linked list, which also stores the total size of
 * objects inside of it.
 */
struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;
	bool offline;
};

#define QLIST_INIT { NULL, NULL, 0 }

static bool qlist_empty(struct qlist_head *q)
{
	return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
	q->head = q->tail = NULL;
	q->bytes = 0;
}

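/* Append an object's node to the tail of the queue and account its size. */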
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		size_t size)
{
	if (unlikely(qlist_empty(q)))
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}

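/* Splice all nodes from @from onto the tail of @to and reinitialize @from. */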
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
	if (unlikely(qlist_empty(from)))
		return;

	if (qlist_empty(to)) {
		*to = *from;
		qlist_init(from);
		return;
	}

	to->tail->next = from->head;
	to->tail = from->tail;
	to->bytes += from->bytes;

	qlist_init(from);
}

#define QUARANTINE_PERCPU_SIZE (1 << 20)
#define QUARANTINE_BATCHES \
	(1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)

/*
 * The object quarantine consists of per-cpu queues and a global queue,
 * guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);

#ifdef CONFIG_PREEMPT_RT
struct cpu_shrink_qlist {
	raw_spinlock_t lock;
	struct qlist_head qlist;
};

static DEFINE_PER_CPU(struct cpu_shrink_qlist, shrink_qlist) = {
	.lock = __RAW_SPIN_LOCK_UNLOCKED(shrink_qlist.lock),
};
#endif

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * Quarantine doesn't support memory shrinker with SLAB allocator, so we keep
 * the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32

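/* Look up the cache a quarantined object belongs to via its backing slab. */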
static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
	return virt_to_slab(qlink)->slab_cache;
}

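/*
 * Recover the object's address from the quarantine link embedded in its
 * free metadata.
 */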
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta,
			     quarantine_link);

	return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}

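/* Return a single quarantined object to the slab allocator. */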
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);
	unsigned long flags;

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_save(flags);

	/*
	 * If init_on_free is enabled and KASAN's free metadata is stored in
	 * the object, zero the metadata. Otherwise, the object's memory will
	 * not be properly zeroed, as KASAN saves the metadata after the slab
	 * allocator zeroes the object.
	 */
	if (slab_want_init_on_free(cache) &&
	    cache->kasan_info.free_meta_offset == 0)
		memzero_explicit(meta, sizeof(*meta));

	/*
	 * As the object now gets freed from the quarantine, assume that its
	 * free track is no longer valid.
	 */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;

	___cache_free(cache, object, _THIS_IP_);

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_restore(flags);
}

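/*
 * Free every object on the queue. If @cache is NULL, each object's cache is
 * looked up from its slab instead.
 */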
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
	struct qlist_node *qlink;

	if (unlikely(qlist_empty(q)))
		return;

	qlink = q->head;
	while (qlink) {
		struct kmem_cache *obj_cache =
			cache ? cache : qlink_to_cache(qlink);
		struct qlist_node *next = qlink->next;

		qlink_free(qlink, obj_cache);
		qlink = next;
	}
	qlist_init(q);
}

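/*
 * Put a just-freed object into the per-cpu quarantine. Returns true if the
 * object was quarantined (and so must not be freed by the caller yet), false
 * if it could not be quarantined and should be freed immediately.
 */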
bool kasan_quarantine_put(struct kmem_cache *cache, void *object)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;
	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);

	/*
	 * If there's no metadata for this object, don't put it into
	 * quarantine.
	 */
	if (!meta)
		return false;

	/*
	 * Note: irq must be disabled until after we move the batch to the
	 * global quarantine. Otherwise kasan_quarantine_remove_cache() can
	 * miss some objects belonging to the cache if they are in our local
	 * temp list. kasan_quarantine_remove_cache() executes on_each_cpu()
	 * at the beginning which ensures that it either sees the objects in
	 * per-cpu lists or in the global quarantine.
	 */
	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	if (q->offline) {
		local_irq_restore(flags);
		return false;
	}
	qlist_put(q, &meta->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
		qlist_move_all(q, &temp);

		raw_spin_lock(&quarantine_lock);
		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
		if (global_quarantine[quarantine_tail].bytes >=
				READ_ONCE(quarantine_batch_size)) {
			int new_tail;

			new_tail = quarantine_tail + 1;
			if (new_tail == QUARANTINE_BATCHES)
				new_tail = 0;
			if (new_tail != quarantine_head)
				quarantine_tail = new_tail;
		}
		raw_spin_unlock(&quarantine_lock);
	}

	local_irq_restore(flags);

	return true;
}

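/*
 * Shrink the global quarantine: recompute the size limits (the amount of
 * installed memory may have changed) and, if the total size still exceeds
 * the limit, free the oldest batch.
 */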
void kasan_quarantine_reduce(void)
{
	size_t total_size, new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	int srcu_idx;
	struct qlist_head to_free = QLIST_INIT;

	if (likely(READ_ONCE(quarantine_size) <=
		   READ_ONCE(quarantine_max_size)))
		return;

	/*
	 * srcu critical section ensures that kasan_quarantine_remove_cache()
	 * will not miss objects belonging to the cache while they are in our
	 * local to_free list. srcu is chosen because (1) it gives us a
	 * private grace period domain that does not interfere with anything
	 * else, and (2) it allows synchronize_srcu() to return without
	 * waiting if there are no pending read critical sections (which is
	 * the expected case).
	 */
	srcu_idx = srcu_read_lock(&remove_cache_srcu);
	raw_spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	total_size = (totalram_pages() << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (total_size < percpu_quarantines) ?
		0 : total_size - percpu_quarantines;
	WRITE_ONCE(quarantine_max_size, new_quarantine_size);
	/* Aim at consuming at most 1/2 of slots in quarantine. */
	WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
		2 * total_size / QUARANTINE_BATCHES));

	if (likely(quarantine_size > quarantine_max_size)) {
		qlist_move_all(&global_quarantine[quarantine_head], &to_free);
		WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
		quarantine_head++;
		if (quarantine_head == QUARANTINE_BATCHES)
			quarantine_head = 0;
	}

	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
	srcu_read_unlock(&remove_cache_srcu, srcu_idx);
}

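/* Move every object belonging to @cache from @from to @to. */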
static void qlist_move_cache(struct qlist_head *from,
				   struct qlist_head *to,
				   struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	curr = from->head;
	qlist_init(from);
	while (curr) {
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}

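/*
 * On !PREEMPT_RT, objects drained from a per-cpu queue can be freed right
 * away from the on_each_cpu() callback. On PREEMPT_RT, freeing from that
 * context is not safe, so the objects are instead parked on a per-cpu shrink
 * list under a raw spinlock and freed later from
 * kasan_quarantine_remove_cache().
 */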
#ifndef CONFIG_PREEMPT_RT
static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
{
	struct kmem_cache *cache = arg;
	struct qlist_head to_free = QLIST_INIT;

	qlist_move_cache(q, &to_free, cache);
	qlist_free_all(&to_free, cache);
}
#else
static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
{
	struct kmem_cache *cache = arg;
	unsigned long flags;
	struct cpu_shrink_qlist *sq;

	sq = this_cpu_ptr(&shrink_qlist);
	raw_spin_lock_irqsave(&sq->lock, flags);
	qlist_move_cache(q, &sq->qlist, cache);
	raw_spin_unlock_irqrestore(&sq->lock, flags);
}
#endif

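/* on_each_cpu() callback: drain this CPU's quarantine of the cache in @arg. */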
static void per_cpu_remove_cache(void *arg)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * Ensure the ordering between the write to q->offline and
	 * per_cpu_remove_cache(). Prevent cpu_quarantine from being
	 * corrupted by an interrupt.
	 */
	if (READ_ONCE(q->offline))
		return;
	__per_cpu_remove_cache(q, arg);
}

/* Free all quarantined objects belonging to cache. */
void kasan_quarantine_remove_cache(struct kmem_cache *cache)
{
	unsigned long flags, i;
	struct qlist_head to_free = QLIST_INIT;

	/*
	 * Must be careful to not miss any objects that are being moved from
	 * per-cpu list to the global quarantine in kasan_quarantine_put(),
	 * nor objects being freed in kasan_quarantine_reduce(). on_each_cpu()
	 * achieves the first goal, while synchronize_srcu() achieves the
	 * second.
	 */
	on_each_cpu(per_cpu_remove_cache, cache, 1);

#ifdef CONFIG_PREEMPT_RT
	{
		int cpu;
		struct cpu_shrink_qlist *sq;

		for_each_online_cpu(cpu) {
			sq = per_cpu_ptr(&shrink_qlist, cpu);
			raw_spin_lock_irqsave(&sq->lock, flags);
			qlist_move_cache(&sq->qlist, &to_free, cache);
			raw_spin_unlock_irqrestore(&sq->lock, flags);
		}
		qlist_free_all(&to_free, cache);
	}
#endif

	raw_spin_lock_irqsave(&quarantine_lock, flags);
	for (i = 0; i < QUARANTINE_BATCHES; i++) {
		if (qlist_empty(&global_quarantine[i]))
			continue;
		qlist_move_cache(&global_quarantine[i], &to_free, cache);
		/* Scanning whole quarantine can take a while. */
		raw_spin_unlock_irqrestore(&quarantine_lock, flags);
		cond_resched();
		raw_spin_lock_irqsave(&quarantine_lock, flags);
	}
	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, cache);

	synchronize_srcu(&remove_cache_srcu);
}

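/* CPU hotplug callback: mark this CPU's quarantine as online and usable. */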
static int kasan_cpu_online(unsigned int cpu)
{
	this_cpu_ptr(&cpu_quarantine)->offline = false;
	return 0;
}

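/*
 * CPU hotplug callback: mark this CPU's quarantine offline and drain it so
 * that no objects are left behind on a dead CPU.
 */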
static int kasan_cpu_offline(unsigned int cpu)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * Ensure the ordering between the write to q->offline and
	 * qlist_free_all(). Otherwise, cpu_quarantine may be corrupted
	 * by an interrupt.
	 */
	WRITE_ONCE(q->offline, true);
	barrier();
	qlist_free_all(q, NULL);
	return 0;
}

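/* Register the CPU hotplug callbacks that keep per-cpu quarantines in sync. */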
static int __init kasan_cpu_quarantine_init(void)
{
	int ret = 0;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
				kasan_cpu_online, kasan_cpu_offline);
	if (ret < 0)
		pr_err("kasan cpu quarantine register failed [%d]\n", ret);
	return ret;
}
late_initcall(kasan_cpu_quarantine_init);