// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so that the compiler can
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

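/*
 * Reminder on the generic KASAN shadow encoding: each KASAN_GRANULE_SIZE
 * (8-byte) granule of memory maps to one shadow byte. A shadow value of 0
 * means the whole granule is accessible, a value of 1..7 means only the
 * first N bytes are accessible, and a negative value means the granule is
 * entirely poisoned (the value encodes the poison type).
 */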
static __always_inline bool memory_is_poisoned_1(const void *addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow(addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = (unsigned long)addr & KASAN_GRANULE_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

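/*
 * For 2-, 4- and 8-byte accesses that stay within one granule, checking the
 * last byte of the access is enough: accessible bytes within a granule are
 * always contiguous and start at offset 0.
 */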
static __always_inline bool memory_is_poisoned_2_4_8(const void *addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow(addr);

	/*
	 * If the access crosses an 8-byte (shadow granule) boundary, it maps
	 * into two shadow bytes, so both need to be checked.
	 */
	if (unlikely((((unsigned long)addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(const void *addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow(addr);

	/* An unaligned 16-byte access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED((unsigned long)addr, KASAN_GRANULE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

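/* Byte-wise scan: return the address of the first nonzero byte, or 0. */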
static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

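/*
 * Scan [start, end) for the first nonzero byte: handle a byte-wise prefix up
 * to the next 8-byte boundary, then compare whole 64-bit words, then finish
 * with a byte-wise tail. Returns the address of the first nonzero byte, or 0.
 */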
static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}

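/*
 * Check an access of arbitrary size: the access is OK if every shadow byte
 * it covers is zero, or if the only nonzero shadow byte is the last one and
 * it encodes a partially accessible granule that still covers the last byte
 * of the access.
 */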
static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow(addr),
			kasan_mem_to_shadow(addr + size - 1) + 1);

	if (unlikely(ret)) {
		const void *last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte);
		s8 last_accessible_byte = (unsigned long)last_byte & KASAN_GRANULE_MASK;

		if (unlikely(ret != (unsigned long)last_shadow ||
			     last_accessible_byte >= *last_shadow))
			return true;
	}
	return false;
}

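/*
 * When the size is a compile-time constant (as in the __asan_loadX and
 * __asan_storeX callbacks), the switch below is resolved at compile time and
 * BUILD_BUG() catches unexpected constant sizes; non-constant sizes fall back
 * to memory_is_poisoned_n().
 */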
static __always_inline bool memory_is_poisoned(const void *addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

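/*
 * Core range check: returns true if the access is valid (or cannot be
 * checked), false if a report was produced. Zero-size accesses are always
 * allowed; wrapping ranges and addresses without shadow are reported right
 * away; everything else goes through the shadow checks above.
 */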
static __always_inline bool check_region_inline(const void *addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (!kasan_arch_is_ready())
		return true;

	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	if (unlikely(!addr_has_metadata(addr)))
		return !kasan_report(addr, size, write, ret_ip);

	if (likely(!memory_is_poisoned(addr, size)))
		return true;

	return !kasan_report(addr, size, write, ret_ip);
}

bool kasan_check_range(const void *addr, size_t size, bool write,
					unsigned long ret_ip)
{
	return check_region_inline(addr, size, write, ret_ip);
}

bool kasan_byte_accessible(const void *addr)
{
	s8 shadow_byte;

	if (!kasan_arch_is_ready())
		return true;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

	return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
	kasan_quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		kasan_quarantine_remove_cache(cache);
}

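/*
 * Unpoison the global itself and poison the redzone that the compiler placed
 * after it (global->size_with_redzone covers both).
 */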
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);

	kasan_unpoison(global->beg, global->size, false);

	kasan_poison(global->beg + aligned_size,
		     global->size_with_redzone - aligned_size,
		     KASAN_GLOBAL_REDZONE, false);
}

void __asan_register_globals(void *ptr, ssize_t size)
{
	int i;
	struct kasan_global *globals = ptr;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(void *ptr, ssize_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

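/*
 * Callbacks emitted by the compiler for outline instrumentation. The kernel
 * never aborts on a KASAN report, so the _noabort variants are plain aliases
 * of the regular ones.
 */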
#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(void *addr)				\
	{								\
		check_region_inline(addr, size, false, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(void *);			\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(void *addr)				\
	{								\
		check_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(void *);			\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/*
 * Emitted by the compiler before noreturn calls; nothing to do here, the
 * empty stub only satisfies the reference.
 */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by the compiler to poison alloca()ed objects. */
void __asan_alloca_poison(void *addr, ssize_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED((unsigned long)addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison((const void *)(addr + rounded_down_size),
			size - rounded_down_size, false);
	kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_LEFT, false);
	kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_RIGHT, false);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by the compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom)
{
	if (unlikely(!stack_top || stack_top > (void *)stack_bottom))
		return;

	kasan_unpoison(stack_top, (void *)stack_bottom - stack_top, false);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte)					\
	void __asan_set_shadow_##byte(const void *addr, ssize_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations, larger redzones are used.
 */
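/*
 * For example, a 96-byte object (<= 128 - 32) gets a 32-byte redzone, while a
 * 5 KB object (<= (1 << 14) - 256) gets a 256-byte one.
 */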
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

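/*
 * kasan_cache_create() below decides where the metadata for a cache lives:
 * alloc meta always goes into the redzone (when it fits); free meta goes into
 * the redzone for SLAB_TYPESAFE_BY_RCU and constructor caches, into the
 * object itself when it fits there, and otherwise partially or fully into the
 * redzone, depending on whether slub_debug is enabled.
 */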
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;
	unsigned int rem_free_meta_size;
	unsigned int orig_alloc_meta_offset;

	if (!kasan_requires_meta())
		return;

	/*
	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN and
	 * that thus have per-object metadata. Currently, this flag is used in
	 * slab_ksize() to account for per-object metadata when calculating the
	 * size of the accessible memory within the object. Additionally, we use
	 * SLAB_NO_MERGE to prevent merging of caches with per-object metadata.
	 */
	*flags |= SLAB_KASAN | SLAB_NO_MERGE;

	ok_size = *size;

	/* Add alloc meta into the redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* If alloc meta doesn't fit, don't add it. */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	ok_size = *size;
	orig_alloc_meta_offset = cache->kasan_info.alloc_meta_offset;

	/*
	 * Store free meta in the redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
		goto free_meta_added;
	}

	/*
	 * Otherwise, if the object is large enough to contain free meta,
	 * store it within the object.
	 */
	if (sizeof(struct kasan_free_meta) <= cache->object_size) {
		/* cache->kasan_info.free_meta_offset = 0 is implied. */
		goto free_meta_added;
	}

	/*
	 * For smaller objects, store the beginning of free meta within the
	 * object and the end in the redzone, and shift the location of alloc
	 * meta accordingly to free up space for free meta.
	 * This is only possible when slub_debug is disabled, as otherwise
	 * the end of free meta would overlap with slub_debug metadata.
	 */
	if (!__slub_debug_enabled()) {
		rem_free_meta_size = sizeof(struct kasan_free_meta) -
							cache->object_size;
		*size += rem_free_meta_size;
		if (cache->kasan_info.alloc_meta_offset != 0)
			cache->kasan_info.alloc_meta_offset += rem_free_meta_size;
		goto free_meta_added;
	}

	/*
	 * If the object is small and slub_debug is enabled, store free meta
	 * in the redzone after alloc meta.
	 */
	cache->kasan_info.free_meta_offset = *size;
	*size += sizeof(struct kasan_free_meta);

free_meta_added:
	/* If free meta doesn't fit, don't add it. */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		cache->kasan_info.alloc_meta_offset = orig_alloc_meta_offset;
		*size = ok_size;
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Cap it at KMALLOC_MAX_SIZE. */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta) {
		/* Zero out alloc meta to mark it as invalid. */
		__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	/*
	 * Explicitly marking free meta as invalid is not required: the shadow
	 * value for the first 8 bytes of a newly allocated object is not
	 * KASAN_SLAB_FREE_META.
	 */
}

static void release_alloc_meta(struct kasan_alloc_meta *meta)
{
	/* Zero out alloc meta to mark it as invalid. */
	__memset(meta, 0, sizeof(*meta));
}

static void release_free_meta(const void *object, struct kasan_free_meta *meta)
{
	if (!kasan_arch_is_ready())
		return;

	/* Check if free meta is valid. */
	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
		return;

	/* Mark free meta as invalid. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
}

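/*
 * Return the amount of KASAN metadata either stored inside the object
 * (in_object == true: free meta placed at offset 0) or appended to it in the
 * redzone (alloc meta plus any out-of-object free meta).
 */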
size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
{
	struct kasan_cache *info = &cache->kasan_info;

	if (!kasan_requires_meta())
		return 0;

	if (in_object)
		return (info->free_meta_offset ?
			0 : sizeof(struct kasan_free_meta));
	else
		return (info->alloc_meta_offset ?
			sizeof(struct kasan_alloc_meta) : 0) +
			((info->free_meta_offset &&
			  info->free_meta_offset != KASAN_NO_FREE_META) ?
			  sizeof(struct kasan_free_meta) : 0);
}

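/*
 * Record an auxiliary call stack (e.g. the point where a work item or RCU
 * callback was queued) in the object's alloc meta. The newest stack is kept
 * in aux_stack[0]; the previous one is shifted to aux_stack[1].
 */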
static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
{
	struct slab *slab = kasan_addr_to_slab(addr);
	struct kmem_cache *cache;
	struct kasan_alloc_meta *alloc_meta;
	void *object;

	if (is_kfence_address(addr) || !slab)
		return;

	cache = slab->slab_cache;
	object = nearest_obj(cache, slab, addr);
	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
	alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
}

void kasan_record_aux_stack(void *addr)
{
	return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
}

void kasan_record_aux_stack_noalloc(void *addr)
{
	return __kasan_record_aux_stack(addr, 0);
}

void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	/* Invalidate previous stack traces (might exist for krealloc or mempool). */
	release_alloc_meta(alloc_meta);

	kasan_save_track(&alloc_meta->alloc_track, flags);
}

void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
	struct kasan_free_meta *free_meta;

	free_meta = kasan_get_free_meta(cache, object);
	if (!free_meta)
		return;

	/* Invalidate previous stack trace (might exist for mempool). */
	release_free_meta(object, free_meta);

	kasan_save_track(&free_meta->free_track, 0);

	/* Mark free meta as valid. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE_META;
}