// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN runtime code that manages shadow memory for
 * generic and software tag-based KASAN modes.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"

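/*
 * Out-of-line implementations of the explicit kasan_check_read() and
 * kasan_check_write() annotations: check the whole [p, p + size) range
 * against the shadow and report a bad access if any part is poisoned.
 */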
bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return kasan_check_range((void *)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return kasan_check_range((void *)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
/*
 * CONFIG_GENERIC_ENTRY relies on compiler-emitted mem*() calls not being
 * instrumented. KASAN-enabled toolchains should emit __asan_mem*() functions
 * for the sites they want to instrument.
 *
 * If we have a compiler that can instrument memintrinsics, never override
 * these, so that non-instrumented files can safely consider them as builtins.
 */
#undef memset
void *memset(void *addr, int c, size_t len)
{
	if (!kasan_check_range(addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}
#endif

void *__asan_memset(void *addr, int c, ssize_t len)
{
	if (!kasan_check_range(addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}
EXPORT_SYMBOL(__asan_memset);

#ifdef __HAVE_ARCH_MEMMOVE
void *__asan_memmove(void *dest, const void *src, ssize_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
EXPORT_SYMBOL(__asan_memmove);
#endif

void *__asan_memcpy(void *dest, const void *src, ssize_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}
EXPORT_SYMBOL(__asan_memcpy);

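/*
 * For example (assuming a toolchain with memintrinsic-prefix support):
 * a plain memcpy(dst, src, len) in an instrumented file is emitted as
 * __asan_memcpy(dst, src, len), which validates both ranges above before
 * delegating to the uninstrumented __memcpy(). The software tag-based
 * mode uses the __hwasan_ prefix instead, aliased below.
 */
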
#ifdef CONFIG_KASAN_SW_TAGS
void *__hwasan_memset(void *addr, int c, ssize_t len) __alias(__asan_memset);
EXPORT_SYMBOL(__hwasan_memset);
#ifdef __HAVE_ARCH_MEMMOVE
void *__hwasan_memmove(void *dest, const void *src, ssize_t len) __alias(__asan_memmove);
EXPORT_SYMBOL(__hwasan_memmove);
#endif
void *__hwasan_memcpy(void *dest, const void *src, ssize_t len) __alias(__asan_memcpy);
EXPORT_SYMBOL(__hwasan_memcpy);
#endif

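/*
 * Each shadow byte tracks one KASAN_GRANULE_SIZE-sized granule of memory
 * (8 bytes in generic mode, 16 in software tag-based mode);
 * kasan_mem_to_shadow() maps an address to its shadow byte as
 * (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET.
 */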
void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
	void *shadow_start, *shadow_end;

	if (!kasan_arch_is_ready())
		return;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_new_object) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	if (WARN_ON(size & KASAN_GRANULE_MASK))
		return;

	shadow_start = kasan_mem_to_shadow(addr);
	shadow_end = kasan_mem_to_shadow(addr + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}
EXPORT_SYMBOL_GPL(kasan_poison);

#ifdef CONFIG_KASAN_GENERIC
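/*
 * In generic mode, the shadow byte of a granule that is accessible only
 * in part stores the number of accessible bytes in that granule.
 */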
void kasan_poison_last_granule(const void *addr, size_t size)
{
	if (!kasan_arch_is_ready())
		return;

	if (size & KASAN_GRANULE_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
		*shadow = size & KASAN_GRANULE_MASK;
	}
}
#endif

void kasan_unpoison(const void *addr, size_t size, bool init)
{
	u8 tag = get_tag(addr);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_new_object) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;

	/* Unpoison all granules that cover the object. */
	kasan_poison(addr, round_up(size, KASAN_GRANULE_SIZE), tag, false);

	/* Partially poison the last granule for the generic mode. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(addr, size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
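/*
 * Walk the kernel page tables to check whether the shadow address is
 * already backed; leaf entries at the PUD/PMD level count as mapped.
 */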
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;
	if (pud_leaf(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;
	if (pmd_leaf(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(ptep_get(pte));
}

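/*
 * Keep the shadow of hotplugged memory populated: allocate shadow when a
 * memory block goes online and free it again on offlining, provided it
 * was vmalloc-backed rather than mapped during boot.
 */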
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
		WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, it must have been mapped
		 * during boot. This can happen when onlining previously
		 * offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow.
		 * A non-NULL result from find_vm_area() tells us that it
		 * was the latter case.
		 *
		 * Currently it's not possible to free shadow mapped
		 * during boot by kasan_init(): the code to do that hasn't
		 * been written yet, so we just leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, DEFAULT_CALLBACK_PRI);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC

void __init __weak kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{
}

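/*
 * Back a single shadow PTE with a freshly allocated, fully poisoned page.
 * The PTE is re-checked under init_mm.page_table_lock, so losing a race
 * against another CPU only means freeing the spare page.
 */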
static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
				      void *unused)
{
	unsigned long page;
	pte_t pte;

	if (likely(!pte_none(ptep_get(ptep))))
		return 0;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	__memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(ptep_get(ptep)))) {
		set_pte_at(&init_mm, addr, ptep, pte);
		page = 0;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (page)
		free_page(page);
	return 0;
}

int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
	unsigned long shadow_start, shadow_end;
	int ret;

	if (!kasan_arch_is_ready())
		return 0;

	if (!is_vmalloc_or_module_addr((void *)addr))
		return 0;

	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);

	/*
	 * User Mode Linux maps enough shadow memory for all of virtual memory
	 * at boot, so it doesn't need to allocate more for vmalloc; the
	 * shadow just needs to be cleared.
	 *
	 * The remaining CONFIG_UML checks in this file exist for the same
	 * reason.
	 */
	if (IS_ENABLED(CONFIG_UML)) {
		__memset((void *)shadow_start, KASAN_VMALLOC_INVALID, shadow_end - shadow_start);
		return 0;
	}

	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
	shadow_end = PAGE_ALIGN(shadow_end);

	ret = apply_to_page_range(&init_mm, shadow_start,
				  shadow_end - shadow_start,
				  kasan_populate_vmalloc_pte, NULL);
	if (ret)
		return ret;

	flush_cache_vmap(shadow_start, shadow_end);

	/*
	 * We need to be careful about inter-cpu effects here. Consider:
	 *
	 *   CPU#0				CPU#1
	 * WRITE_ONCE(p, vmalloc(100));		while (x = READ_ONCE(p)) ;
	 *					p[99] = 1;
	 *
	 * With compiler instrumentation, that ends up looking like this:
	 *
	 *   CPU#0				CPU#1
	 * // vmalloc() allocates memory
	 * // let a = area->addr
	 * // we reach kasan_populate_vmalloc
	 * // and call kasan_unpoison:
	 * STORE shadow(a), unpoison_val
	 * ...
	 * STORE shadow(a+99), unpoison_val	x = LOAD p
	 * // rest of vmalloc process		<data dependency>
	 * STORE p, a				LOAD shadow(x+99)
	 *
	 * If there is no barrier between the end of unpoisoning the shadow
	 * and the store of the result to p, the stores could be committed
	 * in a different order by CPU#0, and CPU#1 could erroneously observe
	 * poison in the shadow.
	 *
	 * We need some sort of barrier between the stores.
	 *
	 * In the vmalloc() case, this is provided by a smp_wmb() in
	 * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
	 * get_vm_area() and friends, the caller gets shadow allocated but
	 * doesn't have any pages mapped into the virtual address space that
	 * has been reserved. Mapping those pages in will involve taking and
	 * releasing a page-table lock, which will provide the barrier.
	 */

	return 0;
}

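/*
 * Tear down a single shadow PTE and free its backing page. The PTE is
 * cleared under init_mm.page_table_lock to avoid racing with a
 * concurrent kasan_populate_vmalloc_pte().
 */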
static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
					void *unused)
{
	unsigned long page;

	page = (unsigned long)__va(pte_pfn(ptep_get(ptep)) << PAGE_SHIFT);

	spin_lock(&init_mm.page_table_lock);

	if (likely(!pte_none(ptep_get(ptep)))) {
		pte_clear(&init_mm, addr, ptep);
		free_page(page);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
 * infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labeled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                     end
 *                    v                         v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |    /-------/    |
 *      \-------\|/------/   |/---------------/
 *              |||          ||
 *             |??AAAAAA|AAAAAAAA|AA??????|                < shadow
 *                 (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 *  free_region_start                                    free_region_end
 *  |                 start                     end      |
 *  v                 v                         v        v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |    /-------/    |
 *      \-------\|/------/   |/---------------/
 *              |||          ||
 *             |FFAAAAAA|AAAAAAAA|AAF?????|                < shadow
 *                 (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * Concurrency
 * -----------
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code does not.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
	unsigned long size;

	if (!kasan_arch_is_ready())
		return;

	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

	free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (start != region_start &&
	    free_region_start < region_start)
		region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;

	free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (end != region_end &&
	    free_region_end > region_end)
		region_end += KASAN_MEMORY_PER_SHADOW_PAGE;

	shadow_start = kasan_mem_to_shadow((void *)region_start);
	shadow_end = kasan_mem_to_shadow((void *)region_end);

	if (shadow_end > shadow_start) {
		size = shadow_end - shadow_start;
		if (IS_ENABLED(CONFIG_UML)) {
			__memset(shadow_start, KASAN_SHADOW_INIT, shadow_end - shadow_start);
			return;
		}
		apply_to_existing_page_range(&init_mm,
					     (unsigned long)shadow_start,
					     size, kasan_depopulate_vmalloc_pte,
					     NULL);
		flush_tlb_kernel_range((unsigned long)shadow_start,
				       (unsigned long)shadow_end);
	}
}

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags)
{
	/*
	 * Software KASAN modes unpoison both VM_ALLOC and non-VM_ALLOC
	 * mappings, so the KASAN_VMALLOC_VM_ALLOC flag is ignored.
	 * Software KASAN modes can't optimize zeroing memory by combining it
	 * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
	 */

	if (!kasan_arch_is_ready())
		return (void *)start;

	if (!is_vmalloc_or_module_addr(start))
		return (void *)start;

	/*
	 * Don't tag executable memory with the tag-based mode.
	 * The kernel doesn't tolerate having the PC register tagged.
	 */
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
	    !(flags & KASAN_VMALLOC_PROT_NORMAL))
		return (void *)start;

	start = set_tag(start, kasan_random_tag());
	kasan_unpoison(start, size, false);
	return (void *)start;
}

/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
	if (!kasan_arch_is_ready())
		return;

	if (!is_vmalloc_or_module_addr(start))
		return;

	size = round_up(size, KASAN_GRANULE_SIZE);
	kasan_poison(start, size, KASAN_VMALLOC_INVALID, false);
}

#else /* CONFIG_KASAN_VMALLOC */

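/*
 * Without CONFIG_KASAN_VMALLOC, module space has no shadow by default, so
 * it is allocated eagerly here at module load time; the area is marked
 * VM_KASAN so that kasan_free_module_shadow() can release it later.
 */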
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
				KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	if (IS_ENABLED(CONFIG_UML)) {
		__memset((void *)shadow_start, KASAN_SHADOW_INIT, shadow_size);
		return 0;
	}

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		struct vm_struct *vm = find_vm_area(addr);
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		vm->flags |= VM_KASAN;
		kmemleak_ignore(ret);

		if (vm->flags & VM_DEFER_KMEMLEAK)
			kmemleak_vmalloc(vm, size, gfp_mask);

		return 0;
	}

	return -ENOMEM;
}

void kasan_free_module_shadow(const struct vm_struct *vm)
{
	if (IS_ENABLED(CONFIG_UML))
		return;

	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

#endif