// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

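/*
 * A sketch of the shadow encoding these helpers rely on (one shadow byte
 * covers one KASAN_GRANULE_SIZE-byte granule, 8 bytes in generic KASAN):
 * shadow value 0 means all 8 bytes are accessible, a value k in 1..7 means
 * only the first k bytes are, and negative values poison the whole granule.
 * For example, an 8-byte-aligned 5-byte object has shadow value 5, so a
 * load at offset 4 passes (4 < 5) while a load at offset 5 is reported
 * (5 >= 5), matching the comparison in memory_is_poisoned_1() below.
 */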
static __always_inline bool memory_is_poisoned_1(const void *addr)
{
        s8 shadow_value = *(s8 *)kasan_mem_to_shadow(addr);

        if (unlikely(shadow_value)) {
                s8 last_accessible_byte = (unsigned long)addr & KASAN_GRANULE_MASK;
                return unlikely(last_accessible_byte >= shadow_value);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_2_4_8(const void *addr,
                                                unsigned long size)
{
        u8 *shadow_addr = (u8 *)kasan_mem_to_shadow(addr);

        /*
         * The access crosses an 8-byte (shadow granule) boundary. Such an
         * access maps into two shadow bytes, so we need to check them both.
         */
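        /*
         * For example (illustrative): a 4-byte access starting at offset 6
         * of a granule ends at offset (6 + 3) & 7 == 1 of the next one;
         * since 1 < 3 (size - 1), the check below takes the two-byte path.
         */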
        if (unlikely((((unsigned long)addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
                return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

        return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(const void *addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow(addr);

        /* An unaligned 16-byte access maps into 3 shadow bytes. */
        if (unlikely(!IS_ALIGNED((unsigned long)addr, KASAN_GRANULE_SIZE)))
                return *shadow_addr || memory_is_poisoned_1(addr + 15);

        return *shadow_addr;
}

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
                                        size_t size)
{
        while (size) {
                if (unlikely(*start))
                        return (unsigned long)start;
                start++;
                size--;
        }

        return 0;
}

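/*
 * Scan [start, end) for the first non-zero byte: byte-wise up to the first
 * 8-byte boundary, then a u64 word at a time, then the remaining tail bytes.
 * Returns the address of the first non-zero byte found, or 0 if none is.
 */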
static __always_inline unsigned long memory_is_nonzero(const void *start,
                                                const void *end)
{
        unsigned int words;
        unsigned long ret;
        unsigned int prefix = (unsigned long)start % 8;

        if (end - start <= 16)
                return bytes_is_nonzero(start, end - start);

        if (prefix) {
                prefix = 8 - prefix;
                ret = bytes_is_nonzero(start, prefix);
                if (unlikely(ret))
                        return ret;
                start += prefix;
        }

        words = (end - start) / 8;
        while (words) {
                if (unlikely(*(u64 *)start))
                        return bytes_is_nonzero(start, 8);
                start += 8;
                words--;
        }

        return bytes_is_nonzero(start, (end - start) % 8);
}

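/*
 * For arbitrary sizes: scan the whole shadow range of the access. A
 * non-zero shadow byte is tolerable only if it is the very last one and
 * the access still ends within the accessible prefix that byte encodes.
 */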
static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size)
{
        unsigned long ret;

        ret = memory_is_nonzero(kasan_mem_to_shadow(addr),
                        kasan_mem_to_shadow(addr + size - 1) + 1);

        if (unlikely(ret)) {
                const void *last_byte = addr + size - 1;
                s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte);
                s8 last_accessible_byte = (unsigned long)last_byte & KASAN_GRANULE_MASK;

                if (unlikely(ret != (unsigned long)last_shadow ||
                             last_accessible_byte >= *last_shadow))
                        return true;
        }
        return false;
}

static __always_inline bool memory_is_poisoned(const void *addr, size_t size)
{
        if (__builtin_constant_p(size)) {
                switch (size) {
                case 1:
                        return memory_is_poisoned_1(addr);
                case 2:
                case 4:
                case 8:
                        return memory_is_poisoned_2_4_8(addr, size);
                case 16:
                        return memory_is_poisoned_16(addr);
                default:
                        BUILD_BUG();
                }
        }

        return memory_is_poisoned_n(addr, size);
}

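/*
 * Returns true if the access is valid (or cannot be checked yet), false
 * once a bug has been reported. The wraparound and addr_has_metadata()
 * checks must come before the shadow lookup so that kasan_mem_to_shadow()
 * is never applied to a wild pointer.
 */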
static __always_inline bool check_region_inline(const void *addr,
                                                size_t size, bool write,
                                                unsigned long ret_ip)
{
        if (!kasan_arch_is_ready())
                return true;

        if (unlikely(size == 0))
                return true;

        if (unlikely(addr + size < addr))
                return !kasan_report(addr, size, write, ret_ip);

        if (unlikely(!addr_has_metadata(addr)))
                return !kasan_report(addr, size, write, ret_ip);

        if (likely(!memory_is_poisoned(addr, size)))
                return true;

        return !kasan_report(addr, size, write, ret_ip);
}

bool kasan_check_range(const void *addr, size_t size, bool write,
                                        unsigned long ret_ip)
{
        return check_region_inline(addr, size, write, ret_ip);
}

bool kasan_byte_accessible(const void *addr)
{
        s8 shadow_byte;

        if (!kasan_arch_is_ready())
                return true;

        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

        return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
        kasan_quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
        if (!__kmem_cache_empty(cache))
                kasan_quarantine_remove_cache(cache);
}

static void register_global(struct kasan_global *global)
{
        size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);

        kasan_unpoison(global->beg, global->size, false);

        kasan_poison(global->beg + aligned_size,
                     global->size_with_redzone - aligned_size,
                     KASAN_GLOBAL_REDZONE, false);
}

void __asan_register_globals(void *ptr, ssize_t size)
{
        int i;
        struct kasan_global *globals = ptr;

        for (i = 0; i < size; i++)
                register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(void *ptr, ssize_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)                                    \
        void __asan_load##size(void *addr)                              \
        {                                                               \
                check_region_inline(addr, size, false, _RET_IP_);       \
        }                                                               \
        EXPORT_SYMBOL(__asan_load##size);                               \
        __alias(__asan_load##size)                                      \
        void __asan_load##size##_noabort(void *);                       \
        EXPORT_SYMBOL(__asan_load##size##_noabort);                     \
        void __asan_store##size(void *addr)                             \
        {                                                               \
                check_region_inline(addr, size, true, _RET_IP_);        \
        }                                                               \
        EXPORT_SYMBOL(__asan_store##size);                              \
        __alias(__asan_store##size)                                     \
        void __asan_store##size##_noabort(void *);                      \
        EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

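/*
 * With outline instrumentation, the compiler emits a call before each
 * access it cannot prove safe; e.g. (an illustrative sketch, not emitted
 * verbatim):
 *
 *      u32 val = *p;    becomes    __asan_load4(p); u32 val = *p;
 *
 * Accesses whose size is only known at runtime go through the N variants
 * below. Depending on compiler version and flags, either the plain or the
 * _noabort entry point may be emitted; the kernel always continues after
 * a report, so both names resolve to the same code via __alias().
 */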
void __asan_loadN(void *addr, ssize_t size)
{
        kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(void *addr, ssize_t size)
{
        kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(void *addr, ssize_t size)
{
        size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
        size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
                        rounded_up_size;
        size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);

        const void *left_redzone = (const void *)(addr -
                        KASAN_ALLOCA_REDZONE_SIZE);
        const void *right_redzone = (const void *)(addr + rounded_up_size);

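        /*
         * Resulting layout (a sketch; sizes rounded per the code above):
         *
         * addr - KASAN_ALLOCA_REDZONE_SIZE   addr     addr + rounded_up_size
         *   |<----- left redzone ----->|<-- object -->|<-- padding + right redzone -->|
         *
         * Only the object's last, partial granule needs explicit
         * unpoisoning; the preceding whole granules are expected to have
         * clean shadow already.
         */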
        WARN_ON(!IS_ALIGNED((unsigned long)addr, KASAN_ALLOCA_REDZONE_SIZE));

        kasan_unpoison((const void *)(addr + rounded_down_size),
                        size - rounded_down_size, false);
        kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
                     KASAN_ALLOCA_LEFT, false);
        kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
                     KASAN_ALLOCA_RIGHT, false);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom)
{
        if (unlikely(!stack_top || stack_top > (void *)stack_bottom))
                return;

        kasan_unpoison(stack_top, (void *)stack_bottom - stack_top, false);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte)                                    \
        void __asan_set_shadow_##byte(const void *addr, ssize_t size)  \
        {                                                               \
                __memset((void *)addr, 0x##byte, size);                 \
        }                                                               \
        EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
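/* E.g. a 64-byte object gets a 32-byte redzone: 64 > 64 - 16 but <= 128 - 32. */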
static inline unsigned int optimal_redzone(unsigned int object_size)
{
        return
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

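/*
 * A sketch of the object layout the branches below produce when both metas
 * land in the redzone (free meta can instead live fully or partially inside
 * the object, and either meta is dropped if KMALLOC_MAX_SIZE is exceeded):
 *
 *  |<--- object_size --->|<- alloc meta ->|<- free meta ->|<- rest of redzone ->|
 */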
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                        slab_flags_t *flags)
{
        unsigned int ok_size;
        unsigned int optimal_size;
        unsigned int rem_free_meta_size;
        unsigned int orig_alloc_meta_offset;

        if (!kasan_requires_meta())
                return;

        /*
         * SLAB_KASAN is used to mark caches that are sanitized by KASAN and
         * that thus have per-object metadata. Currently, this flag is used in
         * slab_ksize() to account for per-object metadata when calculating the
         * size of the accessible memory within the object. Additionally, we use
         * SLAB_NO_MERGE to prevent merging of caches with per-object metadata.
         */
        *flags |= SLAB_KASAN | SLAB_NO_MERGE;

        ok_size = *size;

        /* Add alloc meta into the redzone. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* If alloc meta doesn't fit, don't add it. */
        if (*size > KMALLOC_MAX_SIZE) {
                cache->kasan_info.alloc_meta_offset = 0;
                *size = ok_size;
                /* Continue, since free meta might still fit. */
        }

        ok_size = *size;
        orig_alloc_meta_offset = cache->kasan_info.alloc_meta_offset;

        /*
         * Store free meta in the redzone when it's not possible to store
         * it in the object. This is the case when:
         * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
         *    be touched after it was freed, or
         * 2. Object has a constructor, which means it's expected to
         *    retain its content until the next allocation.
         */
        if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
                goto free_meta_added;
        }

        /*
         * Otherwise, if the object is large enough to contain free meta,
         * store it within the object.
         */
        if (sizeof(struct kasan_free_meta) <= cache->object_size) {
                /* cache->kasan_info.free_meta_offset = 0 is implied. */
                goto free_meta_added;
        }

        /*
         * For smaller objects, store the beginning of free meta within the
         * object and the end in the redzone. And thus shift the location of
         * alloc meta to free up space for free meta.
         * This is only possible when slub_debug is disabled, as otherwise
         * the end of free meta will overlap with slub_debug metadata.
         */
        if (!__slub_debug_enabled()) {
                rem_free_meta_size = sizeof(struct kasan_free_meta) -
                                                        cache->object_size;
                *size += rem_free_meta_size;
                if (cache->kasan_info.alloc_meta_offset != 0)
                        cache->kasan_info.alloc_meta_offset += rem_free_meta_size;
                goto free_meta_added;
        }

        /*
         * If the object is small and slub_debug is enabled, store free meta
         * in the redzone after alloc meta.
         */
        cache->kasan_info.free_meta_offset = *size;
        *size += sizeof(struct kasan_free_meta);

free_meta_added:
        /* If free meta doesn't fit, don't add it. */
        if (*size > KMALLOC_MAX_SIZE) {
                cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
                cache->kasan_info.alloc_meta_offset = orig_alloc_meta_offset;
                *size = ok_size;
        }

        /* Calculate size with optimal redzone. */
        optimal_size = cache->object_size + optimal_redzone(cache->object_size);
        /* Limit it with KMALLOC_MAX_SIZE. */
        if (optimal_size > KMALLOC_MAX_SIZE)
                optimal_size = KMALLOC_MAX_SIZE;
        /* Use optimal size if the size with added metas is not large enough. */
        if (*size < optimal_size)
                *size = optimal_size;
}

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
                                              const void *object)
{
        if (!cache->kasan_info.alloc_meta_offset)
                return NULL;
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
                                            const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
                return NULL;
        return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
{
        struct kasan_alloc_meta *alloc_meta;

        alloc_meta = kasan_get_alloc_meta(cache, object);
        if (alloc_meta) {
                /* Zero out alloc meta to mark it as invalid. */
                __memset(alloc_meta, 0, sizeof(*alloc_meta));
        }

        /*
         * Explicitly marking free meta as invalid is not required: the shadow
         * value for the first 8 bytes of a newly allocated object is not
         * KASAN_SLAB_FREE_META.
         */
}

static void release_alloc_meta(struct kasan_alloc_meta *meta)
{
        /* Zero out alloc meta to mark it as invalid. */
        __memset(meta, 0, sizeof(*meta));
}

static void release_free_meta(const void *object, struct kasan_free_meta *meta)
{
        if (!kasan_arch_is_ready())
                return;

        /* Check if free meta is valid. */
        if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
                return;

        /* Mark free meta as invalid. */
        *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
}

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
{
        struct kasan_cache *info = &cache->kasan_info;

        if (!kasan_requires_meta())
                return 0;

        if (in_object)
                return (info->free_meta_offset ?
                        0 : sizeof(struct kasan_free_meta));
        else
                return (info->alloc_meta_offset ?
                        sizeof(struct kasan_alloc_meta) : 0) +
                        ((info->free_meta_offset &&
                          info->free_meta_offset != KASAN_NO_FREE_META) ?
                          sizeof(struct kasan_free_meta) : 0);
}

static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
{
        struct slab *slab = kasan_addr_to_slab(addr);
        struct kmem_cache *cache;
        struct kasan_alloc_meta *alloc_meta;
        void *object;

        if (is_kfence_address(addr) || !slab)
                return;

        cache = slab->slab_cache;
        object = nearest_obj(cache, slab, addr);
        alloc_meta = kasan_get_alloc_meta(cache, object);
        if (!alloc_meta)
                return;

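        /* Keep the two most recent aux stacks: shift the older one down. */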
        alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
        alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
}

void kasan_record_aux_stack(void *addr)
{
        return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
}

void kasan_record_aux_stack_noalloc(void *addr)
{
        return __kasan_record_aux_stack(addr, 0);
}

void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
        struct kasan_alloc_meta *alloc_meta;

        alloc_meta = kasan_get_alloc_meta(cache, object);
        if (!alloc_meta)
                return;

        /* Invalidate previous stack traces (might exist for krealloc or mempool). */
        release_alloc_meta(alloc_meta);

        kasan_save_track(&alloc_meta->alloc_track, flags);
}

void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
        struct kasan_free_meta *free_meta;

        free_meta = kasan_get_free_meta(cache, object);
        if (!free_meta)
                return;

        /* Invalidate previous stack trace (might exist for mempool). */
        release_free_meta(object, free_meta);

        kasan_save_track(&free_meta->free_track, 0);

        /* Mark free meta as valid. */
        *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE_META;
}
Walter Wue4b78182020-08-06 23:24:39 -0700583}