// SPDX-License-Identifier: GPL-2.0-only
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>
#include "slab.h"

/*
 * Checks whether a given pointer and length are contained by the
 * current stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: within the current stack (when exact frame checking
 *		    is not possible)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	/* Finally, check stack depth if possible. */
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
	if (IS_ENABLED(CONFIG_STACK_GROWSUP)) {
		if ((void *)current_stack_pointer < obj + len)
			return BAD_STACK;
	} else {
		if (obj < (void *)current_stack_pointer)
			return BAD_STACK;
	}
#endif

	return GOOD_STACK;
}

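/*
 * Illustrative only: given a local "char buf[16]" fully inside the
 * current frame, check_stack_object(buf, 16) returns GOOD_FRAME on
 * architectures that can walk stack frames (GOOD_STACK elsewhere),
 * while a length that runs past the end of the stack returns
 * BAD_STACK via the partial-overlap check above.
 */
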
/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, it is checking that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range).
 */
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}

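/*
 * Example (hypothetical, for illustration): a driver that exposes only
 * one buffer of its per-device struct to userspace could whitelist just
 * that range when creating its cache:
 *
 *	struct foo_dev {
 *		spinlock_t lock;
 *		char ioctl_buf[64];	// only usercopy-visible bytes
 *	};
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo_dev",
 *			sizeof(struct foo_dev), 0, SLAB_PANIC,
 *			offsetof(struct foo_dev, ioctl_buf),
 *			sizeof_field(struct foo_dev, ioctl_buf), NULL);
 *
 * A copy_to_user() from any other part of a foo_dev object would then
 * fail __check_heap_object() and end up in usercopy_abort() above.
 */
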
/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}

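/*
 * Note the half-open semantics above: e.g. overlaps(0x1000, 0x100,
 * 0x1100, 0x2000) is false, since a range that ends exactly where the
 * other begins does not overlap it.
 */
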
/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This is usually
	 * the case when there is a separate linear physical memory mapping,
	 * where __pa() is not simply the reverse of __va(). This can be
	 * detected and checked:
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
				       bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + (n - 1) < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}

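/*
 * For example, ptr == ULONG_MAX - 3 with n == 16 makes ptr + (n - 1)
 * wrap around to a value below ptr, so the copy is rejected before any
 * other checks run (n is already known to be non-zero here; see
 * __check_object_size() below).
 */
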
static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	unsigned long addr = (unsigned long)ptr;
	unsigned long offset;
	struct folio *folio;

	if (is_kmap_addr(ptr)) {
		offset = offset_in_page(ptr);
		if (n > PAGE_SIZE - offset)
			usercopy_abort("kmap", NULL, to_user, offset, n);
		return;
	}

	if (is_vmalloc_addr(ptr) && !pagefault_disabled()) {
		struct vmap_area *area = find_vmap_area(addr);

		if (!area)
			usercopy_abort("vmalloc", "no area", to_user, 0, n);

		if (n > area->va_end - addr) {
			offset = addr - area->va_start;
			usercopy_abort("vmalloc", NULL, to_user, offset, n);
		}
		return;
	}

	if (!virt_addr_valid(ptr))
		return;

	folio = virt_to_folio(ptr);

	if (folio_test_slab(folio)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, folio_slab(folio), to_user);
	} else if (folio_test_large(folio)) {
		offset = ptr - folio_address(folio);
		if (n > folio_size(folio) - offset)
			usercopy_abort("page alloc", NULL, to_user, offset, n);
	}
}

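/*
 * Illustration (hypothetical values): copying PAGE_SIZE bytes starting
 * halfway into the last page of a vmalloc'ed region exceeds
 * area->va_end - addr and aborts with "vmalloc"; likewise, copying 128
 * bytes out of a kmalloc(64) object is caught by __check_heap_object()
 * in the slab case above.
 */
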
static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
 * Validates that the given object is:
 * - not a bogus address
 * - fully contained by the stack (or stack frame, when available)
 * - fully within a SLAB object (or object whitelist area, when available)
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking is not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user,
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
			IS_ENABLED(CONFIG_STACK_GROWSUP) ?
				ptr - (void *)current_stack_pointer :
				(void *)current_stack_pointer - ptr,
#else
			0,
#endif
			n);
	}

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
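/*
 * Sketch of a typical call path (assuming CONFIG_HARDENED_USERCOPY=y):
 *
 *	copy_to_user(ubuf, kbuf, len)
 *	  check_copy_size() / check_object_size()
 *	    __check_object_size(kbuf, len, true)
 *
 * check_object_size() only calls in here for sizes that are not
 * compile-time constants; constant sizes are validated at build time.
 */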

static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
	if (kstrtobool(str, &enable_checks))
		pr_warn("Invalid option string for hardened_usercopy: '%s'\n",
			str);
	return 1;
}

__setup("hardened_usercopy=", parse_hardened_usercopy);
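
/*
 * Example: booting with "hardened_usercopy=off" on the kernel command
 * line (or "=0"/"=n"/anything else kstrtobool() treats as false) clears
 * enable_checks, which flips the bypass static branch below.
 */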

static int __init set_hardened_usercopy(void)
{
	if (!enable_checks)
		static_branch_enable(&bypass_usercopy_checks);
	return 1;
}

late_initcall(set_hardened_usercopy);