// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"
#include "swap.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
        if (!is_kernel_rodata((unsigned long)x))
                kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: the source string if it is in the .rodata section, otherwise a
 * newly allocated copy made with kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
        if (is_kernel_rodata((unsigned long)s))
                return s;

        return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

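/*
 * Example (illustrative sketch; the structure and function names below are
 * hypothetical): the usual pairing of kstrdup_const() with kfree_const().
 *
 *	struct probe_info {
 *		const char *name;
 *	};
 *
 *	static int probe_info_set_name(struct probe_info *info, const char *name)
 *	{
 *		info->name = kstrdup_const(name, GFP_KERNEL);
 *		return info->name ? 0 : -ENOMEM;
 *	}
 *
 *	static void probe_info_release(struct probe_info *info)
 *	{
 *		kfree_const(info->name);
 *	}
 *
 * The stored name must not be passed to kfree() or krealloc(), since it may
 * still point into .rodata.
 */
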
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
        char *buf;

        if (!s)
                return NULL;

        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

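/*
 * Example (illustrative sketch; hdr, name_off and name_len are hypothetical):
 * kmemdup_nul() is the right call when the length is already known, e.g.
 * when lifting a counted byte range out of a larger buffer into a C string:
 *
 *	char *name = kmemdup_nul(hdr + name_off, name_len, GFP_KERNEL);
 *
 *	if (!name)
 *		return -ENOMEM;
 *
 * Contrast with kstrndup(), which must first scan for a terminating NUL.
 */
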
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);

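/*
 * Example (illustrative sketch; the ioctl handler shape is hypothetical):
 * memdup_user() never returns NULL, so callers test with IS_ERR()/PTR_ERR():
 *
 *	static long example_ioctl_copy(void __user *uarg, size_t len)
 *	{
 *		void *buf = memdup_user(uarg, len);
 *
 *		if (IS_ERR(buf))
 *			return PTR_ERR(buf);
 *
 *		... operate on buf ...
 *
 *		kfree(buf);
 *		return 0;
 *	}
 */
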
/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kvmalloc(len, GFP_USER);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kvfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
        char *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a page fault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len + 1, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }
        p[len] = '\0';

        return p;
}
EXPORT_SYMBOL(memdup_user_nul);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                next = mm->mmap;
                mm->mmap = vma;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
        struct vm_area_struct *prev, *next;

        next = vma->vm_next;
        prev = vma->vm_prev;
        if (prev)
                prev->vm_next = next;
        else
                mm->mmap = next;
        if (next)
                next->vm_prev = prev;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
        struct task_struct * __maybe_unused t = current;

        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
        /* Changing an anonymous vma with this is illegal */
        get_file(file);
        swap(vma->vm_file, file);
        fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
        unsigned long random_variable = 0;

        if (current->flags & PF_RANDOMIZE) {
                random_variable = get_random_long();
                random_variable &= STACK_RND_MASK;
                random_variable <<= PAGE_SHIFT;
        }
#ifdef CONFIG_STACK_GROWSUP
        return PAGE_ALIGN(stack_top) + random_variable;
#else
        return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start: The smallest acceptable address the caller will take.
 * @range: The size of the area, starting at @start, within which the
 *         random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
        if (!PAGE_ALIGNED(start)) {
                range -= PAGE_ALIGN(start) - start;
                start = PAGE_ALIGN(start);
        }

        if (start > ULONG_MAX - range)
                range = ULONG_MAX - start;

        range >>= PAGE_SHIFT;

        if (range == 0)
                return start;

        return start + (get_random_long() % range << PAGE_SHIFT);
}

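/*
 * Example (illustrative sketch; the base address and window size are made
 * up): picking a randomized, page-aligned address inside a fixed window,
 * much as arch_randomize_brk() below does for the heap:
 *
 *	unsigned long base = 0x10000000UL;
 *	unsigned long addr = randomize_page(base, SZ_256M);
 *
 * addr is page aligned and falls within [base, base + SZ_256M); if the
 * usable range collapses to zero pages, base itself is returned.
 */
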
#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
        /* Is the current task 32-bit? */
        if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
                return randomize_page(mm->brk, SZ_32M);

        return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
        unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
        if (is_compat_task())
                rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
        else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
                rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

        return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;

        if (rlim_stack->rlim_cur == RLIM_INFINITY)
                return 1;

        return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP (SZ_128M)
#define MAX_GAP (STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
        unsigned long gap = rlim_stack->rlim_cur;
        unsigned long pad = stack_guard_gap;

        /* Account for stack randomization if necessary */
        if (current->flags & PF_RANDOMIZE)
                pad += (STACK_RND_MASK << PAGE_SHIFT);

        /* Values close to RLIM_INFINITY can overflow. */
        if (gap + pad > gap)
                gap += pad;

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE)
                random_factor = arch_mmap_rnd();

        if (mmap_is_legacy(rlim_stack)) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base(random_factor, rlim_stack);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 * @task: task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
                        struct task_struct *task, bool bypass_rlim)
{
        unsigned long locked_vm, limit;
        int ret = 0;

        mmap_assert_write_locked(mm);

        locked_vm = mm->locked_vm;
        if (inc) {
                if (!bypass_rlim) {
                        limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                        if (locked_vm + pages > limit)
                                ret = -ENOMEM;
                }
                if (!ret)
                        mm->locked_vm = locked_vm + pages;
        } else {
                WARN_ON_ONCE(pages > locked_vm);
                mm->locked_vm = locked_vm - pages;
        }

        pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
                 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
                 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
                 ret ? " - exceeded" : "");

        return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against, may be NULL
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
        int ret;

        if (pages == 0 || !mm)
                return 0;

        mmap_write_lock(mm);
        ret = __account_locked_vm(mm, pages, inc, current,
                                  capable(CAP_IPC_LOCK));
        mmap_write_unlock(mm);

        return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

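/*
 * Example (illustrative sketch; the function names are hypothetical): a
 * driver that pins user pages charges them against RLIMIT_MEMLOCK up front
 * and uncharges them on teardown:
 *
 *	static int example_pin_charge(struct mm_struct *mm, unsigned long npages)
 *	{
 *		return account_locked_vm(mm, npages, true);
 *	}
 *
 *	static void example_pin_uncharge(struct mm_struct *mm, unsigned long npages)
 *	{
 *		account_locked_vm(mm, npages, false);
 *	}
 *
 * A caller that already holds mmap_lock for writing uses
 * __account_locked_vm() directly instead.
 */
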
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;
        unsigned long populate;
        LIST_HEAD(uf);

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                if (mmap_write_lock_killable(mm))
                        return -EINTR;
                ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
                              &uf);
                mmap_write_unlock(mm);
                userfaultfd_unmap_complete(mm, &uf);
                if (populate)
                        mm_populate(ret, populate);
        }
        return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset_in_page(offset)))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

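/*
 * Example (illustrative sketch; filp and the mapping size are assumptions):
 * an in-kernel user mapping the first 64KB of a file it holds a reference
 * to, with any error encoded in the returned address:
 *
 *	unsigned long addr;
 *
 *	addr = vm_mmap(filp, 0, SZ_64K, PROT_READ, MAP_SHARED, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (long)addr;
 *
 * The offset must be page aligned, otherwise -EINVAL is returned.
 */
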
/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc() to get the memory, but if the allocation fails it falls back
 * to the vmalloc allocator. Use kvfree() for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, and neither is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
        gfp_t kmalloc_flags = flags;
        void *ret;

        /*
         * We want to attempt a large physically contiguous block first because
         * it is less likely to fragment multiple larger blocks and therefore
         * contributes less to long-term fragmentation than the vmalloc fallback.
         * However, make sure that larger requests are not too disruptive - no
         * OOM killer and no allocation failure warnings, as we have a fallback.
         */
        if (size > PAGE_SIZE) {
                kmalloc_flags |= __GFP_NOWARN;

                if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
                        kmalloc_flags |= __GFP_NORETRY;

                /* nofail semantic is implemented by the vmalloc fallback */
                kmalloc_flags &= ~__GFP_NOFAIL;
        }

        ret = kmalloc_node(size, kmalloc_flags, node);

        /*
         * It doesn't really make sense to fall back to vmalloc for sub-page
         * requests
         */
        if (ret || size <= PAGE_SIZE)
                return ret;

        /* Don't even allow crazy sizes */
        if (unlikely(size > INT_MAX)) {
                WARN_ON_ONCE(!(flags & __GFP_NOWARN));
                return NULL;
        }

        /*
         * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
         * since the callers already cannot assume anything
         * about the resulting pointer, and cannot play
         * protection games.
         */
        return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
                        flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
                        node, __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);

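/*
 * Example (illustrative sketch; struct example_table and its helpers are
 * hypothetical): a large table that does not need to be physically
 * contiguous is a typical kvmalloc user.  Whichever branch satisfied the
 * allocation, the memory must be released with kvfree():
 *
 *	struct example_table {
 *		size_t nr;
 *		u64 *slots;
 *	};
 *
 *	static int example_table_alloc(struct example_table *t, size_t nr)
 *	{
 *		t->slots = kvmalloc_array(nr, sizeof(*t->slots), GFP_KERNEL);
 *		if (!t->slots)
 *			return -ENOMEM;
 *		t->nr = nr;
 *		return 0;
 *	}
 *
 *	static void example_table_free(struct example_table *t)
 *	{
 *		kvfree(t->slots);
 *	}
 */
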
/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
        if (likely(!ZERO_OR_NULL_PTR(addr))) {
                memzero_explicit((void *)addr, len);
                kvfree(addr);
        }
}
EXPORT_SYMBOL(kvfree_sensitive);

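/*
 * Example (illustrative sketch; struct example_key and its fields are
 * hypothetical): buffers holding key material allocated with the
 * kvmalloc-family helpers are torn down with kvfree_sensitive(), so the
 * bytes are wiped before the memory returns to the allocator:
 *
 *	static void example_key_destroy(struct example_key *key)
 *	{
 *		kvfree_sensitive(key->data, key->datalen);
 *		kfree(key);
 *	}
 */
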
void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
        void *newp;

        if (oldsize >= newsize)
                return (void *)p;
        newp = kvmalloc(newsize, flags);
        if (!newp)
                return NULL;
        memcpy(newp, p, oldsize);
        kvfree(p);
        return newp;
}
EXPORT_SYMBOL(kvrealloc);

/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
{
        size_t bytes;

        if (unlikely(check_mul_overflow(n, size, &bytes)))
                return NULL;
        return __vmalloc(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array(size_t n, size_t size)
{
        return __vmalloc_array(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc(size_t n, size_t size, gfp_t flags)
{
        return __vmalloc_array(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc(size_t n, size_t size)
{
        return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc);

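/*
 * Example (illustrative sketch; struct example_row and nrows stand in for
 * the caller's own data): the array helpers replace open-coded "n * size"
 * multiplications, returning NULL on overflow instead of allocating a short
 * buffer:
 *
 *	struct example_row *rows;
 *
 *	rows = vcalloc(nrows, sizeof(*rows));
 *	if (!rows)
 *		return -ENOMEM;
 *	...
 *	vfree(rows);
 */
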
/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
        return folio_raw_mapping(page_folio(page));
}

/**
 * folio_mapped - Is this folio mapped into userspace?
 * @folio: The folio.
 *
 * Return: True if any page in this folio is referenced by user page tables.
 */
bool folio_mapped(struct folio *folio)
{
        long i, nr;

        if (!folio_test_large(folio))
                return atomic_read(&folio->_mapcount) >= 0;
        if (atomic_read(folio_mapcount_ptr(folio)) >= 0)
                return true;
        if (folio_test_hugetlb(folio))
                return false;

        nr = folio_nr_pages(folio);
        for (i = 0; i < nr; i++) {
                if (atomic_read(&folio_page(folio, i)->_mapcount) >= 0)
                        return true;
        }
        return false;
}
EXPORT_SYMBOL(folio_mapped);

struct anon_vma *folio_anon_vma(struct folio *folio)
{
        unsigned long mapping = (unsigned long)folio->mapping;

        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;
        return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
        struct address_space *mapping;

        /* This happens if someone calls flush_dcache_page on slab page */
        if (unlikely(folio_test_slab(folio)))
                return NULL;

        if (unlikely(folio_test_swapcache(folio)))
                return swap_address_space(folio_swap_entry(folio));

        mapping = folio->mapping;
        if ((unsigned long)mapping & PAGE_MAPPING_ANON)
                return NULL;

        return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(folio_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
        int ret;

        ret = atomic_read(&page->_mapcount) + 1;
        /*
         * For file THP, page->_mapcount contains the total number of mappings
         * of the page: no need to look into compound_mapcount.
         */
        if (!PageAnon(page) && !PageHuge(page))
                return ret;
        page = compound_head(page);
        ret += atomic_read(compound_mapcount_ptr(page)) + 1;
        if (PageDoubleMap(page))
                ret--;
        return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

/**
 * folio_mapcount() - Calculate the number of mappings of this folio.
 * @folio: The folio.
 *
 * A large folio tracks both how many times the entire folio is mapped,
 * and how many times each individual page in the folio is mapped.
 * This function calculates the total number of times the folio is
 * mapped.
 *
 * Return: The number of times this folio is mapped.
 */
int folio_mapcount(struct folio *folio)
{
        int i, compound, nr, ret;

        if (likely(!folio_test_large(folio)))
                return atomic_read(&folio->_mapcount) + 1;

        compound = folio_entire_mapcount(folio);
        nr = folio_nr_pages(folio);
        if (folio_test_hugetlb(folio))
                return compound;
        ret = compound;
        for (i = 0; i < nr; i++)
                ret += atomic_read(&folio_page(folio, i)->_mapcount) + 1;
        /* File pages have compound_mapcount included in _mapcount */
        if (!folio_test_anon(folio))
                return ret - compound * nr;
        if (folio_test_double_map(folio))
                ret -= nr;
        return ret;
}

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
        long i = 0;
        long nr = folio_nr_pages(src);

        for (;;) {
                copy_highpage(folio_page(dst, i), folio_page(src, i));
                if (++i == nr)
                        break;
                cond_resched();
        }
}

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
                size_t *lenp, loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_kbytes = 0;
        return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
        percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
                size_t *lenp, loff_t *ppos)
{
        struct ctl_table t;
        int new_policy = -1;
        int ret;

        /*
         * The deviation of sync_overcommit_as could be big with a loose policy
         * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing the policy to
         * the strict OVERCOMMIT_NEVER, the deviation must be reduced to comply
         * with the strict "NEVER", and to avoid a possible race condition
         * (even though the user usually won't switch to OVERCOMMIT_NEVER very
         * frequently) the switch is done in the following order:
         *	1. changing the batch
         *	2. sync percpu count on each CPU
         *	3. switch the policy
         */
        if (write) {
                t = *table;
                t.data = &new_policy;
                ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
                if (ret || new_policy == -1)
                        return ret;

                mm_compute_batch(new_policy);
                if (new_policy == OVERCOMMIT_NEVER)
                        schedule_on_each_cpu(sync_overcommit_as);
                sysctl_overcommit_memory = new_policy;
        } else {
                ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        }

        return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
                size_t *lenp, loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_ratio = 0;
        return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
        unsigned long allowed;

        if (sysctl_overcommit_kbytes)
                allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
        else
                allowed = ((totalram_pages() - hugetlb_total_pages())
                           * sysctl_overcommit_ratio / 100);
        allowed += total_swap_pages;

        return allowed;
}

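/*
 * Worked example (illustrative, with made-up numbers): with the default
 * vm.overcommit_ratio of 50, a machine with 8 GiB of RAM, no huge pages and
 * 2 GiB of swap gets
 *
 *	allowed = 8 GiB * 50 / 100 + 2 GiB = 6 GiB
 *
 * of committable address space (the value is kept in pages).  A non-zero
 * vm.overcommit_kbytes bypasses the ratio calculation entirely.
 */
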
/*
 * Make sure vm_committed_as sits in its own cacheline and does not share one
 * with other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
        return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
        long allowed;

        vm_acct_memory(pages);

        /*
         * Sometimes we want to use more memory than we have
         */
        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
                return 0;

        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                if (pages > totalram_pages() + total_swap_pages)
                        goto error;
                return 0;
        }

        allowed = vm_commit_limit();
        /*
         * Reserve some for root
         */
        if (!cap_sys_admin)
                allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

        /*
         * Don't let a single process grow so big a user can't recover
         */
        if (mm) {
                long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

                allowed -= min_t(long, mm->total_vm / 32, reserve);
        }

        if (percpu_counter_read_positive(&vm_committed_as) < allowed)
                return 0;
error:
        vm_unacct_memory(pages);

        return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task: the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
        int res = 0;
        unsigned int len;
        struct mm_struct *mm = get_task_mm(task);
        unsigned long arg_start, arg_end, env_start, env_end;
        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;    /* Shh! No looking before we're done */

        spin_lock(&mm->arg_lock);
        arg_start = mm->arg_start;
        arg_end = mm->arg_end;
        env_start = mm->env_start;
        env_end = mm->env_end;
        spin_unlock(&mm->arg_lock);

        len = arg_end - arg_start;

        if (len > buflen)
                len = buflen;

        res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

        /*
         * If the nul at the end of args has been overwritten, then
         * assume application is using setproctitle(3).
         */
        if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
                len = strnlen(buffer, res);
                if (len < res) {
                        res = len;
                } else {
                        len = env_end - env_start;
                        if (len > buflen - res)
                                len = buflen - res;
                        res += access_process_vm(task, env_start,
                                                 buffer+res, len,
                                                 FOLL_FORCE);
                        res = strnlen(buffer, res);
                }
        }
out_mm:
        mmput(mm);
out:
        return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
        char *addr1, *addr2;
        int ret;

        addr1 = kmap_atomic(page1);
        addr2 = kmap_atomic(page2);
        ret = memcmp(addr1, addr2, PAGE_SIZE);
        kunmap_atomic(addr2);
        kunmap_atomic(addr1);
        return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
        const char *type;

        if (kmem_valid_obj(object)) {
                kmem_dump_obj(object);
                return;
        }

        if (vmalloc_dump_obj(object))
                return;

        if (virt_addr_valid(object))
                type = "non-slab/vmalloc memory";
        else if (object == NULL)
                type = "NULL pointer";
        else if (object == ZERO_SIZE_PTR)
                type = "zero-size pointer";
        else
                type = "non-paged memory";

        pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif

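/*
 * Example (illustrative sketch): because mem_dump_obj() prints with
 * pr_cont(), the caller emits its own preamble first, without a trailing
 * newline:
 *
 *	pr_info("unexpected pointer %px:", ptr);
 *	mem_dump_obj(ptr);
 *
 * which produces a single line ending in the slab/vmalloc provenance or in
 * one of the fallback descriptions such as "non-paged memory".
 */
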
/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to the
 * page's content can be fatal.
 *
 * Some special PFN walkers -- e.g., /proc/kcore -- read the content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allow a subsystem to
 * synchronize with such drivers, ensuring that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() are used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
        down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
        up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
        down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
        up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);

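/*
 * Example (illustrative sketch; skip_page() is a stand-in for whatever the
 * walker does with such pages): a PFN walker brackets its PageOffline()
 * checks with the freeze/thaw pair,
 *
 *	page_offline_freeze();
 *	if (PageOffline(page))
 *		skip_page(page);
 *	page_offline_thaw();
 *
 * while a driver (e.g. a memory balloon) brackets the code that marks pages
 * offline with page_offline_begin()/page_offline_end(), so neither side can
 * observe the other halfway through.
 */
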
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
void flush_dcache_folio(struct folio *folio)
{
        long i, nr = folio_nr_pages(folio);

        for (i = 0; i < nr; i++)
                flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif