// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mm_inline.h>
#include <linux/string.h>
#include <linux/uio.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"
#include "swap.h"

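/*
 * Per-walk state shared by madvise_cold_or_pageout_pte_range(): the TLB
 * batching context, plus whether the walk should reclaim pages immediately
 * (MADV_PAGEOUT) or merely deactivate them (MADV_COLD).
 */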
struct madvise_walk_private {
	struct mmu_gather *tlb;
	bool pageout;
};

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_lock for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_FREE:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
	case MADV_COLLAPSE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

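/*
 * CONFIG_ANON_VMA_NAME: refcounted, user-supplied names for anonymous
 * mappings, set via prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...).
 */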
#ifdef CONFIG_ANON_VMA_NAME
struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	struct anon_vma_name *anon_name;
	size_t count;

	/* Add 1 for NUL terminator at the end of the anon_name->name */
	count = strlen(name) + 1;
	anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL);
	if (anon_name) {
		kref_init(&anon_name->kref);
		memcpy(anon_name->name, name, count);
	}

	return anon_name;
}

void anon_vma_name_free(struct kref *kref)
{
	struct anon_vma_name *anon_name =
			container_of(kref, struct anon_vma_name, kref);
	kfree(anon_name);
}

struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	mmap_assert_locked(vma->vm_mm);

	if (vma->vm_file)
		return NULL;

	return vma->anon_name;
}

/* mmap_lock should be write-locked */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	struct anon_vma_name *orig_name = anon_vma_name(vma);

	if (!anon_name) {
		vma->anon_name = NULL;
		anon_vma_name_put(orig_name);
		return 0;
	}

	if (anon_vma_name_eq(orig_name, anon_name))
		return 0;

	vma->anon_name = anon_vma_name_reuse(anon_name);
	anon_vma_name_put(orig_name);

	return 0;
}
#else /* CONFIG_ANON_VMA_NAME */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	if (anon_name)
		return -EINVAL;

	return 0;
}
#endif /* CONFIG_ANON_VMA_NAME */
/*
 * Update the vm_flags on a region of a vma, splitting it or merging it as
 * necessary. Must be called with mmap_lock held for writing.
 * Caller should ensure anon_name stability by raising its refcount even when
 * anon_name belongs to a valid vma because this function might free that vma.
 */
static int madvise_update_vma(struct vm_area_struct *vma,
			      struct vm_area_struct **prev, unsigned long start,
			      unsigned long end, unsigned long new_flags,
			      struct anon_vma_name *anon_name)
{
	struct mm_struct *mm = vma->vm_mm;
	int error;
	pgoff_t pgoff;

	if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
		*prev = vma;
		return 0;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx, anon_name);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count))
			return -ENOMEM;
		error = __split_vma(mm, vma, start, 1);
		if (error)
			return error;
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count))
			return -ENOMEM;
		error = __split_vma(mm, vma, end, 0);
		if (error)
			return error;
	}

success:
	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 */
	vma->vm_flags = new_flags;
	if (!vma->vm_file) {
		error = replace_anon_vma_name(vma, anon_name);
		if (error)
			return error;
	}

	return 0;
}

#ifdef CONFIG_SWAP
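/*
 * Walk the ptes in [start, end) and kick off asynchronous swap-in for any
 * swapped-out anonymous pages, dropping the page references immediately so
 * the pages simply sit in the swap cache until they are faulted in.
 */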
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
		unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	unsigned long index;
	struct swap_iocb *splug = NULL;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;
		pte_t *ptep;

		ptep = pte_offset_map_lock(vma->vm_mm, pmd, index, &ptl);
		pte = *ptep;
		pte_unmap_unlock(ptep, ptl);

		if (!is_swap_pte(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					     vma, index, false, &splug);
		if (page)
			put_page(page);
	}
	swap_read_unplug(splug);

	return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
	.pmd_entry		= swapin_walk_pmd_entry,
};

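/*
 * Scan the shmem mapping's xarray for swap entries in the given range and
 * read them back into the swap cache asynchronously.
 */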
static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
	pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1);
	struct page *page;
	struct swap_iocb *splug = NULL;

	rcu_read_lock();
	xas_for_each(&xas, page, end_index) {
		swp_entry_t swap;

		if (!xa_is_value(page))
			continue;
		swap = radix_to_swp_entry(page);
		/* There might be swapin error entries in shmem mapping. */
		if (non_swap_entry(swap))
			continue;
		xas_pause(&xas);
		rcu_read_unlock();

		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
					     NULL, 0, false, &splug);
		if (page)
			put_page(page);

		rcu_read_lock();
	}
	rcu_read_unlock();
	swap_read_unplug(splug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif	/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	loff_t offset;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, start, end,
					file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	/*
	 * Filesystem's fadvise may need to take various locks.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */
	get_file(file);
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	mmap_read_unlock(mm);
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	mmap_read_lock(mm);
	return 0;
}

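/*
 * Shared pte walker for MADV_COLD and MADV_PAGEOUT: age each mapped page
 * (clear the young/referenced bits), then either deactivate it (cold) or
 * isolate it and hand it to reclaim (pageout).
 */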
static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	struct madvise_walk_private *private = walk->private;
	struct mmu_gather *tlb = private->tlb;
	bool pageout = private->pageout;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	pte_t *orig_pte, *pte, ptent;
	spinlock_t *ptl;
	struct page *page = NULL;
	LIST_HEAD(page_list);

	if (fatal_signal_pending(current))
		return -EINTR;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmd)) {
		pmd_t orig_pmd;
		unsigned long next = pmd_addr_end(addr, end);

		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
		ptl = pmd_trans_huge_lock(pmd, vma);
		if (!ptl)
			return 0;

		orig_pmd = *pmd;
		if (is_huge_zero_pmd(orig_pmd))
			goto huge_unlock;

		if (unlikely(!pmd_present(orig_pmd))) {
			VM_BUG_ON(thp_migration_supported() &&
					!is_pmd_migration_entry(orig_pmd));
			goto huge_unlock;
		}

		page = pmd_page(orig_pmd);

		/* Do not interfere with other mappings of this page */
		if (page_mapcount(page) != 1)
			goto huge_unlock;

		if (next - addr != HPAGE_PMD_SIZE) {
			int err;

			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			err = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (!err)
				goto regular_page;
			return 0;
		}

		if (pmd_young(orig_pmd)) {
			pmdp_invalidate(vma, addr, pmd);
			orig_pmd = pmd_mkold(orig_pmd);

			set_pmd_at(mm, addr, pmd, orig_pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		}

		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (pageout) {
			if (!isolate_lru_page(page)) {
				if (PageUnevictable(page))
					putback_lru_page(page);
				else
					list_add(&page->lru, &page_list);
			}
		} else
			deactivate_page(page);
huge_unlock:
		spin_unlock(ptl);
		if (pageout)
			reclaim_pages(&page_list);
		return 0;
	}

regular_page:
	if (pmd_trans_unstable(pmd))
		return 0;
#endif
	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page || is_zone_device_page(page))
			continue;

		/*
		 * Creating a THP page is expensive, so split it only if we
		 * are sure it's worth it. Split it only if we are the sole
		 * owner.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				break;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				break;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
				break;
			}
			unlock_page(page);
			put_page(page);
			orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		/*
		 * Do not interfere with other mappings of this page, and
		 * skip non-LRU pages.
		 */
		if (!PageLRU(page) || page_mapcount(page) != 1)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (pte_young(ptent)) {
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			ptent = pte_mkold(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}

		/*
		 * We are deactivating a page to accelerate its reclaim.
		 * The VM can't reclaim the page unless we clear PG_young.
		 * As a side effect, this confuses idle-page tracking,
		 * which will miss the recent reference history.
		 */
		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (pageout) {
			if (!isolate_lru_page(page)) {
				if (PageUnevictable(page))
					putback_lru_page(page);
				else
					list_add(&page->lru, &page_list);
			}
		} else
			deactivate_page(page);
	}

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	if (pageout)
		reclaim_pages(&page_list);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops cold_walk_ops = {
	.pmd_entry = madvise_cold_or_pageout_pte_range,
};

static void madvise_cold_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = false,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));
}

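/*
 * MADV_COLD: the application hints that it won't touch these pages soon.
 * Deactivate them so they are reclaimed first under memory pressure; no
 * I/O happens here.
 */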
static long madvise_cold(struct vm_area_struct *vma,
			 struct vm_area_struct **prev,
			 unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

static void madvise_pageout_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = true,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static inline bool can_do_pageout(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * paging out pagecache only for non-anonymous mappings that correspond
	 * to the files the calling process could (if tried) open for writing;
	 * otherwise we'd be including shared non-exclusive mappings, which
	 * opens a side channel.
	 */
	return inode_owner_or_capable(&init_user_ns,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}

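/*
 * MADV_PAGEOUT: reclaim the given range right away, writing dirty pages
 * out to swap or their backing store.
 */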
static long madvise_pageout(struct vm_area_struct *vma,
			    struct vm_area_struct **prev,
			    unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	if (!can_do_pageout(vma))
		return 0;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

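/*
 * MADV_FREE pte walker: drop swap entries outright and mark clean,
 * exclusively-owned pages lazyfree (old and clean) so reclaim can discard
 * them without writing them to swap.
 */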
static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct folio *folio;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table
		 * entry to prevent swap-in, which is more expensive than
		 * (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (!non_swap_entry(entry)) {
				nr_swap--;
				free_swap_and_cache(entry);
				pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			} else if (is_hwpoison_entry(entry) ||
				   is_swapin_error_entry(entry)) {
				pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			}
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page || is_zone_device_page(page))
			continue;
		folio = page_folio(page);

		/*
		 * If the pmd isn't transhuge but the folio is large and
		 * owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (folio_test_large(folio)) {
			if (folio_mapcount(folio) != 1)
				goto out;
			folio_get(folio);
			if (!folio_trylock(folio)) {
				folio_put(folio);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_folio(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			folio_unlock(folio);
			folio_put(folio);
			orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
			if (!folio_trylock(folio))
				continue;
			/*
			 * If the folio is shared with others, we mustn't
			 * clear its dirty flag.
			 */
			if (folio_mapcount(folio) != 1) {
				folio_unlock(folio);
				continue;
			}

			if (folio_test_swapcache(folio) &&
			    !folio_free_swap(folio)) {
				folio_unlock(folio);
				continue;
			}

			folio_clear_dirty(folio);
			folio_unlock(folio);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the TLB
			 * with set_pte_at() and tlb_remove_tlb_entry(), so
			 * for portability, remap the pte as old and clean
			 * after clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		mark_page_lazyfree(&folio->page);
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static const struct mm_walk_ops madvise_free_walk_ops = {
	.pmd_entry		= madvise_free_pte_range,
};

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				range.start, range.end);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	tlb_start_vma(&tlb, vma);
	walk_page_range(vma->vm_mm, range.start, range.end,
			&madvise_free_walk_ops, &tlb);
	tlb_end_vma(&tlb, vma);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);

	return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range(vma, start, end - start);
	return 0;
}

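/*
 * Check whether this vma permits MADV_DONTNEED/MADV_FREE. For hugetlb vmas
 * only MADV_DONTNEED(_LOCKED) is allowed, start must be huge-page aligned,
 * and *end may be rounded down to a huge page boundary.
 */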
static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma,
					    unsigned long start,
					    unsigned long *end,
					    int behavior)
{
	if (!is_vm_hugetlb_page(vma)) {
		unsigned int forbidden = VM_PFNMAP;

		if (behavior != MADV_DONTNEED_LOCKED)
			forbidden |= VM_LOCKED;

		return !(vma->vm_flags & forbidden);
	}

	if (behavior != MADV_DONTNEED && behavior != MADV_DONTNEED_LOCKED)
		return false;
	if (start & ~huge_page_mask(hstate_vma(vma)))
		return false;

	/*
	 * Madvise callers expect the length to be rounded up to PAGE_SIZE
	 * boundaries, and may be unaware that this VMA uses huge pages.
	 * Avoid unexpected data loss by rounding down the number of
	 * huge pages freed.
	 */
	*end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma)));

	return true;
}

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	struct mm_struct *mm = vma->vm_mm;

	*prev = vma;
	if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior))
		return -EINVAL;

	if (start == end)
		return 0;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_lock has been dropped, prev is stale */

		mmap_read_lock(mm);
		vma = find_vma(mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end. If start <
			 * vma->vm_start it means a hole materialized
			 * in the user address space within the
			 * virtual range passed to MADV_DONTNEED
			 * or MADV_FREE.
			 */
			return -ENOMEM;
		}
		/*
		 * Potential end adjustment for hugetlb vma is OK as
		 * the check below keeps end within vma.
		 */
		if (!madvise_dontneed_free_valid_vma(vma, start, &end,
						     behavior))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_lock was
			 * released, the effect of the concurrent
			 * operation may not cause madvise() to
			 * have an undefined result. There may be an
			 * adjacent next vma that we'll walk
			 * next. userfaultfd_remove() will generate an
			 * UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}

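/*
 * MADV_POPULATE_READ/WRITE: prefault page tables for the whole range,
 * revalidating the vma whenever the fault path dropped the mmap_lock.
 */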
static long madvise_populate(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end,
			     int behavior)
{
	const bool write = behavior == MADV_POPULATE_WRITE;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long tmp_end;
	int locked = 1;
	long pages;

	*prev = vma;

	while (start < end) {
		/*
		 * We might have temporarily dropped the lock. For example,
		 * our VMA might have been split.
		 */
		if (!vma || start >= vma->vm_end) {
			vma = vma_lookup(mm, start);
			if (!vma)
				return -ENOMEM;
		}

		tmp_end = min_t(unsigned long, end, vma->vm_end);
		/* Populate (prefault) page tables readable/writable. */
		pages = faultin_vma_page_range(vma, start, tmp_end, write,
					       &locked);
		if (!locked) {
			mmap_read_lock(mm);
			locked = 1;
			*prev = NULL;
			vma = NULL;
		}
		if (pages < 0) {
			switch (pages) {
			case -EINTR:
				return -EINTR;
			case -EINVAL: /* Incompatible mappings / permissions. */
				return -EINVAL;
			case -EHWPOISON:
				return -EHWPOISON;
			case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
				return -EFAULT;
			default:
				pr_warn_once("%s: unhandled return value: %ld\n",
					     __func__, pages);
				fallthrough;
			case -ENOMEM:
				return -ENOMEM;
			}
		}
		start += pages * PAGE_SIZE;
	}
	return 0;
}

Badari Pulavarty | f6b3ec2 | 2006-01-06 00:10:38 -0800 | [diff] [blame] | 950 | /* |
| 951 | * Application wants to free up the pages and associated backing store. |
| 952 | * This is effectively punching a hole into the middle of a file. |
Badari Pulavarty | f6b3ec2 | 2006-01-06 00:10:38 -0800 | [diff] [blame] | 953 | */ |
| 954 | static long madvise_remove(struct vm_area_struct *vma, |
Nick Piggin | 00e9fa2 | 2007-03-16 13:38:10 -0800 | [diff] [blame] | 955 | struct vm_area_struct **prev, |
Badari Pulavarty | f6b3ec2 | 2006-01-06 00:10:38 -0800 | [diff] [blame] | 956 | unsigned long start, unsigned long end) |
| 957 | { |
Hugh Dickins | 3f31d07 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 958 | loff_t offset; |
Hugh Dickins | 90ed52e | 2007-03-29 01:20:38 -0700 | [diff] [blame] | 959 | int error; |
Andy Lutomirski | 9ab4233 | 2012-07-05 16:00:11 -0700 | [diff] [blame] | 960 | struct file *f; |
Minchan Kim | 0726b01 | 2020-10-17 16:14:50 -0700 | [diff] [blame] | 961 | struct mm_struct *mm = vma->vm_mm; |
Badari Pulavarty | f6b3ec2 | 2006-01-06 00:10:38 -0800 | [diff] [blame] | 962 | |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 963 | *prev = NULL; /* tell sys_madvise we drop mmap_lock */ |
Nick Piggin | 00e9fa2 | 2007-03-16 13:38:10 -0800 | [diff] [blame] | 964 | |
Mike Kravetz | 72079ba | 2015-09-08 15:01:57 -0700 | [diff] [blame] | 965 | if (vma->vm_flags & VM_LOCKED) |
Badari Pulavarty | f6b3ec2 | 2006-01-06 00:10:38 -0800 | [diff] [blame] | 966 | return -EINVAL; |
| 967 | |
Andy Lutomirski | 9ab4233 | 2012-07-05 16:00:11 -0700 | [diff] [blame] | 968 | f = vma->vm_file; |
| 969 | |
| 970 | if (!f || !f->f_mapping || !f->f_mapping->host) { |
Badari Pulavarty | f6b3ec2 | 2006-01-06 00:10:38 -0800 | [diff] [blame] | 971 | return -EINVAL; |
| 972 | } |
| 973 | |
Hugh Dickins | 69cf0fa | 2006-04-17 22:46:32 +0100 | [diff] [blame] | 974 | if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) |
| 975 | return -EACCES; |
| 976 | |
Badari Pulavarty | f6b3ec2 | 2006-01-06 00:10:38 -0800 | [diff] [blame] | 977 | offset = (loff_t)(start - vma->vm_start) |
| 978 | + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); |
Hugh Dickins | 90ed52e | 2007-03-29 01:20:38 -0700 | [diff] [blame] | 979 | |
Andy Lutomirski | 9ab4233 | 2012-07-05 16:00:11 -0700 | [diff] [blame] | 980 | /* |
Jan Kara | 9608703 | 2021-04-12 15:50:21 +0200 | [diff] [blame] | 981 | * Filesystem's fallocate may need to take i_rwsem. We need to |
Andy Lutomirski | 9ab4233 | 2012-07-05 16:00:11 -0700 | [diff] [blame] | 982 | * explicitly grab a reference because the vma (and hence the |
| 983 | * vma's reference to the file) can go away as soon as we drop |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 984 | * mmap_lock. |
Andy Lutomirski | 9ab4233 | 2012-07-05 16:00:11 -0700 | [diff] [blame] | 985 | */ |
| 986 | get_file(f); |
Andrea Arcangeli | 70ccb92 | 2017-03-09 16:17:11 -0800 | [diff] [blame] | 987 | if (userfaultfd_remove(vma, start, end)) { |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 988 | /* mmap_lock was not released by userfaultfd_remove() */ |
Minchan Kim | 0726b01 | 2020-10-17 16:14:50 -0700 | [diff] [blame] | 989 | mmap_read_unlock(mm); |
Andrea Arcangeli | 70ccb92 | 2017-03-09 16:17:11 -0800 | [diff] [blame] | 990 | } |
Anna Schumaker | 72c72bd | 2014-11-07 14:44:25 -0500 | [diff] [blame] | 991 | error = vfs_fallocate(f, |
Hugh Dickins | 3f31d07 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 992 | FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, |
| 993 | offset, end - start); |
Andy Lutomirski | 9ab4233 | 2012-07-05 16:00:11 -0700 | [diff] [blame] | 994 | fput(f); |
Minchan Kim | 0726b01 | 2020-10-17 16:14:50 -0700 | [diff] [blame] | 995 | mmap_read_lock(mm); |
Hugh Dickins | 90ed52e | 2007-03-29 01:20:38 -0700 | [diff] [blame] | 996 | return error; |
Badari Pulavarty | f6b3ec2 | 2006-01-06 00:10:38 -0800 | [diff] [blame] | 997 | } |
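
/*
 * Illustrative userspace sketch (not part of this file): MADV_REMOVE on a
 * shared mapping of a memfd frees the pages and punches a hole in the
 * backing store, much like fallocate(FALLOC_FL_PUNCH_HOLE) on the fd
 * itself. Assumes memfd_create() (Linux 3.17+, glibc 2.27+).
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = memfd_create("demo", 0);
	char *map;

	if (fd < 0 || ftruncate(fd, 4 * page))
		return 1;
	map = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;
	memset(map, 0xaa, 4 * page);
	/* Drop the middle two pages and their backing store. */
	if (madvise(map + page, 2 * page, MADV_REMOVE))
		perror("madvise(MADV_REMOVE)");
	printf("%d\n", map[page]);	/* 0: the hole reads back as zeroes */
	return 0;
}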
| 998 | |
Colin Cross | ac1e9ac | 2022-01-14 14:05:55 -0800 | [diff] [blame] | 999 | /* |
| 1000 |  * Apply a madvise behavior to a region of a vma. madvise_update_vma() |
| 1001 |  * will handle splitting a vm area into separate areas, each with its own |
| 1002 |  * behavior. |
| 1003 | */ |
| 1004 | static int madvise_vma_behavior(struct vm_area_struct *vma, |
| 1005 | struct vm_area_struct **prev, |
| 1006 | unsigned long start, unsigned long end, |
| 1007 | unsigned long behavior) |
| 1008 | { |
| 1009 | int error; |
Suren Baghdasaryan | 942341d | 2022-03-04 20:28:58 -0800 | [diff] [blame] | 1010 | struct anon_vma_name *anon_name; |
Colin Cross | ac1e9ac | 2022-01-14 14:05:55 -0800 | [diff] [blame] | 1011 | unsigned long new_flags = vma->vm_flags; |
| 1012 | |
| 1013 | switch (behavior) { |
| 1014 | case MADV_REMOVE: |
| 1015 | return madvise_remove(vma, prev, start, end); |
| 1016 | case MADV_WILLNEED: |
| 1017 | return madvise_willneed(vma, prev, start, end); |
| 1018 | case MADV_COLD: |
| 1019 | return madvise_cold(vma, prev, start, end); |
| 1020 | case MADV_PAGEOUT: |
| 1021 | return madvise_pageout(vma, prev, start, end); |
| 1022 | case MADV_FREE: |
| 1023 | case MADV_DONTNEED: |
Johannes Weiner | 9457056 | 2022-03-24 18:14:12 -0700 | [diff] [blame] | 1024 | case MADV_DONTNEED_LOCKED: |
Colin Cross | ac1e9ac | 2022-01-14 14:05:55 -0800 | [diff] [blame] | 1025 | return madvise_dontneed_free(vma, prev, start, end, behavior); |
| 1026 | case MADV_POPULATE_READ: |
| 1027 | case MADV_POPULATE_WRITE: |
| 1028 | return madvise_populate(vma, prev, start, end, behavior); |
| 1029 | case MADV_NORMAL: |
| 1030 | new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ; |
| 1031 | break; |
| 1032 | case MADV_SEQUENTIAL: |
| 1033 | new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ; |
| 1034 | break; |
| 1035 | case MADV_RANDOM: |
| 1036 | new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ; |
| 1037 | break; |
| 1038 | case MADV_DONTFORK: |
| 1039 | new_flags |= VM_DONTCOPY; |
| 1040 | break; |
| 1041 | case MADV_DOFORK: |
| 1042 | if (vma->vm_flags & VM_IO) |
| 1043 | return -EINVAL; |
| 1044 | new_flags &= ~VM_DONTCOPY; |
| 1045 | break; |
| 1046 | case MADV_WIPEONFORK: |
| 1047 | /* MADV_WIPEONFORK is only supported on anonymous memory. */ |
| 1048 | if (vma->vm_file || vma->vm_flags & VM_SHARED) |
| 1049 | return -EINVAL; |
| 1050 | new_flags |= VM_WIPEONFORK; |
| 1051 | break; |
| 1052 | case MADV_KEEPONFORK: |
| 1053 | new_flags &= ~VM_WIPEONFORK; |
| 1054 | break; |
| 1055 | case MADV_DONTDUMP: |
| 1056 | new_flags |= VM_DONTDUMP; |
| 1057 | break; |
| 1058 | case MADV_DODUMP: |
| 1059 | if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) |
| 1060 | return -EINVAL; |
| 1061 | new_flags &= ~VM_DONTDUMP; |
| 1062 | break; |
| 1063 | case MADV_MERGEABLE: |
| 1064 | case MADV_UNMERGEABLE: |
| 1065 | error = ksm_madvise(vma, start, end, behavior, &new_flags); |
| 1066 | if (error) |
| 1067 | goto out; |
| 1068 | break; |
| 1069 | case MADV_HUGEPAGE: |
| 1070 | case MADV_NOHUGEPAGE: |
| 1071 | error = hugepage_madvise(vma, &new_flags, behavior); |
| 1072 | if (error) |
| 1073 | goto out; |
| 1074 | break; |
Zach O'Keefe | 7d8faaf | 2022-07-06 16:59:27 -0700 | [diff] [blame] | 1075 | case MADV_COLLAPSE: |
| 1076 | return madvise_collapse(vma, prev, start, end); |
Colin Cross | ac1e9ac | 2022-01-14 14:05:55 -0800 | [diff] [blame] | 1077 | } |
| 1078 | |
Suren Baghdasaryan | 942341d | 2022-03-04 20:28:58 -0800 | [diff] [blame] | 1079 | anon_name = anon_vma_name(vma); |
| 1080 | anon_vma_name_get(anon_name); |
Colin Cross | 9a10064 | 2022-01-14 14:05:59 -0800 | [diff] [blame] | 1081 | error = madvise_update_vma(vma, prev, start, end, new_flags, |
Suren Baghdasaryan | 942341d | 2022-03-04 20:28:58 -0800 | [diff] [blame] | 1082 | anon_name); |
| 1083 | anon_vma_name_put(anon_name); |
Colin Cross | ac1e9ac | 2022-01-14 14:05:55 -0800 | [diff] [blame] | 1084 | |
| 1085 | out: |
| 1086 | /* |
| 1087 | * madvise() returns EAGAIN if kernel resources, such as |
| 1088 | * slab, are temporarily unavailable. |
| 1089 | */ |
| 1090 | if (error == -ENOMEM) |
| 1091 | error = -EAGAIN; |
| 1092 | return error; |
| 1093 | } |
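
/*
 * Illustrative userspace sketch (not part of this file): the flag-updating
 * cases above only rewrite vma->vm_flags. MADV_WIPEONFORK, for instance,
 * makes a forked child see zero-filled memory in the range while the
 * parent's contents are untouched. Assumes Linux 4.14+.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	p[0] = 42;
	madvise(p, 4096, MADV_WIPEONFORK);	/* anonymous private only */
	if (fork() == 0) {
		printf("child sees %d\n", p[0]);	/* 0: wiped on fork */
		_exit(0);
	}
	wait(NULL);
	printf("parent sees %d\n", p[0]);	/* still 42 */
	return 0;
}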
| 1094 | |
Andi Kleen | 9893e49 | 2009-09-16 11:50:17 +0200 | [diff] [blame] | 1095 | #ifdef CONFIG_MEMORY_FAILURE |
| 1096 | /* |
| 1097 | * Error injection support for memory error handling. |
| 1098 | */ |
Anshuman Khandual | 97167a7 | 2017-05-03 14:55:25 -0700 | [diff] [blame] | 1099 | static int madvise_inject_error(int behavior, |
| 1100 | unsigned long start, unsigned long end) |
Andi Kleen | 9893e49 | 2009-09-16 11:50:17 +0200 | [diff] [blame] | 1101 | { |
Yunfeng Ye | d3cd257 | 2019-11-30 17:57:42 -0800 | [diff] [blame] | 1102 | unsigned long size; |
Anshuman Khandual | 97167a7 | 2017-05-03 14:55:25 -0700 | [diff] [blame] | 1103 | |
Andi Kleen | 9893e49 | 2009-09-16 11:50:17 +0200 | [diff] [blame] | 1104 | if (!capable(CAP_SYS_ADMIN)) |
| 1105 | return -EPERM; |
Anshuman Khandual | 97167a7 | 2017-05-03 14:55:25 -0700 | [diff] [blame] | 1106 | |
Yunfeng Ye | d3cd257 | 2019-11-30 17:57:42 -0800 | [diff] [blame] | 1108 | for (; start < end; start += size) { |
Dan Williams | 23e7b5c | 2018-07-13 21:50:06 -0700 | [diff] [blame] | 1109 | unsigned long pfn; |
Oscar Salvador | dc7560b | 2020-10-15 20:06:53 -0700 | [diff] [blame] | 1110 | struct page *page; |
Andrew Morton | 325c4ef | 2013-09-11 14:23:03 -0700 | [diff] [blame] | 1111 | int ret; |
| 1112 | |
Anshuman Khandual | 97167a7 | 2017-05-03 14:55:25 -0700 | [diff] [blame] | 1113 | ret = get_user_pages_fast(start, 1, 0, &page); |
Andi Kleen | 9893e49 | 2009-09-16 11:50:17 +0200 | [diff] [blame] | 1114 | if (ret != 1) |
| 1115 | return ret; |
Dan Williams | 23e7b5c | 2018-07-13 21:50:06 -0700 | [diff] [blame] | 1116 | pfn = page_to_pfn(page); |
Andrew Morton | 325c4ef | 2013-09-11 14:23:03 -0700 | [diff] [blame] | 1117 | |
Alexandru Moise | 19bfbe2 | 2017-10-03 16:14:31 -0700 | [diff] [blame] | 1118 | /* |
| 1119 | * When soft offlining hugepages, after migrating the page |
| 1120 |  * we dissolve it, so on the next loop iteration "page" will |
Yunfeng Ye | d3cd257 | 2019-11-30 17:57:42 -0800 | [diff] [blame] | 1121 | * no longer be a compound page. |
Alexandru Moise | 19bfbe2 | 2017-10-03 16:14:31 -0700 | [diff] [blame] | 1122 | */ |
Yunfeng Ye | d3cd257 | 2019-11-30 17:57:42 -0800 | [diff] [blame] | 1123 | size = page_size(compound_head(page)); |
Alexandru Moise | 19bfbe2 | 2017-10-03 16:14:31 -0700 | [diff] [blame] | 1124 | |
Anshuman Khandual | 97167a7 | 2017-05-03 14:55:25 -0700 | [diff] [blame] | 1125 | if (behavior == MADV_SOFT_OFFLINE) { |
| 1126 | pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n", |
Oscar Salvador | dc7560b | 2020-10-15 20:06:53 -0700 | [diff] [blame] | 1127 | pfn, start); |
Naoya Horiguchi | feec24a | 2019-11-30 17:53:38 -0800 | [diff] [blame] | 1128 | ret = soft_offline_page(pfn, MF_COUNT_INCREASED); |
Oscar Salvador | dc7560b | 2020-10-15 20:06:53 -0700 | [diff] [blame] | 1129 | } else { |
| 1130 | pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n", |
| 1131 | pfn, start); |
zhenwei pi | 67f22ba | 2022-06-15 17:32:09 +0800 | [diff] [blame] | 1132 | ret = memory_failure(pfn, MF_COUNT_INCREASED | MF_SW_SIMULATED); |
luofei | d1fe111 | 2022-03-22 14:44:38 -0700 | [diff] [blame] | 1133 | if (ret == -EOPNOTSUPP) |
| 1134 | ret = 0; |
Andi Kleen | afcf938 | 2009-12-16 12:20:00 +0100 | [diff] [blame] | 1135 | } |
Anshuman Khandual | 97167a7 | 2017-05-03 14:55:25 -0700 | [diff] [blame] | 1136 | |
Naoya Horiguchi | 23a003b | 2016-03-15 14:56:36 -0700 | [diff] [blame] | 1137 | if (ret) |
| 1138 | return ret; |
Andi Kleen | 9893e49 | 2009-09-16 11:50:17 +0200 | [diff] [blame] | 1139 | } |
Mel Gorman | c461ad6 | 2017-08-31 16:15:30 -0700 | [diff] [blame] | 1140 | |
Andrew Morton | 325c4ef | 2013-09-11 14:23:03 -0700 | [diff] [blame] | 1141 | return 0; |
Andi Kleen | 9893e49 | 2009-09-16 11:50:17 +0200 | [diff] [blame] | 1142 | } |
| 1143 | #endif |
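
/*
 * Illustrative userspace sketch (not part of this file): with
 * CONFIG_MEMORY_FAILURE and CAP_SYS_ADMIN, MADV_HWPOISON poisons a page as
 * if hardware had reported an uncorrectable error there; the next access
 * is typically answered with SIGBUS. Only run this in a disposable VM.
 */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;	/* fault the page in first */
	if (madvise(p, 4096, MADV_HWPOISON)) {
		perror("madvise(MADV_HWPOISON)");	/* EPERM without CAP_SYS_ADMIN */
		return 1;
	}
	p[0] = 2;	/* usually killed by SIGBUS here */
	return 0;
}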
| 1144 | |
Nicholas Krause | 1ecef9e | 2015-09-04 15:48:24 -0700 | [diff] [blame] | 1145 | static bool |
Nick Piggin | 75927af | 2009-06-16 15:32:38 -0700 | [diff] [blame] | 1146 | madvise_behavior_valid(int behavior) |
| 1147 | { |
| 1148 | switch (behavior) { |
| 1149 | case MADV_DOFORK: |
| 1150 | case MADV_DONTFORK: |
| 1151 | case MADV_NORMAL: |
| 1152 | case MADV_SEQUENTIAL: |
| 1153 | case MADV_RANDOM: |
| 1154 | case MADV_REMOVE: |
| 1155 | case MADV_WILLNEED: |
| 1156 | case MADV_DONTNEED: |
Johannes Weiner | 9457056 | 2022-03-24 18:14:12 -0700 | [diff] [blame] | 1157 | case MADV_DONTNEED_LOCKED: |
Minchan Kim | 854e9ed | 2016-01-15 16:54:53 -0800 | [diff] [blame] | 1158 | case MADV_FREE: |
Minchan Kim | 9c276cc | 2019-09-25 16:49:08 -0700 | [diff] [blame] | 1159 | case MADV_COLD: |
Minchan Kim | 1a4e58c | 2019-09-25 16:49:15 -0700 | [diff] [blame] | 1160 | case MADV_PAGEOUT: |
David Hildenbrand | 4ca9b385 | 2021-06-30 18:52:28 -0700 | [diff] [blame] | 1161 | case MADV_POPULATE_READ: |
| 1162 | case MADV_POPULATE_WRITE: |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1163 | #ifdef CONFIG_KSM |
| 1164 | case MADV_MERGEABLE: |
| 1165 | case MADV_UNMERGEABLE: |
| 1166 | #endif |
Andrea Arcangeli | 0af4e98 | 2011-01-13 15:46:55 -0800 | [diff] [blame] | 1167 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 1168 | case MADV_HUGEPAGE: |
Andrea Arcangeli | a664b2d | 2011-01-13 15:47:17 -0800 | [diff] [blame] | 1169 | case MADV_NOHUGEPAGE: |
Zach O'Keefe | 7d8faaf | 2022-07-06 16:59:27 -0700 | [diff] [blame] | 1170 | case MADV_COLLAPSE: |
Andrea Arcangeli | 0af4e98 | 2011-01-13 15:46:55 -0800 | [diff] [blame] | 1171 | #endif |
Jason Baron | accb61f | 2012-03-23 15:02:51 -0700 | [diff] [blame] | 1172 | case MADV_DONTDUMP: |
| 1173 | case MADV_DODUMP: |
Rik van Riel | d2cd9ed | 2017-09-06 16:25:15 -0700 | [diff] [blame] | 1174 | case MADV_WIPEONFORK: |
| 1175 | case MADV_KEEPONFORK: |
Anshuman Khandual | 5e451be | 2017-05-03 14:55:28 -0700 | [diff] [blame] | 1176 | #ifdef CONFIG_MEMORY_FAILURE |
| 1177 | case MADV_SOFT_OFFLINE: |
| 1178 | case MADV_HWPOISON: |
| 1179 | #endif |
Nicholas Krause | 1ecef9e | 2015-09-04 15:48:24 -0700 | [diff] [blame] | 1180 | return true; |
Nick Piggin | 75927af | 2009-06-16 15:32:38 -0700 | [diff] [blame] | 1181 | |
| 1182 | default: |
Nicholas Krause | 1ecef9e | 2015-09-04 15:48:24 -0700 | [diff] [blame] | 1183 | return false; |
Nick Piggin | 75927af | 2009-06-16 15:32:38 -0700 | [diff] [blame] | 1184 | } |
| 1185 | } |
Hugh Dickins | 3866ea9 | 2009-09-21 17:01:52 -0700 | [diff] [blame] | 1186 | |
Zach O'Keefe | 876b4a1 | 2022-07-06 16:59:30 -0700 | [diff] [blame] | 1187 | static bool process_madvise_behavior_valid(int behavior) |
Minchan Kim | ecb8ac8 | 2020-10-17 16:14:59 -0700 | [diff] [blame] | 1188 | { |
| 1189 | switch (behavior) { |
| 1190 | case MADV_COLD: |
| 1191 | case MADV_PAGEOUT: |
zhangkui | d5fffc5 | 2021-09-02 15:01:11 -0700 | [diff] [blame] | 1192 | case MADV_WILLNEED: |
Zach O'Keefe | 876b4a1 | 2022-07-06 16:59:30 -0700 | [diff] [blame] | 1193 | case MADV_COLLAPSE: |
Minchan Kim | ecb8ac8 | 2020-10-17 16:14:59 -0700 | [diff] [blame] | 1194 | return true; |
| 1195 | default: |
| 1196 | return false; |
| 1197 | } |
| 1198 | } |
| 1199 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1200 | /* |
Colin Cross | ac1e9ac | 2022-01-14 14:05:55 -0800 | [diff] [blame] | 1201 | * Walk the vmas in range [start,end), and call the visit function on each one. |
| 1202 | * The visit function will get start and end parameters that cover the overlap |
| 1203 | * between the current vma and the original range. Any unmapped regions in the |
| 1204 | * original range will result in this function returning -ENOMEM while still |
| 1205 | * calling the visit function on all of the existing vmas in the range. |
| 1206 | * Must be called with the mmap_lock held for reading or writing. |
| 1207 | */ |
| 1208 | static |
| 1209 | int madvise_walk_vmas(struct mm_struct *mm, unsigned long start, |
| 1210 | unsigned long end, unsigned long arg, |
| 1211 | int (*visit)(struct vm_area_struct *vma, |
| 1212 | struct vm_area_struct **prev, unsigned long start, |
| 1213 | unsigned long end, unsigned long arg)) |
| 1214 | { |
| 1215 | struct vm_area_struct *vma; |
| 1216 | struct vm_area_struct *prev; |
| 1217 | unsigned long tmp; |
| 1218 | int unmapped_error = 0; |
| 1219 | |
| 1220 | /* |
| 1221 | * If the interval [start,end) covers some unmapped address |
| 1222 | * ranges, just ignore them, but return -ENOMEM at the end. |
| 1223 |  * This differs from how mlock and friends handle unmapped ranges. |
| 1224 | */ |
| 1225 | vma = find_vma_prev(mm, start, &prev); |
| 1226 | if (vma && start > vma->vm_start) |
| 1227 | prev = vma; |
| 1228 | |
| 1229 | for (;;) { |
| 1230 | int error; |
| 1231 | |
| 1232 | /* Still start < end. */ |
| 1233 | if (!vma) |
| 1234 | return -ENOMEM; |
| 1235 | |
| 1236 | /* Here start < (end|vma->vm_end). */ |
| 1237 | if (start < vma->vm_start) { |
| 1238 | unmapped_error = -ENOMEM; |
| 1239 | start = vma->vm_start; |
| 1240 | if (start >= end) |
| 1241 | break; |
| 1242 | } |
| 1243 | |
| 1244 | /* Here vma->vm_start <= start < (end|vma->vm_end) */ |
| 1245 | tmp = vma->vm_end; |
| 1246 | if (end < tmp) |
| 1247 | tmp = end; |
| 1248 | |
| 1249 | /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */ |
| 1250 | error = visit(vma, &prev, start, tmp, arg); |
| 1251 | if (error) |
| 1252 | return error; |
| 1253 | start = tmp; |
| 1254 | if (prev && start < prev->vm_end) |
| 1255 | start = prev->vm_end; |
| 1256 | if (start >= end) |
| 1257 | break; |
| 1258 | if (prev) |
Liam R. Howlett | 35474818 | 2022-09-06 19:49:01 +0000 | [diff] [blame] | 1259 | vma = find_vma(mm, prev->vm_end); |
Colin Cross | ac1e9ac | 2022-01-14 14:05:55 -0800 | [diff] [blame] | 1260 | else /* madvise_remove dropped mmap_lock */ |
| 1261 | vma = find_vma(mm, start); |
| 1262 | } |
| 1263 | |
| 1264 | return unmapped_error; |
| 1265 | } |
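
/*
 * Illustrative userspace sketch (not part of this file): because the walker
 * above keeps visiting mapped vmas but records unmapped gaps, madvise()
 * over a range containing a hole applies the hint where it can and still
 * returns -ENOMEM. Error handling is minimal.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	munmap(p + page, page);		/* punch an unmapped hole */
	if (madvise(p, 3 * page, MADV_DONTNEED))
		printf("errno=%d (ENOMEM=%d)\n", errno, ENOMEM);
	return 0;
}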
| 1266 | |
Colin Cross | 9a10064 | 2022-01-14 14:05:59 -0800 | [diff] [blame] | 1267 | #ifdef CONFIG_ANON_VMA_NAME |
| 1268 | static int madvise_vma_anon_name(struct vm_area_struct *vma, |
| 1269 | struct vm_area_struct **prev, |
| 1270 | unsigned long start, unsigned long end, |
Suren Baghdasaryan | 5c26f6a | 2022-03-04 20:28:51 -0800 | [diff] [blame] | 1271 | unsigned long anon_name) |
Colin Cross | 9a10064 | 2022-01-14 14:05:59 -0800 | [diff] [blame] | 1272 | { |
| 1273 | int error; |
| 1274 | |
| 1275 | /* Only anonymous mappings can be named */ |
| 1276 | if (vma->vm_file) |
| 1277 | return -EBADF; |
| 1278 | |
| 1279 | error = madvise_update_vma(vma, prev, start, end, vma->vm_flags, |
Suren Baghdasaryan | 5c26f6a | 2022-03-04 20:28:51 -0800 | [diff] [blame] | 1280 | (struct anon_vma_name *)anon_name); |
Colin Cross | 9a10064 | 2022-01-14 14:05:59 -0800 | [diff] [blame] | 1281 | |
| 1282 | /* |
| 1283 | * madvise() returns EAGAIN if kernel resources, such as |
| 1284 | * slab, are temporarily unavailable. |
| 1285 | */ |
| 1286 | if (error == -ENOMEM) |
| 1287 | error = -EAGAIN; |
| 1288 | return error; |
| 1289 | } |
| 1290 | |
| 1291 | int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, |
Suren Baghdasaryan | 5c26f6a | 2022-03-04 20:28:51 -0800 | [diff] [blame] | 1292 | unsigned long len_in, struct anon_vma_name *anon_name) |
Colin Cross | 9a10064 | 2022-01-14 14:05:59 -0800 | [diff] [blame] | 1293 | { |
| 1294 | unsigned long end; |
| 1295 | unsigned long len; |
| 1296 | |
| 1297 | if (start & ~PAGE_MASK) |
| 1298 | return -EINVAL; |
| 1299 | len = (len_in + ~PAGE_MASK) & PAGE_MASK; |
| 1300 | |
| 1301 | 	/* Check to see whether len was rounded up from a small negative value to zero */ |
| 1302 | if (len_in && !len) |
| 1303 | return -EINVAL; |
| 1304 | |
| 1305 | end = start + len; |
| 1306 | if (end < start) |
| 1307 | return -EINVAL; |
| 1308 | |
| 1309 | if (end == start) |
| 1310 | return 0; |
| 1311 | |
Suren Baghdasaryan | 5c26f6a | 2022-03-04 20:28:51 -0800 | [diff] [blame] | 1312 | return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name, |
Colin Cross | 9a10064 | 2022-01-14 14:05:59 -0800 | [diff] [blame] | 1313 | madvise_vma_anon_name); |
| 1314 | } |
| 1315 | #endif /* CONFIG_ANON_VMA_NAME */ |
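
/*
 * Illustrative userspace sketch (not part of this file): userspace reaches
 * madvise_set_anon_name() through prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
 * ...), not through madvise(). Assumes CONFIG_ANON_VMA_NAME (Linux 5.17+)
 * and UAPI headers that define the two constants.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	/* The range then shows up as "[anon:my buffer]" in /proc/self/maps. */
	if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
		  (unsigned long)p, 4096, "my buffer"))
		perror("prctl(PR_SET_VMA)");
	return 0;
}
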
Colin Cross | ac1e9ac | 2022-01-14 14:05:55 -0800 | [diff] [blame] | 1316 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1317 | * The madvise(2) system call. |
| 1318 | * |
| 1319 | * Applications can use madvise() to advise the kernel how it should |
| 1320 | * handle paging I/O in this VM area. The idea is to help the kernel |
| 1321 | * use appropriate read-ahead and caching techniques. The information |
| 1322 | * provided is advisory only, and can be safely disregarded by the |
| 1323 | * kernel without affecting the correct operation of the application. |
| 1324 | * |
| 1325 | * behavior values: |
| 1326 | * MADV_NORMAL - the default behavior is to read clusters. This |
| 1327 | * results in some read-ahead and read-behind. |
| 1328 | * MADV_RANDOM - the system should read the minimum amount of data |
| 1329 |  * on any access, since it is unlikely that the |
| 1330 |  * application will need more than what it asks for. |
| 1331 | * MADV_SEQUENTIAL - pages in the given range will probably be accessed |
| 1332 | * once, so they can be aggressively read ahead, and |
| 1333 | * can be freed soon after they are accessed. |
| 1334 | * MADV_WILLNEED - the application is notifying the system to read |
| 1335 | * some pages ahead. |
| 1336 | * MADV_DONTNEED - the application is finished with the given range, |
| 1337 | * so the kernel can free resources associated with it. |
Naoya Horiguchi | d7206a7 | 2016-03-15 14:56:58 -0700 | [diff] [blame] | 1338 | * MADV_FREE - the application marks pages in the given range as lazy free, |
| 1339 | * where actual purges are postponed until memory pressure happens. |
Badari Pulavarty | f6b3ec2 | 2006-01-06 00:10:38 -0800 | [diff] [blame] | 1340 | * MADV_REMOVE - the application wants to free up the given range of |
| 1341 | * pages and associated backing store. |
Hugh Dickins | 3866ea9 | 2009-09-21 17:01:52 -0700 | [diff] [blame] | 1342 | * MADV_DONTFORK - omit this area from child's address space when forking: |
| 1343 | * typically, to avoid COWing pages pinned by get_user_pages(). |
| 1344 | * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking. |
Yang Shi | c02c300 | 2017-10-13 15:57:37 -0700 | [diff] [blame] | 1345 | * MADV_WIPEONFORK - present the child process with zero-filled memory in this |
| 1346 | * range after a fork. |
| 1347 | * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK |
Naoya Horiguchi | d7206a7 | 2016-03-15 14:56:58 -0700 | [diff] [blame] | 1348 | * MADV_HWPOISON - trigger memory error handler as if the given memory range |
| 1349 | * were corrupted by unrecoverable hardware memory failure. |
| 1350 | * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory. |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1351 | * MADV_MERGEABLE - the application recommends that KSM try to merge pages in |
| 1352 | * this area with pages of identical content from other such areas. |
| 1353 |  * MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others. |
Naoya Horiguchi | d7206a7 | 2016-03-15 14:56:58 -0700 | [diff] [blame] | 1354 | * MADV_HUGEPAGE - the application wants to back the given range by transparent |
| 1355 | * huge pages in the future. Existing pages might be coalesced and |
| 1356 | * new pages might be allocated as THP. |
| 1357 | * MADV_NOHUGEPAGE - mark the given range as not worth being backed by |
| 1358 | * transparent huge pages so the existing pages will not be |
| 1359 | * coalesced into THP and new pages will not be allocated as THP. |
Zach O'Keefe | 7d8faaf | 2022-07-06 16:59:27 -0700 | [diff] [blame] | 1360 | * MADV_COLLAPSE - synchronously coalesce pages into new THP. |
Naoya Horiguchi | d7206a7 | 2016-03-15 14:56:58 -0700 | [diff] [blame] | 1361 | * MADV_DONTDUMP - the application wants to prevent pages in the given range |
| 1362 | * from being included in its core dump. |
| 1363 | * MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump. |
Minchan Kim | ecb8ac8 | 2020-10-17 16:14:59 -0700 | [diff] [blame] | 1364 | * MADV_COLD - the application is not expected to use this memory soon, |
| 1365 | * deactivate pages in this range so that they can be reclaimed |
Ingo Molnar | f0953a1 | 2021-05-06 18:06:47 -0700 | [diff] [blame] | 1366 | * easily if memory pressure happens. |
Minchan Kim | ecb8ac8 | 2020-10-17 16:14:59 -0700 | [diff] [blame] | 1367 | * MADV_PAGEOUT - the application is not expected to use this memory soon, |
| 1368 | * page out the pages in this range immediately. |
David Hildenbrand | 4ca9b385 | 2021-06-30 18:52:28 -0700 | [diff] [blame] | 1369 | * MADV_POPULATE_READ - populate (prefault) page tables readable by |
| 1370 | * triggering read faults if required |
| 1371 | * MADV_POPULATE_WRITE - populate (prefault) page tables writable by |
| 1372 | * triggering write faults if required |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1373 | * |
| 1374 | * return values: |
| 1375 | * zero - success |
| 1376 | * -EINVAL - start + len < 0, start is not page-aligned, |
| 1377 | * "behavior" is not a valid value, or application |
Yang Shi | c02c300 | 2017-10-13 15:57:37 -0700 | [diff] [blame] | 1378 | * is attempting to release locked or shared pages, |
| 1379 |  * or the specified address range includes file, Huge TLB, |
| 1380 |  * MAP_SHARED or VM_PFNMAP ranges. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1381 | * -ENOMEM - addresses in the specified range are not currently |
| 1382 | * mapped, or are outside the AS of the process. |
| 1383 | * -EIO - an I/O error occurred while paging in data. |
| 1384 | * -EBADF - map exists, but area maps something that isn't a file. |
| 1385 | * -EAGAIN - a kernel resource was temporarily unavailable. |
| 1386 | */ |
Minchan Kim | 0726b01 | 2020-10-17 16:14:50 -0700 | [diff] [blame] | 1387 | int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1388 | { |
Colin Cross | ac1e9ac | 2022-01-14 14:05:55 -0800 | [diff] [blame] | 1389 | unsigned long end; |
| 1390 | int error; |
Jason Baron | f797779 | 2007-07-15 23:38:21 -0700 | [diff] [blame] | 1391 | int write; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1392 | size_t len; |
Shaohua Li | 1998cc0 | 2013-02-22 16:32:31 -0800 | [diff] [blame] | 1393 | struct blk_plug plug; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1394 | |
Andrey Konovalov | 057d3389 | 2019-09-25 16:48:30 -0700 | [diff] [blame] | 1395 | start = untagged_addr(start); |
| 1396 | |
Nick Piggin | 75927af | 2009-06-16 15:32:38 -0700 | [diff] [blame] | 1397 | if (!madvise_behavior_valid(behavior)) |
Colin Cross | ac1e9ac | 2022-01-14 14:05:55 -0800 | [diff] [blame] | 1398 | return -EINVAL; |
Nick Piggin | 75927af | 2009-06-16 15:32:38 -0700 | [diff] [blame] | 1399 | |
Wei Yang | df6c650 | 2019-11-30 17:57:46 -0800 | [diff] [blame] | 1400 | if (!PAGE_ALIGNED(start)) |
Colin Cross | ac1e9ac | 2022-01-14 14:05:55 -0800 | [diff] [blame] | 1401 | return -EINVAL; |
Wei Yang | df6c650 | 2019-11-30 17:57:46 -0800 | [diff] [blame] | 1402 | len = PAGE_ALIGN(len_in); |
Rasmus Villemoes | 84d96d8 | 2013-04-29 15:08:23 -0700 | [diff] [blame] | 1403 | |
| 1404 | 	/* Check to see whether len was rounded up from a small negative value to zero */ |
| 1405 | if (len_in && !len) |
Colin Cross | ac1e9ac | 2022-01-14 14:05:55 -0800 | [diff] [blame] | 1406 | return -EINVAL; |
Rasmus Villemoes | 84d96d8 | 2013-04-29 15:08:23 -0700 | [diff] [blame] | 1407 | |
| 1408 | end = start + len; |
| 1409 | if (end < start) |
Colin Cross | ac1e9ac | 2022-01-14 14:05:55 -0800 | [diff] [blame] | 1410 | return -EINVAL; |
Rasmus Villemoes | 84d96d8 | 2013-04-29 15:08:23 -0700 | [diff] [blame] | 1411 | |
Rasmus Villemoes | 84d96d8 | 2013-04-29 15:08:23 -0700 | [diff] [blame] | 1412 | if (end == start) |
Colin Cross | ac1e9ac | 2022-01-14 14:05:55 -0800 | [diff] [blame] | 1413 | return 0; |
Rasmus Villemoes | 84d96d8 | 2013-04-29 15:08:23 -0700 | [diff] [blame] | 1414 | |
Anshuman Khandual | 5e451be | 2017-05-03 14:55:28 -0700 | [diff] [blame] | 1415 | #ifdef CONFIG_MEMORY_FAILURE |
| 1416 | if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE) |
| 1417 | return madvise_inject_error(behavior, start, start + len_in); |
| 1418 | #endif |
| 1419 | |
Jason Baron | f797779 | 2007-07-15 23:38:21 -0700 | [diff] [blame] | 1420 | write = madvise_need_mmap_write(behavior); |
Michal Hocko | dc0ef0d | 2016-05-23 16:25:27 -0700 | [diff] [blame] | 1421 | if (write) { |
Minchan Kim | 0726b01 | 2020-10-17 16:14:50 -0700 | [diff] [blame] | 1422 | if (mmap_write_lock_killable(mm)) |
Michal Hocko | dc0ef0d | 2016-05-23 16:25:27 -0700 | [diff] [blame] | 1423 | return -EINTR; |
| 1424 | } else { |
Minchan Kim | 0726b01 | 2020-10-17 16:14:50 -0700 | [diff] [blame] | 1425 | mmap_read_lock(mm); |
Michal Hocko | dc0ef0d | 2016-05-23 16:25:27 -0700 | [diff] [blame] | 1426 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1427 | |
Shaohua Li | 1998cc0 | 2013-02-22 16:32:31 -0800 | [diff] [blame] | 1428 | blk_start_plug(&plug); |
Colin Cross | ac1e9ac | 2022-01-14 14:05:55 -0800 | [diff] [blame] | 1429 | error = madvise_walk_vmas(mm, start, end, behavior, |
| 1430 | madvise_vma_behavior); |
Rasmus Villemoes | 84d96d8 | 2013-04-29 15:08:23 -0700 | [diff] [blame] | 1431 | blk_finish_plug(&plug); |
Jason Baron | f797779 | 2007-07-15 23:38:21 -0700 | [diff] [blame] | 1432 | if (write) |
Minchan Kim | 0726b01 | 2020-10-17 16:14:50 -0700 | [diff] [blame] | 1433 | mmap_write_unlock(mm); |
Nick Piggin | 0a27a14 | 2007-05-06 14:49:53 -0700 | [diff] [blame] | 1434 | else |
Minchan Kim | 0726b01 | 2020-10-17 16:14:50 -0700 | [diff] [blame] | 1435 | mmap_read_unlock(mm); |
Nick Piggin | 0a27a14 | 2007-05-06 14:49:53 -0700 | [diff] [blame] | 1436 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1437 | return error; |
| 1438 | } |
Jens Axboe | db08ca2 | 2019-12-25 22:14:54 -0700 | [diff] [blame] | 1439 | |
| 1440 | SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior) |
| 1441 | { |
Minchan Kim | 0726b01 | 2020-10-17 16:14:50 -0700 | [diff] [blame] | 1442 | return do_madvise(current->mm, start, len_in, behavior); |
Jens Axboe | db08ca2 | 2019-12-25 22:14:54 -0700 | [diff] [blame] | 1443 | } |
Minchan Kim | ecb8ac8 | 2020-10-17 16:14:59 -0700 | [diff] [blame] | 1444 | |
| 1445 | SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec, |
| 1446 | size_t, vlen, int, behavior, unsigned int, flags) |
| 1447 | { |
| 1448 | ssize_t ret; |
| 1449 | struct iovec iovstack[UIO_FASTIOV], iovec; |
| 1450 | struct iovec *iov = iovstack; |
| 1451 | struct iov_iter iter; |
Minchan Kim | ecb8ac8 | 2020-10-17 16:14:59 -0700 | [diff] [blame] | 1452 | struct task_struct *task; |
| 1453 | struct mm_struct *mm; |
| 1454 | size_t total_len; |
| 1455 | unsigned int f_flags; |
| 1456 | |
| 1457 | if (flags != 0) { |
| 1458 | ret = -EINVAL; |
| 1459 | goto out; |
| 1460 | } |
| 1461 | |
| 1462 | ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter); |
| 1463 | if (ret < 0) |
| 1464 | goto out; |
| 1465 | |
Christian Brauner | ee9955d | 2021-10-11 15:32:45 +0200 | [diff] [blame] | 1466 | task = pidfd_get_task(pidfd, &f_flags); |
| 1467 | if (IS_ERR(task)) { |
| 1468 | ret = PTR_ERR(task); |
Minchan Kim | ecb8ac8 | 2020-10-17 16:14:59 -0700 | [diff] [blame] | 1469 | goto free_iov; |
| 1470 | } |
| 1471 | |
Minchan Kim | a68a026 | 2020-12-08 20:57:18 -0800 | [diff] [blame] | 1472 | if (!process_madvise_behavior_valid(behavior)) { |
Minchan Kim | ecb8ac8 | 2020-10-17 16:14:59 -0700 | [diff] [blame] | 1473 | ret = -EINVAL; |
| 1474 | goto release_task; |
| 1475 | } |
| 1476 | |
Suren Baghdasaryan | 96cfe2c | 2021-03-12 21:08:06 -0800 | [diff] [blame] | 1477 | /* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */ |
| 1478 | mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); |
Minchan Kim | ecb8ac8 | 2020-10-17 16:14:59 -0700 | [diff] [blame] | 1479 | if (IS_ERR_OR_NULL(mm)) { |
| 1480 | ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; |
| 1481 | goto release_task; |
| 1482 | } |
| 1483 | |
Suren Baghdasaryan | 96cfe2c | 2021-03-12 21:08:06 -0800 | [diff] [blame] | 1484 | /* |
| 1485 | * Require CAP_SYS_NICE for influencing process performance. Note that |
| 1486 | * only non-destructive hints are currently supported. |
| 1487 | */ |
| 1488 | if (!capable(CAP_SYS_NICE)) { |
| 1489 | ret = -EPERM; |
| 1490 | goto release_mm; |
| 1491 | } |
| 1492 | |
Minchan Kim | ecb8ac8 | 2020-10-17 16:14:59 -0700 | [diff] [blame] | 1493 | total_len = iov_iter_count(&iter); |
| 1494 | |
| 1495 | while (iov_iter_count(&iter)) { |
| 1496 | iovec = iov_iter_iovec(&iter); |
| 1497 | ret = do_madvise(mm, (unsigned long)iovec.iov_base, |
| 1498 | iovec.iov_len, behavior); |
Charan Teja Kalla | e6b0a7b | 2022-04-01 11:28:12 -0700 | [diff] [blame] | 1499 | if (ret < 0) |
Minchan Kim | ecb8ac8 | 2020-10-17 16:14:59 -0700 | [diff] [blame] | 1500 | break; |
| 1501 | iov_iter_advance(&iter, iovec.iov_len); |
| 1502 | } |
| 1503 | |
Charan Teja Kalla | 5bd009c | 2022-03-22 14:46:44 -0700 | [diff] [blame] | 1504 | ret = (total_len - iov_iter_count(&iter)) ? : ret; |
Minchan Kim | ecb8ac8 | 2020-10-17 16:14:59 -0700 | [diff] [blame] | 1505 | |
Suren Baghdasaryan | 96cfe2c | 2021-03-12 21:08:06 -0800 | [diff] [blame] | 1506 | release_mm: |
Minchan Kim | ecb8ac8 | 2020-10-17 16:14:59 -0700 | [diff] [blame] | 1507 | mmput(mm); |
Minchan Kim | ecb8ac8 | 2020-10-17 16:14:59 -0700 | [diff] [blame] | 1508 | release_task: |
| 1509 | put_task_struct(task); |
Minchan Kim | ecb8ac8 | 2020-10-17 16:14:59 -0700 | [diff] [blame] | 1510 | free_iov: |
| 1511 | kfree(iov); |
| 1512 | out: |
| 1513 | return ret; |
| 1514 | } |
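
/*
 * Illustrative userspace sketch (not part of this file): driving
 * process_madvise() on another task through a pidfd. Assumes Linux 5.10+
 * headers that define SYS_pidfd_open and SYS_process_madvise; your libc
 * may not provide wrappers, so raw syscall(2) is used. The caller needs
 * PTRACE_MODE_READ on the target plus CAP_SYS_NICE. target_pid and the
 * iovec contents are placeholders.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	pid_t target_pid = 1234;			/* placeholder */
	struct iovec iov = {
		.iov_base = (void *)0x7f0000000000,	/* placeholder */
		.iov_len  = 1 << 20,
	};
	int pidfd = syscall(SYS_pidfd_open, target_pid, 0);
	long ret;

	if (pidfd < 0)
		return 1;
	/* Ask the kernel to page out 1 MiB of the target's memory. */
	ret = syscall(SYS_process_madvise, pidfd, &iov, 1, MADV_PAGEOUT, 0);
	if (ret < 0)
		perror("process_madvise");
	else
		printf("advised %ld bytes\n", ret);
	return 0;
}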