/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_rwsem       (while writing or truncating, not reading or faulting)
 *   mm->mmap_lock
 *     mapping->invalidate_lock (in filemap_fault)
 *       page->flags PG_locked (lock_page)
 *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
 *           mapping->i_mmap_rwsem
 *             anon_vma->rwsem
 *               mm->page_table_lock or pte_lock
 *                 swap_lock (in swap_duplicate, swap_info_get)
 *                   mmlist_lock (in mmput, drain_mmlist and others)
 *                   mapping->private_lock (in block_dirty_folio)
 *                     folio_lock_memcg move_lock (in block_dirty_folio)
 *                       i_pages lock (widely used)
 *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
 *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                     sb_lock (within inode_lock in fs/fs-writeback.c)
 *                     i_pages lock (widely used, in set_page_dirty,
 *                       in arch-dependent flush_dcache_mmap_lock,
 *                       within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 *
 * hugetlbfs PageHuge() pages take locks in this order:
 *   hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *     vma_lock (hugetlb specific lock for pmd_sharing)
 *       mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
 *         page->flags PG_locked (lock_page)
 */
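
/*
 * As an illustration of the ordering above (a sketch, not a complete
 * call chain), a file fault that ends up taking the folio lock and the
 * PTE lock nests a subset of these locks roughly like this:
 *
 *	mmap_read_lock(mm);                                - mm->mmap_lock
 *	filemap_invalidate_lock_shared(mapping);           - invalidate_lock
 *	folio_lock(folio);                                 - PG_locked
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);    - pte_lock
 *	... handle the fault ...
 *	pte_unmap_unlock(pte, ptl);
 *	folio_unlock(folio);
 *	filemap_invalidate_unlock_shared(mapping);
 *	mmap_read_unlock(mm);
 */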

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>
#include <trace/events/migrate.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
        struct anon_vma *anon_vma;

        anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
        if (anon_vma) {
                atomic_set(&anon_vma->refcount, 1);
                anon_vma->num_children = 0;
                anon_vma->num_active_vmas = 0;
                anon_vma->parent = anon_vma;
                /*
                 * Initialise the anon_vma root to point to itself. If called
                 * from fork, the root will be reset to the parent's anon_vma.
                 */
                anon_vma->root = anon_vma;
        }

        return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
        VM_BUG_ON(atomic_read(&anon_vma->refcount));

        /*
         * Synchronize against folio_lock_anon_vma_read() such that
         * we can safely hold the lock without the anon_vma getting
         * freed.
         *
         * Relies on the full mb implied by the atomic_dec_and_test() from
         * put_anon_vma() against the acquire barrier implied by
         * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
         *
         * folio_lock_anon_vma_read()   VS      put_anon_vma()
         *   down_read_trylock()                  atomic_dec_and_test()
         *   LOCK                                 MB
         *   atomic_read()                        rwsem_is_locked()
         *
         * LOCK should suffice since the actual taking of the lock must
         * happen _before_ what follows.
         */
        might_sleep();
        if (rwsem_is_locked(&anon_vma->root->rwsem)) {
                anon_vma_lock_write(anon_vma);
                anon_vma_unlock_write(anon_vma);
        }

        kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
        return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
        kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
                                struct anon_vma_chain *avc,
                                struct anon_vma *anon_vma)
{
        avc->vma = vma;
        avc->anon_vma = anon_vma;
        list_add(&avc->same_vma, &vma->anon_vma_chain);
        anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not, we either need to find an adjacent mapping whose
 * anon_vma we can reuse (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
 * and that may actually touch the rwsem even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_lock held for reading.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;
        struct anon_vma *anon_vma, *allocated;
        struct anon_vma_chain *avc;

        might_sleep();

        avc = anon_vma_chain_alloc(GFP_KERNEL);
        if (!avc)
                goto out_enomem;

        anon_vma = find_mergeable_anon_vma(vma);
        allocated = NULL;
        if (!anon_vma) {
                anon_vma = anon_vma_alloc();
                if (unlikely(!anon_vma))
                        goto out_enomem_free_avc;
                anon_vma->num_children++; /* self-parent link for new root */
                allocated = anon_vma;
        }

        anon_vma_lock_write(anon_vma);
        /* page_table_lock to protect against threads */
        spin_lock(&mm->page_table_lock);
        if (likely(!vma->anon_vma)) {
                vma->anon_vma = anon_vma;
                anon_vma_chain_link(vma, avc, anon_vma);
                anon_vma->num_active_vmas++;
                allocated = NULL;
                avc = NULL;
        }
        spin_unlock(&mm->page_table_lock);
        anon_vma_unlock_write(anon_vma);

        if (unlikely(allocated))
                put_anon_vma(allocated);
        if (unlikely(avc))
                anon_vma_chain_free(avc);

        return 0;

 out_enomem_free_avc:
        anon_vma_chain_free(avc);
 out_enomem:
        return -ENOMEM;
}
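
/*
 * For reference, the common-case fast path lives in the inline wrapper
 * anon_vma_prepare() in include/linux/rmap.h, which looks roughly like:
 *
 *	static inline int anon_vma_prepare(struct vm_area_struct *vma)
 *	{
 *		if (likely(vma->anon_vma))
 *			return 0;
 *
 *		return __anon_vma_prepare(vma);
 *	}
 *
 * so the locking above is only paid when the vma has no anon_vma yet.
 */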

/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single down_write for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
        struct anon_vma *new_root = anon_vma->root;
        if (new_root != root) {
                if (WARN_ON_ONCE(root))
                        up_write(&root->rwsem);
                root = new_root;
                down_write(&root->rwsem);
        }
        return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
        if (root)
                up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(),
 * copy_vma() and anon_vma_fork(). The first four want an exact copy of src,
 * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to
 * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before
 * the call, we can identify this case by checking (!dst->anon_vma &&
 * src->anon_vma).
 *
 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
 * and reuse an existing anon_vma which has no vmas and only one child anon_vma.
 * This prevents degradation of the anon_vma hierarchy to an endless linear
 * chain in the case of a constantly forking task. On the other hand, an
 * anon_vma with more than one child isn't reused even if there is no live vma,
 * so the rmap walker has a good chance of avoiding a scan of the whole
 * hierarchy when it searches for where a page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
        struct anon_vma_chain *avc, *pavc;
        struct anon_vma *root = NULL;

        list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
                struct anon_vma *anon_vma;

                avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
                if (unlikely(!avc)) {
                        unlock_anon_vma_root(root);
                        root = NULL;
                        avc = anon_vma_chain_alloc(GFP_KERNEL);
                        if (!avc)
                                goto enomem_failure;
                }
                anon_vma = pavc->anon_vma;
                root = lock_anon_vma_root(root, anon_vma);
                anon_vma_chain_link(dst, avc, anon_vma);

                /*
                 * Reuse existing anon_vma if it has no vma and only one
                 * anon_vma child.
                 *
                 * Root anon_vma is never reused:
                 * it has self-parent reference and at least one child.
                 */
                if (!dst->anon_vma && src->anon_vma &&
                    anon_vma->num_children < 2 &&
                    anon_vma->num_active_vmas == 0)
                        dst->anon_vma = anon_vma;
        }
        if (dst->anon_vma)
                dst->anon_vma->num_active_vmas++;
        unlock_anon_vma_root(root);
        return 0;

 enomem_failure:
        /*
         * dst->anon_vma is dropped here; otherwise its num_active_vmas can
         * be incorrectly decremented in unlink_anon_vmas().
         * We can safely do this because callers of anon_vma_clone() don't care
         * about dst->anon_vma if anon_vma_clone() failed.
         */
        dst->anon_vma = NULL;
        unlink_anon_vmas(dst);
        return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
        struct anon_vma_chain *avc;
        struct anon_vma *anon_vma;
        int error;

        /* Don't bother if the parent process has no anon_vma here. */
        if (!pvma->anon_vma)
                return 0;

        /* Drop inherited anon_vma, we'll reuse existing or allocate new. */
        vma->anon_vma = NULL;

        /*
         * First, attach the new VMA to the parent VMA's anon_vmas,
         * so rmap can find non-COWed pages in child processes.
         */
        error = anon_vma_clone(vma, pvma);
        if (error)
                return error;

        /* An existing anon_vma has been reused, all done then. */
        if (vma->anon_vma)
                return 0;

        /* Then add our own anon_vma. */
        anon_vma = anon_vma_alloc();
        if (!anon_vma)
                goto out_error;
        anon_vma->num_active_vmas++;
        avc = anon_vma_chain_alloc(GFP_KERNEL);
        if (!avc)
                goto out_error_free_anon_vma;

        /*
         * The root anon_vma's rwsem is the lock actually used when we
         * lock any of the anon_vmas in this anon_vma tree.
         */
        anon_vma->root = pvma->anon_vma->root;
        anon_vma->parent = pvma->anon_vma;
        /*
         * With refcounts, an anon_vma can stay around longer than the
         * process it belongs to. The root anon_vma needs to be pinned until
         * this anon_vma is freed, because the lock lives in the root.
         */
        get_anon_vma(anon_vma->root);
        /* Mark this anon_vma as the one where our new (COWed) pages go. */
        vma->anon_vma = anon_vma;
        anon_vma_lock_write(anon_vma);
        anon_vma_chain_link(vma, avc, anon_vma);
        anon_vma->parent->num_children++;
        anon_vma_unlock_write(anon_vma);

        return 0;

 out_error_free_anon_vma:
        put_anon_vma(anon_vma);
 out_error:
        unlink_anon_vmas(vma);
        return -ENOMEM;
}
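
/*
 * A sketch of how this is used (the exact fork-path details here are
 * assumptions, see kernel/fork.c): dup_mmap() walks the parent's VMAs
 * and for each copied vma does roughly
 *
 *	tmp = vm_area_dup(mpnt);
 *	...
 *	if (anon_vma_fork(tmp, mpnt))
 *		goto fail_nomem_anon_vma_fork;
 *
 * so a failure here unwinds the whole fork() with -ENOMEM.
 */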

void unlink_anon_vmas(struct vm_area_struct *vma)
{
        struct anon_vma_chain *avc, *next;
        struct anon_vma *root = NULL;

        /*
         * Unlink each anon_vma chained to the VMA. This list is ordered
         * from newest to oldest, ensuring the root anon_vma gets freed last.
         */
        list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
                struct anon_vma *anon_vma = avc->anon_vma;

                root = lock_anon_vma_root(root, anon_vma);
                anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

                /*
                 * Leave empty anon_vmas on the list - we'll need
                 * to free them outside the lock.
                 */
                if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
                        anon_vma->parent->num_children--;
                        continue;
                }

                list_del(&avc->same_vma);
                anon_vma_chain_free(avc);
        }
        if (vma->anon_vma) {
                vma->anon_vma->num_active_vmas--;

                /*
                 * The vma is still needed after the unlink; a fresh anon_vma
                 * will be prepared when a fault is next handled.
                 */
                vma->anon_vma = NULL;
        }
        unlock_anon_vma_root(root);

        /*
         * Iterate the list once more, it now only contains empty and unlinked
         * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
         * needing to write-acquire the anon_vma->root->rwsem.
         */
        list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
                struct anon_vma *anon_vma = avc->anon_vma;

                VM_WARN_ON(anon_vma->num_children);
                VM_WARN_ON(anon_vma->num_active_vmas);
                put_anon_vma(anon_vma);

                list_del(&avc->same_vma);
                anon_vma_chain_free(avc);
        }
}

static void anon_vma_ctor(void *data)
{
        struct anon_vma *anon_vma = data;

        init_rwsem(&anon_vma->rwsem);
        atomic_set(&anon_vma->refcount, 0);
        anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
        anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
                        0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
                        anon_vma_ctor);
        anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
                        SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a refcount-increased anon_vma
 * that might have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */
struct anon_vma *folio_get_anon_vma(struct folio *folio)
{
        struct anon_vma *anon_vma = NULL;
        unsigned long anon_mapping;

        rcu_read_lock();
        anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
        if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                goto out;
        if (!folio_mapped(folio))
                goto out;

        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
        if (!atomic_inc_not_zero(&anon_vma->refcount)) {
                anon_vma = NULL;
                goto out;
        }

        /*
         * If this folio is still mapped, then its anon_vma cannot have been
         * freed. But if it has been unmapped, we have no security against the
         * anon_vma structure being freed and reused (for another anon_vma:
         * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
         * above cannot corrupt).
         */
        if (!folio_mapped(folio)) {
                rcu_read_unlock();
                put_anon_vma(anon_vma);
                return NULL;
        }
out:
        rcu_read_unlock();

        return anon_vma;
}
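
/*
 * Typical usage pattern (a sketch, not lifted from a specific caller):
 * pair this with put_anon_vma() and do not assume the folio is still
 * mapped in the returned anon_vma without re-checking:
 *
 *	anon_vma = folio_get_anon_vma(folio);
 *	if (anon_vma) {
 *		... walk the interval tree, revalidating with
 *		... folio_mapped() / page_mapped_in_vma() as needed
 *		put_anon_vma(anon_vma);
 *	}
 */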

/*
 * Similar to folio_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with folio_get_anon_vma() and then block on the rwsem
 * in the !rwc->try_lock case.
 */
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
                                          struct rmap_walk_control *rwc)
{
        struct anon_vma *anon_vma = NULL;
        struct anon_vma *root_anon_vma;
        unsigned long anon_mapping;

        rcu_read_lock();
        anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
        if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                goto out;
        if (!folio_mapped(folio))
                goto out;

        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
        root_anon_vma = READ_ONCE(anon_vma->root);
        if (down_read_trylock(&root_anon_vma->rwsem)) {
                /*
                 * If the folio is still mapped, then this anon_vma is still
                 * its anon_vma, and holding the rwsem ensures that it will
                 * not go away, see anon_vma_free().
                 */
                if (!folio_mapped(folio)) {
                        up_read(&root_anon_vma->rwsem);
                        anon_vma = NULL;
                }
                goto out;
        }

        if (rwc && rwc->try_lock) {
                anon_vma = NULL;
                rwc->contended = true;
                goto out;
        }

        /* trylock failed, we got to sleep */
        if (!atomic_inc_not_zero(&anon_vma->refcount)) {
                anon_vma = NULL;
                goto out;
        }

        if (!folio_mapped(folio)) {
                rcu_read_unlock();
                put_anon_vma(anon_vma);
                return NULL;
        }

        /* we pinned the anon_vma, it's safe to sleep */
        rcu_read_unlock();
        anon_vma_lock_read(anon_vma);

        if (atomic_dec_and_test(&anon_vma->refcount)) {
                /*
                 * Oops, we held the last refcount, release the lock
                 * and bail -- can't simply use put_anon_vma() because
                 * we'll deadlock on the anon_vma_lock_write() recursion.
                 */
                anon_vma_unlock_read(anon_vma);
                __put_anon_vma(anon_vma);
                anon_vma = NULL;
        }

        return anon_vma;

out:
        rcu_read_unlock();
        return anon_vma;
}
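
/*
 * A sketch of the rwc->try_lock contract: a caller that must not sleep
 * sets try_lock and checks rwc.contended instead of blocking, roughly
 *
 *	struct rmap_walk_control rwc = { .try_lock = true, ... };
 *
 *	anon_vma = folio_lock_anon_vma_read(folio, &rwc);
 *	if (!anon_vma && rwc.contended)
 *		... the rwsem was contended, back off and retry later ...
 */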

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
        struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

        if (!tlb_ubc->flush_required)
                return;

        arch_tlbbatch_flush(&tlb_ubc->arch);
        tlb_ubc->flush_required = false;
        tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
        struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

        if (tlb_ubc->writable)
                try_to_unmap_flush();
}

/*
 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
 */
#define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
#define TLB_FLUSH_BATCH_PENDING_MASK	\
        ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
#define TLB_FLUSH_BATCH_PENDING_LARGE	\
        (TLB_FLUSH_BATCH_PENDING_MASK / 2)
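
/*
 * A worked example of the encoding (a sketch derived from the macros
 * above): after three calls to set_tlb_ubc_flush_pending() and one
 * completed flush that happened when only the first was pending,
 * mm->tlb_flush_batched holds
 *
 *	(1 << TLB_FLUSH_BATCH_FLUSHED_SHIFT) | 3
 *
 * i.e. pending == 3 and flushed == 1, so flush_tlb_batched_pending()
 * still sees pending != flushed and performs the flush.
 */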

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
        struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
        int batch, nbatch;

        arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
        tlb_ubc->flush_required = true;

        /*
         * Ensure compiler does not re-order the setting of tlb_flush_batched
         * before the PTE is cleared.
         */
        barrier();
        batch = atomic_read(&mm->tlb_flush_batched);
retry:
        if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
                /*
                 * Prevent `pending' from catching up with `flushed' because of
                 * overflow. Reset `pending' and `flushed' to be 1 and 0 if
                 * `pending' becomes large.
                 */
                nbatch = atomic_cmpxchg(&mm->tlb_flush_batched, batch, 1);
                if (nbatch != batch) {
                        batch = nbatch;
                        goto retry;
                }
        } else {
                atomic_inc(&mm->tlb_flush_batched);
        }

        /*
         * If the PTE was dirty then it's best to assume it's writable. The
         * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
         * before the page is queued for IO.
         */
        if (writable)
                tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
        bool should_defer = false;

        if (!(flags & TTU_BATCH_FLUSH))
                return false;

        /* If remote CPUs need to be flushed then defer the flush to batch it */
        if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
                should_defer = true;
        put_cpu();

        return should_defer;
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
        int batch = atomic_read(&mm->tlb_flush_batched);
        int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
        int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;

        if (pending != flushed) {
                flush_tlb_mm(mm);
                /*
                 * If new TLB flushes became pending while we were flushing,
                 * leave mm->tlb_flush_batched as is to avoid losing them.
                 */
                atomic_cmpxchg(&mm->tlb_flush_batched, batch,
                               pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
        }
}
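
/*
 * A sketch of the race this closes: reclaim clears a PTE and defers the
 * IPI; before the flush happens, another thread runs mprotect() on the
 * same range. The mprotect path calls flush_tlb_batched_pending() under
 * the PTL, sees pending != flushed, and flushes, so no CPU keeps using
 * the stale (possibly writable) TLB entry.
 */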
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
        return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
        struct folio *folio = page_folio(page);
        if (folio_test_anon(folio)) {
                struct anon_vma *page__anon_vma = folio_anon_vma(folio);
                /*
                 * Note: swapoff's unuse_vma() is more efficient with this
                 * check, and needs it to match anon_vma when KSM is active.
                 */
                if (!vma->anon_vma || !page__anon_vma ||
                    vma->anon_vma->root != page__anon_vma->root)
                        return -EFAULT;
        } else if (!vma->vm_file) {
                return -EFAULT;
        } else if (vma->vm_file->f_mapping != folio->mapping) {
                return -EFAULT;
        }

        return vma_address(page, vma);
}

/*
 * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
 * NULL if it doesn't exist. No guarantees / checks on what the pmd_t*
 * represents.
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                goto out;

        p4d = p4d_offset(pgd, address);
        if (!p4d_present(*p4d))
                goto out;

        pud = pud_offset(p4d, address);
        if (!pud_present(*pud))
                goto out;

        pmd = pmd_offset(pud, address);
out:
        return pmd;
}
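
/*
 * A minimal usage sketch: a caller must check both for NULL and for
 * what the returned entry actually contains, e.g.
 *
 *	pmd = mm_find_pmd(mm, address);
 *	if (!pmd)
 *		return;
 *	... inspect *pmd: it may be none, point to a page table, or be
 *	... a huge (pmd-mapped) entry, none of which is checked here ...
 */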

struct folio_referenced_arg {
        int mapcount;
        int referenced;
        unsigned long vm_flags;
        struct mem_cgroup *memcg;
};

/*
 * arg: a folio_referenced_arg will be passed.
 */
static bool folio_referenced_one(struct folio *folio,
                struct vm_area_struct *vma, unsigned long address, void *arg)
{
        struct folio_referenced_arg *pra = arg;
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
        int referenced = 0;

        while (page_vma_mapped_walk(&pvmw)) {
                address = pvmw.address;

                if ((vma->vm_flags & VM_LOCKED) &&
                    (!folio_test_large(folio) || !pvmw.pte)) {
                        /* Restore the mlock which got missed */
                        mlock_vma_folio(folio, vma, !pvmw.pte);
                        page_vma_mapped_walk_done(&pvmw);
                        pra->vm_flags |= VM_LOCKED;
                        return false; /* To break the loop */
                }

                if (pvmw.pte) {
                        if (lru_gen_enabled() && pte_young(*pvmw.pte)) {
                                lru_gen_look_around(&pvmw);
                                referenced++;
                        }

                        if (ptep_clear_flush_young_notify(vma, address,
                                                          pvmw.pte))
                                referenced++;
                } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
                        if (pmdp_clear_flush_young_notify(vma, address,
                                                          pvmw.pmd))
                                referenced++;
                } else {
                        /* unexpected pmd-mapped folio? */
                        WARN_ON_ONCE(1);
                }

                pra->mapcount--;
        }

        if (referenced)
                folio_clear_idle(folio);
        if (folio_test_clear_young(folio))
                referenced++;

        if (referenced) {
                pra->referenced++;
                pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
        }

        if (!pra->mapcount)
                return false; /* To break the loop */

        return true;
}
| 862 | |
Matthew Wilcox (Oracle) | b3ac041 | 2022-01-21 11:27:31 -0500 | [diff] [blame] | 863 | static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 864 | { |
Matthew Wilcox (Oracle) | b3ac041 | 2022-01-21 11:27:31 -0500 | [diff] [blame] | 865 | struct folio_referenced_arg *pra = arg; |
Joonsoo Kim | 9f32624 | 2014-01-21 15:49:53 -0800 | [diff] [blame] | 866 | struct mem_cgroup *memcg = pra->memcg; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 867 | |
Yu Zhao | 8788f67 | 2022-12-30 14:52:51 -0700 | [diff] [blame] | 868 | /* |
| 869 | * Ignore references from this mapping if it has no recency. If the |
| 870 | * folio has been used in another mapping, we will catch it; if this |
| 871 | * other mapping is already gone, the unmap path will have set the |
| 872 | * referenced flag or activated the folio in zap_pte_range(). |
| 873 | */ |
| 874 | if (!vma_has_recency(vma)) |
| 875 | return true; |
| 876 | |
| 877 | /* |
| 878 | * If we are reclaiming on behalf of a cgroup, skip counting on behalf |
| 879 | * of references from different cgroups. |
| 880 | */ |
| 881 | if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) |
Joonsoo Kim | 9f32624 | 2014-01-21 15:49:53 -0800 | [diff] [blame] | 882 | return true; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 883 | |
Joonsoo Kim | 9f32624 | 2014-01-21 15:49:53 -0800 | [diff] [blame] | 884 | return false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 885 | } |
| 886 | |
| 887 | /** |
Matthew Wilcox (Oracle) | b3ac041 | 2022-01-21 11:27:31 -0500 | [diff] [blame] | 888 | * folio_referenced() - Test if the folio was referenced. |
| 889 | * @folio: The folio to test. |
| 890 | * @is_locked: Caller holds lock on the folio. |
Johannes Weiner | 72835c8 | 2012-01-12 17:18:32 -0800 | [diff] [blame] | 891 | * @memcg: target memory cgroup |
Matthew Wilcox (Oracle) | b3ac041 | 2022-01-21 11:27:31 -0500 | [diff] [blame] | 892 | * @vm_flags: A combination of all the vma->vm_flags which referenced the folio. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 893 | * |
Matthew Wilcox (Oracle) | b3ac041 | 2022-01-21 11:27:31 -0500 | [diff] [blame] | 894 | * Quick test_and_clear_referenced for all mappings of a folio. |
| 895 | * |
Minchan Kim | 6d4675e | 2022-05-19 14:08:54 -0700 | [diff] [blame] | 896 | * Return: The number of mappings which referenced the folio. Return -1 if |
| 897 | * the function bailed out due to rmap lock contention. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 898 | */ |
Matthew Wilcox (Oracle) | b3ac041 | 2022-01-21 11:27:31 -0500 | [diff] [blame] | 899 | int folio_referenced(struct folio *folio, int is_locked, |
| 900 | struct mem_cgroup *memcg, unsigned long *vm_flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 901 | { |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 902 | int we_locked = 0; |
Matthew Wilcox (Oracle) | b3ac041 | 2022-01-21 11:27:31 -0500 | [diff] [blame] | 903 | struct folio_referenced_arg pra = { |
| 904 | .mapcount = folio_mapcount(folio), |
Joonsoo Kim | 9f32624 | 2014-01-21 15:49:53 -0800 | [diff] [blame] | 905 | .memcg = memcg, |
| 906 | }; |
| 907 | struct rmap_walk_control rwc = { |
Matthew Wilcox (Oracle) | b3ac041 | 2022-01-21 11:27:31 -0500 | [diff] [blame] | 908 | .rmap_one = folio_referenced_one, |
Joonsoo Kim | 9f32624 | 2014-01-21 15:49:53 -0800 | [diff] [blame] | 909 | .arg = (void *)&pra, |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 910 | .anon_lock = folio_lock_anon_vma_read, |
Minchan Kim | 6d4675e | 2022-05-19 14:08:54 -0700 | [diff] [blame] | 911 | .try_lock = true, |
Yu Zhao | 8788f67 | 2022-12-30 14:52:51 -0700 | [diff] [blame] | 912 | .invalid_vma = invalid_folio_referenced_vma, |
Joonsoo Kim | 9f32624 | 2014-01-21 15:49:53 -0800 | [diff] [blame] | 913 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 914 | |
Wu Fengguang | 6fe6b7e | 2009-06-16 15:33:05 -0700 | [diff] [blame] | 915 | *vm_flags = 0; |
Huang Shijie | 059d844 | 2019-05-13 17:21:07 -0700 | [diff] [blame] | 916 | if (!pra.mapcount) |
Joonsoo Kim | 9f32624 | 2014-01-21 15:49:53 -0800 | [diff] [blame] | 917 | return 0; |
| 918 | |
Matthew Wilcox (Oracle) | b3ac041 | 2022-01-21 11:27:31 -0500 | [diff] [blame] | 919 | if (!folio_raw_mapping(folio)) |
Joonsoo Kim | 9f32624 | 2014-01-21 15:49:53 -0800 | [diff] [blame] | 920 | return 0; |
| 921 | |
Matthew Wilcox (Oracle) | b3ac041 | 2022-01-21 11:27:31 -0500 | [diff] [blame] | 922 | if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) { |
| 923 | we_locked = folio_trylock(folio); |
Joonsoo Kim | 9f32624 | 2014-01-21 15:49:53 -0800 | [diff] [blame] | 924 | if (!we_locked) |
| 925 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 926 | } |
Joonsoo Kim | 9f32624 | 2014-01-21 15:49:53 -0800 | [diff] [blame] | 927 | |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 928 | rmap_walk(folio, &rwc); |
Joonsoo Kim | 9f32624 | 2014-01-21 15:49:53 -0800 | [diff] [blame] | 929 | *vm_flags = pra.vm_flags; |
| 930 | |
| 931 | if (we_locked) |
Matthew Wilcox (Oracle) | b3ac041 | 2022-01-21 11:27:31 -0500 | [diff] [blame] | 932 | folio_unlock(folio); |
Joonsoo Kim | 9f32624 | 2014-01-21 15:49:53 -0800 | [diff] [blame] | 933 | |
Minchan Kim | 6d4675e | 2022-05-19 14:08:54 -0700 | [diff] [blame] | 934 | return rwc.contended ? -1 : pra.referenced; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 935 | } |
| 936 | |
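/*
 * A minimal usage sketch (not part of rmap.c; the function name is
 * hypothetical): roughly how a reclaim-side caller, in the spirit of
 * folio_check_references() in mm/vmscan.c, consumes the return value
 * and the accumulated vm_flags.
 */
static int sketch_check_references(struct folio *folio,
				   struct mem_cgroup *memcg)
{
	unsigned long vm_flags;
	int referenced;

	/* Caller already holds the folio lock in this sketch. */
	referenced = folio_referenced(folio, 1, memcg, &vm_flags);
	if (referenced == -1)
		return -EAGAIN;		/* rmap lock contention: retry later */
	if (vm_flags & VM_LOCKED)
		return 0;		/* mlocked somewhere: don't reclaim */
	return referenced;		/* > 0: folio was recently used */
}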
Muchun Song | 6a8e059 | 2022-04-28 23:16:10 -0700 | [diff] [blame] | 937 | static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw) |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 938 | { |
Muchun Song | 6a8e059 | 2022-04-28 23:16:10 -0700 | [diff] [blame] | 939 | int cleaned = 0; |
| 940 | struct vm_area_struct *vma = pvmw->vma; |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 941 | struct mmu_notifier_range range; |
Muchun Song | 6a8e059 | 2022-04-28 23:16:10 -0700 | [diff] [blame] | 942 | unsigned long address = pvmw->address; |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 943 | |
Jérôme Glisse | 369ea82 | 2017-08-31 17:17:27 -0400 | [diff] [blame] | 944 | /* |
| 945 | * We have to assume the worst case, i.e. pmd, for invalidation. Note that |
Matthew Wilcox (Oracle) | e83c09a | 2022-01-20 18:20:07 -0500 | [diff] [blame] | 946 | * the folio cannot be freed from this function. |
Jérôme Glisse | 369ea82 | 2017-08-31 17:17:27 -0400 | [diff] [blame] | 947 | */ |
Alistair Popple | 7d4a8be | 2023-01-10 13:57:22 +1100 | [diff] [blame] | 948 | mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0, |
| 949 | vma->vm_mm, address, vma_address_end(pvmw)); |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 950 | mmu_notifier_invalidate_range_start(&range); |
Jérôme Glisse | 369ea82 | 2017-08-31 17:17:27 -0400 | [diff] [blame] | 951 | |
Muchun Song | 6a8e059 | 2022-04-28 23:16:10 -0700 | [diff] [blame] | 952 | while (page_vma_mapped_walk(pvmw)) { |
Kirill A. Shutemov | f27176c | 2017-02-24 14:57:57 -0800 | [diff] [blame] | 953 | int ret = 0; |
Jérôme Glisse | 369ea82 | 2017-08-31 17:17:27 -0400 | [diff] [blame] | 954 | |
Muchun Song | 6a8e059 | 2022-04-28 23:16:10 -0700 | [diff] [blame] | 955 | address = pvmw->address; |
| 956 | if (pvmw->pte) { |
Kirill A. Shutemov | f27176c | 2017-02-24 14:57:57 -0800 | [diff] [blame] | 957 | pte_t entry; |
Muchun Song | 6a8e059 | 2022-04-28 23:16:10 -0700 | [diff] [blame] | 958 | pte_t *pte = pvmw->pte; |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 959 | |
Kirill A. Shutemov | f27176c | 2017-02-24 14:57:57 -0800 | [diff] [blame] | 960 | if (!pte_dirty(*pte) && !pte_write(*pte)) |
| 961 | continue; |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 962 | |
Linus Torvalds | 785373b | 2017-08-29 09:11:06 -0700 | [diff] [blame] | 963 | flush_cache_page(vma, address, pte_pfn(*pte)); |
| 964 | entry = ptep_clear_flush(vma, address, pte); |
Kirill A. Shutemov | f27176c | 2017-02-24 14:57:57 -0800 | [diff] [blame] | 965 | entry = pte_wrprotect(entry); |
| 966 | entry = pte_mkclean(entry); |
Linus Torvalds | 785373b | 2017-08-29 09:11:06 -0700 | [diff] [blame] | 967 | set_pte_at(vma->vm_mm, address, pte, entry); |
Kirill A. Shutemov | f27176c | 2017-02-24 14:57:57 -0800 | [diff] [blame] | 968 | ret = 1; |
| 969 | } else { |
Matthew Wilcox (Oracle) | 396bcc5 | 2020-04-06 20:04:35 -0700 | [diff] [blame] | 970 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
Muchun Song | 6a8e059 | 2022-04-28 23:16:10 -0700 | [diff] [blame] | 971 | pmd_t *pmd = pvmw->pmd; |
Kirill A. Shutemov | f27176c | 2017-02-24 14:57:57 -0800 | [diff] [blame] | 972 | pmd_t entry; |
| 973 | |
| 974 | if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) |
| 975 | continue; |
| 976 | |
Muchun Song | 7f9c9b6 | 2022-04-28 23:16:09 -0700 | [diff] [blame] | 977 | flush_cache_range(vma, address, |
| 978 | address + HPAGE_PMD_SIZE); |
Aneesh Kumar K.V | 024eee0 | 2019-05-13 17:19:11 -0700 | [diff] [blame] | 979 | entry = pmdp_invalidate(vma, address, pmd); |
Kirill A. Shutemov | f27176c | 2017-02-24 14:57:57 -0800 | [diff] [blame] | 980 | entry = pmd_wrprotect(entry); |
| 981 | entry = pmd_mkclean(entry); |
Linus Torvalds | 785373b | 2017-08-29 09:11:06 -0700 | [diff] [blame] | 982 | set_pmd_at(vma->vm_mm, address, pmd, entry); |
Kirill A. Shutemov | f27176c | 2017-02-24 14:57:57 -0800 | [diff] [blame] | 983 | ret = 1; |
| 984 | #else |
Matthew Wilcox (Oracle) | e83c09a | 2022-01-20 18:20:07 -0500 | [diff] [blame] | 985 | /* unexpected pmd-mapped folio? */ |
Kirill A. Shutemov | f27176c | 2017-02-24 14:57:57 -0800 | [diff] [blame] | 986 | WARN_ON_ONCE(1); |
| 987 | #endif |
| 988 | } |
| 989 | |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 990 | /* |
| 991 | * No need to call mmu_notifier_invalidate_range() as we are |
| 992 | * downgrading page table protection, not changing it to point |
| 993 | * to a new page. |
| 994 | * |
Mike Rapoport | ee65728 | 2022-06-27 09:00:26 +0300 | [diff] [blame] | 995 | * See Documentation/mm/mmu_notifier.rst |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 996 | */ |
| 997 | if (ret) |
Muchun Song | 6a8e059 | 2022-04-28 23:16:10 -0700 | [diff] [blame] | 998 | cleaned++; |
Peter Zijlstra | c2fda5f | 2006-12-22 14:25:52 +0100 | [diff] [blame] | 999 | } |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 1000 | |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 1001 | mmu_notifier_invalidate_range_end(&range); |
Jérôme Glisse | 369ea82 | 2017-08-31 17:17:27 -0400 | [diff] [blame] | 1002 | |
Muchun Song | 6a8e059 | 2022-04-28 23:16:10 -0700 | [diff] [blame] | 1003 | return cleaned; |
| 1004 | } |
| 1005 | |
| 1006 | static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, |
| 1007 | unsigned long address, void *arg) |
| 1008 | { |
| 1009 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); |
| 1010 | int *cleaned = arg; |
| 1011 | |
| 1012 | *cleaned += page_vma_mkclean_one(&pvmw); |
| 1013 | |
Minchan Kim | e4b8222 | 2017-05-03 14:54:27 -0700 | [diff] [blame] | 1014 | return true; |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 1015 | } |
| 1016 | |
Joonsoo Kim | 9853a40 | 2014-01-21 15:49:55 -0800 | [diff] [blame] | 1017 | static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 1018 | { |
Joonsoo Kim | 9853a40 | 2014-01-21 15:49:55 -0800 | [diff] [blame] | 1019 | if (vma->vm_flags & VM_SHARED) |
Fengguang Wu | 871beb8 | 2014-01-23 15:53:41 -0800 | [diff] [blame] | 1020 | return false; |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 1021 | |
Fengguang Wu | 871beb8 | 2014-01-23 15:53:41 -0800 | [diff] [blame] | 1022 | return true; |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 1023 | } |
| 1024 | |
Matthew Wilcox (Oracle) | d9c08e2 | 2021-02-28 16:02:57 -0500 | [diff] [blame] | 1025 | int folio_mkclean(struct folio *folio) |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 1026 | { |
Joonsoo Kim | 9853a40 | 2014-01-21 15:49:55 -0800 | [diff] [blame] | 1027 | int cleaned = 0; |
| 1028 | struct address_space *mapping; |
| 1029 | struct rmap_walk_control rwc = { |
| 1030 | .arg = (void *)&cleaned, |
| 1031 | .rmap_one = page_mkclean_one, |
| 1032 | .invalid_vma = invalid_mkclean_vma, |
| 1033 | }; |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 1034 | |
Matthew Wilcox (Oracle) | d9c08e2 | 2021-02-28 16:02:57 -0500 | [diff] [blame] | 1035 | BUG_ON(!folio_test_locked(folio)); |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 1036 | |
Matthew Wilcox (Oracle) | d9c08e2 | 2021-02-28 16:02:57 -0500 | [diff] [blame] | 1037 | if (!folio_mapped(folio)) |
Joonsoo Kim | 9853a40 | 2014-01-21 15:49:55 -0800 | [diff] [blame] | 1038 | return 0; |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 1039 | |
Matthew Wilcox (Oracle) | d9c08e2 | 2021-02-28 16:02:57 -0500 | [diff] [blame] | 1040 | mapping = folio_mapping(folio); |
Joonsoo Kim | 9853a40 | 2014-01-21 15:49:55 -0800 | [diff] [blame] | 1041 | if (!mapping) |
| 1042 | return 0; |
| 1043 | |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 1044 | rmap_walk(folio, &rwc); |
Joonsoo Kim | 9853a40 | 2014-01-21 15:49:55 -0800 | [diff] [blame] | 1045 | |
| 1046 | return cleaned; |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 1047 | } |
Matthew Wilcox (Oracle) | d9c08e2 | 2021-02-28 16:02:57 -0500 | [diff] [blame] | 1048 | EXPORT_SYMBOL_GPL(folio_mkclean); |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 1049 | |
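/*
 * A minimal usage sketch, assuming a writeback-style caller (compare
 * folio_clear_dirty_for_io() in mm/page-writeback.c); the function
 * name below is hypothetical.  Write-protecting every mapping before
 * writeback means any later store refaults and re-dirties the folio,
 * so no modification is lost.
 */
static void sketch_prepare_writeback(struct folio *folio)
{
	int cleaned;

	folio_lock(folio);		/* folio_mkclean() demands the lock */
	cleaned = folio_mkclean(folio);
	folio_unlock(folio);

	if (cleaned)
		pr_debug("write-protected %d PTE(s)/PMD(s)\n", cleaned);
}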
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1050 | /** |
Muchun Song | 6a8e059 | 2022-04-28 23:16:10 -0700 | [diff] [blame] | 1051 | * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapping the range |
| 1052 | * [@pfn, @pfn + @nr_pages) at the specified offset (@pgoff) |
| 1053 | * within @vma, for shared mappings. Since clean PTEs |
| 1054 | * should also be read-only, write-protects them too. |
| 1055 | * @pfn: start pfn. |
| 1056 | * @nr_pages: number of physically contiguous pages starting with @pfn. |
| 1057 | * @pgoff: page offset that @pfn is mapped with. |
| 1058 | * @vma: vma that @pfn is mapped within. |
| 1059 | * |
| 1060 | * Returns the number of cleaned PTEs (including PMDs). |
| 1061 | */ |
| 1062 | int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, |
| 1063 | struct vm_area_struct *vma) |
| 1064 | { |
| 1065 | struct page_vma_mapped_walk pvmw = { |
| 1066 | .pfn = pfn, |
| 1067 | .nr_pages = nr_pages, |
| 1068 | .pgoff = pgoff, |
| 1069 | .vma = vma, |
| 1070 | .flags = PVMW_SYNC, |
| 1071 | }; |
| 1072 | |
| 1073 | if (invalid_mkclean_vma(vma, NULL)) |
| 1074 | return 0; |
| 1075 | |
| 1076 | pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma); |
| 1077 | VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); |
| 1078 | |
| 1079 | return page_vma_mkclean_one(&pvmw); |
| 1080 | } |
| 1081 | |
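/*
 * A minimal usage sketch, assuming a DAX-style caller that walks the
 * file's rmap interval tree itself (compare the mkclean logic in
 * fs/dax.c).  The iteration helpers are real; this wrapper function
 * is hypothetical.
 */
static int sketch_clean_pfn_range(struct address_space *mapping,
				  unsigned long pfn, unsigned long nr_pages,
				  pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	int cleaned = 0;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
				  pgoff + nr_pages - 1)
		cleaned += pfn_mkclean_range(pfn, nr_pages, pgoff, vma);
	i_mmap_unlock_read(mapping);

	return cleaned;
}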
Matthew Wilcox (Oracle) | b14224f | 2023-01-11 14:28:50 +0000 | [diff] [blame] | 1082 | int folio_total_mapcount(struct folio *folio) |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1083 | { |
Matthew Wilcox (Oracle) | b14224f | 2023-01-11 14:28:50 +0000 | [diff] [blame] | 1084 | int mapcount = folio_entire_mapcount(folio); |
| 1085 | int nr_pages; |
Hugh Dickins | cb67f42 | 2022-11-02 18:51:38 -0700 | [diff] [blame] | 1086 | int i; |
| 1087 | |
Matthew Wilcox (Oracle) | b14224f | 2023-01-11 14:28:50 +0000 | [diff] [blame] | 1088 | /* In the common case, avoid the loop when no pages are mapped by PTE */ |
Matthew Wilcox (Oracle) | eec2042 | 2023-01-11 14:28:48 +0000 | [diff] [blame] | 1089 | if (folio_nr_pages_mapped(folio) == 0) |
Hugh Dickins | be5ef2d | 2022-11-22 01:42:04 -0800 | [diff] [blame] | 1090 | return mapcount; |
| 1091 | /* |
Matthew Wilcox (Oracle) | b14224f | 2023-01-11 14:28:50 +0000 | [diff] [blame] | 1092 | * Add all the PTE mappings of those pages mapped by PTE. |
| 1093 | * Limit the loop to folio_nr_pages_mapped()? |
Hugh Dickins | be5ef2d | 2022-11-22 01:42:04 -0800 | [diff] [blame] | 1094 | * Perhaps: given all the raciness, that may be a good or a bad idea. |
| 1095 | */ |
Matthew Wilcox (Oracle) | b14224f | 2023-01-11 14:28:50 +0000 | [diff] [blame] | 1096 | nr_pages = folio_nr_pages(folio); |
| 1097 | for (i = 0; i < nr_pages; i++) |
| 1098 | mapcount += atomic_read(&folio_page(folio, i)->_mapcount); |
Hugh Dickins | be5ef2d | 2022-11-22 01:42:04 -0800 | [diff] [blame] | 1099 | |
| 1100 | /* But each of those _mapcounts was based on -1 */ |
Matthew Wilcox (Oracle) | b14224f | 2023-01-11 14:28:50 +0000 | [diff] [blame] | 1101 | mapcount += nr_pages; |
Hugh Dickins | be5ef2d | 2022-11-22 01:42:04 -0800 | [diff] [blame] | 1102 | return mapcount; |
Hugh Dickins | cb67f42 | 2022-11-02 18:51:38 -0700 | [diff] [blame] | 1103 | } |
| 1104 | |
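/*
 * A worked example of the sum above (illustrative numbers): take a
 * 512-page folio that is PMD-mapped once, with two of its pages each
 * also mapped once by PTE.  The stored atomics are then
 *
 *   _entire_mapcount          ==  0   (folio_entire_mapcount() == 1)
 *   two pages' _mapcount      ==  0   each
 *   the other 510 _mapcount   == -1   each
 *
 * so mapcount = 1 + (0 + 0 + 510 * -1) + 512 = 3, i.e. one PMD
 * mapping plus two PTE mappings, which is the right answer.
 */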
Muchun Song | 6a8e059 | 2022-04-28 23:16:10 -0700 | [diff] [blame] | 1105 | /** |
Rik van Riel | c44b674 | 2010-03-05 13:42:09 -0800 | [diff] [blame] | 1106 | * page_move_anon_rmap - move a page to our anon_vma |
| 1107 | * @page: the page to move to our anon_vma |
| 1108 | * @vma: the vma the page belongs to |
Rik van Riel | c44b674 | 2010-03-05 13:42:09 -0800 | [diff] [blame] | 1109 | * |
| 1110 | * When a page belongs exclusively to one process after a COW event, |
| 1111 | * that page can be moved into the anon_vma that belongs to just that |
| 1112 | * process, so the rmap code will not search the parent or sibling |
| 1113 | * processes. |
| 1114 | */ |
Hugh Dickins | 5a49973 | 2016-07-14 12:07:38 -0700 | [diff] [blame] | 1115 | void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) |
Rik van Riel | c44b674 | 2010-03-05 13:42:09 -0800 | [diff] [blame] | 1116 | { |
Matthew Wilcox (Oracle) | 595af4c9 | 2022-09-02 20:46:45 +0100 | [diff] [blame] | 1117 | void *anon_vma = vma->anon_vma; |
| 1118 | struct folio *folio = page_folio(page); |
Rik van Riel | c44b674 | 2010-03-05 13:42:09 -0800 | [diff] [blame] | 1119 | |
Matthew Wilcox (Oracle) | 595af4c9 | 2022-09-02 20:46:45 +0100 | [diff] [blame] | 1120 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
Sasha Levin | 81d1b09 | 2014-10-09 15:28:10 -0700 | [diff] [blame] | 1121 | VM_BUG_ON_VMA(!anon_vma, vma); |
Rik van Riel | c44b674 | 2010-03-05 13:42:09 -0800 | [diff] [blame] | 1122 | |
Matthew Wilcox (Oracle) | 595af4c9 | 2022-09-02 20:46:45 +0100 | [diff] [blame] | 1123 | anon_vma += PAGE_MAPPING_ANON; |
Vladimir Davydov | 414e2fb | 2015-06-24 16:56:56 -0700 | [diff] [blame] | 1124 | /* |
| 1125 | * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written |
Matthew Wilcox (Oracle) | b3ac041 | 2022-01-21 11:27:31 -0500 | [diff] [blame] | 1126 | * simultaneously, so a concurrent reader (eg folio_referenced()'s |
| 1127 | * folio_test_anon()) will not see one without the other. |
Vladimir Davydov | 414e2fb | 2015-06-24 16:56:56 -0700 | [diff] [blame] | 1128 | */ |
Matthew Wilcox (Oracle) | 595af4c9 | 2022-09-02 20:46:45 +0100 | [diff] [blame] | 1129 | WRITE_ONCE(folio->mapping, anon_vma); |
| 1130 | SetPageAnonExclusive(page); |
Rik van Riel | c44b674 | 2010-03-05 13:42:09 -0800 | [diff] [blame] | 1131 | } |
| 1132 | |
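/*
 * A minimal sketch of the pointer encoding used above, mirroring
 * folio_anon_vma(); the function name here is hypothetical.  An
 * anonymous folio stores its anon_vma in folio->mapping with the low
 * PAGE_MAPPING_ANON bit set, which is what folio_test_anon() tests.
 */
static struct anon_vma *sketch_decode_anon_vma(struct folio *folio)
{
	unsigned long mapping = (unsigned long)READ_ONCE(folio->mapping);

	/* KSM folios also set PAGE_MAPPING_MOVABLE; skip those too. */
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (struct anon_vma *)(mapping & ~PAGE_MAPPING_FLAGS);
}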
| 1133 | /** |
Andi Kleen | 4e1c197 | 2010-09-22 12:43:56 +0200 | [diff] [blame] | 1134 | * __page_set_anon_rmap - set up new anonymous rmap |
Matthew Wilcox (Oracle) | 5b4bd90 | 2023-01-16 19:29:59 +0000 | [diff] [blame] | 1135 | * @folio: Folio which contains page. |
| 1136 | * @page: Page to add to rmap. |
Andi Kleen | 4e1c197 | 2010-09-22 12:43:56 +0200 | [diff] [blame] | 1137 | * @vma: VM area to add page to. |
| 1138 | * @address: User virtual address of the mapping |
Rik van Riel | e8a03fe | 2010-04-14 17:59:28 -0400 | [diff] [blame] | 1139 | * @exclusive: the page is exclusively owned by the current process |
Nick Piggin | 9617d95 | 2006-01-06 00:11:12 -0800 | [diff] [blame] | 1140 | */ |
Matthew Wilcox (Oracle) | 5b4bd90 | 2023-01-16 19:29:59 +0000 | [diff] [blame] | 1141 | static void __page_set_anon_rmap(struct folio *folio, struct page *page, |
Rik van Riel | e8a03fe | 2010-04-14 17:59:28 -0400 | [diff] [blame] | 1142 | struct vm_area_struct *vma, unsigned long address, int exclusive) |
Nick Piggin | 9617d95 | 2006-01-06 00:11:12 -0800 | [diff] [blame] | 1143 | { |
Rik van Riel | e8a03fe | 2010-04-14 17:59:28 -0400 | [diff] [blame] | 1144 | struct anon_vma *anon_vma = vma->anon_vma; |
Nick Piggin | 9617d95 | 2006-01-06 00:11:12 -0800 | [diff] [blame] | 1145 | |
Rik van Riel | e8a03fe | 2010-04-14 17:59:28 -0400 | [diff] [blame] | 1146 | BUG_ON(!anon_vma); |
Linus Torvalds | ea90002 | 2010-04-12 12:44:29 -0700 | [diff] [blame] | 1147 | |
Matthew Wilcox (Oracle) | 5b4bd90 | 2023-01-16 19:29:59 +0000 | [diff] [blame] | 1148 | if (folio_test_anon(folio)) |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1149 | goto out; |
Andi Kleen | 4e1c197 | 2010-09-22 12:43:56 +0200 | [diff] [blame] | 1150 | |
Linus Torvalds | ea90002 | 2010-04-12 12:44:29 -0700 | [diff] [blame] | 1151 | /* |
Rik van Riel | e8a03fe | 2010-04-14 17:59:28 -0400 | [diff] [blame] | 1152 | * If the page isn't exclusively mapped into this vma, |
| 1153 | * we must use the _oldest_ possible anon_vma for the |
| 1154 | * page mapping! |
Linus Torvalds | ea90002 | 2010-04-12 12:44:29 -0700 | [diff] [blame] | 1155 | */ |
Andi Kleen | 4e1c197 | 2010-09-22 12:43:56 +0200 | [diff] [blame] | 1156 | if (!exclusive) |
Andrea Arcangeli | 288468c | 2010-08-09 17:19:09 -0700 | [diff] [blame] | 1157 | anon_vma = anon_vma->root; |
Linus Torvalds | ea90002 | 2010-04-12 12:44:29 -0700 | [diff] [blame] | 1158 | |
Alex Shi | 16f5e70 | 2020-12-15 12:33:42 -0800 | [diff] [blame] | 1159 | /* |
Matthew Wilcox (Oracle) | 5b4bd90 | 2023-01-16 19:29:59 +0000 | [diff] [blame] | 1160 | * page_idle does a lockless/optimistic rmap scan on folio->mapping. |
Alex Shi | 16f5e70 | 2020-12-15 12:33:42 -0800 | [diff] [blame] | 1161 | * Make sure the compiler doesn't split the stores of anon_vma and |
| 1162 | * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code |
| 1163 | * could mistake the mapping for a struct address_space and crash. |
| 1164 | */ |
Nick Piggin | 9617d95 | 2006-01-06 00:11:12 -0800 | [diff] [blame] | 1165 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; |
Matthew Wilcox (Oracle) | 5b4bd90 | 2023-01-16 19:29:59 +0000 | [diff] [blame] | 1166 | WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); |
| 1167 | folio->index = linear_page_index(vma, address); |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1168 | out: |
| 1169 | if (exclusive) |
| 1170 | SetPageAnonExclusive(page); |
Nick Piggin | 9617d95 | 2006-01-06 00:11:12 -0800 | [diff] [blame] | 1171 | } |
| 1172 | |
| 1173 | /** |
Randy Dunlap | 43d8eac | 2008-03-19 17:00:43 -0700 | [diff] [blame] | 1174 | * __page_check_anon_rmap - sanity check anonymous rmap addition |
Nick Piggin | c97a9e1 | 2007-05-16 22:11:21 -0700 | [diff] [blame] | 1175 | * @page: the page to add the mapping to |
| 1176 | * @vma: the vm area in which the mapping is added |
| 1177 | * @address: the user virtual address mapped |
| 1178 | */ |
| 1179 | static void __page_check_anon_rmap(struct page *page, |
| 1180 | struct vm_area_struct *vma, unsigned long address) |
| 1181 | { |
Matthew Wilcox (Oracle) | e05b345 | 2022-01-29 11:52:52 -0500 | [diff] [blame] | 1182 | struct folio *folio = page_folio(page); |
Nick Piggin | c97a9e1 | 2007-05-16 22:11:21 -0700 | [diff] [blame] | 1183 | /* |
| 1184 | * The page's anon-rmap details (mapping and index) are guaranteed to |
| 1185 | * be set up correctly at this point. |
| 1186 | * |
| 1187 | * We have exclusion against page_add_anon_rmap because the caller |
Miaohe Lin | 90aaca8 | 2021-02-25 17:17:59 -0800 | [diff] [blame] | 1188 | * always holds the page locked. |
Nick Piggin | c97a9e1 | 2007-05-16 22:11:21 -0700 | [diff] [blame] | 1189 | * |
| 1190 | * We have exclusion against page_add_new_anon_rmap because those pages |
| 1191 | * are initially only visible via the pagetables, and the pte is locked |
| 1192 | * over the call to page_add_new_anon_rmap. |
| 1193 | */ |
Matthew Wilcox (Oracle) | e05b345 | 2022-01-29 11:52:52 -0500 | [diff] [blame] | 1194 | VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, |
| 1195 | folio); |
Yang Shi | 30c4638 | 2019-11-30 17:51:26 -0800 | [diff] [blame] | 1196 | VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), |
| 1197 | page); |
Nick Piggin | c97a9e1 | 2007-05-16 22:11:21 -0700 | [diff] [blame] | 1198 | } |
| 1199 | |
| 1200 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1201 | * page_add_anon_rmap - add pte mapping to an anonymous page |
| 1202 | * @page: the page to add the mapping to |
| 1203 | * @vma: the vm area in which the mapping is added |
| 1204 | * @address: the user virtual address mapped |
David Hildenbrand | f1e2db1 | 2022-05-09 18:20:43 -0700 | [diff] [blame] | 1205 | * @flags: the rmap flags |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1206 | * |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1207 | * The caller needs to hold the pte lock, and the page must be locked in |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1208 | * the anon_vma case: to serialize mapping/index checking after setting, |
| 1209 | * and to ensure that PageAnon is not being upgraded racily to PageKsm |
| 1210 | * (but PageKsm is never downgraded to PageAnon). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1211 | */ |
Matthew Wilcox (Oracle) | ee0800c | 2023-01-11 14:28:52 +0000 | [diff] [blame] | 1212 | void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, |
| 1213 | unsigned long address, rmap_t flags) |
Rik van Riel | ad8c2ee | 2010-08-09 17:19:48 -0700 | [diff] [blame] | 1214 | { |
Matthew Wilcox (Oracle) | ee0800c | 2023-01-11 14:28:52 +0000 | [diff] [blame] | 1215 | struct folio *folio = page_folio(page); |
| 1216 | atomic_t *mapped = &folio->_nr_pages_mapped; |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1217 | int nr = 0, nr_pmdmapped = 0; |
Kirill A. Shutemov | 53f9263 | 2016-01-15 16:53:42 -0800 | [diff] [blame] | 1218 | bool compound = flags & RMAP_COMPOUND; |
Hugh Dickins | be5ef2d | 2022-11-22 01:42:04 -0800 | [diff] [blame] | 1219 | bool first = true; |
Kirill A. Shutemov | 53f9263 | 2016-01-15 16:53:42 -0800 | [diff] [blame] | 1220 | |
Hugh Dickins | be5ef2d | 2022-11-22 01:42:04 -0800 | [diff] [blame] | 1221 | /* Is page being mapped by PTE? Is this its first map to be added? */ |
| 1222 | if (likely(!compound)) { |
Hugh Dickins | d8dd5e9 | 2022-11-09 18:18:49 -0800 | [diff] [blame] | 1223 | first = atomic_inc_and_test(&page->_mapcount); |
| 1224 | nr = first; |
Matthew Wilcox (Oracle) | ee0800c | 2023-01-11 14:28:52 +0000 | [diff] [blame] | 1225 | if (first && folio_test_large(folio)) { |
Hugh Dickins | 4b51634 | 2022-11-22 01:49:36 -0800 | [diff] [blame] | 1226 | nr = atomic_inc_return_relaxed(mapped); |
Hugh Dickins | 6287b7d | 2022-12-04 17:57:07 -0800 | [diff] [blame] | 1227 | nr = (nr < COMPOUND_MAPPED); |
Hugh Dickins | be5ef2d | 2022-11-22 01:42:04 -0800 | [diff] [blame] | 1228 | } |
Matthew Wilcox (Oracle) | ee0800c | 2023-01-11 14:28:52 +0000 | [diff] [blame] | 1229 | } else if (folio_test_pmd_mappable(folio)) { |
Hugh Dickins | be5ef2d | 2022-11-22 01:42:04 -0800 | [diff] [blame] | 1230 | /* That test is redundant: it's for safety or to optimize out */ |
| 1231 | |
Matthew Wilcox (Oracle) | ee0800c | 2023-01-11 14:28:52 +0000 | [diff] [blame] | 1232 | first = atomic_inc_and_test(&folio->_entire_mapcount); |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1233 | if (first) { |
Hugh Dickins | 4b51634 | 2022-11-22 01:49:36 -0800 | [diff] [blame] | 1234 | nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); |
Hugh Dickins | 6287b7d | 2022-12-04 17:57:07 -0800 | [diff] [blame] | 1235 | if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { |
Matthew Wilcox (Oracle) | ee0800c | 2023-01-11 14:28:52 +0000 | [diff] [blame] | 1236 | nr_pmdmapped = folio_nr_pages(folio); |
Matthew Wilcox (Oracle) | eec2042 | 2023-01-11 14:28:48 +0000 | [diff] [blame] | 1237 | nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); |
Hugh Dickins | 6287b7d | 2022-12-04 17:57:07 -0800 | [diff] [blame] | 1238 | /* Raced ahead of a remove and another add? */ |
| 1239 | if (unlikely(nr < 0)) |
| 1240 | nr = 0; |
| 1241 | } else { |
| 1242 | /* Raced ahead of a remove of COMPOUND_MAPPED */ |
| 1243 | nr = 0; |
| 1244 | } |
Hugh Dickins | cb67f42 | 2022-11-02 18:51:38 -0700 | [diff] [blame] | 1245 | } |
Kirill A. Shutemov | 53f9263 | 2016-01-15 16:53:42 -0800 | [diff] [blame] | 1246 | } |
Hugh Dickins | cb67f42 | 2022-11-02 18:51:38 -0700 | [diff] [blame] | 1247 | |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1248 | VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); |
| 1249 | VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); |
Kirill A. Shutemov | 53f9263 | 2016-01-15 16:53:42 -0800 | [diff] [blame] | 1250 | |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1251 | if (nr_pmdmapped) |
Matthew Wilcox (Oracle) | ee0800c | 2023-01-11 14:28:52 +0000 | [diff] [blame] | 1252 | __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped); |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1253 | if (nr) |
Matthew Wilcox (Oracle) | ee0800c | 2023-01-11 14:28:52 +0000 | [diff] [blame] | 1254 | __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1255 | |
Matthew Wilcox (Oracle) | ee0800c | 2023-01-11 14:28:52 +0000 | [diff] [blame] | 1256 | if (likely(!folio_test_ksm(folio))) { |
Liam R. Howlett | 0503ea8 | 2023-01-20 11:26:49 -0500 | [diff] [blame] | 1257 | /* address might be in next vma when migration races vma_merge */ |
Johannes Weiner | c7c3dec | 2022-12-06 18:13:40 +0100 | [diff] [blame] | 1258 | if (first) |
Matthew Wilcox (Oracle) | 5b4bd90 | 2023-01-16 19:29:59 +0000 | [diff] [blame] | 1259 | __page_set_anon_rmap(folio, page, vma, address, |
Johannes Weiner | c7c3dec | 2022-12-06 18:13:40 +0100 | [diff] [blame] | 1260 | !!(flags & RMAP_EXCLUSIVE)); |
| 1261 | else |
| 1262 | __page_check_anon_rmap(page, vma, address); |
| 1263 | } |
Hugh Dickins | cea86fe | 2022-02-14 18:26:39 -0800 | [diff] [blame] | 1264 | |
Matthew Wilcox (Oracle) | 7efecff | 2023-01-16 19:28:25 +0000 | [diff] [blame] | 1265 | mlock_vma_folio(folio, vma, compound); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1266 | } |
| 1267 | |
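/*
 * A worked example of the _nr_pages_mapped arithmetic above
 * (illustrative numbers; COMPOUND_MAPPED and FOLIO_PAGES_MAPPED are
 * defined in mm/internal.h).  The low bits count pages currently
 * mapped by PTE; the COMPOUND_MAPPED bit marks a PMD mapping of the
 * whole folio.  For a 512-page folio with 3 pages already PTE-mapped,
 * adding the first PMD mapping gives
 *
 *   atomic_add_return_relaxed(COMPOUND_MAPPED, mapped)
 *       == COMPOUND_MAPPED + 3
 *   nr_pmdmapped == 512
 *   nr == 512 - (3 & FOLIO_PAGES_MAPPED) == 509
 *
 * so NR_ANON_MAPPED is bumped only for the 509 pages not already
 * accounted for by their PTE mappings.
 */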
Randy Dunlap | 43d8eac | 2008-03-19 17:00:43 -0700 | [diff] [blame] | 1268 | /** |
Matthew Wilcox (Oracle) | 4d510f3 | 2023-01-11 14:28:54 +0000 | [diff] [blame] | 1269 | * folio_add_new_anon_rmap - Add mapping to a new anonymous folio. |
| 1270 | * @folio: The folio to add the mapping to. |
Nick Piggin | 9617d95 | 2006-01-06 00:11:12 -0800 | [diff] [blame] | 1271 | * @vma: the vm area in which the mapping is added |
| 1272 | * @address: the user virtual address mapped |
David Hildenbrand | 40f2bbf | 2022-05-09 18:20:43 -0700 | [diff] [blame] | 1273 | * |
Matthew Wilcox (Oracle) | 4d510f3 | 2023-01-11 14:28:54 +0000 | [diff] [blame] | 1274 | * Like page_add_anon_rmap() but must only be called on *new* folios. |
Nick Piggin | 9617d95 | 2006-01-06 00:11:12 -0800 | [diff] [blame] | 1275 | * This means the inc-and-test can be bypassed. |
Matthew Wilcox (Oracle) | 4d510f3 | 2023-01-11 14:28:54 +0000 | [diff] [blame] | 1276 | * The folio does not have to be locked. |
| 1277 | * |
| 1278 | * If the folio is large, it is accounted as a THP. As the folio |
| 1279 | * is new, it's assumed to be mapped exclusively by a single process. |
Nick Piggin | 9617d95 | 2006-01-06 00:11:12 -0800 | [diff] [blame] | 1280 | */ |
Matthew Wilcox (Oracle) | 4d510f3 | 2023-01-11 14:28:54 +0000 | [diff] [blame] | 1281 | void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, |
| 1282 | unsigned long address) |
Nick Piggin | 9617d95 | 2006-01-06 00:11:12 -0800 | [diff] [blame] | 1283 | { |
Hugh Dickins | d8dd5e9 | 2022-11-09 18:18:49 -0800 | [diff] [blame] | 1284 | int nr; |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 1285 | |
Sasha Levin | 81d1b09 | 2014-10-09 15:28:10 -0700 | [diff] [blame] | 1286 | VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); |
Matthew Wilcox (Oracle) | 4d510f3 | 2023-01-11 14:28:54 +0000 | [diff] [blame] | 1287 | __folio_set_swapbacked(folio); |
Hugh Dickins | d8dd5e9 | 2022-11-09 18:18:49 -0800 | [diff] [blame] | 1288 | |
Matthew Wilcox (Oracle) | 4d510f3 | 2023-01-11 14:28:54 +0000 | [diff] [blame] | 1289 | if (likely(!folio_test_pmd_mappable(folio))) { |
Hugh Dickins | d8dd5e9 | 2022-11-09 18:18:49 -0800 | [diff] [blame] | 1290 | /* increment count (starts at -1) */ |
Matthew Wilcox (Oracle) | 4d510f3 | 2023-01-11 14:28:54 +0000 | [diff] [blame] | 1291 | atomic_set(&folio->_mapcount, 0); |
Hugh Dickins | d8dd5e9 | 2022-11-09 18:18:49 -0800 | [diff] [blame] | 1292 | nr = 1; |
| 1293 | } else { |
Kirill A. Shutemov | 53f9263 | 2016-01-15 16:53:42 -0800 | [diff] [blame] | 1294 | /* increment count (starts at -1) */ |
Matthew Wilcox (Oracle) | 4d510f3 | 2023-01-11 14:28:54 +0000 | [diff] [blame] | 1295 | atomic_set(&folio->_entire_mapcount, 0); |
| 1296 | atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED); |
| 1297 | nr = folio_nr_pages(folio); |
| 1298 | __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr); |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 1299 | } |
Hugh Dickins | d8dd5e9 | 2022-11-09 18:18:49 -0800 | [diff] [blame] | 1300 | |
Matthew Wilcox (Oracle) | 4d510f3 | 2023-01-11 14:28:54 +0000 | [diff] [blame] | 1301 | __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); |
Matthew Wilcox (Oracle) | 5b4bd90 | 2023-01-16 19:29:59 +0000 | [diff] [blame] | 1302 | __page_set_anon_rmap(folio, &folio->page, vma, address, 1); |
Nick Piggin | 9617d95 | 2006-01-06 00:11:12 -0800 | [diff] [blame] | 1303 | } |
| 1304 | |
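/*
 * A minimal usage sketch, assuming an anonymous-fault path in the
 * spirit of do_anonymous_page() in mm/memory.c; the function name is
 * hypothetical.  The folio is brand new and invisible to any other
 * process, so the cheaper "new" variant applies; pte setup and
 * locking are omitted here.
 */
static void sketch_map_new_anon_folio(struct folio *folio,
				      struct vm_area_struct *vma,
				      unsigned long address)
{
	folio_add_new_anon_rmap(folio, vma, address);
	folio_add_lru_vma(folio, vma);
	/* ...then set_pte_at() runs under the pte lock... */
}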
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1305 | /** |
| 1306 | * page_add_file_rmap - add pte mapping to a file page |
Hugh Dickins | cea86fe | 2022-02-14 18:26:39 -0800 | [diff] [blame] | 1307 | * @page: the page to add the mapping to |
| 1308 | * @vma: the vm area in which the mapping is added |
| 1309 | * @compound: charge the page as compound or small page |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1310 | * |
Hugh Dickins | b8072f0 | 2005-10-29 18:16:41 -0700 | [diff] [blame] | 1311 | * The caller needs to hold the pte lock. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1312 | */ |
Matthew Wilcox (Oracle) | eb01a2a | 2023-01-11 14:28:53 +0000 | [diff] [blame] | 1313 | void page_add_file_rmap(struct page *page, struct vm_area_struct *vma, |
| 1314 | bool compound) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1315 | { |
Matthew Wilcox (Oracle) | eb01a2a | 2023-01-11 14:28:53 +0000 | [diff] [blame] | 1316 | struct folio *folio = page_folio(page); |
| 1317 | atomic_t *mapped = &folio->_nr_pages_mapped; |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1318 | int nr = 0, nr_pmdmapped = 0; |
| 1319 | bool first; |
Kirill A. Shutemov | dd78fed | 2016-07-26 15:25:26 -0700 | [diff] [blame] | 1320 | |
| 1321 | VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1322 | |
Hugh Dickins | be5ef2d | 2022-11-22 01:42:04 -0800 | [diff] [blame] | 1323 | /* Is page being mapped by PTE? Is this its first map to be added? */ |
| 1324 | if (likely(!compound)) { |
Hugh Dickins | d8dd5e9 | 2022-11-09 18:18:49 -0800 | [diff] [blame] | 1325 | first = atomic_inc_and_test(&page->_mapcount); |
| 1326 | nr = first; |
Matthew Wilcox (Oracle) | eb01a2a | 2023-01-11 14:28:53 +0000 | [diff] [blame] | 1327 | if (first && folio_test_large(folio)) { |
Hugh Dickins | 4b51634 | 2022-11-22 01:49:36 -0800 | [diff] [blame] | 1328 | nr = atomic_inc_return_relaxed(mapped); |
Hugh Dickins | 6287b7d | 2022-12-04 17:57:07 -0800 | [diff] [blame] | 1329 | nr = (nr < COMPOUND_MAPPED); |
Hugh Dickins | be5ef2d | 2022-11-22 01:42:04 -0800 | [diff] [blame] | 1330 | } |
Matthew Wilcox (Oracle) | eb01a2a | 2023-01-11 14:28:53 +0000 | [diff] [blame] | 1331 | } else if (folio_test_pmd_mappable(folio)) { |
Hugh Dickins | be5ef2d | 2022-11-22 01:42:04 -0800 | [diff] [blame] | 1332 | /* That test is redundant: it's for safety or to optimize out */ |
| 1333 | |
Matthew Wilcox (Oracle) | eb01a2a | 2023-01-11 14:28:53 +0000 | [diff] [blame] | 1334 | first = atomic_inc_and_test(&folio->_entire_mapcount); |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1335 | if (first) { |
Hugh Dickins | 4b51634 | 2022-11-22 01:49:36 -0800 | [diff] [blame] | 1336 | nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); |
Hugh Dickins | 6287b7d | 2022-12-04 17:57:07 -0800 | [diff] [blame] | 1337 | if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { |
Matthew Wilcox (Oracle) | eb01a2a | 2023-01-11 14:28:53 +0000 | [diff] [blame] | 1338 | nr_pmdmapped = folio_nr_pages(folio); |
Matthew Wilcox (Oracle) | eec2042 | 2023-01-11 14:28:48 +0000 | [diff] [blame] | 1339 | nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); |
Hugh Dickins | 6287b7d | 2022-12-04 17:57:07 -0800 | [diff] [blame] | 1340 | /* Raced ahead of a remove and another add? */ |
| 1341 | if (unlikely(nr < 0)) |
| 1342 | nr = 0; |
| 1343 | } else { |
| 1344 | /* Raced ahead of a remove of COMPOUND_MAPPED */ |
| 1345 | nr = 0; |
| 1346 | } |
Kirill A. Shutemov | 9a73f61 | 2016-07-26 15:25:53 -0700 | [diff] [blame] | 1347 | } |
Balbir Singh | d69b042 | 2009-06-17 16:26:34 -0700 | [diff] [blame] | 1348 | } |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1349 | |
| 1350 | if (nr_pmdmapped) |
Matthew Wilcox (Oracle) | eb01a2a | 2023-01-11 14:28:53 +0000 | [diff] [blame] | 1351 | __lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ? |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1352 | NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped); |
Hugh Dickins | 5d543f1 | 2022-03-24 18:09:55 -0700 | [diff] [blame] | 1353 | if (nr) |
Matthew Wilcox (Oracle) | eb01a2a | 2023-01-11 14:28:53 +0000 | [diff] [blame] | 1354 | __lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr); |
Hugh Dickins | cea86fe | 2022-02-14 18:26:39 -0800 | [diff] [blame] | 1355 | |
Matthew Wilcox (Oracle) | 7efecff | 2023-01-16 19:28:25 +0000 | [diff] [blame] | 1356 | mlock_vma_folio(folio, vma, compound); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1357 | } |
| 1358 | |
| 1359 | /** |
| 1360 | * page_remove_rmap - take down pte mapping from a page |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 1361 | * @page: page to remove mapping from |
Hugh Dickins | cea86fe | 2022-02-14 18:26:39 -0800 | [diff] [blame] | 1362 | * @vma: the vm area from which the mapping is removed |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 1363 | * @compound: uncharge the page as compound or small page |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1364 | * |
Hugh Dickins | b8072f0 | 2005-10-29 18:16:41 -0700 | [diff] [blame] | 1365 | * The caller needs to hold the pte lock. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1366 | */ |
Matthew Wilcox (Oracle) | 62beb90 | 2023-01-11 14:28:51 +0000 | [diff] [blame] | 1367 | void page_remove_rmap(struct page *page, struct vm_area_struct *vma, |
| 1368 | bool compound) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1369 | { |
Matthew Wilcox (Oracle) | 62beb90 | 2023-01-11 14:28:51 +0000 | [diff] [blame] | 1370 | struct folio *folio = page_folio(page); |
| 1371 | atomic_t *mapped = &folio->_nr_pages_mapped; |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1372 | int nr = 0, nr_pmdmapped = 0; |
| 1373 | bool last; |
Matthew Wilcox (Oracle) | 62beb90 | 2023-01-11 14:28:51 +0000 | [diff] [blame] | 1374 | enum node_stat_item idx; |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1375 | |
| 1376 | VM_BUG_ON_PAGE(compound && !PageHead(page), page); |
| 1377 | |
| 1378 | /* Hugetlb pages are not counted in NR_*MAPPED */ |
Matthew Wilcox (Oracle) | 62beb90 | 2023-01-11 14:28:51 +0000 | [diff] [blame] | 1379 | if (unlikely(folio_test_hugetlb(folio))) { |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1380 | /* hugetlb pages are always mapped with pmds */ |
Matthew Wilcox (Oracle) | 62beb90 | 2023-01-11 14:28:51 +0000 | [diff] [blame] | 1381 | atomic_dec(&folio->_entire_mapcount); |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1382 | return; |
| 1383 | } |
Hugh Dickins | cb67f42 | 2022-11-02 18:51:38 -0700 | [diff] [blame] | 1384 | |
Hugh Dickins | be5ef2d | 2022-11-22 01:42:04 -0800 | [diff] [blame] | 1385 | /* Is page being unmapped by PTE? Is this its last map to be removed? */ |
| 1386 | if (likely(!compound)) { |
Hugh Dickins | d8dd5e9 | 2022-11-09 18:18:49 -0800 | [diff] [blame] | 1387 | last = atomic_add_negative(-1, &page->_mapcount); |
| 1388 | nr = last; |
Matthew Wilcox (Oracle) | 62beb90 | 2023-01-11 14:28:51 +0000 | [diff] [blame] | 1389 | if (last && folio_test_large(folio)) { |
Hugh Dickins | 4b51634 | 2022-11-22 01:49:36 -0800 | [diff] [blame] | 1390 | nr = atomic_dec_return_relaxed(mapped); |
Hugh Dickins | 6287b7d | 2022-12-04 17:57:07 -0800 | [diff] [blame] | 1391 | nr = (nr < COMPOUND_MAPPED); |
Hugh Dickins | be5ef2d | 2022-11-22 01:42:04 -0800 | [diff] [blame] | 1392 | } |
Matthew Wilcox (Oracle) | 62beb90 | 2023-01-11 14:28:51 +0000 | [diff] [blame] | 1393 | } else if (folio_test_pmd_mappable(folio)) { |
Hugh Dickins | be5ef2d | 2022-11-22 01:42:04 -0800 | [diff] [blame] | 1394 | /* That test is redundant: it's for safety or to optimize out */ |
| 1395 | |
Matthew Wilcox (Oracle) | 62beb90 | 2023-01-11 14:28:51 +0000 | [diff] [blame] | 1396 | last = atomic_add_negative(-1, &folio->_entire_mapcount); |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1397 | if (last) { |
Hugh Dickins | 4b51634 | 2022-11-22 01:49:36 -0800 | [diff] [blame] | 1398 | nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped); |
Hugh Dickins | 6287b7d | 2022-12-04 17:57:07 -0800 | [diff] [blame] | 1399 | if (likely(nr < COMPOUND_MAPPED)) { |
Matthew Wilcox (Oracle) | 62beb90 | 2023-01-11 14:28:51 +0000 | [diff] [blame] | 1400 | nr_pmdmapped = folio_nr_pages(folio); |
Matthew Wilcox (Oracle) | eec2042 | 2023-01-11 14:28:48 +0000 | [diff] [blame] | 1401 | nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); |
Hugh Dickins | 6287b7d | 2022-12-04 17:57:07 -0800 | [diff] [blame] | 1402 | /* Raced ahead of another remove and an add? */ |
| 1403 | if (unlikely(nr < 0)) |
| 1404 | nr = 0; |
| 1405 | } else { |
| 1406 | /* An add of COMPOUND_MAPPED raced ahead */ |
| 1407 | nr = 0; |
| 1408 | } |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1409 | } |
Hugh Dickins | cb67f42 | 2022-11-02 18:51:38 -0700 | [diff] [blame] | 1410 | } |
| 1411 | |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1412 | if (nr_pmdmapped) { |
Matthew Wilcox (Oracle) | 62beb90 | 2023-01-11 14:28:51 +0000 | [diff] [blame] | 1413 | if (folio_test_anon(folio)) |
| 1414 | idx = NR_ANON_THPS; |
| 1415 | else if (folio_test_swapbacked(folio)) |
| 1416 | idx = NR_SHMEM_PMDMAPPED; |
| 1417 | else |
| 1418 | idx = NR_FILE_PMDMAPPED; |
| 1419 | __lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped); |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1420 | } |
| 1421 | if (nr) { |
Matthew Wilcox (Oracle) | 62beb90 | 2023-01-11 14:28:51 +0000 | [diff] [blame] | 1422 | idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED; |
| 1423 | __lruvec_stat_mod_folio(folio, idx, -nr); |
| 1424 | |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1425 | /* |
Matthew Wilcox (Oracle) | 62beb90 | 2023-01-11 14:28:51 +0000 | [diff] [blame] | 1426 | * Queue anon THP for deferred split if at least one |
| 1427 | * page of the folio is unmapped and at least one page |
| 1428 | * is still mapped. |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1429 | */ |
Matthew Wilcox (Oracle) | 62beb90 | 2023-01-11 14:28:51 +0000 | [diff] [blame] | 1430 | if (folio_test_pmd_mappable(folio) && folio_test_anon(folio)) |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1431 | if (!compound || nr < nr_pmdmapped) |
Matthew Wilcox (Oracle) | f158ed6 | 2023-01-11 14:29:13 +0000 | [diff] [blame] | 1432 | deferred_split_folio(folio); |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1433 | } |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 1434 | |
KOSAKI Motohiro | b904dcf | 2009-09-21 17:01:28 -0700 | [diff] [blame] | 1435 | /* |
Matthew Wilcox (Oracle) | 672aa27 | 2023-01-16 19:28:26 +0000 | [diff] [blame] | 1436 | * It would be tidy to reset folio_test_anon mapping when fully |
| 1437 | * unmapped, but that might overwrite a racing page_add_anon_rmap |
| 1438 | * which increments mapcount after us but sets mapping before us: |
| 1439 | * so leave the reset to free_pages_prepare, and remember that |
| 1440 | * it's only reliable while mapped. |
KOSAKI Motohiro | b904dcf | 2009-09-21 17:01:28 -0700 | [diff] [blame] | 1441 | */ |
Hugh Dickins | 9bd3155 | 2022-11-02 18:53:45 -0700 | [diff] [blame] | 1442 | |
Matthew Wilcox (Oracle) | 672aa27 | 2023-01-16 19:28:26 +0000 | [diff] [blame] | 1443 | munlock_vma_folio(folio, vma, compound); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1444 | } |
| 1445 | |
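/*
 * A worked example of the deferred-split test above (illustrative
 * numbers): a 512-page anonymous THP is PMD-mapped once while one of
 * its pages is also PTE-mapped.  Removing the PMD mapping yields
 *
 *   nr_pmdmapped == 512
 *   nr == 512 - 1 == 511	(one page is still mapped by PTE)
 *
 * nr < nr_pmdmapped, so deferred_split_folio() queues the folio:
 * a partially mapped THP is split later so its unmapped tail pages
 * can be reclaimed.  Had no PTE mapping remained, nr would equal
 * nr_pmdmapped and no split would be queued.
 */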
| 1446 | /* |
Joonsoo Kim | 5262950 | 2014-01-21 15:49:50 -0800 | [diff] [blame] | 1447 | * @arg: enum ttu_flags will be passed in via this argument |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1448 | */ |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 1449 | static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, |
Joonsoo Kim | 5262950 | 2014-01-21 15:49:50 -0800 | [diff] [blame] | 1450 | unsigned long address, void *arg) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1451 | { |
| 1452 | struct mm_struct *mm = vma->vm_mm; |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1453 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1454 | pte_t pteval; |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1455 | struct page *subpage; |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1456 | bool anon_exclusive, ret = true; |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 1457 | struct mmu_notifier_range range; |
Palmer Dabbelt | 4708f31 | 2020-04-06 20:08:00 -0700 | [diff] [blame] | 1458 | enum ttu_flags flags = (enum ttu_flags)(long)arg; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1459 | |
Hugh Dickins | 732ed55 | 2021-06-15 18:23:53 -0700 | [diff] [blame] | 1460 | /* |
| 1461 | * When racing against e.g. zap_pte_range() on another cpu, |
| 1462 | * in between its ptep_get_and_clear_full() and page_remove_rmap(), |
Yang Shi | 1fb08ac | 2021-06-30 18:52:01 -0700 | [diff] [blame] | 1463 | * try_to_unmap() may return before page_mapped() has become false, |
Hugh Dickins | 732ed55 | 2021-06-15 18:23:53 -0700 | [diff] [blame] | 1464 | * if page table locking is skipped: use TTU_SYNC to wait for that. |
| 1465 | */ |
| 1466 | if (flags & TTU_SYNC) |
| 1467 | pvmw.flags = PVMW_SYNC; |
| 1468 | |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1469 | if (flags & TTU_SPLIT_HUGE_PMD) |
Matthew Wilcox (Oracle) | af28a98 | 2022-01-21 10:44:52 -0500 | [diff] [blame] | 1470 | split_huge_pmd_address(vma, address, false, folio); |
Kirill A. Shutemov | fec89c1 | 2016-03-17 14:20:10 -0700 | [diff] [blame] | 1471 | |
Jérôme Glisse | 369ea82 | 2017-08-31 17:17:27 -0400 | [diff] [blame] | 1472 | /* |
Mike Kravetz | 017b166 | 2018-10-05 15:51:29 -0700 | [diff] [blame] | 1473 | * For THP, we have to assume the worst case, i.e. pmd, for invalidation. |
| 1474 | * For hugetlb, it could be much worse if we need to do pud |
| 1475 | * invalidation in the case of pmd sharing. |
| 1476 | * |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1477 | * Note that the folio cannot be freed in this function, as the caller of |
| 1478 | * try_to_unmap() must hold a reference on the folio. |
Jérôme Glisse | 369ea82 | 2017-08-31 17:17:27 -0400 | [diff] [blame] | 1479 | */ |
Matthew Wilcox (Oracle) | 2aff7a4 | 2022-02-03 11:40:17 -0500 | [diff] [blame] | 1480 | range.end = vma_address_end(&pvmw); |
Alistair Popple | 7d4a8be | 2023-01-10 13:57:22 +1100 | [diff] [blame] | 1481 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, |
Hugh Dickins | 494334e | 2021-06-15 18:23:56 -0700 | [diff] [blame] | 1482 | address, range.end); |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1483 | if (folio_test_hugetlb(folio)) { |
Mike Kravetz | 017b166 | 2018-10-05 15:51:29 -0700 | [diff] [blame] | 1484 | /* |
| 1485 | * If sharing is possible, start and end will be adjusted |
| 1486 | * accordingly. |
| 1487 | */ |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 1488 | adjust_range_if_pmd_sharing_possible(vma, &range.start, |
| 1489 | &range.end); |
Mike Kravetz | 017b166 | 2018-10-05 15:51:29 -0700 | [diff] [blame] | 1490 | } |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 1491 | mmu_notifier_invalidate_range_start(&range); |
Jérôme Glisse | 369ea82 | 2017-08-31 17:17:27 -0400 | [diff] [blame] | 1492 | |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1493 | while (page_vma_mapped_walk(&pvmw)) { |
Hugh Dickins | cea86fe | 2022-02-14 18:26:39 -0800 | [diff] [blame] | 1494 | /* Unexpected PMD-mapped THP? */ |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1495 | VM_BUG_ON_FOLIO(!pvmw.pte, folio); |
Hugh Dickins | cea86fe | 2022-02-14 18:26:39 -0800 | [diff] [blame] | 1496 | |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1497 | /* |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1498 | * If the folio is in an mlock()d vma, we must not swap it out. |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1499 | */ |
Hugh Dickins | efdb672 | 2021-07-11 20:10:49 -0700 | [diff] [blame] | 1500 | if (!(flags & TTU_IGNORE_MLOCK) && |
| 1501 | (vma->vm_flags & VM_LOCKED)) { |
Hugh Dickins | cea86fe | 2022-02-14 18:26:39 -0800 | [diff] [blame] | 1502 | /* Restore the mlock which got missed */ |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1503 | mlock_vma_folio(folio, vma, false); |
Hugh Dickins | efdb672 | 2021-07-11 20:10:49 -0700 | [diff] [blame] | 1504 | page_vma_mapped_walk_done(&pvmw); |
| 1505 | ret = false; |
| 1506 | break; |
Hugh Dickins | b87537d9e | 2015-11-05 18:49:33 -0800 | [diff] [blame] | 1507 | } |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1508 | |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1509 | subpage = folio_page(folio, |
| 1510 | pte_pfn(*pvmw.pte) - folio_pfn(folio)); |
Linus Torvalds | 785373b | 2017-08-29 09:11:06 -0700 | [diff] [blame] | 1511 | address = pvmw.address; |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1512 | anon_exclusive = folio_test_anon(folio) && |
| 1513 | PageAnonExclusive(subpage); |
Linus Torvalds | 785373b | 2017-08-29 09:11:06 -0700 | [diff] [blame] | 1514 | |
Baolin Wang | dfc7ab5 | 2022-05-09 18:20:53 -0700 | [diff] [blame] | 1515 | if (folio_test_hugetlb(folio)) { |
Baolin Wang | 0506c31 | 2022-06-20 19:47:15 +0800 | [diff] [blame] | 1516 | bool anon = folio_test_anon(folio); |
| 1517 | |
Baolin Wang | 54205e9 | 2022-05-09 18:20:53 -0700 | [diff] [blame] | 1518 | /* |
Baolin Wang | a00a875 | 2022-05-13 16:48:56 -0700 | [diff] [blame] | 1519 | * try_to_unmap() is only passed a hugetlb page |
| 1520 | * when that hugetlb page is poisoned. |
| 1521 | */ |
| 1522 | VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage); |
| 1523 | /* |
Baolin Wang | 54205e9 | 2022-05-09 18:20:53 -0700 | [diff] [blame] | 1524 | * huge_pmd_unshare may unmap an entire PMD page. |
| 1525 | * There is no way of knowing exactly which PMDs may |
| 1526 | * be cached for this mm, so we must flush them all. |
| 1527 | * start/end were already adjusted above to cover this |
| 1528 | * range. |
| 1529 | */ |
| 1530 | flush_cache_range(vma, range.start, range.end); |
| 1531 | |
Baolin Wang | 0506c31 | 2022-06-20 19:47:15 +0800 | [diff] [blame] | 1532 | /* |
| 1533 | * To call huge_pmd_unshare, i_mmap_rwsem must be |
| 1534 | * held in write mode. Caller needs to explicitly |
| 1535 | * do this outside rmap routines. |
Mike Kravetz | 40549ba | 2022-09-14 15:18:09 -0700 | [diff] [blame] | 1536 | * |
| 1537 | * We also must hold hugetlb vma_lock in write mode. |
| 1538 | * Lock order dictates acquiring vma_lock BEFORE |
| 1539 | * i_mmap_rwsem. We can only try lock here and fail |
| 1540 | * if unsuccessful. |
Baolin Wang | 0506c31 | 2022-06-20 19:47:15 +0800 | [diff] [blame] | 1541 | */ |
Mike Kravetz | 40549ba | 2022-09-14 15:18:09 -0700 | [diff] [blame] | 1542 | if (!anon) { |
| 1543 | VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); |
| 1544 | if (!hugetlb_vma_trylock_write(vma)) { |
| 1545 | page_vma_mapped_walk_done(&pvmw); |
| 1546 | ret = false; |
| 1547 | break; |
| 1548 | } |
| 1549 | if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { |
| 1550 | hugetlb_vma_unlock_write(vma); |
| 1551 | flush_tlb_range(vma, |
| 1552 | range.start, range.end); |
| 1553 | mmu_notifier_invalidate_range(mm, |
| 1554 | range.start, range.end); |
| 1555 | /* |
| 1556 | * The refcount of the PMD page was
| 1557 | * dropped, which is how map counting
| 1558 | * is done for shared PMDs.
| 1559 | * Return 'true' here. When there is
| 1560 | * no other sharing, huge_pmd_unshare
| 1561 | * returns false and we will unmap the
| 1562 | * actual page and drop its map count
| 1563 | * to zero.
| 1564 | */ |
| 1565 | page_vma_mapped_walk_done(&pvmw); |
| 1566 | break; |
| 1567 | } |
| 1568 | hugetlb_vma_unlock_write(vma); |
Mike Kravetz | 017b166 | 2018-10-05 15:51:29 -0700 | [diff] [blame] | 1569 | } |
Baolin Wang | a00a875 | 2022-05-13 16:48:56 -0700 | [diff] [blame] | 1570 | pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); |
Baolin Wang | 54205e9 | 2022-05-09 18:20:53 -0700 | [diff] [blame] | 1571 | } else { |
| 1572 | flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); |
David Hildenbrand | 088b8aa | 2022-09-01 10:35:59 +0200 | [diff] [blame] | 1573 | /* Nuke the page table entry. */ |
| 1574 | if (should_defer_flush(mm, flags)) { |
Baolin Wang | a00a875 | 2022-05-13 16:48:56 -0700 | [diff] [blame] | 1575 | /* |
| 1576 | * We clear the PTE but do not flush so potentially |
| 1577 | * a remote CPU could still be writing to the folio. |
| 1578 | * If the entry was previously clean then the |
| 1579 | * architecture must guarantee that a clear->dirty |
| 1580 | * transition on a cached TLB entry is written through |
| 1581 | * and traps if the PTE is unmapped. |
| 1582 | */ |
| 1583 | pteval = ptep_get_and_clear(mm, address, pvmw.pte); |
Mel Gorman | 72b252a | 2015-09-04 15:47:32 -0700 | [diff] [blame] | 1584 | |
Baolin Wang | a00a875 | 2022-05-13 16:48:56 -0700 | [diff] [blame] | 1585 | set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); |
| 1586 | } else { |
| 1587 | pteval = ptep_clear_flush(vma, address, pvmw.pte); |
| 1588 | } |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1589 | } |
| 1590 | |
Peter Xu | 999dad8 | 2022-05-12 20:22:53 -0700 | [diff] [blame] | 1591 | /* |
| 1592 | * Now the pte is cleared. If this pte was uffd-wp armed, |
| 1593 | * we may want to replace a none pte with a marker pte if |
| 1594 | * it's file-backed, so we don't lose the tracking info. |
| 1595 | */ |
| 1596 | pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); |
| 1597 | |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1598 | /* Set the dirty flag on the folio now the pte is gone. */ |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1599 | if (pte_dirty(pteval)) |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1600 | folio_mark_dirty(folio); |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1601 | |
| 1602 | /* Update high watermark before we lower rss */ |
| 1603 | update_hiwater_rss(mm); |
| 1604 | |
Naoya Horiguchi | 6da6b1d | 2023-02-21 17:59:05 +0900 | [diff] [blame] | 1605 | if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) { |
Punit Agrawal | 5fd27b8 | 2017-07-06 15:39:53 -0700 | [diff] [blame] | 1606 | pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1607 | if (folio_test_hugetlb(folio)) { |
| 1608 | hugetlb_count_sub(folio_nr_pages(folio), mm); |
Qi Zheng | 18f3962 | 2022-06-26 22:57:17 +0800 | [diff] [blame] | 1609 | set_huge_pte_at(mm, address, pvmw.pte, pteval); |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1610 | } else { |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1611 | dec_mm_counter(mm, mm_counter(&folio->page)); |
Linus Torvalds | 785373b | 2017-08-29 09:11:06 -0700 | [diff] [blame] | 1612 | set_pte_at(mm, address, pvmw.pte, pteval); |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1613 | } |
| 1614 | |
Christian Borntraeger | bce73e4 | 2018-07-13 16:58:52 -0700 | [diff] [blame] | 1615 | } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1616 | /* |
| 1617 | * The guest indicated that the page content is of no |
| 1618 | * interest anymore. Simply discard the pte; vmscan
| 1619 | * will take care of the rest.
Christian Borntraeger | bce73e4 | 2018-07-13 16:58:52 -0700 | [diff] [blame] | 1620 | * A future reference will then fault in a new zero |
| 1621 | * page. When userfaultfd is active, we must not drop |
| 1622 | * this page though, as its main user (postcopy |
| 1623 | * migration) will not expect userfaults on already |
| 1624 | * copied pages. |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1625 | */ |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1626 | dec_mm_counter(mm, mm_counter(&folio->page)); |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 1627 | /* We have to invalidate as we cleared the pte */ |
| 1628 | mmu_notifier_invalidate_range(mm, address, |
| 1629 | address + PAGE_SIZE); |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1630 | } else if (folio_test_anon(folio)) { |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1631 | swp_entry_t entry = { .val = page_private(subpage) }; |
| 1632 | pte_t swp_pte; |
| 1633 | /* |
| 1634 | * Store the swap location in the pte. |
| 1635 | * See handle_pte_fault() ... |
| 1636 | */ |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1637 | if (unlikely(folio_test_swapbacked(folio) != |
| 1638 | folio_test_swapcache(folio))) { |
Minchan Kim | eb94a87 | 2017-05-03 14:52:36 -0700 | [diff] [blame] | 1639 | WARN_ON_ONCE(1); |
Minchan Kim | 83612a9 | 2017-05-03 14:54:30 -0700 | [diff] [blame] | 1640 | ret = false; |
Jérôme Glisse | 369ea82 | 2017-08-31 17:17:27 -0400 | [diff] [blame] | 1641 | /* We have to invalidate as we cleared the pte */ |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 1642 | mmu_notifier_invalidate_range(mm, address, |
| 1643 | address + PAGE_SIZE); |
Minchan Kim | eb94a87 | 2017-05-03 14:52:36 -0700 | [diff] [blame] | 1644 | page_vma_mapped_walk_done(&pvmw); |
| 1645 | break; |
| 1646 | } |
Minchan Kim | 854e9ed | 2016-01-15 16:54:53 -0800 | [diff] [blame] | 1647 | |
Shaohua Li | 802a3a9 | 2017-05-03 14:52:32 -0700 | [diff] [blame] | 1648 | /* MADV_FREE page check */ |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1649 | if (!folio_test_swapbacked(folio)) { |
Mauricio Faria de Oliveira | 6c8e2a2 | 2022-03-24 18:14:09 -0700 | [diff] [blame] | 1650 | int ref_count, map_count; |
| 1651 | |
| 1652 | /* |
| 1653 | * Synchronize with gup_pte_range(): |
| 1654 | * - clear PTE; barrier; read refcount |
| 1655 | * - inc refcount; barrier; read PTE |
| 1656 | */ |
| 1657 | smp_mb(); |
| 1658 | |
| 1659 | ref_count = folio_ref_count(folio); |
| 1660 | map_count = folio_mapcount(folio); |
| 1661 | |
| 1662 | /* |
| 1663 | * Order reads for page refcount and dirty flag |
| 1664 | * (see comments in __remove_mapping()). |
| 1665 | */ |
| 1666 | smp_rmb(); |
| 1667 | |
| 1668 | /* |
| 1669 | * The only page refs must be the one from isolation
| 1670 | * plus the rmap(s), dropped at the discard: label.
| 1671 | */ |
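| | /*
| | * Worked example (editor's sketch): an order-0 anonymous
| | * folio mapped by a single pte has one ref from isolation
| | * plus one from its rmap, so ref_count == 2 and
| | * map_count == 1, and the check passes; any extra
| | * reference (e.g. a concurrent GUP) makes
| | * ref_count > 1 + map_count and the folio is kept.
| | */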
| 1672 | if (ref_count == 1 + map_count && |
| 1673 | !folio_test_dirty(folio)) { |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 1674 | /* Invalidate as we cleared the pte */ |
| 1675 | mmu_notifier_invalidate_range(mm, |
| 1676 | address, address + PAGE_SIZE); |
Shaohua Li | 802a3a9 | 2017-05-03 14:52:32 -0700 | [diff] [blame] | 1677 | dec_mm_counter(mm, MM_ANONPAGES); |
| 1678 | goto discard; |
| 1679 | } |
| 1680 | |
| 1681 | /* |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1682 | * If the folio was redirtied, it cannot be |
Shaohua Li | 802a3a9 | 2017-05-03 14:52:32 -0700 | [diff] [blame] | 1683 | * discarded. Remap the page to page table. |
| 1684 | */ |
Linus Torvalds | 785373b | 2017-08-29 09:11:06 -0700 | [diff] [blame] | 1685 | set_pte_at(mm, address, pvmw.pte, pteval); |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1686 | folio_set_swapbacked(folio); |
Minchan Kim | e4b8222 | 2017-05-03 14:54:27 -0700 | [diff] [blame] | 1687 | ret = false; |
Shaohua Li | 802a3a9 | 2017-05-03 14:52:32 -0700 | [diff] [blame] | 1688 | page_vma_mapped_walk_done(&pvmw); |
| 1689 | break; |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1690 | } |
| 1691 | |
| 1692 | if (swap_duplicate(entry) < 0) { |
Linus Torvalds | 785373b | 2017-08-29 09:11:06 -0700 | [diff] [blame] | 1693 | set_pte_at(mm, address, pvmw.pte, pteval); |
Minchan Kim | e4b8222 | 2017-05-03 14:54:27 -0700 | [diff] [blame] | 1694 | ret = false; |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1695 | page_vma_mapped_walk_done(&pvmw); |
| 1696 | break; |
| 1697 | } |
Khalid Aziz | ca827d5 | 2018-02-21 10:15:44 -0700 | [diff] [blame] | 1698 | if (arch_unmap_one(mm, vma, address, pteval) < 0) { |
David Hildenbrand | 322842e | 2022-05-09 18:20:42 -0700 | [diff] [blame] | 1699 | swap_free(entry); |
Khalid Aziz | ca827d5 | 2018-02-21 10:15:44 -0700 | [diff] [blame] | 1700 | set_pte_at(mm, address, pvmw.pte, pteval); |
| 1701 | ret = false; |
| 1702 | page_vma_mapped_walk_done(&pvmw); |
| 1703 | break; |
| 1704 | } |
David Hildenbrand | 088b8aa | 2022-09-01 10:35:59 +0200 | [diff] [blame] | 1705 | |
| 1706 | /* See page_try_share_anon_rmap(): clear PTE first. */ |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1707 | if (anon_exclusive && |
| 1708 | page_try_share_anon_rmap(subpage)) { |
| 1709 | swap_free(entry); |
| 1710 | set_pte_at(mm, address, pvmw.pte, pteval); |
| 1711 | ret = false; |
| 1712 | page_vma_mapped_walk_done(&pvmw); |
| 1713 | break; |
| 1714 | } |
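| | /*
| | * Lockless double-check: test list_empty() without
| | * mmlist_lock first, then re-check under the lock so
| | * racing tasks cannot add this mm to mmlist twice.
| | */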
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1715 | if (list_empty(&mm->mmlist)) { |
| 1716 | spin_lock(&mmlist_lock); |
| 1717 | if (list_empty(&mm->mmlist)) |
| 1718 | list_add(&mm->mmlist, &init_mm.mmlist); |
| 1719 | spin_unlock(&mmlist_lock); |
| 1720 | } |
Minchan Kim | 854e9ed | 2016-01-15 16:54:53 -0800 | [diff] [blame] | 1721 | dec_mm_counter(mm, MM_ANONPAGES); |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1722 | inc_mm_counter(mm, MM_SWAPENTS); |
| 1723 | swp_pte = swp_entry_to_pte(entry); |
David Hildenbrand | 1493a19 | 2022-05-09 18:20:45 -0700 | [diff] [blame] | 1724 | if (anon_exclusive) |
| 1725 | swp_pte = pte_swp_mkexclusive(swp_pte); |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1726 | if (pte_soft_dirty(pteval)) |
| 1727 | swp_pte = pte_swp_mksoft_dirty(swp_pte); |
Peter Xu | f45ec5f | 2020-04-06 20:06:01 -0700 | [diff] [blame] | 1728 | if (pte_uffd_wp(pteval)) |
| 1729 | swp_pte = pte_swp_mkuffd_wp(swp_pte); |
Linus Torvalds | 785373b | 2017-08-29 09:11:06 -0700 | [diff] [blame] | 1730 | set_pte_at(mm, address, pvmw.pte, swp_pte); |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 1731 | /* Invalidate as we cleared the pte */ |
| 1732 | mmu_notifier_invalidate_range(mm, address, |
| 1733 | address + PAGE_SIZE); |
| 1734 | } else { |
| 1735 | /* |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1736 | * This is a locked file-backed folio, |
| 1737 | * so it cannot be removed from the page |
| 1738 | * cache and replaced by a new folio before |
| 1739 | * mmu_notifier_invalidate_range_end, so no |
| 1740 | * concurrent thread can update its page table
| 1741 | * to point at a new folio while a device is |
| 1742 | * still using this folio. |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 1743 | * |
Mike Rapoport | ee65728 | 2022-06-27 09:00:26 +0300 | [diff] [blame] | 1744 | * See Documentation/mm/mmu_notifier.rst |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 1745 | */ |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1746 | dec_mm_counter(mm, mm_counter_file(&folio->page)); |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 1747 | } |
Minchan Kim | 854e9ed | 2016-01-15 16:54:53 -0800 | [diff] [blame] | 1748 | discard: |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 1749 | /* |
| 1750 | * No need to call mmu_notifier_invalidate_range(); it has been
| 1751 | * done above for all cases requiring it to happen under the page
| 1752 | * table lock, before mmu_notifier_invalidate_range_end()
| 1753 | * |
Mike Rapoport | ee65728 | 2022-06-27 09:00:26 +0300 | [diff] [blame] | 1754 | * See Documentation/mm/mmu_notifier.rst |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 1755 | */ |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1756 | page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); |
Hugh Dickins | b743550 | 2022-02-14 18:38:47 -0800 | [diff] [blame] | 1757 | if (vma->vm_flags & VM_LOCKED) |
Lorenzo Stoakes | 96f97c4 | 2023-01-12 12:39:31 +0000 | [diff] [blame] | 1758 | mlock_drain_local(); |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1759 | folio_put(folio); |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 1760 | } |
Jérôme Glisse | 369ea82 | 2017-08-31 17:17:27 -0400 | [diff] [blame] | 1761 | |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 1762 | mmu_notifier_invalidate_range_end(&range); |
Jérôme Glisse | 369ea82 | 2017-08-31 17:17:27 -0400 | [diff] [blame] | 1763 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1764 | return ret; |
| 1765 | } |
| 1766 | |
Joonsoo Kim | 5262950 | 2014-01-21 15:49:50 -0800 | [diff] [blame] | 1767 | static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) |
| 1768 | { |
Anshuman Khandual | 222100e | 2020-04-01 21:07:52 -0700 | [diff] [blame] | 1769 | return vma_is_temporary_stack(vma); |
Joonsoo Kim | 5262950 | 2014-01-21 15:49:50 -0800 | [diff] [blame] | 1770 | } |
| 1771 | |
Kefeng Wang | f3ad032 | 2022-09-27 14:38:26 +0800 | [diff] [blame] | 1772 | static int folio_not_mapped(struct folio *folio) |
Joonsoo Kim | 5262950 | 2014-01-21 15:49:50 -0800 | [diff] [blame] | 1773 | { |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 1774 | return !folio_mapped(folio); |
Kirill A. Shutemov | 2a52bcb | 2016-03-17 14:20:04 -0700 | [diff] [blame] | 1775 | } |
Joonsoo Kim | 5262950 | 2014-01-21 15:49:50 -0800 | [diff] [blame] | 1776 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1777 | /** |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1778 | * try_to_unmap - Try to remove all page table mappings to a folio. |
| 1779 | * @folio: The folio to unmap. |
Andi Kleen | 14fa31b | 2009-09-16 11:50:10 +0200 | [diff] [blame] | 1780 | * @flags: action and flags |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1781 | * |
| 1782 | * Tries to remove all the page table entries which are mapping this |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1783 | * folio. It is the caller's responsibility to check if the folio is |
| 1784 | * still mapped if needed (use TTU_SYNC to prevent accounting races). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1785 | * |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1786 | * Context: Caller must hold the folio lock. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1787 | */ |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 1788 | void try_to_unmap(struct folio *folio, enum ttu_flags flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1789 | { |
Joonsoo Kim | 5262950 | 2014-01-21 15:49:50 -0800 | [diff] [blame] | 1790 | struct rmap_walk_control rwc = { |
| 1791 | .rmap_one = try_to_unmap_one, |
Shaohua Li | 802a3a9 | 2017-05-03 14:52:32 -0700 | [diff] [blame] | 1792 | .arg = (void *)flags, |
Kefeng Wang | f3ad032 | 2022-09-27 14:38:26 +0800 | [diff] [blame] | 1793 | .done = folio_not_mapped, |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 1794 | .anon_lock = folio_lock_anon_vma_read, |
Joonsoo Kim | 5262950 | 2014-01-21 15:49:50 -0800 | [diff] [blame] | 1795 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1796 | |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1797 | if (flags & TTU_RMAP_LOCKED) |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 1798 | rmap_walk_locked(folio, &rwc); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1799 | else |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 1800 | rmap_walk(folio, &rwc); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1801 | } |
| 1802 | |
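| | /*
| | * Hedged usage sketch (editor's illustration, not kernel source):
| | * a reclaim-style caller is expected to hold the folio lock and to
| | * re-check the mapcount itself, roughly:
| | *
| | *	if (folio_trylock(folio)) {
| | *		try_to_unmap(folio, TTU_BATCH_FLUSH);
| | *		if (!folio_mapped(folio))
| | *			free_or_swap_out(folio);	<- hypothetical helper
| | *		folio_unlock(folio);
| | *	}
| | *
| | * TTU_BATCH_FLUSH stands in for whatever flags the real caller
| | * needs; see the reclaim path in mm/vmscan.c for actual usage.
| | */
| |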
| 1803 | /* |
| 1804 | * @arg: enum ttu_flags will be passed to this argument. |
| 1805 | * |
| 1806 | * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs |
Hugh Dickins | 64b586d | 2021-07-07 13:06:17 -0700 | [diff] [blame] | 1807 | * containing migration entries. |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1808 | */ |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 1809 | static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1810 | unsigned long address, void *arg) |
| 1811 | { |
| 1812 | struct mm_struct *mm = vma->vm_mm; |
Matthew Wilcox (Oracle) | 4b8554c | 2022-01-28 14:29:43 -0500 | [diff] [blame] | 1813 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1814 | pte_t pteval; |
| 1815 | struct page *subpage; |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1816 | bool anon_exclusive, ret = true; |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1817 | struct mmu_notifier_range range; |
| 1818 | enum ttu_flags flags = (enum ttu_flags)(long)arg; |
| 1819 | |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1820 | /* |
| 1821 | * When racing against e.g. zap_pte_range() on another cpu, |
| 1822 | * in between its ptep_get_and_clear_full() and page_remove_rmap(), |
| 1823 | * try_to_migrate() may return before page_mapped() has become false, |
| 1824 | * if page table locking is skipped: use TTU_SYNC to wait for that. |
| 1825 | */ |
| 1826 | if (flags & TTU_SYNC) |
| 1827 | pvmw.flags = PVMW_SYNC; |
| 1828 | |
| 1829 | /* |
| 1830 | * unmap_page() in mm/huge_memory.c is the only user of migration with |
| 1831 | * TTU_SPLIT_HUGE_PMD and it wants to freeze. |
| 1832 | */ |
| 1833 | if (flags & TTU_SPLIT_HUGE_PMD) |
Matthew Wilcox (Oracle) | af28a98 | 2022-01-21 10:44:52 -0500 | [diff] [blame] | 1834 | split_huge_pmd_address(vma, address, true, folio); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1835 | |
| 1836 | /* |
| 1837 | * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
| 1838 | * For hugetlb, it could be much worse if we need to do pud |
| 1839 | * invalidation in the case of pmd sharing. |
| 1840 | * |
| 1841 | * Note that the page cannot be freed in this function, as the caller
| 1842 | * of try_to_unmap() must hold a reference on the page.
| 1843 | */ |
Matthew Wilcox (Oracle) | 2aff7a4 | 2022-02-03 11:40:17 -0500 | [diff] [blame] | 1844 | range.end = vma_address_end(&pvmw); |
Alistair Popple | 7d4a8be | 2023-01-10 13:57:22 +1100 | [diff] [blame] | 1845 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1846 | address, range.end); |
Matthew Wilcox (Oracle) | 4b8554c | 2022-01-28 14:29:43 -0500 | [diff] [blame] | 1847 | if (folio_test_hugetlb(folio)) { |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1848 | /* |
| 1849 | * If sharing is possible, start and end will be adjusted |
| 1850 | * accordingly. |
| 1851 | */ |
| 1852 | adjust_range_if_pmd_sharing_possible(vma, &range.start, |
| 1853 | &range.end); |
| 1854 | } |
| 1855 | mmu_notifier_invalidate_range_start(&range); |
| 1856 | |
| 1857 | while (page_vma_mapped_walk(&pvmw)) { |
| 1858 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION |
| 1859 | /* PMD-mapped THP migration entry */ |
| 1860 | if (!pvmw.pte) { |
Matthew Wilcox (Oracle) | 4b8554c | 2022-01-28 14:29:43 -0500 | [diff] [blame] | 1861 | subpage = folio_page(folio, |
| 1862 | pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); |
| 1863 | VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || |
| 1864 | !folio_test_pmd_mappable(folio), folio); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1865 | |
David Hildenbrand | 7f5abe6 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1866 | if (set_pmd_migration_entry(&pvmw, subpage)) { |
| 1867 | ret = false; |
| 1868 | page_vma_mapped_walk_done(&pvmw); |
| 1869 | break; |
| 1870 | } |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1871 | continue; |
| 1872 | } |
| 1873 | #endif |
| 1874 | |
| 1875 | /* Unexpected PMD-mapped THP? */ |
Matthew Wilcox (Oracle) | 4b8554c | 2022-01-28 14:29:43 -0500 | [diff] [blame] | 1876 | VM_BUG_ON_FOLIO(!pvmw.pte, folio); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1877 | |
David Hildenbrand | 1118234 | 2022-06-23 22:53:32 +0200 | [diff] [blame] | 1878 | if (folio_is_zone_device(folio)) { |
| 1879 | /* |
| 1880 | * Our PTE is a non-present device exclusive entry and |
| 1881 | * calculating the subpage as for the common case would |
| 1882 | * result in an invalid pointer. |
| 1883 | * |
| 1884 | * Since only PAGE_SIZE pages can currently be |
| 1885 | * migrated, just set it to page. This will need to be |
| 1886 | * changed when hugepage migrations to device private |
| 1887 | * memory are supported. |
| 1888 | */ |
| 1889 | VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); |
| 1890 | subpage = &folio->page; |
| 1891 | } else { |
| 1892 | subpage = folio_page(folio, |
| 1893 | pte_pfn(*pvmw.pte) - folio_pfn(folio)); |
| 1894 | } |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1895 | address = pvmw.address; |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1896 | anon_exclusive = folio_test_anon(folio) && |
| 1897 | PageAnonExclusive(subpage); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1898 | |
Baolin Wang | dfc7ab5 | 2022-05-09 18:20:53 -0700 | [diff] [blame] | 1899 | if (folio_test_hugetlb(folio)) { |
Baolin Wang | 0506c31 | 2022-06-20 19:47:15 +0800 | [diff] [blame] | 1900 | bool anon = folio_test_anon(folio); |
| 1901 | |
Baolin Wang | 54205e9 | 2022-05-09 18:20:53 -0700 | [diff] [blame] | 1902 | /* |
| 1903 | * huge_pmd_unshare may unmap an entire PMD page. |
| 1904 | * There is no way of knowing exactly which PMDs may |
| 1905 | * be cached for this mm, so we must flush them all. |
| 1906 | * start/end were already adjusted above to cover this |
| 1907 | * range. |
| 1908 | */ |
| 1909 | flush_cache_range(vma, range.start, range.end); |
| 1910 | |
Baolin Wang | 0506c31 | 2022-06-20 19:47:15 +0800 | [diff] [blame] | 1911 | /* |
| 1912 | * To call huge_pmd_unshare, i_mmap_rwsem must be |
| 1913 | * held in write mode. Caller needs to explicitly |
| 1914 | * do this outside rmap routines. |
Mike Kravetz | 40549ba | 2022-09-14 15:18:09 -0700 | [diff] [blame] | 1915 | * |
| 1916 | * We also must hold hugetlb vma_lock in write mode. |
| 1917 | * Lock order dictates acquiring vma_lock BEFORE |
| 1918 | * i_mmap_rwsem. We can only trylock here, and
| 1919 | * fail if unsuccessful.
Baolin Wang | 0506c31 | 2022-06-20 19:47:15 +0800 | [diff] [blame] | 1920 | */ |
Mike Kravetz | 40549ba | 2022-09-14 15:18:09 -0700 | [diff] [blame] | 1921 | if (!anon) { |
| 1922 | VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); |
| 1923 | if (!hugetlb_vma_trylock_write(vma)) { |
| 1924 | page_vma_mapped_walk_done(&pvmw); |
| 1925 | ret = false; |
| 1926 | break; |
| 1927 | } |
| 1928 | if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { |
| 1929 | hugetlb_vma_unlock_write(vma); |
| 1930 | flush_tlb_range(vma, |
| 1931 | range.start, range.end); |
| 1932 | mmu_notifier_invalidate_range(mm, |
| 1933 | range.start, range.end); |
Baolin Wang | 0506c31 | 2022-06-20 19:47:15 +0800 | [diff] [blame] | 1934 | |
Mike Kravetz | 40549ba | 2022-09-14 15:18:09 -0700 | [diff] [blame] | 1935 | /* |
| 1936 | * The refcount of the PMD page was
| 1937 | * dropped, which is how map counting
| 1938 | * is done for shared PMDs.
| 1939 | * Return 'true' here. When there is
| 1940 | * no other sharing, huge_pmd_unshare
| 1941 | * returns false and we will unmap the
| 1942 | * actual page and drop its map count
| 1943 | * to zero.
| 1944 | */ |
| 1945 | page_vma_mapped_walk_done(&pvmw); |
| 1946 | break; |
| 1947 | } |
| 1948 | hugetlb_vma_unlock_write(vma); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1949 | } |
Baolin Wang | 5d4af61 | 2022-05-13 16:48:55 -0700 | [diff] [blame] | 1950 | /* Nuke the hugetlb page table entry */ |
| 1951 | pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); |
Baolin Wang | 54205e9 | 2022-05-09 18:20:53 -0700 | [diff] [blame] | 1952 | } else { |
| 1953 | flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); |
Baolin Wang | 5d4af61 | 2022-05-13 16:48:55 -0700 | [diff] [blame] | 1954 | /* Nuke the page table entry. */ |
Huang Ying | 7e12beb | 2023-02-13 20:34:43 +0800 | [diff] [blame] | 1955 | if (should_defer_flush(mm, flags)) { |
| 1956 | /* |
| 1957 | * We clear the PTE but do not flush so potentially |
| 1958 | * a remote CPU could still be writing to the folio. |
| 1959 | * If the entry was previously clean then the |
| 1960 | * architecture must guarantee that a clear->dirty |
| 1961 | * transition on a cached TLB entry is written through |
| 1962 | * and traps if the PTE is unmapped. |
| 1963 | */ |
| 1964 | pteval = ptep_get_and_clear(mm, address, pvmw.pte); |
| 1965 | |
| 1966 | set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); |
| 1967 | } else { |
| 1968 | pteval = ptep_clear_flush(vma, address, pvmw.pte); |
| 1969 | } |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1970 | } |
| 1971 | |
Matthew Wilcox (Oracle) | 4b8554c | 2022-01-28 14:29:43 -0500 | [diff] [blame] | 1972 | /* Set the dirty flag on the folio now the pte is gone. */ |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1973 | if (pte_dirty(pteval)) |
Matthew Wilcox (Oracle) | 4b8554c | 2022-01-28 14:29:43 -0500 | [diff] [blame] | 1974 | folio_mark_dirty(folio); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1975 | |
| 1976 | /* Update high watermark before we lower rss */ |
| 1977 | update_hiwater_rss(mm); |
| 1978 | |
Alex Sierra | f25cbb7 | 2022-07-15 10:05:10 -0500 | [diff] [blame] | 1979 | if (folio_is_device_private(folio)) { |
Matthew Wilcox (Oracle) | 4b8554c | 2022-01-28 14:29:43 -0500 | [diff] [blame] | 1980 | unsigned long pfn = folio_pfn(folio); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1981 | swp_entry_t entry; |
| 1982 | pte_t swp_pte; |
| 1983 | |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1984 | if (anon_exclusive) |
| 1985 | BUG_ON(page_try_share_anon_rmap(subpage)); |
| 1986 | |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1987 | /* |
| 1988 | * Store the pfn of the page in a special migration |
| 1989 | * pte. do_swap_page() will wait until the migration |
| 1990 | * pte is removed and then restart fault handling. |
| 1991 | */ |
Alistair Popple | 3d88705 | 2021-11-05 13:45:00 -0700 | [diff] [blame] | 1992 | entry = pte_to_swp_entry(pteval); |
| 1993 | if (is_writable_device_private_entry(entry)) |
| 1994 | entry = make_writable_migration_entry(pfn); |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1995 | else if (anon_exclusive) |
| 1996 | entry = make_readable_exclusive_migration_entry(pfn); |
Alistair Popple | 3d88705 | 2021-11-05 13:45:00 -0700 | [diff] [blame] | 1997 | else |
| 1998 | entry = make_readable_migration_entry(pfn); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 1999 | swp_pte = swp_entry_to_pte(entry); |
| 2000 | |
| 2001 | /* |
| 2002 | * pteval maps a zone device page and is therefore |
| 2003 | * a swap pte. |
| 2004 | */ |
| 2005 | if (pte_swp_soft_dirty(pteval)) |
| 2006 | swp_pte = pte_swp_mksoft_dirty(swp_pte); |
| 2007 | if (pte_swp_uffd_wp(pteval)) |
| 2008 | swp_pte = pte_swp_mkuffd_wp(swp_pte); |
| 2009 | set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); |
Anshuman Khandual | 4cc79b3 | 2022-03-24 18:10:01 -0700 | [diff] [blame] | 2010 | trace_set_migration_pte(pvmw.address, pte_val(swp_pte), |
| 2011 | compound_order(&folio->page)); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2012 | /* |
| 2013 | * No need to invalidate here; it will synchronize
| 2014 | * against the special swap migration pte.
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2015 | */ |
Matthew Wilcox (Oracle) | da358d5 | 2022-03-22 14:46:38 -0700 | [diff] [blame] | 2016 | } else if (PageHWPoison(subpage)) { |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2017 | pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); |
Matthew Wilcox (Oracle) | 4b8554c | 2022-01-28 14:29:43 -0500 | [diff] [blame] | 2018 | if (folio_test_hugetlb(folio)) { |
| 2019 | hugetlb_count_sub(folio_nr_pages(folio), mm); |
Qi Zheng | 18f3962 | 2022-06-26 22:57:17 +0800 | [diff] [blame] | 2020 | set_huge_pte_at(mm, address, pvmw.pte, pteval); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2021 | } else { |
Matthew Wilcox (Oracle) | 4b8554c | 2022-01-28 14:29:43 -0500 | [diff] [blame] | 2022 | dec_mm_counter(mm, mm_counter(&folio->page)); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2023 | set_pte_at(mm, address, pvmw.pte, pteval); |
| 2024 | } |
| 2025 | |
| 2026 | } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { |
| 2027 | /* |
| 2028 | * The guest indicated that the page content is of no |
| 2029 | * interest anymore. Simply discard the pte; vmscan
| 2030 | * will take care of the rest.
| 2031 | * A future reference will then fault in a new zero |
| 2032 | * page. When userfaultfd is active, we must not drop |
| 2033 | * this page though, as its main user (postcopy |
| 2034 | * migration) will not expect userfaults on already |
| 2035 | * copied pages. |
| 2036 | */ |
Matthew Wilcox (Oracle) | 4b8554c | 2022-01-28 14:29:43 -0500 | [diff] [blame] | 2037 | dec_mm_counter(mm, mm_counter(&folio->page)); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2038 | /* We have to invalidate as we cleared the pte */ |
| 2039 | mmu_notifier_invalidate_range(mm, address, |
| 2040 | address + PAGE_SIZE); |
| 2041 | } else { |
| 2042 | swp_entry_t entry; |
| 2043 | pte_t swp_pte; |
| 2044 | |
| 2045 | if (arch_unmap_one(mm, vma, address, pteval) < 0) { |
Baolin Wang | 5d4af61 | 2022-05-13 16:48:55 -0700 | [diff] [blame] | 2046 | if (folio_test_hugetlb(folio)) |
| 2047 | set_huge_pte_at(mm, address, pvmw.pte, pteval); |
| 2048 | else |
| 2049 | set_pte_at(mm, address, pvmw.pte, pteval); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2050 | ret = false; |
| 2051 | page_vma_mapped_walk_done(&pvmw); |
| 2052 | break; |
| 2053 | } |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 2054 | VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && |
| 2055 | !anon_exclusive, subpage); |
David Hildenbrand | 088b8aa | 2022-09-01 10:35:59 +0200 | [diff] [blame] | 2056 | |
| 2057 | /* See page_try_share_anon_rmap(): clear PTE first. */ |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 2058 | if (anon_exclusive && |
| 2059 | page_try_share_anon_rmap(subpage)) { |
Baolin Wang | 5d4af61 | 2022-05-13 16:48:55 -0700 | [diff] [blame] | 2060 | if (folio_test_hugetlb(folio)) |
| 2061 | set_huge_pte_at(mm, address, pvmw.pte, pteval); |
| 2062 | else |
| 2063 | set_pte_at(mm, address, pvmw.pte, pteval); |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 2064 | ret = false; |
| 2065 | page_vma_mapped_walk_done(&pvmw); |
| 2066 | break; |
| 2067 | } |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2068 | |
| 2069 | /* |
| 2070 | * Store the pfn of the page in a special migration |
| 2071 | * pte. do_swap_page() will wait until the migration |
| 2072 | * pte is removed and then restart fault handling. |
| 2073 | */ |
| 2074 | if (pte_write(pteval)) |
| 2075 | entry = make_writable_migration_entry( |
| 2076 | page_to_pfn(subpage)); |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 2077 | else if (anon_exclusive) |
| 2078 | entry = make_readable_exclusive_migration_entry( |
| 2079 | page_to_pfn(subpage)); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2080 | else |
| 2081 | entry = make_readable_migration_entry( |
| 2082 | page_to_pfn(subpage)); |
Peter Xu | 2e34687 | 2022-08-11 12:13:29 -0400 | [diff] [blame] | 2083 | if (pte_young(pteval)) |
| 2084 | entry = make_migration_entry_young(entry); |
| 2085 | if (pte_dirty(pteval)) |
| 2086 | entry = make_migration_entry_dirty(entry); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2087 | swp_pte = swp_entry_to_pte(entry); |
| 2088 | if (pte_soft_dirty(pteval)) |
| 2089 | swp_pte = pte_swp_mksoft_dirty(swp_pte); |
| 2090 | if (pte_uffd_wp(pteval)) |
| 2091 | swp_pte = pte_swp_mkuffd_wp(swp_pte); |
Baolin Wang | 5d4af61 | 2022-05-13 16:48:55 -0700 | [diff] [blame] | 2092 | if (folio_test_hugetlb(folio)) |
Qi Zheng | 18f3962 | 2022-06-26 22:57:17 +0800 | [diff] [blame] | 2093 | set_huge_pte_at(mm, address, pvmw.pte, swp_pte); |
Baolin Wang | 5d4af61 | 2022-05-13 16:48:55 -0700 | [diff] [blame] | 2094 | else |
| 2095 | set_pte_at(mm, address, pvmw.pte, swp_pte); |
Anshuman Khandual | 4cc79b3 | 2022-03-24 18:10:01 -0700 | [diff] [blame] | 2096 | trace_set_migration_pte(address, pte_val(swp_pte), |
| 2097 | compound_order(&folio->page)); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2098 | /* |
| 2099 | * No need to invalidate here; it will synchronize
| 2100 | * against the special swap migration pte.
| 2101 | */ |
| 2102 | } |
| 2103 | |
| 2104 | /* |
| 2105 | * No need to call mmu_notifier_invalidate_range(); it has been
| 2106 | * done above for all cases requiring it to happen under the page
| 2107 | * table lock, before mmu_notifier_invalidate_range_end()
| 2108 | * |
Mike Rapoport | ee65728 | 2022-06-27 09:00:26 +0300 | [diff] [blame] | 2109 | * See Documentation/mm/mmu_notifier.rst |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2110 | */ |
Matthew Wilcox (Oracle) | 4b8554c | 2022-01-28 14:29:43 -0500 | [diff] [blame] | 2111 | page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); |
Hugh Dickins | b743550 | 2022-02-14 18:38:47 -0800 | [diff] [blame] | 2112 | if (vma->vm_flags & VM_LOCKED) |
Lorenzo Stoakes | 96f97c4 | 2023-01-12 12:39:31 +0000 | [diff] [blame] | 2113 | mlock_drain_local(); |
Matthew Wilcox (Oracle) | 4b8554c | 2022-01-28 14:29:43 -0500 | [diff] [blame] | 2114 | folio_put(folio); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2115 | } |
| 2116 | |
| 2117 | mmu_notifier_invalidate_range_end(&range); |
| 2118 | |
| 2119 | return ret; |
| 2120 | } |
| 2121 | |
| 2122 | /** |
| 2123 | * try_to_migrate - try to replace all page table mappings with swap entries |
Matthew Wilcox (Oracle) | 4b8554c | 2022-01-28 14:29:43 -0500 | [diff] [blame] | 2124 | * @folio: the folio to replace page table entries for |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2125 | * @flags: action and flags |
| 2126 | * |
Matthew Wilcox (Oracle) | 4b8554c | 2022-01-28 14:29:43 -0500 | [diff] [blame] | 2127 | * Tries to remove all the page table entries which are mapping this folio and |
| 2128 | * replace them with special swap entries. Caller must hold the folio lock. |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2129 | */ |
Matthew Wilcox (Oracle) | 4b8554c | 2022-01-28 14:29:43 -0500 | [diff] [blame] | 2130 | void try_to_migrate(struct folio *folio, enum ttu_flags flags) |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2131 | { |
| 2132 | struct rmap_walk_control rwc = { |
| 2133 | .rmap_one = try_to_migrate_one, |
| 2134 | .arg = (void *)flags, |
Kefeng Wang | f3ad032 | 2022-09-27 14:38:26 +0800 | [diff] [blame] | 2135 | .done = folio_not_mapped, |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2136 | .anon_lock = folio_lock_anon_vma_read, |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2137 | }; |
| 2138 | |
| 2139 | /* |
| 2140 | * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED,
| 2141 | * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags.
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2142 | */ |
| 2143 | if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | |
Huang Ying | 7e12beb | 2023-02-13 20:34:43 +0800 | [diff] [blame] | 2144 | TTU_SYNC | TTU_BATCH_FLUSH))) |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2145 | return; |
| 2146 | |
Alex Sierra | f25cbb7 | 2022-07-15 10:05:10 -0500 | [diff] [blame] | 2147 | if (folio_is_zone_device(folio) && |
| 2148 | (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) |
Hugh Dickins | 6c855fc | 2021-07-07 13:13:33 -0700 | [diff] [blame] | 2149 | return; |
| 2150 | |
Joonsoo Kim | 5262950 | 2014-01-21 15:49:50 -0800 | [diff] [blame] | 2151 | /* |
| 2152 | * During exec, a temporary VMA is setup and later moved. |
| 2153 | * The VMA is moved under the anon_vma lock but not the |
| 2154 | * page tables leading to a race where migration cannot |
| 2155 | * find the migration ptes. Rather than increasing the |
| 2156 | * locking requirements of exec(), migration skips |
| 2157 | * temporary VMAs until after exec() completes. |
| 2158 | */ |
Matthew Wilcox (Oracle) | 4b8554c | 2022-01-28 14:29:43 -0500 | [diff] [blame] | 2159 | if (!folio_test_ksm(folio) && folio_test_anon(folio)) |
Joonsoo Kim | 5262950 | 2014-01-21 15:49:50 -0800 | [diff] [blame] | 2160 | rwc.invalid_vma = invalid_migration_vma; |
| 2161 | |
Kirill A. Shutemov | 2a52bcb | 2016-03-17 14:20:04 -0700 | [diff] [blame] | 2162 | if (flags & TTU_RMAP_LOCKED) |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2163 | rmap_walk_locked(folio, &rwc); |
Kirill A. Shutemov | 2a52bcb | 2016-03-17 14:20:04 -0700 | [diff] [blame] | 2164 | else |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2165 | rmap_walk(folio, &rwc); |
Nick Piggin | b291f00 | 2008-10-18 20:26:44 -0700 | [diff] [blame] | 2166 | } |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2167 | |
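| | /*
| | * Hedged usage sketch (editor's illustration): migration callers
| | * roughly follow this sequence with the source folio locked:
| | *
| | *	try_to_migrate(src, 0);			unmap + install migration ptes
| | *	if (!folio_mapped(src))
| | *		rc = move_to_new_folio(dst, src, mode);	copy data + state
| | *	remove_migration_ptes(src, dst, false);	restore ptes
| | *
| | * Threads faulting on a migration entry wait in do_swap_page()
| | * until remove_migration_ptes() has run; see mm/migrate.c for
| | * the real sequence and its error handling.
| | */
| |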
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2168 | #ifdef CONFIG_DEVICE_PRIVATE |
| 2169 | struct make_exclusive_args { |
| 2170 | struct mm_struct *mm; |
| 2171 | unsigned long address; |
| 2172 | void *owner; |
| 2173 | bool valid; |
| 2174 | }; |
| 2175 | |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2176 | static bool page_make_device_exclusive_one(struct folio *folio, |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2177 | struct vm_area_struct *vma, unsigned long address, void *priv) |
| 2178 | { |
| 2179 | struct mm_struct *mm = vma->vm_mm; |
Matthew Wilcox (Oracle) | 0d25148 | 2022-01-28 16:03:42 -0500 | [diff] [blame] | 2180 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2181 | struct make_exclusive_args *args = priv; |
| 2182 | pte_t pteval; |
| 2183 | struct page *subpage; |
| 2184 | bool ret = true; |
| 2185 | struct mmu_notifier_range range; |
| 2186 | swp_entry_t entry; |
| 2187 | pte_t swp_pte; |
| 2188 | |
Alistair Popple | 7d4a8be | 2023-01-10 13:57:22 +1100 | [diff] [blame] | 2189 | mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2190 | vma->vm_mm, address, min(vma->vm_end, |
Matthew Wilcox (Oracle) | 0d25148 | 2022-01-28 16:03:42 -0500 | [diff] [blame] | 2191 | address + folio_size(folio)), |
| 2192 | args->owner); |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2193 | mmu_notifier_invalidate_range_start(&range); |
| 2194 | |
| 2195 | while (page_vma_mapped_walk(&pvmw)) { |
| 2196 | /* Unexpected PMD-mapped THP? */ |
Matthew Wilcox (Oracle) | 0d25148 | 2022-01-28 16:03:42 -0500 | [diff] [blame] | 2197 | VM_BUG_ON_FOLIO(!pvmw.pte, folio); |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2198 | |
| 2199 | if (!pte_present(*pvmw.pte)) { |
| 2200 | ret = false; |
| 2201 | page_vma_mapped_walk_done(&pvmw); |
| 2202 | break; |
| 2203 | } |
| 2204 | |
Matthew Wilcox (Oracle) | 0d25148 | 2022-01-28 16:03:42 -0500 | [diff] [blame] | 2205 | subpage = folio_page(folio, |
| 2206 | pte_pfn(*pvmw.pte) - folio_pfn(folio)); |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2207 | address = pvmw.address; |
| 2208 | |
| 2209 | /* Nuke the page table entry. */ |
| 2210 | flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); |
| 2211 | pteval = ptep_clear_flush(vma, address, pvmw.pte); |
| 2212 | |
Matthew Wilcox (Oracle) | 0d25148 | 2022-01-28 16:03:42 -0500 | [diff] [blame] | 2213 | /* Set the dirty flag on the folio now the pte is gone. */ |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2214 | if (pte_dirty(pteval)) |
Matthew Wilcox (Oracle) | 0d25148 | 2022-01-28 16:03:42 -0500 | [diff] [blame] | 2215 | folio_mark_dirty(folio); |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2216 | |
| 2217 | /* |
| 2218 | * Check that our target page is still mapped at the expected |
| 2219 | * address. |
| 2220 | */ |
| 2221 | if (args->mm == mm && args->address == address && |
| 2222 | pte_write(pteval)) |
| 2223 | args->valid = true; |
| 2224 | |
| 2225 | /* |
| 2226 | * Store the pfn of the page in a special migration |
| 2227 | * pte. do_swap_page() will wait until the migration |
| 2228 | * pte is removed and then restart fault handling. |
| 2229 | */ |
| 2230 | if (pte_write(pteval)) |
| 2231 | entry = make_writable_device_exclusive_entry( |
| 2232 | page_to_pfn(subpage)); |
| 2233 | else |
| 2234 | entry = make_readable_device_exclusive_entry( |
| 2235 | page_to_pfn(subpage)); |
| 2236 | swp_pte = swp_entry_to_pte(entry); |
| 2237 | if (pte_soft_dirty(pteval)) |
| 2238 | swp_pte = pte_swp_mksoft_dirty(swp_pte); |
| 2239 | if (pte_uffd_wp(pteval)) |
| 2240 | swp_pte = pte_swp_mkuffd_wp(swp_pte); |
| 2241 | |
| 2242 | set_pte_at(mm, address, pvmw.pte, swp_pte); |
| 2243 | |
| 2244 | /* |
| 2245 | * There is a reference on the page for the swap entry which has |
| 2246 | * been removed, so we shouldn't take another.
| 2247 | */ |
Hugh Dickins | cea86fe | 2022-02-14 18:26:39 -0800 | [diff] [blame] | 2248 | page_remove_rmap(subpage, vma, false); |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2249 | } |
| 2250 | |
| 2251 | mmu_notifier_invalidate_range_end(&range); |
| 2252 | |
| 2253 | return ret; |
| 2254 | } |
| 2255 | |
| 2256 | /** |
Matthew Wilcox (Oracle) | 0d25148 | 2022-01-28 16:03:42 -0500 | [diff] [blame] | 2257 | * folio_make_device_exclusive - Mark the folio exclusively owned by a device. |
| 2258 | * @folio: The folio to replace page table entries for. |
| 2259 | * @mm: The mm_struct where the folio is expected to be mapped. |
| 2260 | * @address: Address where the folio is expected to be mapped. |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2261 | * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks |
| 2262 | * |
Matthew Wilcox (Oracle) | 0d25148 | 2022-01-28 16:03:42 -0500 | [diff] [blame] | 2263 | * Tries to remove all the page table entries which are mapping this |
| 2264 | * folio and replace them with special device exclusive swap entries to |
| 2265 | * grant a device exclusive access to the folio. |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2266 | * |
Matthew Wilcox (Oracle) | 0d25148 | 2022-01-28 16:03:42 -0500 | [diff] [blame] | 2267 | * Context: Caller must hold the folio lock. |
| 2268 | * Return: false if the page is still mapped, or if it could not be unmapped |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2269 | * from the expected address. Otherwise returns true (success). |
| 2270 | */ |
Matthew Wilcox (Oracle) | 0d25148 | 2022-01-28 16:03:42 -0500 | [diff] [blame] | 2271 | static bool folio_make_device_exclusive(struct folio *folio, |
| 2272 | struct mm_struct *mm, unsigned long address, void *owner) |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2273 | { |
| 2274 | struct make_exclusive_args args = { |
| 2275 | .mm = mm, |
| 2276 | .address = address, |
| 2277 | .owner = owner, |
| 2278 | .valid = false, |
| 2279 | }; |
| 2280 | struct rmap_walk_control rwc = { |
| 2281 | .rmap_one = page_make_device_exclusive_one, |
Kefeng Wang | f3ad032 | 2022-09-27 14:38:26 +0800 | [diff] [blame] | 2282 | .done = folio_not_mapped, |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2283 | .anon_lock = folio_lock_anon_vma_read, |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2284 | .arg = &args, |
| 2285 | }; |
| 2286 | |
| 2287 | /* |
Matthew Wilcox (Oracle) | 0d25148 | 2022-01-28 16:03:42 -0500 | [diff] [blame] | 2288 | * Restrict to anonymous folios for now to avoid potential writeback |
| 2289 | * issues. |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2290 | */ |
Matthew Wilcox (Oracle) | 0d25148 | 2022-01-28 16:03:42 -0500 | [diff] [blame] | 2291 | if (!folio_test_anon(folio)) |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2292 | return false; |
| 2293 | |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2294 | rmap_walk(folio, &rwc); |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2295 | |
Matthew Wilcox (Oracle) | 0d25148 | 2022-01-28 16:03:42 -0500 | [diff] [blame] | 2296 | return args.valid && !folio_mapcount(folio); |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2297 | } |
| 2298 | |
| 2299 | /** |
| 2300 | * make_device_exclusive_range() - Mark a range for exclusive use by a device |
Adrian Huang | dd06230 | 2022-05-09 18:20:54 -0700 | [diff] [blame] | 2301 | * @mm: mm_struct of associated target process |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2302 | * @start: start of the region to mark for exclusive device access |
| 2303 | * @end: end address of region |
| 2304 | * @pages: returns the pages which were successfully marked for exclusive access |
| 2305 | * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering |
| 2306 | * |
| 2307 | * Returns: number of pages found in the range by GUP. A page is marked for |
| 2308 | * exclusive access only if the page pointer is non-NULL. |
| 2309 | * |
| 2310 | * This function finds ptes mapping page(s) to the given address range, locks |
| 2311 | * them and replaces mappings with special swap entries preventing userspace CPU |
| 2312 | * access. On fault these entries are replaced with the original mapping after |
| 2313 | * calling MMU notifiers. |
| 2314 | * |
| 2315 | * A driver using this to program access from a device must use a mmu notifier |
| 2316 | * critical section to hold a device specific lock during programming. Once |
| 2317 | * programming is complete it should drop the page lock and reference, after
| 2318 | * which point CPU access to the page will revoke the exclusive access.
| 2319 | */ |
| 2320 | int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, |
| 2321 | unsigned long end, struct page **pages, |
| 2322 | void *owner) |
| 2323 | { |
| 2324 | long npages = (end - start) >> PAGE_SHIFT; |
| 2325 | long i; |
| 2326 | |
| 2327 | npages = get_user_pages_remote(mm, start, npages, |
| 2328 | FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, |
| 2329 | pages, NULL, NULL); |
| 2330 | if (npages < 0) |
| 2331 | return npages; |
| 2332 | |
| 2333 | for (i = 0; i < npages; i++, start += PAGE_SIZE) { |
Matthew Wilcox (Oracle) | 0d25148 | 2022-01-28 16:03:42 -0500 | [diff] [blame] | 2334 | struct folio *folio = page_folio(pages[i]); |
| 2335 | if (PageTail(pages[i]) || !folio_trylock(folio)) { |
| 2336 | folio_put(folio); |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2337 | pages[i] = NULL; |
| 2338 | continue; |
| 2339 | } |
| 2340 | |
Matthew Wilcox (Oracle) | 0d25148 | 2022-01-28 16:03:42 -0500 | [diff] [blame] | 2341 | if (!folio_make_device_exclusive(folio, mm, start, owner)) { |
| 2342 | folio_unlock(folio); |
| 2343 | folio_put(folio); |
Alistair Popple | b756a3b | 2021-06-30 18:54:25 -0700 | [diff] [blame] | 2344 | pages[i] = NULL; |
| 2345 | } |
| 2346 | } |
| 2347 | |
| 2348 | return npages; |
| 2349 | } |
| 2350 | EXPORT_SYMBOL_GPL(make_device_exclusive_range); |
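| |
| | /*
| | * Hedged usage sketch (editor's illustration): a driver granting its
| | * device exclusive access to one page might do, inside an
| | * MMU_NOTIFY_EXCLUSIVE-aware critical section (drv, drv->lock and
| | * program_device_pte() are hypothetical):
| | *
| | *	struct page *page = NULL;
| | *
| | *	mutex_lock(&drv->lock);
| | *	npages = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
| | *					     &page, drv);
| | *	if (npages == 1 && page)
| | *		program_device_pte(drv, addr, page);
| | *	mutex_unlock(&drv->lock);
| | *	then unlock and put the page, as described above.
| | *
| | * The driver's notifier callback, filtered on @owner == drv, must
| | * tear down the device mapping when CPU access revokes exclusivity.
| | */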
| 2351 | #endif |
| 2352 | |
Peter Zijlstra | 01d8b20 | 2011-03-22 16:32:49 -0700 | [diff] [blame] | 2353 | void __put_anon_vma(struct anon_vma *anon_vma) |
Rik van Riel | 7654506 | 2010-08-09 17:18:41 -0700 | [diff] [blame] | 2354 | { |
Peter Zijlstra | 01d8b20 | 2011-03-22 16:32:49 -0700 | [diff] [blame] | 2355 | struct anon_vma *root = anon_vma->root; |
Rik van Riel | 7654506 | 2010-08-09 17:18:41 -0700 | [diff] [blame] | 2356 | |
Andrey Ryabinin | 624483f | 2014-06-06 19:09:30 +0400 | [diff] [blame] | 2357 | anon_vma_free(anon_vma); |
Peter Zijlstra | 01d8b20 | 2011-03-22 16:32:49 -0700 | [diff] [blame] | 2358 | if (root != anon_vma && atomic_dec_and_test(&root->refcount)) |
| 2359 | anon_vma_free(root); |
Rik van Riel | 7654506 | 2010-08-09 17:18:41 -0700 | [diff] [blame] | 2360 | } |
Rik van Riel | 7654506 | 2010-08-09 17:18:41 -0700 | [diff] [blame] | 2361 | |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2362 | static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, |
Minchan Kim | 6d4675e | 2022-05-19 14:08:54 -0700 | [diff] [blame] | 2363 | struct rmap_walk_control *rwc) |
Joonsoo Kim | faecd8d | 2014-01-21 15:49:46 -0800 | [diff] [blame] | 2364 | { |
| 2365 | struct anon_vma *anon_vma; |
| 2366 | |
Joonsoo Kim | 0dd1c7b | 2014-01-21 15:49:49 -0800 | [diff] [blame] | 2367 | if (rwc->anon_lock) |
Minchan Kim | 6d4675e | 2022-05-19 14:08:54 -0700 | [diff] [blame] | 2368 | return rwc->anon_lock(folio, rwc); |
Joonsoo Kim | 0dd1c7b | 2014-01-21 15:49:49 -0800 | [diff] [blame] | 2369 | |
Joonsoo Kim | faecd8d | 2014-01-21 15:49:46 -0800 | [diff] [blame] | 2370 | /* |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2371 | * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() |
Joonsoo Kim | faecd8d | 2014-01-21 15:49:46 -0800 | [diff] [blame] | 2372 | * because that depends on page_mapped(); but not all its usages |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 2373 | * are holding mmap_lock. Users without mmap_lock are required to |
Joonsoo Kim | faecd8d | 2014-01-21 15:49:46 -0800 | [diff] [blame] | 2374 | * take a reference count to prevent the anon_vma disappearing |
| 2375 | */ |
Matthew Wilcox (Oracle) | e05b345 | 2022-01-29 11:52:52 -0500 | [diff] [blame] | 2376 | anon_vma = folio_anon_vma(folio); |
Joonsoo Kim | faecd8d | 2014-01-21 15:49:46 -0800 | [diff] [blame] | 2377 | if (!anon_vma) |
| 2378 | return NULL; |
| 2379 | |
Minchan Kim | 6d4675e | 2022-05-19 14:08:54 -0700 | [diff] [blame] | 2380 | if (anon_vma_trylock_read(anon_vma)) |
| 2381 | goto out; |
| 2382 | |
| 2383 | if (rwc->try_lock) { |
| 2384 | anon_vma = NULL; |
| 2385 | rwc->contended = true; |
| 2386 | goto out; |
| 2387 | } |
| 2388 | |
Joonsoo Kim | faecd8d | 2014-01-21 15:49:46 -0800 | [diff] [blame] | 2389 | anon_vma_lock_read(anon_vma); |
Minchan Kim | 6d4675e | 2022-05-19 14:08:54 -0700 | [diff] [blame] | 2390 | out: |
Joonsoo Kim | faecd8d | 2014-01-21 15:49:46 -0800 | [diff] [blame] | 2391 | return anon_vma; |
| 2392 | } |
| 2393 | |
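| | /*
| | * Editor's note (hedged): a caller that must not sleep can set
| | * rwc->try_lock; if the walk then reports rwc->contended, it is
| | * expected to back off (e.g. treat the folio as referenced in
| | * reclaim) rather than block on the anon_vma lock.
| | */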
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2394 | /* |
Joonsoo Kim | e8351ac | 2014-01-21 15:49:52 -0800 | [diff] [blame] | 2395 | * rmap_walk_anon - do something to an anonymous folio using the object-based
| 2396 | * rmap method
| 2397 | * @folio: the folio to be handled
| 2398 | * @rwc: control variable according to each walk type
| | * @locked: true if the anon_vma lock is already held by the caller
| 2399 | *
| 2400 | * Find all the mappings of a folio using the mapping pointer and the vma chains
| 2401 | * contained in the anon_vma struct it points to.
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2402 | */ |
Matthew Wilcox (Oracle) | 84fbbe2 | 2022-01-29 16:16:54 -0500 | [diff] [blame] | 2403 | static void rmap_walk_anon(struct folio *folio, |
Minchan Kim | 6d4675e | 2022-05-19 14:08:54 -0700 | [diff] [blame] | 2404 | struct rmap_walk_control *rwc, bool locked) |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2405 | { |
| 2406 | struct anon_vma *anon_vma; |
Kirill A. Shutemov | a8fa41ad | 2017-02-24 14:57:54 -0800 | [diff] [blame] | 2407 | pgoff_t pgoff_start, pgoff_end; |
Rik van Riel | 5beb493 | 2010-03-05 13:42:07 -0800 | [diff] [blame] | 2408 | struct anon_vma_chain *avc; |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2409 | |
Kirill A. Shutemov | b977319 | 2016-03-17 14:20:01 -0700 | [diff] [blame] | 2410 | if (locked) { |
Matthew Wilcox (Oracle) | e05b345 | 2022-01-29 11:52:52 -0500 | [diff] [blame] | 2411 | anon_vma = folio_anon_vma(folio); |
Kirill A. Shutemov | b977319 | 2016-03-17 14:20:01 -0700 | [diff] [blame] | 2412 | /* anon_vma disappear under us? */ |
Matthew Wilcox (Oracle) | e05b345 | 2022-01-29 11:52:52 -0500 | [diff] [blame] | 2413 | VM_BUG_ON_FOLIO(!anon_vma, folio); |
Kirill A. Shutemov | b977319 | 2016-03-17 14:20:01 -0700 | [diff] [blame] | 2414 | } else { |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2415 | anon_vma = rmap_walk_anon_lock(folio, rwc); |
Kirill A. Shutemov | b977319 | 2016-03-17 14:20:01 -0700 | [diff] [blame] | 2416 | } |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2417 | if (!anon_vma) |
Minchan Kim | 1df631a | 2017-05-03 14:54:23 -0700 | [diff] [blame] | 2418 | return; |
Joonsoo Kim | faecd8d | 2014-01-21 15:49:46 -0800 | [diff] [blame] | 2419 | |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2420 | pgoff_start = folio_pgoff(folio); |
| 2421 | pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; |
Kirill A. Shutemov | a8fa41ad | 2017-02-24 14:57:54 -0800 | [diff] [blame] | 2422 | anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, |
| 2423 | pgoff_start, pgoff_end) { |
Rik van Riel | 5beb493 | 2010-03-05 13:42:07 -0800 | [diff] [blame] | 2424 | struct vm_area_struct *vma = avc->vma; |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2425 | unsigned long address = vma_address(&folio->page, vma); |
Joonsoo Kim | 0dd1c7b | 2014-01-21 15:49:49 -0800 | [diff] [blame] | 2426 | |
Hugh Dickins | 494334e | 2021-06-15 18:23:56 -0700 | [diff] [blame] | 2427 | VM_BUG_ON_VMA(address == -EFAULT, vma); |
Andrea Arcangeli | ad12695 | 2015-11-05 18:49:07 -0800 | [diff] [blame] | 2428 | cond_resched(); |
| 2429 | |
Joonsoo Kim | 0dd1c7b | 2014-01-21 15:49:49 -0800 | [diff] [blame] | 2430 | if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) |
| 2431 | continue; |
| 2432 | |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2433 | if (!rwc->rmap_one(folio, vma, address, rwc->arg)) |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2434 | break; |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2435 | if (rwc->done && rwc->done(folio)) |
Joonsoo Kim | 0dd1c7b | 2014-01-21 15:49:49 -0800 | [diff] [blame] | 2436 | break; |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2437 | } |
Kirill A. Shutemov | b977319 | 2016-03-17 14:20:01 -0700 | [diff] [blame] | 2438 | |
| 2439 | if (!locked) |
| 2440 | anon_vma_unlock_read(anon_vma); |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2441 | } |
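
/*
 * A minimal sketch (not part of this file) of the rmap_one callback that
 * the walk above invokes once per mapping.  The name
 * count_mapped_vmas_one() is hypothetical; returning true continues the
 * walk, returning false aborts it early.
 */
static bool count_mapped_vmas_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address, void *arg)
{
	int *nr_vmas = arg;	/* accumulator handed in via rwc->arg */

	(*nr_vmas)++;
	return true;		/* keep walking the remaining vmas */
}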
| 2442 | |
Joonsoo Kim | e8351ac | 2014-01-21 15:49:52 -0800 | [diff] [blame] | 2443 | /* |
 | 2444 | * rmap_walk_file - do something to a file folio using the object-based rmap method |
 | 2445 | * @folio: the folio to be handled |
 | 2446 | * @rwc: control variable according to each walk type |
 | 2447 | * @locked: caller holds relevant rmap lock |
 | 2448 | * |
 | 2449 | * Find all the mappings of a folio using the mapping pointer and the vma chains contained in the address_space struct it points to.  (A try_lock sketch follows this function.) |
Joonsoo Kim | e8351ac | 2014-01-21 15:49:52 -0800 | [diff] [blame] | 2450 | */ |
Matthew Wilcox (Oracle) | 84fbbe2 | 2022-01-29 16:16:54 -0500 | [diff] [blame] | 2451 | static void rmap_walk_file(struct folio *folio, |
Minchan Kim | 6d4675e | 2022-05-19 14:08:54 -0700 | [diff] [blame] | 2452 | struct rmap_walk_control *rwc, bool locked) |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2453 | { |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2454 | struct address_space *mapping = folio_mapping(folio); |
Kirill A. Shutemov | a8fa41ad | 2017-02-24 14:57:54 -0800 | [diff] [blame] | 2455 | pgoff_t pgoff_start, pgoff_end; |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2456 | struct vm_area_struct *vma; |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2457 | |
Joonsoo Kim | 9f32624 | 2014-01-21 15:49:53 -0800 | [diff] [blame] | 2458 | /* |
 | 2459 | * The folio lock not only makes sure that folio->mapping cannot |
 | 2460 | * suddenly be NULLified by truncation, it makes sure that the |
 | 2461 | * structure at mapping cannot be freed and reused yet, |
Davidlohr Bueso | c8c06ef | 2014-12-12 16:54:24 -0800 | [diff] [blame] | 2462 | * so we can safely take mapping->i_mmap_rwsem. |
Joonsoo Kim | 9f32624 | 2014-01-21 15:49:53 -0800 | [diff] [blame] | 2463 | */ |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2464 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
Joonsoo Kim | 9f32624 | 2014-01-21 15:49:53 -0800 | [diff] [blame] | 2465 | |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2466 | if (!mapping) |
Minchan Kim | 1df631a | 2017-05-03 14:54:23 -0700 | [diff] [blame] | 2467 | return; |
Davidlohr Bueso | 3dec0ba | 2014-12-12 16:54:27 -0800 | [diff] [blame] | 2468 | |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2469 | pgoff_start = folio_pgoff(folio); |
| 2470 | pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; |
Minchan Kim | 6d4675e | 2022-05-19 14:08:54 -0700 | [diff] [blame] | 2471 | if (!locked) { |
| 2472 | if (i_mmap_trylock_read(mapping)) |
| 2473 | goto lookup; |
| 2474 | |
| 2475 | if (rwc->try_lock) { |
| 2476 | rwc->contended = true; |
| 2477 | return; |
| 2478 | } |
| 2479 | |
Kirill A. Shutemov | b977319 | 2016-03-17 14:20:01 -0700 | [diff] [blame] | 2480 | i_mmap_lock_read(mapping); |
Minchan Kim | 6d4675e | 2022-05-19 14:08:54 -0700 | [diff] [blame] | 2481 | } |
| 2482 | lookup: |
Kirill A. Shutemov | a8fa41ad | 2017-02-24 14:57:54 -0800 | [diff] [blame] | 2483 | vma_interval_tree_foreach(vma, &mapping->i_mmap, |
| 2484 | pgoff_start, pgoff_end) { |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2485 | unsigned long address = vma_address(&folio->page, vma); |
Joonsoo Kim | 0dd1c7b | 2014-01-21 15:49:49 -0800 | [diff] [blame] | 2486 | |
Hugh Dickins | 494334e | 2021-06-15 18:23:56 -0700 | [diff] [blame] | 2487 | VM_BUG_ON_VMA(address == -EFAULT, vma); |
Andrea Arcangeli | ad12695 | 2015-11-05 18:49:07 -0800 | [diff] [blame] | 2488 | cond_resched(); |
| 2489 | |
Joonsoo Kim | 0dd1c7b | 2014-01-21 15:49:49 -0800 | [diff] [blame] | 2490 | if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) |
| 2491 | continue; |
| 2492 | |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2493 | if (!rwc->rmap_one(folio, vma, address, rwc->arg)) |
Joonsoo Kim | 0dd1c7b | 2014-01-21 15:49:49 -0800 | [diff] [blame] | 2494 | goto done; |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2495 | if (rwc->done && rwc->done(folio)) |
Joonsoo Kim | 0dd1c7b | 2014-01-21 15:49:49 -0800 | [diff] [blame] | 2496 | goto done; |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2497 | } |
Joonsoo Kim | 0dd1c7b | 2014-01-21 15:49:49 -0800 | [diff] [blame] | 2498 | |
Joonsoo Kim | 0dd1c7b | 2014-01-21 15:49:49 -0800 | [diff] [blame] | 2499 | done: |
Kirill A. Shutemov | b977319 | 2016-03-17 14:20:01 -0700 | [diff] [blame] | 2500 | if (!locked) |
| 2501 | i_mmap_unlock_read(mapping); |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2502 | } |
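
/*
 * A sketch of the try_lock protocol handled above, loosely modelled on
 * folio_referenced(): a reclaim-path caller asks the walk not to sleep on
 * a contended rmap lock and then checks rwc->contended to tell "walked
 * all mappings" apart from "could not take the lock".  The wrapper name
 * walk_unless_contended() is hypothetical.
 */
static void __maybe_unused walk_unless_contended(struct folio *folio,
		struct rmap_walk_control *rwc)
{
	rwc->try_lock = true;
	rmap_walk(folio, rwc);
	if (rwc->contended) {
		/* rmap lock was held elsewhere: back off, retry later */
	}
}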
| 2503 | |
Minchan Kim | 6d4675e | 2022-05-19 14:08:54 -0700 | [diff] [blame] | 2504 | void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2505 | { |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2506 | if (unlikely(folio_test_ksm(folio))) |
| 2507 | rmap_walk_ksm(folio, rwc); |
| 2508 | else if (folio_test_anon(folio)) |
| 2509 | rmap_walk_anon(folio, rwc, false); |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2510 | else |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2511 | rmap_walk_file(folio, rwc, false); |
Kirill A. Shutemov | b977319 | 2016-03-17 14:20:01 -0700 | [diff] [blame] | 2512 | } |
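
/*
 * A sketch of a caller driving the dispatch above, paired with the
 * hypothetical count_mapped_vmas_one() callback sketched after
 * rmap_walk_anon().  File folios must be locked, as rmap_walk_file()
 * asserts; KSM, anon and file folios are all routed transparently.
 */
static int __maybe_unused count_mapped_vmas(struct folio *folio)
{
	int nr_vmas = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = count_mapped_vmas_one,
		.arg = &nr_vmas,
	};

	rmap_walk(folio, &rwc);
	return nr_vmas;
}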
| 2513 | |
 | 2514 | /* Like rmap_walk, but the caller holds the relevant rmap lock; see the sketch below */ |
Minchan Kim | 6d4675e | 2022-05-19 14:08:54 -0700 | [diff] [blame] | 2515 | void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) |
Kirill A. Shutemov | b977319 | 2016-03-17 14:20:01 -0700 | [diff] [blame] | 2516 | { |
| 2517 | /* no ksm support for now */ |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2518 | VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); |
| 2519 | if (folio_test_anon(folio)) |
| 2520 | rmap_walk_anon(folio, rwc, true); |
Kirill A. Shutemov | b977319 | 2016-03-17 14:20:01 -0700 | [diff] [blame] | 2521 | else |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 2522 | rmap_walk_file(folio, rwc, true); |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 2523 | } |
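
/*
 * A sketch of the pattern the _locked variant exists for, modelled on
 * remove_migration_ptes() in mm/migrate.c: when the caller already holds
 * the relevant rmap lock, the walk must not try to take it again.  The
 * wrapper name walk_maybe_locked() is hypothetical.
 */
static void __maybe_unused walk_maybe_locked(struct folio *folio,
		struct rmap_walk_control *rwc, bool locked)
{
	if (locked)
		rmap_walk_locked(folio, rwc);	/* caller holds the rmap lock */
	else
		rmap_walk(folio, rwc);		/* walk takes and drops it */
}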
Naoya Horiguchi | 0fe6e20 | 2010-05-28 09:29:16 +0900 | [diff] [blame] | 2524 | |
Naoya Horiguchi | e3390f6 | 2010-06-15 13:18:13 +0900 | [diff] [blame] | 2525 | #ifdef CONFIG_HUGETLB_PAGE |
Naoya Horiguchi | 0fe6e20 | 2010-05-28 09:29:16 +0900 | [diff] [blame] | 2526 | /* |
Kirill Tkhai | 451b951 | 2018-12-28 00:39:31 -0800 | [diff] [blame] | 2527 | * The following two functions are for anonymous (private mapped) hugepages. |
Naoya Horiguchi | 0fe6e20 | 2010-05-28 09:29:16 +0900 | [diff] [blame] | 2528 | * Unlike common anonymous pages, anonymous hugepages have no accounting code |
| 2529 | * and no lru code, because we handle hugepages differently from common pages. |
David Hildenbrand | 28c5209 | 2022-05-09 18:20:43 -0700 | [diff] [blame] | 2530 | * |
 | 2531 | * RMAP_COMPOUND is ignored.  (A combined-usage sketch follows the #endif below.) |
Naoya Horiguchi | 0fe6e20 | 2010-05-28 09:29:16 +0900 | [diff] [blame] | 2532 | */ |
David Hildenbrand | 28c5209 | 2022-05-09 18:20:43 -0700 | [diff] [blame] | 2533 | void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, |
| 2534 | unsigned long address, rmap_t flags) |
Naoya Horiguchi | 0fe6e20 | 2010-05-28 09:29:16 +0900 | [diff] [blame] | 2535 | { |
Matthew Wilcox (Oracle) | db4e5db | 2023-01-11 14:28:56 +0000 | [diff] [blame] | 2536 | struct folio *folio = page_folio(page); |
Naoya Horiguchi | 0fe6e20 | 2010-05-28 09:29:16 +0900 | [diff] [blame] | 2537 | struct anon_vma *anon_vma = vma->anon_vma; |
| 2538 | int first; |
Naoya Horiguchi | a850ea3 | 2010-09-10 13:23:06 +0900 | [diff] [blame] | 2539 | |
Matthew Wilcox (Oracle) | db4e5db | 2023-01-11 14:28:56 +0000 | [diff] [blame] | 2540 | BUG_ON(!folio_test_locked(folio)); |
Naoya Horiguchi | 0fe6e20 | 2010-05-28 09:29:16 +0900 | [diff] [blame] | 2541 | BUG_ON(!anon_vma); |
Liam R. Howlett | 0503ea8 | 2023-01-20 11:26:49 -0500 | [diff] [blame] | 2542 | /* address might be in next vma when migration races vma_merge */ |
Matthew Wilcox (Oracle) | db4e5db | 2023-01-11 14:28:56 +0000 | [diff] [blame] | 2543 | first = atomic_inc_and_test(&folio->_entire_mapcount); |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 2544 | VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); |
| 2545 | VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); |
Naoya Horiguchi | 0fe6e20 | 2010-05-28 09:29:16 +0900 | [diff] [blame] | 2546 | if (first) |
Matthew Wilcox (Oracle) | 5b4bd90 | 2023-01-16 19:29:59 +0000 | [diff] [blame] | 2547 | __page_set_anon_rmap(folio, page, vma, address, |
David Hildenbrand | 28c5209 | 2022-05-09 18:20:43 -0700 | [diff] [blame] | 2548 | !!(flags & RMAP_EXCLUSIVE)); |
Naoya Horiguchi | 0fe6e20 | 2010-05-28 09:29:16 +0900 | [diff] [blame] | 2549 | } |
| 2550 | |
Sidhartha Kumar | d0ce0e4 | 2023-01-25 09:05:33 -0800 | [diff] [blame] | 2551 | void hugepage_add_new_anon_rmap(struct folio *folio, |
Naoya Horiguchi | 0fe6e20 | 2010-05-28 09:29:16 +0900 | [diff] [blame] | 2552 | struct vm_area_struct *vma, unsigned long address) |
| 2553 | { |
| 2554 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); |
Hugh Dickins | cb67f42 | 2022-11-02 18:51:38 -0700 | [diff] [blame] | 2555 | /* increment count (starts at -1) */ |
Matthew Wilcox (Oracle) | db4e5db | 2023-01-11 14:28:56 +0000 | [diff] [blame] | 2556 | atomic_set(&folio->_entire_mapcount, 0); |
| 2557 | folio_clear_hugetlb_restore_reserve(folio); |
Sidhartha Kumar | d0ce0e4 | 2023-01-25 09:05:33 -0800 | [diff] [blame] | 2558 | __page_set_anon_rmap(folio, &folio->page, vma, address, 1); |
Naoya Horiguchi | 0fe6e20 | 2010-05-28 09:29:16 +0900 | [diff] [blame] | 2559 | } |
Naoya Horiguchi | e3390f6 | 2010-06-15 13:18:13 +0900 | [diff] [blame] | 2560 | #endif /* CONFIG_HUGETLB_PAGE */ |
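
#ifdef CONFIG_HUGETLB_PAGE
/*
 * A sketch (hypothetical helper, not part of this file) tying the two
 * hugetlb entry points above together: a freshly allocated anonymous
 * hugepage takes hugepage_add_new_anon_rmap(), while re-establishing a
 * mapping of an existing one, as migration does, takes
 * hugepage_add_anon_rmap(), passing RMAP_EXCLUSIVE when the page is
 * exclusive to a single process.
 */
static void __maybe_unused hugepage_add_rmap_sketch(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address,
		bool is_new, bool exclusive)
{
	if (is_new)
		hugepage_add_new_anon_rmap(folio, vma, address);
	else
		hugepage_add_anon_rmap(&folio->page, vma, address,
				       exclusive ? RMAP_EXCLUSIVE : RMAP_NONE);
}
#endif /* CONFIG_HUGETLB_PAGE */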