// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009 Red Hat, Inc.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/mm_types.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
#include <linux/compat.h>
#include <linux/pgalloc_tag.h>
#include <linux/pagewalk.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker *deferred_split_shrinker;
static unsigned long deferred_split_count(struct shrinker *shrink,
					  struct shrink_control *sc);
static unsigned long deferred_split_scan(struct shrinker *shrink,
					 struct shrink_control *sc);
static bool split_underused_thp = true;

static atomic_t huge_zero_refcount;
struct folio *huge_zero_folio __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;
unsigned long huge_anon_orders_always __read_mostly;
unsigned long huge_anon_orders_madvise __read_mostly;
unsigned long huge_anon_orders_inherit __read_mostly;
static bool anon_orders_configured __initdata;

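/*
 * Return the subset of @orders for which @vma may be backed by THPs of that
 * order, taking the VMA type, @vm_flags and (depending on @tva_flags) the
 * sysfs settings into account. Returns 0 if no requested order is usable.
 */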
unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 unsigned long vm_flags,
					 unsigned long tva_flags,
					 unsigned long orders)
{
	bool smaps = tva_flags & TVA_SMAPS;
	bool in_pf = tva_flags & TVA_IN_PF;
	bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS;
	unsigned long supported_orders;

	/* Check the intersection of requested and supported orders. */
	if (vma_is_anonymous(vma))
		supported_orders = THP_ORDERS_ALL_ANON;
	else if (vma_is_special_huge(vma))
		supported_orders = THP_ORDERS_ALL_SPECIAL;
	else
		supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;

	orders &= supported_orders;
	if (!orders)
		return 0;

	if (!vma->vm_mm)		/* vdso */
		return 0;

	/*
	 * THP may be explicitly disabled through madvise or prctl, and some
	 * architectures disable THP for some mappings, for example s390 KVM.
	 */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return 0;
	/*
	 * Bail out if the hardware/firmware has marked hugepage support
	 * as disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
		return 0;

	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
	if (vma_is_dax(vma))
		return in_pf ? orders : 0;

	/*
	 * khugepaged does not work on special VMAs and hugetlb VMAs.
	 * Must be checked after dax since some dax mappings may have
	 * VM_MIXEDMAP set.
	 */
	if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
		return 0;

	/*
	 * Check alignment for file vma and size for both file and anon vma by
	 * filtering out the unsuitable orders.
	 *
	 * Skip the check for page fault. Huge fault does the check in fault
	 * handlers.
	 */
	if (!in_pf) {
		int order = highest_order(orders);
		unsigned long addr;

		while (orders) {
			addr = vma->vm_end - (PAGE_SIZE << order);
			if (thp_vma_suitable_order(vma, addr, order))
				break;
			order = next_order(&orders, order);
		}

		if (!orders)
			return 0;
	}

	/*
	 * Enabled via shmem mount options or sysfs settings.
	 * Must be done before the hugepage flags check since shmem has its
	 * own flags.
	 */
	if (!in_pf && shmem_file(vma->vm_file))
		return shmem_allowable_huge_orders(file_inode(vma->vm_file),
						   vma, vma->vm_pgoff, 0,
						   !enforce_sysfs);

	if (!vma_is_anonymous(vma)) {
		/*
		 * Enforce sysfs THP requirements as necessary. Anonymous vmas
		 * were already handled in thp_vma_allowable_orders().
		 */
		if (enforce_sysfs &&
		    (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
						    !hugepage_global_always())))
			return 0;

		/*
		 * Trust that ->huge_fault() handlers know what they are doing
		 * in the fault path.
		 */
		if ((in_pf || smaps) && vma->vm_ops->huge_fault)
			return orders;
		/* Only a regular file is valid in the collapse path. */
		if ((!in_pf || smaps) && file_thp_enabled(vma))
			return orders;
		return 0;
	}

	if (vma_is_temporary_stack(vma))
		return 0;

	/*
	 * The THPeligible bit of smaps should show 1 for proper VMAs even
	 * though anon_vma is not initialized yet.
	 *
	 * Allow the page fault path since anon_vma may not be initialized
	 * until the first page fault.
	 */
	if (!vma->anon_vma)
		return (smaps || in_pf) ? orders : 0;

	return orders;
}

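/*
 * Take a reference on the shared huge zero folio, allocating it on first
 * use. The extra reference set up here is dropped by the huge_zero_page
 * shrinker once all other users are gone.
 */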
static bool get_huge_zero_page(void)
{
	struct folio *zero_folio;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_folio) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	/* Ensure the zero folio won't have the large_rmappable flag set. */
	folio_clear_large_rmappable(zero_folio);
	preempt_disable();
	if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
		preempt_enable();
		folio_put(zero_folio);
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio));

	/* We take an additional reference here. It will be put back by the shrinker. */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	return true;
}

static void put_huge_zero_page(void)
{
	/*
	 * The counter should never go to zero here. Only the shrinker can
	 * put the last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_folio);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_folio);
}

void mm_put_huge_zero_folio(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* We can free the zero page only if the last reference remains. */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct folio *zero_folio = xchg(&huge_zero_folio, NULL);
		BUG_ON(zero_folio == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		folio_put(zero_folio);
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker *huge_zero_page_shrinker;

#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);

static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

static ssize_t split_underused_thp_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", split_underused_thp);
}

static ssize_t split_underused_thp_store(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 const char *buf, size_t count)
{
	int err = kstrtobool(buf, &split_underused_thp);

	if (err < 0)
		return err;

	return count;
}

static struct kobj_attribute split_underused_thp_attr = __ATTR(
	shrink_underused, 0644, split_underused_thp_show, split_underused_thp_store);

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	&split_underused_thp_attr.attr,
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
static void thpsize_release(struct kobject *kobj);
static DEFINE_SPINLOCK(huge_anon_orders_lock);
static LIST_HEAD(thpsize_list);

static ssize_t anon_enabled_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	int order = to_thpsize(kobj)->order;
	const char *output;

	if (test_bit(order, &huge_anon_orders_always))
		output = "[always] inherit madvise never";
	else if (test_bit(order, &huge_anon_orders_inherit))
		output = "always [inherit] madvise never";
	else if (test_bit(order, &huge_anon_orders_madvise))
		output = "always inherit [madvise] never";
	else
		output = "always inherit madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t anon_enabled_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t count)
{
	int order = to_thpsize(kobj)->order;
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_inherit);
		clear_bit(order, &huge_anon_orders_madvise);
		set_bit(order, &huge_anon_orders_always);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "inherit")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_madvise);
		set_bit(order, &huge_anon_orders_inherit);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "madvise")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_inherit);
		set_bit(order, &huge_anon_orders_madvise);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "never")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_inherit);
		clear_bit(order, &huge_anon_orders_madvise);
		spin_unlock(&huge_anon_orders_lock);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err;

		err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute anon_enabled_attr =
	__ATTR(enabled, 0644, anon_enabled_show, anon_enabled_store);

static struct attribute *anon_ctrl_attrs[] = {
	&anon_enabled_attr.attr,
	NULL,
};

static const struct attribute_group anon_ctrl_attr_grp = {
	.attrs = anon_ctrl_attrs,
};

static struct attribute *file_ctrl_attrs[] = {
#ifdef CONFIG_SHMEM
	&thpsize_shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group file_ctrl_attr_grp = {
	.attrs = file_ctrl_attrs,
};

static struct attribute *any_ctrl_attrs[] = {
	NULL,
};

static const struct attribute_group any_ctrl_attr_grp = {
	.attrs = any_ctrl_attrs,
};

static const struct kobj_type thpsize_ktype = {
	.release = &thpsize_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

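/*
 * Per-CPU, per-order counters for mTHP events. They are summed over all
 * possible CPUs when read through the per-size sysfs "stats" directories.
 */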
DEFINE_PER_CPU(struct mthp_stat, mthp_stats) = {{{0}}};

static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mthp_stat *this = &per_cpu(mthp_stats, cpu);

		sum += this->stats[order][item];
	}

	return sum;
}

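/*
 * Generate a read-only sysfs attribute that reports the summed counter for
 * the mTHP order of the containing hugepages-<size>kB directory.
 */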
| 602 | #define DEFINE_MTHP_STAT_ATTR(_name, _index) \ |
| 603 | static ssize_t _name##_show(struct kobject *kobj, \ |
| 604 | struct kobj_attribute *attr, char *buf) \ |
| 605 | { \ |
| 606 | int order = to_thpsize(kobj)->order; \ |
| 607 | \ |
| 608 | return sysfs_emit(buf, "%lu\n", sum_mthp_stat(order, _index)); \ |
| 609 | } \ |
| 610 | static struct kobj_attribute _name##_attr = __ATTR_RO(_name) |
| 611 | |
| 612 | DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC); |
| 613 | DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK); |
| 614 | DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); |
Baolin Wang | 0d648dd | 2024-05-23 10:36:39 +0800 | [diff] [blame] | 615 | DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT); |
| 616 | DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK); |
Ryan Roberts | 70e59a7 | 2024-08-08 12:18:47 +0100 | [diff] [blame] | 617 | #ifdef CONFIG_SHMEM |
Ryan Roberts | 63d9866 | 2024-07-10 10:55:01 +0100 | [diff] [blame] | 618 | DEFINE_MTHP_STAT_ATTR(shmem_alloc, MTHP_STAT_SHMEM_ALLOC); |
| 619 | DEFINE_MTHP_STAT_ATTR(shmem_fallback, MTHP_STAT_SHMEM_FALLBACK); |
| 620 | DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE); |
Ryan Roberts | 70e59a7 | 2024-08-08 12:18:47 +0100 | [diff] [blame] | 621 | #endif |
Lance Yang | f216c84 | 2024-06-28 21:07:49 +0800 | [diff] [blame] | 622 | DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT); |
| 623 | DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED); |
| 624 | DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED); |
Barry Song | 5d65c8d | 2024-08-24 13:04:40 +1200 | [diff] [blame] | 625 | DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON); |
Barry Song | 8175ebf | 2024-08-24 13:04:41 +1200 | [diff] [blame] | 626 | DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED); |
Barry Song | ec33687 | 2024-04-12 23:48:55 +1200 | [diff] [blame] | 627 | |
Ryan Roberts | 70e59a7 | 2024-08-08 12:18:47 +0100 | [diff] [blame] | 628 | static struct attribute *anon_stats_attrs[] = { |
Barry Song | ec33687 | 2024-04-12 23:48:55 +1200 | [diff] [blame] | 629 | &anon_fault_alloc_attr.attr, |
| 630 | &anon_fault_fallback_attr.attr, |
| 631 | &anon_fault_fallback_charge_attr.attr, |
Ryan Roberts | 70e59a7 | 2024-08-08 12:18:47 +0100 | [diff] [blame] | 632 | #ifndef CONFIG_SHMEM |
Baolin Wang | 0d648dd | 2024-05-23 10:36:39 +0800 | [diff] [blame] | 633 | &swpout_attr.attr, |
| 634 | &swpout_fallback_attr.attr, |
Ryan Roberts | 70e59a7 | 2024-08-08 12:18:47 +0100 | [diff] [blame] | 635 | #endif |
Lance Yang | f216c84 | 2024-06-28 21:07:49 +0800 | [diff] [blame] | 636 | &split_deferred_attr.attr, |
Barry Song | 5d65c8d | 2024-08-24 13:04:40 +1200 | [diff] [blame] | 637 | &nr_anon_attr.attr, |
Barry Song | 8175ebf | 2024-08-24 13:04:41 +1200 | [diff] [blame] | 638 | &nr_anon_partially_mapped_attr.attr, |
Barry Song | ec33687 | 2024-04-12 23:48:55 +1200 | [diff] [blame] | 639 | NULL, |
| 640 | }; |
| 641 | |
Ryan Roberts | 70e59a7 | 2024-08-08 12:18:47 +0100 | [diff] [blame] | 642 | static struct attribute_group anon_stats_attr_grp = { |
Barry Song | ec33687 | 2024-04-12 23:48:55 +1200 | [diff] [blame] | 643 | .name = "stats", |
Ryan Roberts | 70e59a7 | 2024-08-08 12:18:47 +0100 | [diff] [blame] | 644 | .attrs = anon_stats_attrs, |
Barry Song | ec33687 | 2024-04-12 23:48:55 +1200 | [diff] [blame] | 645 | }; |
| 646 | |
Ryan Roberts | 70e59a7 | 2024-08-08 12:18:47 +0100 | [diff] [blame] | 647 | static struct attribute *file_stats_attrs[] = { |
| 648 | #ifdef CONFIG_SHMEM |
| 649 | &shmem_alloc_attr.attr, |
| 650 | &shmem_fallback_attr.attr, |
| 651 | &shmem_fallback_charge_attr.attr, |
| 652 | #endif |
| 653 | NULL, |
| 654 | }; |
| 655 | |
| 656 | static struct attribute_group file_stats_attr_grp = { |
| 657 | .name = "stats", |
| 658 | .attrs = file_stats_attrs, |
| 659 | }; |
| 660 | |
| 661 | static struct attribute *any_stats_attrs[] = { |
| 662 | #ifdef CONFIG_SHMEM |
| 663 | &swpout_attr.attr, |
| 664 | &swpout_fallback_attr.attr, |
| 665 | #endif |
| 666 | &split_attr.attr, |
| 667 | &split_failed_attr.attr, |
| 668 | NULL, |
| 669 | }; |
| 670 | |
| 671 | static struct attribute_group any_stats_attr_grp = { |
| 672 | .name = "stats", |
| 673 | .attrs = any_stats_attrs, |
| 674 | }; |
| 675 | |
| 676 | static int sysfs_add_group(struct kobject *kobj, |
| 677 | const struct attribute_group *grp) |
| 678 | { |
| 679 | int ret = -ENOENT; |
| 680 | |
| 681 | /* |
| 682 | * If the group is named, try to merge first, assuming the subdirectory |
| 683 | * was already created. This avoids the warning emitted by |
| 684 | * sysfs_create_group() if the directory already exists. |
| 685 | */ |
| 686 | if (grp->name) |
| 687 | ret = sysfs_merge_group(kobj, grp); |
| 688 | if (ret) |
| 689 | ret = sysfs_create_group(kobj, grp); |
| 690 | |
| 691 | return ret; |
| 692 | } |
| 693 | |
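/*
 * Create the /sys/kernel/mm/transparent_hugepage/hugepages-<size>kB
 * directory for @order and populate it with the control and stats groups
 * that apply to that order.
 */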
static struct thpsize *thpsize_create(int order, struct kobject *parent)
{
	unsigned long size = (PAGE_SIZE << order) / SZ_1K;
	struct thpsize *thpsize;
	int ret = -ENOMEM;

	thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
	if (!thpsize)
		goto err;

	thpsize->order = order;

	ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
				   "hugepages-%lukB", size);
	if (ret) {
		kfree(thpsize);
		goto err;
	}

	ret = sysfs_add_group(&thpsize->kobj, &any_ctrl_attr_grp);
	if (ret)
		goto err_put;

	ret = sysfs_add_group(&thpsize->kobj, &any_stats_attr_grp);
	if (ret)
		goto err_put;

	if (BIT(order) & THP_ORDERS_ALL_ANON) {
		ret = sysfs_add_group(&thpsize->kobj, &anon_ctrl_attr_grp);
		if (ret)
			goto err_put;

		ret = sysfs_add_group(&thpsize->kobj, &anon_stats_attr_grp);
		if (ret)
			goto err_put;
	}

	if (BIT(order) & THP_ORDERS_ALL_FILE_DEFAULT) {
		ret = sysfs_add_group(&thpsize->kobj, &file_ctrl_attr_grp);
		if (ret)
			goto err_put;

		ret = sysfs_add_group(&thpsize->kobj, &file_stats_attr_grp);
		if (ret)
			goto err_put;
	}

	return thpsize;
err_put:
	kobject_put(&thpsize->kobj);
err:
	return ERR_PTR(ret);
}

static void thpsize_release(struct kobject *kobj)
{
	kfree(to_thpsize(kobj));
}

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;
	struct thpsize *thpsize;
	unsigned long orders;
	int order;

	/*
	 * Default to setting PMD-sized THP to inherit the global setting and
	 * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
	 * constant so we have to do this here.
	 */
	if (!anon_orders_configured)
		huge_anon_orders_inherit = BIT(PMD_ORDER);

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	orders = THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT;
	order = highest_order(orders);
	while (orders) {
		thpsize = thpsize_create(order, *hugepage_kobj);
		if (IS_ERR(thpsize)) {
			pr_err("failed to create thpsize for order %d\n", order);
			err = PTR_ERR(thpsize);
			goto remove_all;
		}
		list_add(&thpsize->node, &thpsize_list);
		order = next_order(&orders, order);
	}

	return 0;

remove_all:
	hugepage_exit_sysfs(*hugepage_kobj);
	return err;
remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	struct thpsize *thpsize, *tmp;

	list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
		list_del(&thpsize->node);
		kobject_put(&thpsize->kobj);
	}

	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

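/*
 * Allocate and register the huge zero page shrinker and the deferred split
 * shrinker that splits THPs queued for deferred splitting.
 */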
Qi Zheng | 54d9172 | 2023-09-11 17:44:16 +0800 | [diff] [blame] | 836 | static int __init thp_shrinker_init(void) |
| 837 | { |
| 838 | huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero"); |
| 839 | if (!huge_zero_page_shrinker) |
| 840 | return -ENOMEM; |
| 841 | |
| 842 | deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE | |
| 843 | SHRINKER_MEMCG_AWARE | |
| 844 | SHRINKER_NONSLAB, |
| 845 | "thp-deferred_split"); |
| 846 | if (!deferred_split_shrinker) { |
| 847 | shrinker_free(huge_zero_page_shrinker); |
| 848 | return -ENOMEM; |
| 849 | } |
| 850 | |
| 851 | huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count; |
| 852 | huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan; |
| 853 | shrinker_register(huge_zero_page_shrinker); |
| 854 | |
| 855 | deferred_split_shrinker->count_objects = deferred_split_count; |
| 856 | deferred_split_shrinker->scan_objects = deferred_split_scan; |
| 857 | shrinker_register(deferred_split_shrinker); |
| 858 | |
| 859 | return 0; |
| 860 | } |
| 861 | |
| 862 | static void __init thp_shrinker_exit(void) |
| 863 | { |
| 864 | shrinker_free(huge_zero_page_shrinker); |
| 865 | shrinker_free(deferred_split_shrinker); |
| 866 | } |
| 867 | |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 868 | static int __init hugepage_init(void) |
| 869 | { |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 870 | int err; |
Shaohua Li | 569e559 | 2012-01-12 17:19:11 -0800 | [diff] [blame] | 871 | struct kobject *hugepage_kobj; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 872 | |
Andrea Arcangeli | 4b7167b | 2011-01-13 15:47:09 -0800 | [diff] [blame] | 873 | if (!has_transparent_hugepage()) { |
Peter Xu | 3c556d2 | 2023-03-15 13:16:42 -0400 | [diff] [blame] | 874 | transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED; |
Shaohua Li | 569e559 | 2012-01-12 17:19:11 -0800 | [diff] [blame] | 875 | return -EINVAL; |
Andrea Arcangeli | 4b7167b | 2011-01-13 15:47:09 -0800 | [diff] [blame] | 876 | } |
| 877 | |
Kirill A. Shutemov | ff20c2e | 2016-03-01 09:45:14 +0530 | [diff] [blame] | 878 | /* |
| 879 | * hugepages can't be allocated by the buddy allocator |
| 880 | */ |
Kirill A. Shutemov | 5e0a760 | 2023-12-28 17:47:04 +0300 | [diff] [blame] | 881 | MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER); |
Kirill A. Shutemov | ff20c2e | 2016-03-01 09:45:14 +0530 | [diff] [blame] | 882 | |
Shaohua Li | 569e559 | 2012-01-12 17:19:11 -0800 | [diff] [blame] | 883 | err = hugepage_init_sysfs(&hugepage_kobj); |
| 884 | if (err) |
Kirill A. Shutemov | 65ebb64 | 2015-04-15 16:14:20 -0700 | [diff] [blame] | 885 | goto err_sysfs; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 886 | |
Kirill A. Shutemov | b46e756 | 2016-07-26 15:26:24 -0700 | [diff] [blame] | 887 | err = khugepaged_init(); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 888 | if (err) |
Kirill A. Shutemov | 65ebb64 | 2015-04-15 16:14:20 -0700 | [diff] [blame] | 889 | goto err_slab; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 890 | |
Qi Zheng | 54d9172 | 2023-09-11 17:44:16 +0800 | [diff] [blame] | 891 | err = thp_shrinker_init(); |
Kirill A. Shutemov | 65ebb64 | 2015-04-15 16:14:20 -0700 | [diff] [blame] | 892 | if (err) |
Qi Zheng | 54d9172 | 2023-09-11 17:44:16 +0800 | [diff] [blame] | 893 | goto err_shrinker; |
Kirill A. Shutemov | 97ae174 | 2012-12-12 13:51:06 -0800 | [diff] [blame] | 894 | |
Rik van Riel | 97562cd | 2011-01-13 15:47:12 -0800 | [diff] [blame] | 895 | /* |
| 896 | * By default disable transparent hugepages on smaller systems, |
| 897 | * where the extra memory used could hurt more than TLB overhead |
| 898 | * is likely to save. The admin can still enable it through /sys. |
| 899 | */ |
Arun KS | ca79b0c | 2018-12-28 00:34:29 -0800 | [diff] [blame] | 900 | if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) { |
Rik van Riel | 97562cd | 2011-01-13 15:47:12 -0800 | [diff] [blame] | 901 | transparent_hugepage_flags = 0; |
Kirill A. Shutemov | 79553da293 | 2015-04-15 16:14:56 -0700 | [diff] [blame] | 902 | return 0; |
| 903 | } |
Rik van Riel | 97562cd | 2011-01-13 15:47:12 -0800 | [diff] [blame] | 904 | |
Kirill A. Shutemov | 79553da293 | 2015-04-15 16:14:56 -0700 | [diff] [blame] | 905 | err = start_stop_khugepaged(); |
Kirill A. Shutemov | 65ebb64 | 2015-04-15 16:14:20 -0700 | [diff] [blame] | 906 | if (err) |
| 907 | goto err_khugepaged; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 908 | |
Shaohua Li | 569e559 | 2012-01-12 17:19:11 -0800 | [diff] [blame] | 909 | return 0; |
Kirill A. Shutemov | 65ebb64 | 2015-04-15 16:14:20 -0700 | [diff] [blame] | 910 | err_khugepaged: |
Qi Zheng | 54d9172 | 2023-09-11 17:44:16 +0800 | [diff] [blame] | 911 | thp_shrinker_exit(); |
| 912 | err_shrinker: |
Kirill A. Shutemov | b46e756 | 2016-07-26 15:26:24 -0700 | [diff] [blame] | 913 | khugepaged_destroy(); |
Kirill A. Shutemov | 65ebb64 | 2015-04-15 16:14:20 -0700 | [diff] [blame] | 914 | err_slab: |
Shaohua Li | 569e559 | 2012-01-12 17:19:11 -0800 | [diff] [blame] | 915 | hugepage_exit_sysfs(hugepage_kobj); |
Kirill A. Shutemov | 65ebb64 | 2015-04-15 16:14:20 -0700 | [diff] [blame] | 916 | err_sysfs: |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 917 | return err; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 918 | } |
Paul Gortmaker | a64fb3c | 2014-01-23 15:53:30 -0800 | [diff] [blame] | 919 | subsys_initcall(hugepage_init); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 920 | |
| 921 | static int __init setup_transparent_hugepage(char *str) |
| 922 | { |
| 923 | int ret = 0; |
| 924 | if (!str) |
| 925 | goto out; |
| 926 | if (!strcmp(str, "always")) { |
| 927 | set_bit(TRANSPARENT_HUGEPAGE_FLAG, |
| 928 | &transparent_hugepage_flags); |
| 929 | clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, |
| 930 | &transparent_hugepage_flags); |
| 931 | ret = 1; |
| 932 | } else if (!strcmp(str, "madvise")) { |
| 933 | clear_bit(TRANSPARENT_HUGEPAGE_FLAG, |
| 934 | &transparent_hugepage_flags); |
| 935 | set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, |
| 936 | &transparent_hugepage_flags); |
| 937 | ret = 1; |
| 938 | } else if (!strcmp(str, "never")) { |
| 939 | clear_bit(TRANSPARENT_HUGEPAGE_FLAG, |
| 940 | &transparent_hugepage_flags); |
| 941 | clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, |
| 942 | &transparent_hugepage_flags); |
| 943 | ret = 1; |
| 944 | } |
| 945 | out: |
| 946 | if (!ret) |
Andrew Morton | ae3a8c1 | 2014-06-04 16:06:58 -0700 | [diff] [blame] | 947 | pr_warn("transparent_hugepage= cannot parse, ignored\n"); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 948 | return ret; |
| 949 | } |
| 950 | __setup("transparent_hugepage=", setup_transparent_hugepage); |
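/*
 * Illustration (derived from the parser above): the boot parameter accepts
 * exactly the three policies handled in setup_transparent_hugepage(), e.g.
 *
 *	transparent_hugepage=always
 *	transparent_hugepage=madvise
 *	transparent_hugepage=never
 *
 * Any other value is ignored with a warning and the compiled-in default is
 * kept.
 */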
| 951 | |
Ryan Roberts | dd4d30d | 2024-08-14 14:02:47 +1200 | [diff] [blame] | 952 | static inline int get_order_from_str(const char *size_str) |
| 953 | { |
| 954 | unsigned long size; |
| 955 | char *endptr; |
| 956 | int order; |
| 957 | |
| 958 | size = memparse(size_str, &endptr); |
| 959 | |
| 960 | if (!is_power_of_2(size)) |
| 961 | goto err; |
| 962 | order = get_order(size); |
| 963 | if (BIT(order) & ~THP_ORDERS_ALL_ANON) |
| 964 | goto err; |
| 965 | |
| 966 | return order; |
| 967 | err: |
| 968 | pr_err("invalid size %s in thp_anon boot parameter\n", size_str); |
| 969 | return -EINVAL; |
| 970 | } |
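/*
 * For example, assuming 4K base pages, "64K" parses to order 4 and "2M" to
 * order 9 (the PMD order); sizes that are not a power of two, or whose order
 * is not a supported anonymous THP order, are rejected.
 */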
| 971 | |
| 972 | static char str_dup[PAGE_SIZE] __initdata; |
| 973 | static int __init setup_thp_anon(char *str) |
| 974 | { |
| 975 | char *token, *range, *policy, *subtoken; |
| 976 | unsigned long always, inherit, madvise; |
| 977 | char *start_size, *end_size; |
| 978 | int start, end, nr; |
| 979 | char *p; |
| 980 | |
| 981 | if (!str || strlen(str) + 1 > PAGE_SIZE) |
| 982 | goto err; |
| 983 | strcpy(str_dup, str); |
| 984 | |
| 985 | always = huge_anon_orders_always; |
| 986 | madvise = huge_anon_orders_madvise; |
| 987 | inherit = huge_anon_orders_inherit; |
| 988 | p = str_dup; |
| 989 | while ((token = strsep(&p, ";")) != NULL) { |
| 990 | range = strsep(&token, ":"); |
| 991 | policy = token; |
| 992 | |
| 993 | if (!policy) |
| 994 | goto err; |
| 995 | |
| 996 | while ((subtoken = strsep(&range, ",")) != NULL) { |
| 997 | if (strchr(subtoken, '-')) { |
| 998 | start_size = strsep(&subtoken, "-"); |
| 999 | end_size = subtoken; |
| 1000 | |
| 1001 | start = get_order_from_str(start_size); |
| 1002 | end = get_order_from_str(end_size); |
| 1003 | } else { |
| 1004 | start = end = get_order_from_str(subtoken); |
| 1005 | } |
| 1006 | |
| 1007 | if (start < 0 || end < 0 || start > end) |
| 1008 | goto err; |
| 1009 | |
| 1010 | nr = end - start + 1; |
| 1011 | if (!strcmp(policy, "always")) { |
| 1012 | bitmap_set(&always, start, nr); |
| 1013 | bitmap_clear(&inherit, start, nr); |
| 1014 | bitmap_clear(&madvise, start, nr); |
| 1015 | } else if (!strcmp(policy, "madvise")) { |
| 1016 | bitmap_set(&madvise, start, nr); |
| 1017 | bitmap_clear(&inherit, start, nr); |
| 1018 | bitmap_clear(&always, start, nr); |
| 1019 | } else if (!strcmp(policy, "inherit")) { |
| 1020 | bitmap_set(&inherit, start, nr); |
| 1021 | bitmap_clear(&madvise, start, nr); |
| 1022 | bitmap_clear(&always, start, nr); |
| 1023 | } else if (!strcmp(policy, "never")) { |
| 1024 | bitmap_clear(&inherit, start, nr); |
| 1025 | bitmap_clear(&madvise, start, nr); |
| 1026 | bitmap_clear(&always, start, nr); |
| 1027 | } else { |
| 1028 | pr_err("invalid policy %s in thp_anon boot parameter\n", policy); |
| 1029 | goto err; |
| 1030 | } |
| 1031 | } |
| 1032 | } |
| 1033 | |
| 1034 | huge_anon_orders_always = always; |
| 1035 | huge_anon_orders_madvise = madvise; |
| 1036 | huge_anon_orders_inherit = inherit; |
| 1037 | anon_orders_configured = true; |
| 1038 | return 1; |
| 1039 | |
| 1040 | err: |
| 1041 | pr_warn("thp_anon=%s: error parsing string, ignoring setting\n", str); |
| 1042 | return 0; |
| 1043 | } |
| 1044 | __setup("thp_anon=", setup_thp_anon); |
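/*
 * Illustration (format inferred from the parser above; sizes assume 4K base
 * pages): each ';'-separated token is "<size>[KMG][-<size>[KMG]]:<policy>",
 * where the size list may contain ','-separated entries and the policy is
 * one of always, madvise, inherit or never, e.g.
 *
 *	thp_anon=16K-64K:always;128K,512K:inherit;2M:never
 *
 * Later tokens override earlier ones for overlapping orders, and a parse
 * error leaves the default anon order bitmaps untouched.
 */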
| 1045 | |
Linus Torvalds | f55e101 | 2017-11-29 09:01:01 -0800 | [diff] [blame] | 1046 | pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1047 | { |
Linus Torvalds | f55e101 | 2017-11-29 09:01:01 -0800 | [diff] [blame] | 1048 | if (likely(vma->vm_flags & VM_WRITE)) |
Rick Edgecombe | 161e393 | 2023-06-12 17:10:29 -0700 | [diff] [blame] | 1049 | pmd = pmd_mkwrite(pmd, vma); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1050 | return pmd; |
| 1051 | } |
| 1052 | |
Yang Shi | 87eaceb | 2019-09-23 15:38:15 -0700 | [diff] [blame] | 1053 | #ifdef CONFIG_MEMCG |
Matthew Wilcox (Oracle) | f8baa6b | 2023-01-11 14:29:12 +0000 | [diff] [blame] | 1054 | static inline |
| 1055 | struct deferred_split *get_deferred_split_queue(struct folio *folio) |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 1056 | { |
Matthew Wilcox (Oracle) | f8baa6b | 2023-01-11 14:29:12 +0000 | [diff] [blame] | 1057 | struct mem_cgroup *memcg = folio_memcg(folio); |
| 1058 | struct pglist_data *pgdat = NODE_DATA(folio_nid(folio)); |
Yang Shi | 87eaceb | 2019-09-23 15:38:15 -0700 | [diff] [blame] | 1059 | |
| 1060 | if (memcg) |
| 1061 | return &memcg->deferred_split_queue; |
| 1062 | else |
| 1063 | return &pgdat->deferred_split_queue; |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 1064 | } |
Yang Shi | 87eaceb | 2019-09-23 15:38:15 -0700 | [diff] [blame] | 1065 | #else |
Matthew Wilcox (Oracle) | f8baa6b | 2023-01-11 14:29:12 +0000 | [diff] [blame] | 1066 | static inline |
| 1067 | struct deferred_split *get_deferred_split_queue(struct folio *folio) |
Yang Shi | 87eaceb | 2019-09-23 15:38:15 -0700 | [diff] [blame] | 1068 | { |
Matthew Wilcox (Oracle) | f8baa6b | 2023-01-11 14:29:12 +0000 | [diff] [blame] | 1069 | struct pglist_data *pgdat = NODE_DATA(folio_nid(folio)); |
Yang Shi | 87eaceb | 2019-09-23 15:38:15 -0700 | [diff] [blame] | 1070 | |
| 1071 | return &pgdat->deferred_split_queue; |
| 1072 | } |
| 1073 | #endif |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 1074 | |
Matthew Wilcox (Oracle) | 5beaee5 | 2024-03-26 20:28:22 +0000 | [diff] [blame] | 1075 | static inline bool is_transparent_hugepage(const struct folio *folio) |
Sean Christopherson | 005ba37 | 2020-01-08 12:24:36 -0800 | [diff] [blame] | 1076 | { |
Matthew Wilcox (Oracle) | a644b0a | 2023-08-16 16:12:01 +0100 | [diff] [blame] | 1077 | if (!folio_test_large(folio)) |
Zou Wei | fa1f68c | 2020-06-04 16:49:46 -0700 | [diff] [blame] | 1078 | return false; |
Sean Christopherson | 005ba37 | 2020-01-08 12:24:36 -0800 | [diff] [blame] | 1079 | |
Matthew Wilcox (Oracle) | 5beaee5 | 2024-03-26 20:28:22 +0000 | [diff] [blame] | 1080 | return is_huge_zero_folio(folio) || |
Matthew Wilcox (Oracle) | de53c05 | 2023-08-16 16:11:56 +0100 | [diff] [blame] | 1081 | folio_test_large_rmappable(folio); |
Sean Christopherson | 005ba37 | 2020-01-08 12:24:36 -0800 | [diff] [blame] | 1082 | } |
Sean Christopherson | 005ba37 | 2020-01-08 12:24:36 -0800 | [diff] [blame] | 1083 | |
Kirill A. Shutemov | 97d3d0f | 2020-01-13 16:29:10 -0800 | [diff] [blame] | 1084 | static unsigned long __thp_get_unmapped_area(struct file *filp, |
| 1085 | unsigned long addr, unsigned long len, |
Rick Edgecombe | ed48e87 | 2024-03-25 19:16:48 -0700 | [diff] [blame] | 1086 | loff_t off, unsigned long flags, unsigned long size, |
| 1087 | vm_flags_t vm_flags) |
Toshi Kani | 74d2fad | 2016-10-07 16:59:56 -0700 | [diff] [blame] | 1088 | { |
Toshi Kani | 74d2fad | 2016-10-07 16:59:56 -0700 | [diff] [blame] | 1089 | loff_t off_end = off + len; |
| 1090 | loff_t off_align = round_up(off, size); |
Ryan Roberts | 96204e1 | 2024-01-23 17:14:20 +0000 | [diff] [blame] | 1091 | unsigned long len_pad, ret, off_sub; |
Toshi Kani | 74d2fad | 2016-10-07 16:59:56 -0700 | [diff] [blame] | 1092 | |
Yang Shi | d959202 | 2024-07-12 08:58:55 -0700 | [diff] [blame] | 1093 | if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall()) |
Yang Shi | 4ef9ad19 | 2024-01-18 10:05:05 -0800 | [diff] [blame] | 1094 | return 0; |
| 1095 | |
Toshi Kani | 74d2fad | 2016-10-07 16:59:56 -0700 | [diff] [blame] | 1096 | if (off_end <= off_align || (off_end - off_align) < size) |
| 1097 | return 0; |
| 1098 | |
| 1099 | len_pad = len + size; |
| 1100 | if (len_pad < len || (off + len_pad) < off) |
| 1101 | return 0; |
| 1102 | |
Rick Edgecombe | ed48e87 | 2024-03-25 19:16:48 -0700 | [diff] [blame] | 1103 | ret = mm_get_unmapped_area_vmflags(current->mm, filp, addr, len_pad, |
| 1104 | off >> PAGE_SHIFT, flags, vm_flags); |
Kirill A. Shutemov | 97d3d0f | 2020-01-13 16:29:10 -0800 | [diff] [blame] | 1105 | |
| 1106 | /* |
| 1107 | * The failure might be due to length padding. The caller will retry |
| 1108 | * without the padding. |
| 1109 | */ |
| 1110 | if (IS_ERR_VALUE(ret)) |
Toshi Kani | 74d2fad | 2016-10-07 16:59:56 -0700 | [diff] [blame] | 1111 | return 0; |
| 1112 | |
Kirill A. Shutemov | 97d3d0f | 2020-01-13 16:29:10 -0800 | [diff] [blame] | 1113 | /* |
| 1114 | * Do not try to align to THP boundary if allocation at the address |
| 1115 | * hint succeeds. |
| 1116 | */ |
| 1117 | if (ret == addr) |
| 1118 | return addr; |
| 1119 | |
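/*
 * Alignment example (assuming a 2MB PMD_SIZE): if off % 2MB == 0x10000 and
 * the search above returned a 2MB-aligned address, off_sub below is 0x10000
 * and the final address is ret + 0x10000, i.e. it has the same position
 * within a 2MB region as the file offset, so PMD-sized folios can later be
 * mapped with PMD entries. The extra PMD_SIZE of padding requested above
 * guarantees the shifted range still fits.
 */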
Ryan Roberts | 96204e1 | 2024-01-23 17:14:20 +0000 | [diff] [blame] | 1120 | off_sub = (off - ret) & (size - 1); |
| 1121 | |
Rick Edgecombe | 529ce23 | 2024-03-25 19:16:44 -0700 | [diff] [blame] | 1122 | if (test_bit(MMF_TOPDOWN, ¤t->mm->flags) && !off_sub) |
Ryan Roberts | 96204e1 | 2024-01-23 17:14:20 +0000 | [diff] [blame] | 1123 | return ret + size; |
| 1124 | |
| 1125 | ret += off_sub; |
Kirill A. Shutemov | 97d3d0f | 2020-01-13 16:29:10 -0800 | [diff] [blame] | 1126 | return ret; |
Toshi Kani | 74d2fad | 2016-10-07 16:59:56 -0700 | [diff] [blame] | 1127 | } |
| 1128 | |
Rick Edgecombe | ed48e87 | 2024-03-25 19:16:48 -0700 | [diff] [blame] | 1129 | unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, |
| 1130 | unsigned long len, unsigned long pgoff, unsigned long flags, |
| 1131 | vm_flags_t vm_flags) |
Toshi Kani | 74d2fad | 2016-10-07 16:59:56 -0700 | [diff] [blame] | 1132 | { |
Kirill A. Shutemov | 97d3d0f | 2020-01-13 16:29:10 -0800 | [diff] [blame] | 1133 | unsigned long ret; |
Toshi Kani | 74d2fad | 2016-10-07 16:59:56 -0700 | [diff] [blame] | 1134 | loff_t off = (loff_t)pgoff << PAGE_SHIFT; |
| 1135 | |
Rick Edgecombe | ed48e87 | 2024-03-25 19:16:48 -0700 | [diff] [blame] | 1136 | ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags); |
Kirill A. Shutemov | 97d3d0f | 2020-01-13 16:29:10 -0800 | [diff] [blame] | 1137 | if (ret) |
| 1138 | return ret; |
William Kucharski | 1854bc6 | 2019-09-22 08:43:15 -0400 | [diff] [blame] | 1139 | |
Rick Edgecombe | ed48e87 | 2024-03-25 19:16:48 -0700 | [diff] [blame] | 1140 | return mm_get_unmapped_area_vmflags(current->mm, filp, addr, len, pgoff, flags, |
| 1141 | vm_flags); |
| 1142 | } |
| 1143 | |
| 1144 | unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, |
| 1145 | unsigned long len, unsigned long pgoff, unsigned long flags) |
| 1146 | { |
| 1147 | return thp_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0); |
Toshi Kani | 74d2fad | 2016-10-07 16:59:56 -0700 | [diff] [blame] | 1148 | } |
| 1149 | EXPORT_SYMBOL_GPL(thp_get_unmapped_area); |
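/*
 * Filesystems that support large folios in the page cache typically wire
 * this helper up as their ->get_unmapped_area() so that file-backed mappings
 * receive addresses suitably aligned for PMD mappings.
 */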
| 1150 | |
Souptick Joarder | 2b74030 | 2018-08-23 17:01:36 -0700 | [diff] [blame] | 1151 | static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, |
| 1152 | struct page *page, gfp_t gfp) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1153 | { |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1154 | struct vm_area_struct *vma = vmf->vma; |
Kefeng Wang | cfe3236 | 2023-03-02 19:58:29 +0800 | [diff] [blame] | 1155 | struct folio *folio = page_folio(page); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1156 | pgtable_t pgtable; |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1157 | unsigned long haddr = vmf->address & HPAGE_PMD_MASK; |
Souptick Joarder | 2b74030 | 2018-08-23 17:01:36 -0700 | [diff] [blame] | 1158 | vm_fault_t ret = 0; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1159 | |
Kefeng Wang | cfe3236 | 2023-03-02 19:58:29 +0800 | [diff] [blame] | 1160 | VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1161 | |
Kefeng Wang | cfe3236 | 2023-03-02 19:58:29 +0800 | [diff] [blame] | 1162 | if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { |
| 1163 | folio_put(folio); |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 1164 | count_vm_event(THP_FAULT_FALLBACK); |
David Rientjes | 85b9f46 | 2020-04-06 20:04:28 -0700 | [diff] [blame] | 1165 | count_vm_event(THP_FAULT_FALLBACK_CHARGE); |
Barry Song | ec33687 | 2024-04-12 23:48:55 +1200 | [diff] [blame] | 1166 | count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK); |
| 1167 | count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 1168 | return VM_FAULT_FALLBACK; |
| 1169 | } |
Kefeng Wang | cfe3236 | 2023-03-02 19:58:29 +0800 | [diff] [blame] | 1170 | folio_throttle_swaprate(folio, gfp); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1171 | |
Joel Fernandes (Google) | 4cf5892 | 2019-01-03 15:28:34 -0800 | [diff] [blame] | 1172 | pgtable = pte_alloc_one(vma->vm_mm); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1173 | if (unlikely(!pgtable)) { |
Michal Hocko | 6b31d59 | 2017-08-18 15:16:15 -0700 | [diff] [blame] | 1174 | ret = VM_FAULT_OOM; |
| 1175 | goto release; |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1176 | } |
| 1177 | |
Kefeng Wang | 78fefd0 | 2024-06-18 17:12:39 +0800 | [diff] [blame] | 1178 | folio_zero_user(folio, vmf->address); |
Minchan Kim | 52f3762 | 2013-04-29 15:08:15 -0700 | [diff] [blame] | 1179 | /* |
Kefeng Wang | cfe3236 | 2023-03-02 19:58:29 +0800 | [diff] [blame] | 1180 | * The memory barrier inside __folio_mark_uptodate makes sure that |
Kefeng Wang | 78fefd0 | 2024-06-18 17:12:39 +0800 | [diff] [blame] | 1181 | * folio_zero_user writes become visible before the set_pmd_at() |
Minchan Kim | 52f3762 | 2013-04-29 15:08:15 -0700 | [diff] [blame] | 1182 | * write. |
| 1183 | */ |
Kefeng Wang | cfe3236 | 2023-03-02 19:58:29 +0800 | [diff] [blame] | 1184 | __folio_mark_uptodate(folio); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1185 | |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1186 | vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); |
| 1187 | if (unlikely(!pmd_none(*vmf->pmd))) { |
Michal Hocko | 6b31d59 | 2017-08-18 15:16:15 -0700 | [diff] [blame] | 1188 | goto unlock_release; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1189 | } else { |
| 1190 | pmd_t entry; |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 1191 | |
Michal Hocko | 6b31d59 | 2017-08-18 15:16:15 -0700 | [diff] [blame] | 1192 | ret = check_stable_address_space(vma->vm_mm); |
| 1193 | if (ret) |
| 1194 | goto unlock_release; |
| 1195 | |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 1196 | /* Deliver the page fault to userland */ |
| 1197 | if (userfaultfd_missing(vma)) { |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1198 | spin_unlock(vmf->ptl); |
Kefeng Wang | cfe3236 | 2023-03-02 19:58:29 +0800 | [diff] [blame] | 1199 | folio_put(folio); |
Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 1200 | pte_free(vma->vm_mm, pgtable); |
Miaohe Lin | 8fd5eda | 2021-05-04 18:33:49 -0700 | [diff] [blame] | 1201 | ret = handle_userfault(vmf, VM_UFFD_MISSING); |
| 1202 | VM_BUG_ON(ret & VM_FAULT_FALLBACK); |
| 1203 | return ret; |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 1204 | } |
| 1205 | |
Kirill A. Shutemov | 3122359 | 2013-09-12 15:14:01 -0700 | [diff] [blame] | 1206 | entry = mk_huge_pmd(page, vma->vm_page_prot); |
Linus Torvalds | f55e101 | 2017-11-29 09:01:01 -0800 | [diff] [blame] | 1207 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); |
Barry Song | 15bde4a | 2024-06-18 11:11:35 +1200 | [diff] [blame] | 1208 | folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE); |
Kefeng Wang | cfe3236 | 2023-03-02 19:58:29 +0800 | [diff] [blame] | 1209 | folio_add_lru_vma(folio, vma); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1210 | pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); |
| 1211 | set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); |
Bibo Mao | fca4057 | 2021-02-24 12:06:42 -0800 | [diff] [blame] | 1212 | update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); |
Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 1213 | add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); |
Kirill A. Shutemov | c481290 | 2017-11-15 17:35:37 -0800 | [diff] [blame] | 1214 | mm_inc_nr_ptes(vma->vm_mm); |
Usama Arif | dafff3f | 2024-08-30 11:03:39 +0100 | [diff] [blame] | 1215 | deferred_split_folio(folio, false); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1216 | spin_unlock(vmf->ptl); |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 1217 | count_vm_event(THP_FAULT_ALLOC); |
Barry Song | ec33687 | 2024-04-12 23:48:55 +1200 | [diff] [blame] | 1218 | count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC); |
Johannes Weiner | 9d82c69 | 2020-06-03 16:02:04 -0700 | [diff] [blame] | 1219 | count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1220 | } |
| 1221 | |
David Rientjes | aa2e878 | 2012-05-29 15:06:17 -0700 | [diff] [blame] | 1222 | return 0; |
Michal Hocko | 6b31d59 | 2017-08-18 15:16:15 -0700 | [diff] [blame] | 1223 | unlock_release: |
| 1224 | spin_unlock(vmf->ptl); |
| 1225 | release: |
| 1226 | if (pgtable) |
| 1227 | pte_free(vma->vm_mm, pgtable); |
Kefeng Wang | cfe3236 | 2023-03-02 19:58:29 +0800 | [diff] [blame] | 1228 | folio_put(folio); |
Michal Hocko | 6b31d59 | 2017-08-18 15:16:15 -0700 | [diff] [blame] | 1229 | return ret; |
| 1230 | |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1231 | } |
| 1232 | |
Mel Gorman | 444eb2a4 | 2016-03-17 14:19:23 -0700 | [diff] [blame] | 1233 | /* |
David Rientjes | 21440d7 | 2017-02-22 15:45:49 -0800 | [diff] [blame] | 1234 | * always: directly stall for all thp allocations |
| 1235 | * defer: wake kswapd and fail if not immediately available |
| 1236 | * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise |
| 1237 | * fail if not immediately available |
| 1238 | * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately |
| 1239 | * available |
| 1240 | * never: never stall for any thp allocation |
Mel Gorman | 444eb2a4 | 2016-03-17 14:19:23 -0700 | [diff] [blame] | 1241 | */ |
Rik van Riel | 164cc4f | 2021-02-25 17:16:18 -0800 | [diff] [blame] | 1242 | gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma) |
Andrea Arcangeli | 0bbbc0b | 2011-01-13 15:47:05 -0800 | [diff] [blame] | 1243 | { |
Rik van Riel | 164cc4f | 2021-02-25 17:16:18 -0800 | [diff] [blame] | 1244 | const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE); |
Michal Hocko | 89c83fb | 2018-11-02 15:48:31 -0700 | [diff] [blame] | 1245 | |
David Rientjes | ac79f78 | 2019-09-04 12:54:18 -0700 | [diff] [blame] | 1246 | /* Always do synchronous compaction */ |
David Rientjes | 21440d7 | 2017-02-22 15:45:49 -0800 | [diff] [blame] | 1247 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) |
Andrea Arcangeli | a828260 | 2019-08-13 15:37:53 -0700 | [diff] [blame] | 1248 | return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); |
David Rientjes | ac79f78 | 2019-09-04 12:54:18 -0700 | [diff] [blame] | 1249 | |
| 1250 | /* Kick kcompactd and fail quickly */ |
David Rientjes | 21440d7 | 2017-02-22 15:45:49 -0800 | [diff] [blame] | 1251 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) |
David Rientjes | 19deb76 | 2019-09-04 12:54:20 -0700 | [diff] [blame] | 1252 | return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM; |
David Rientjes | ac79f78 | 2019-09-04 12:54:18 -0700 | [diff] [blame] | 1253 | |
| 1254 | /* Synchronous compaction if madvised, otherwise kick kcompactd */ |
David Rientjes | 21440d7 | 2017-02-22 15:45:49 -0800 | [diff] [blame] | 1255 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) |
David Rientjes | 19deb76 | 2019-09-04 12:54:20 -0700 | [diff] [blame] | 1256 | return GFP_TRANSHUGE_LIGHT | |
| 1257 | (vma_madvised ? __GFP_DIRECT_RECLAIM : |
| 1258 | __GFP_KSWAPD_RECLAIM); |
David Rientjes | ac79f78 | 2019-09-04 12:54:18 -0700 | [diff] [blame] | 1259 | |
| 1260 | /* Only do synchronous compaction if madvised */ |
David Rientjes | 21440d7 | 2017-02-22 15:45:49 -0800 | [diff] [blame] | 1261 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) |
David Rientjes | 19deb76 | 2019-09-04 12:54:20 -0700 | [diff] [blame] | 1262 | return GFP_TRANSHUGE_LIGHT | |
| 1263 | (vma_madvised ? __GFP_DIRECT_RECLAIM : 0); |
David Rientjes | ac79f78 | 2019-09-04 12:54:18 -0700 | [diff] [blame] | 1264 | |
David Rientjes | 19deb76 | 2019-09-04 12:54:20 -0700 | [diff] [blame] | 1265 | return GFP_TRANSHUGE_LIGHT; |
Mel Gorman | 444eb2a4 | 2016-03-17 14:19:23 -0700 | [diff] [blame] | 1266 | } |
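/*
 * For example, with "defer+madvise" selected in
 * /sys/kernel/mm/transparent_hugepage/defrag, a VM_HUGEPAGE (madvised) vma
 * gets GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM while any other vma gets
 * GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM, matching the table above.
 */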
| 1267 | |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1268 | /* Caller must hold page table lock. */ |
Matthew Wilcox (Oracle) | e28833b | 2024-03-26 20:28:26 +0000 | [diff] [blame] | 1269 | static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm, |
Kirill A. Shutemov | 97ae174 | 2012-12-12 13:51:06 -0800 | [diff] [blame] | 1270 | struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, |
Matthew Wilcox (Oracle) | e28833b | 2024-03-26 20:28:26 +0000 | [diff] [blame] | 1271 | struct folio *zero_folio) |
Kirill A. Shutemov | fc9fe82 | 2012-12-12 13:50:51 -0800 | [diff] [blame] | 1272 | { |
| 1273 | pmd_t entry; |
Andrew Morton | 7c41416 | 2015-09-08 14:58:43 -0700 | [diff] [blame] | 1274 | if (!pmd_none(*pmd)) |
Miaohe Lin | 2efeb8d | 2021-02-24 12:07:29 -0800 | [diff] [blame] | 1275 | return; |
Matthew Wilcox (Oracle) | e28833b | 2024-03-26 20:28:26 +0000 | [diff] [blame] | 1276 | entry = mk_pmd(&zero_folio->page, vma->vm_page_prot); |
Kirill A. Shutemov | fc9fe82 | 2012-12-12 13:50:51 -0800 | [diff] [blame] | 1277 | entry = pmd_mkhuge(entry); |
Qi Zheng | c8bb416 | 2022-08-18 16:27:48 +0800 | [diff] [blame] | 1278 | pgtable_trans_huge_deposit(mm, pmd, pgtable); |
Kirill A. Shutemov | fc9fe82 | 2012-12-12 13:50:51 -0800 | [diff] [blame] | 1279 | set_pmd_at(mm, haddr, pmd, entry); |
Kirill A. Shutemov | c481290 | 2017-11-15 17:35:37 -0800 | [diff] [blame] | 1280 | mm_inc_nr_ptes(mm); |
Kirill A. Shutemov | fc9fe82 | 2012-12-12 13:50:51 -0800 | [diff] [blame] | 1281 | } |
| 1282 | |
Souptick Joarder | 2b74030 | 2018-08-23 17:01:36 -0700 | [diff] [blame] | 1283 | vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1284 | { |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1285 | struct vm_area_struct *vma = vmf->vma; |
Aneesh Kumar K.V | 077fcf1 | 2015-02-11 15:27:12 -0800 | [diff] [blame] | 1286 | gfp_t gfp; |
Matthew Wilcox (Oracle) | cb196ee | 2022-05-12 20:23:01 -0700 | [diff] [blame] | 1287 | struct folio *folio; |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1288 | unsigned long haddr = vmf->address & HPAGE_PMD_MASK; |
Matthew Wilcox (Oracle) | a373bae | 2024-04-26 15:45:01 +0100 | [diff] [blame] | 1289 | vm_fault_t ret; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1290 | |
Ryan Roberts | 3485b88 | 2023-12-07 16:12:04 +0000 | [diff] [blame] | 1291 | if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) |
Kirill A. Shutemov | c029255 | 2013-09-12 15:14:05 -0700 | [diff] [blame] | 1292 | return VM_FAULT_FALLBACK; |
Matthew Wilcox (Oracle) | a373bae | 2024-04-26 15:45:01 +0100 | [diff] [blame] | 1293 | ret = vmf_anon_prepare(vmf); |
| 1294 | if (ret) |
| 1295 | return ret; |
Yang Shi | 4fa6893 | 2022-06-16 10:48:35 -0700 | [diff] [blame] | 1296 | khugepaged_enter_vma(vma, vma->vm_flags); |
Yang Shi | d2081b2 | 2022-05-19 14:08:49 -0700 | [diff] [blame] | 1297 | |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1298 | if (!(vmf->flags & FAULT_FLAG_WRITE) && |
Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 1299 | !mm_forbids_zeropage(vma->vm_mm) && |
Kirill A. Shutemov | 128ec03 | 2013-09-12 15:14:03 -0700 | [diff] [blame] | 1300 | transparent_hugepage_use_zero_page()) { |
| 1301 | pgtable_t pgtable; |
Matthew Wilcox (Oracle) | e28833b | 2024-03-26 20:28:26 +0000 | [diff] [blame] | 1302 | struct folio *zero_folio; |
Souptick Joarder | 2b74030 | 2018-08-23 17:01:36 -0700 | [diff] [blame] | 1303 | vm_fault_t ret; |
Matthew Wilcox (Oracle) | e28833b | 2024-03-26 20:28:26 +0000 | [diff] [blame] | 1304 | |
Joel Fernandes (Google) | 4cf5892 | 2019-01-03 15:28:34 -0800 | [diff] [blame] | 1305 | pgtable = pte_alloc_one(vma->vm_mm); |
Kirill A. Shutemov | 128ec03 | 2013-09-12 15:14:03 -0700 | [diff] [blame] | 1306 | if (unlikely(!pgtable)) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1307 | return VM_FAULT_OOM; |
Matthew Wilcox (Oracle) | e28833b | 2024-03-26 20:28:26 +0000 | [diff] [blame] | 1308 | zero_folio = mm_get_huge_zero_folio(vma->vm_mm); |
| 1309 | if (unlikely(!zero_folio)) { |
Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 1310 | pte_free(vma->vm_mm, pgtable); |
Andi Kleen | 81ab420 | 2011-04-14 15:22:06 -0700 | [diff] [blame] | 1311 | count_vm_event(THP_FAULT_FALLBACK); |
Kirill A. Shutemov | c029255 | 2013-09-12 15:14:05 -0700 | [diff] [blame] | 1312 | return VM_FAULT_FALLBACK; |
Andi Kleen | 81ab420 | 2011-04-14 15:22:06 -0700 | [diff] [blame] | 1313 | } |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1314 | vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 1315 | ret = 0; |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1316 | if (pmd_none(*vmf->pmd)) { |
Michal Hocko | 6b31d59 | 2017-08-18 15:16:15 -0700 | [diff] [blame] | 1317 | ret = check_stable_address_space(vma->vm_mm); |
| 1318 | if (ret) { |
| 1319 | spin_unlock(vmf->ptl); |
Gerald Schaefer | bfe8cc1 | 2020-11-21 22:17:15 -0800 | [diff] [blame] | 1320 | pte_free(vma->vm_mm, pgtable); |
Michal Hocko | 6b31d59 | 2017-08-18 15:16:15 -0700 | [diff] [blame] | 1321 | } else if (userfaultfd_missing(vma)) { |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1322 | spin_unlock(vmf->ptl); |
Gerald Schaefer | bfe8cc1 | 2020-11-21 22:17:15 -0800 | [diff] [blame] | 1323 | pte_free(vma->vm_mm, pgtable); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1324 | ret = handle_userfault(vmf, VM_UFFD_MISSING); |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 1325 | VM_BUG_ON(ret & VM_FAULT_FALLBACK); |
| 1326 | } else { |
Matthew Wilcox (Oracle) | e28833b | 2024-03-26 20:28:26 +0000 | [diff] [blame] | 1327 | set_huge_zero_folio(pgtable, vma->vm_mm, vma, |
| 1328 | haddr, vmf->pmd, zero_folio); |
Bibo Mao | fca4057 | 2021-02-24 12:06:42 -0800 | [diff] [blame] | 1329 | update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1330 | spin_unlock(vmf->ptl); |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 1331 | } |
Gerald Schaefer | bfe8cc1 | 2020-11-21 22:17:15 -0800 | [diff] [blame] | 1332 | } else { |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1333 | spin_unlock(vmf->ptl); |
Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 1334 | pte_free(vma->vm_mm, pgtable); |
Gerald Schaefer | bfe8cc1 | 2020-11-21 22:17:15 -0800 | [diff] [blame] | 1335 | } |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 1336 | return ret; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1337 | } |
Rik van Riel | 164cc4f | 2021-02-25 17:16:18 -0800 | [diff] [blame] | 1338 | gfp = vma_thp_gfp_mask(vma); |
Matthew Wilcox (Oracle) | cb196ee | 2022-05-12 20:23:01 -0700 | [diff] [blame] | 1339 | folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true); |
| 1340 | if (unlikely(!folio)) { |
Kirill A. Shutemov | 128ec03 | 2013-09-12 15:14:03 -0700 | [diff] [blame] | 1341 | count_vm_event(THP_FAULT_FALLBACK); |
Barry Song | ec33687 | 2024-04-12 23:48:55 +1200 | [diff] [blame] | 1342 | count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK); |
Kirill A. Shutemov | c029255 | 2013-09-12 15:14:05 -0700 | [diff] [blame] | 1343 | return VM_FAULT_FALLBACK; |
Kirill A. Shutemov | 128ec03 | 2013-09-12 15:14:03 -0700 | [diff] [blame] | 1344 | } |
Matthew Wilcox (Oracle) | cb196ee | 2022-05-12 20:23:01 -0700 | [diff] [blame] | 1345 | return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1346 | } |
| 1347 | |
Matthew Wilcox | ae18d6d | 2015-09-08 14:59:14 -0700 | [diff] [blame] | 1348 | static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, |
Oliver O'Halloran | 3b6521f | 2017-05-08 15:59:43 -0700 | [diff] [blame] | 1349 | pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, |
| 1350 | pgtable_t pgtable) |
Matthew Wilcox | 5cad465 | 2015-09-08 14:58:54 -0700 | [diff] [blame] | 1351 | { |
| 1352 | struct mm_struct *mm = vma->vm_mm; |
| 1353 | pmd_t entry; |
| 1354 | spinlock_t *ptl; |
| 1355 | |
| 1356 | ptl = pmd_lock(mm, pmd); |
Aneesh Kumar K.V | c6f3c5e | 2019-04-05 18:39:10 -0700 | [diff] [blame] | 1357 | if (!pmd_none(*pmd)) { |
| 1358 | if (write) { |
| 1359 | if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) { |
| 1360 | WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); |
| 1361 | goto out_unlock; |
| 1362 | } |
| 1363 | entry = pmd_mkyoung(*pmd); |
| 1364 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); |
| 1365 | if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) |
| 1366 | update_mmu_cache_pmd(vma, addr, pmd); |
| 1367 | } |
| 1368 | |
| 1369 | goto out_unlock; |
| 1370 | } |
| 1371 | |
Dan Williams | f25748e3 | 2016-01-15 16:56:43 -0800 | [diff] [blame] | 1372 | entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); |
| 1373 | if (pfn_t_devmap(pfn)) |
| 1374 | entry = pmd_mkdevmap(entry); |
Peter Xu | 3c8e44c | 2024-08-26 16:43:37 -0400 | [diff] [blame] | 1375 | else |
| 1376 | entry = pmd_mkspecial(entry); |
Ross Zwisler | 01871e5 | 2016-01-15 16:56:02 -0800 | [diff] [blame] | 1377 | if (write) { |
Linus Torvalds | f55e101 | 2017-11-29 09:01:01 -0800 | [diff] [blame] | 1378 | entry = pmd_mkyoung(pmd_mkdirty(entry)); |
| 1379 | entry = maybe_pmd_mkwrite(entry, vma); |
Matthew Wilcox | 5cad465 | 2015-09-08 14:58:54 -0700 | [diff] [blame] | 1380 | } |
Oliver O'Halloran | 3b6521f | 2017-05-08 15:59:43 -0700 | [diff] [blame] | 1381 | |
| 1382 | if (pgtable) { |
| 1383 | pgtable_trans_huge_deposit(mm, pmd, pgtable); |
Kirill A. Shutemov | c481290 | 2017-11-15 17:35:37 -0800 | [diff] [blame] | 1384 | mm_inc_nr_ptes(mm); |
Aneesh Kumar K.V | c6f3c5e | 2019-04-05 18:39:10 -0700 | [diff] [blame] | 1385 | pgtable = NULL; |
Oliver O'Halloran | 3b6521f | 2017-05-08 15:59:43 -0700 | [diff] [blame] | 1386 | } |
| 1387 | |
Ross Zwisler | 01871e5 | 2016-01-15 16:56:02 -0800 | [diff] [blame] | 1388 | set_pmd_at(mm, addr, pmd, entry); |
| 1389 | update_mmu_cache_pmd(vma, addr, pmd); |
Aneesh Kumar K.V | c6f3c5e | 2019-04-05 18:39:10 -0700 | [diff] [blame] | 1390 | |
| 1391 | out_unlock: |
Matthew Wilcox | 5cad465 | 2015-09-08 14:58:54 -0700 | [diff] [blame] | 1392 | spin_unlock(ptl); |
Aneesh Kumar K.V | c6f3c5e | 2019-04-05 18:39:10 -0700 | [diff] [blame] | 1393 | if (pgtable) |
| 1394 | pte_free(mm, pgtable); |
Matthew Wilcox | 5cad465 | 2015-09-08 14:58:54 -0700 | [diff] [blame] | 1395 | } |
| 1396 | |
Thomas Hellstrom (VMware) | 9a9731b | 2020-03-24 18:48:09 +0100 | [diff] [blame] | 1397 | /** |
Lorenzo Stoakes | 7b806d2 | 2023-03-12 23:40:14 +0000 | [diff] [blame] | 1398 | * vmf_insert_pfn_pmd - insert a pmd size pfn |
Thomas Hellstrom (VMware) | 9a9731b | 2020-03-24 18:48:09 +0100 | [diff] [blame] | 1399 | * @vmf: Structure describing the fault |
| 1400 | * @pfn: pfn to insert |
Thomas Hellstrom (VMware) | 9a9731b | 2020-03-24 18:48:09 +0100 | [diff] [blame] | 1401 | * @write: whether it's a write fault |
| 1402 | * |
Lorenzo Stoakes | 7b806d2 | 2023-03-12 23:40:14 +0000 | [diff] [blame] | 1403 | * Insert a pmd size pfn. See vmf_insert_pfn() for additional info. |
Thomas Hellstrom (VMware) | 9a9731b | 2020-03-24 18:48:09 +0100 | [diff] [blame] | 1404 | * |
| 1405 | * Return: vm_fault_t value. |
| 1406 | */ |
Lorenzo Stoakes | 7b806d2 | 2023-03-12 23:40:14 +0000 | [diff] [blame] | 1407 | vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) |
Matthew Wilcox | 5cad465 | 2015-09-08 14:58:54 -0700 | [diff] [blame] | 1408 | { |
Dan Williams | fce86ff | 2019-05-13 17:15:33 -0700 | [diff] [blame] | 1409 | unsigned long addr = vmf->address & PMD_MASK; |
| 1410 | struct vm_area_struct *vma = vmf->vma; |
Lorenzo Stoakes | 7b806d2 | 2023-03-12 23:40:14 +0000 | [diff] [blame] | 1411 | pgprot_t pgprot = vma->vm_page_prot; |
Oliver O'Halloran | 3b6521f | 2017-05-08 15:59:43 -0700 | [diff] [blame] | 1412 | pgtable_t pgtable = NULL; |
Dan Williams | fce86ff | 2019-05-13 17:15:33 -0700 | [diff] [blame] | 1413 | |
Matthew Wilcox | 5cad465 | 2015-09-08 14:58:54 -0700 | [diff] [blame] | 1414 | /* |
| 1415 | * If we had pmd_special, we could avoid all these restrictions, |
| 1416 | * but we need to be consistent with PTEs and architectures that |
| 1417 | * can't support a 'special' bit. |
| 1418 | */ |
Dave Jiang | e1fb4a0 | 2018-08-17 15:43:40 -0700 | [diff] [blame] | 1419 | BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && |
| 1420 | !pfn_t_devmap(pfn)); |
Matthew Wilcox | 5cad465 | 2015-09-08 14:58:54 -0700 | [diff] [blame] | 1421 | BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == |
| 1422 | (VM_PFNMAP|VM_MIXEDMAP)); |
| 1423 | BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); |
Matthew Wilcox | 5cad465 | 2015-09-08 14:58:54 -0700 | [diff] [blame] | 1424 | |
| 1425 | if (addr < vma->vm_start || addr >= vma->vm_end) |
| 1426 | return VM_FAULT_SIGBUS; |
Borislav Petkov | 308a047 | 2016-10-26 19:43:43 +0200 | [diff] [blame] | 1427 | |
Oliver O'Halloran | 3b6521f | 2017-05-08 15:59:43 -0700 | [diff] [blame] | 1428 | if (arch_needs_pgtable_deposit()) { |
Joel Fernandes (Google) | 4cf5892 | 2019-01-03 15:28:34 -0800 | [diff] [blame] | 1429 | pgtable = pte_alloc_one(vma->vm_mm); |
Oliver O'Halloran | 3b6521f | 2017-05-08 15:59:43 -0700 | [diff] [blame] | 1430 | if (!pgtable) |
| 1431 | return VM_FAULT_OOM; |
| 1432 | } |
| 1433 | |
Borislav Petkov | 308a047 | 2016-10-26 19:43:43 +0200 | [diff] [blame] | 1434 | track_pfn_insert(vma, &pgprot, pfn); |
| 1435 | |
Dan Williams | fce86ff | 2019-05-13 17:15:33 -0700 | [diff] [blame] | 1436 | insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); |
Matthew Wilcox | ae18d6d | 2015-09-08 14:59:14 -0700 | [diff] [blame] | 1437 | return VM_FAULT_NOPAGE; |
Matthew Wilcox | 5cad465 | 2015-09-08 14:58:54 -0700 | [diff] [blame] | 1438 | } |
Lorenzo Stoakes | 7b806d2 | 2023-03-12 23:40:14 +0000 | [diff] [blame] | 1439 | EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); |
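/*
 * Typical use (sketch only; mydev_pfn() is a made-up helper): a driver or
 * DAX-style ->huge_fault() handler resolves the backing pfn itself and then
 * does something like
 *
 *	return vmf_insert_pfn_pmd(vmf, mydev_pfn(vmf),
 *				  vmf->flags & FAULT_FLAG_WRITE);
 *
 * returning VM_FAULT_FALLBACK instead when a PMD-sized mapping isn't
 * possible at that address.
 */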
Matthew Wilcox | 5cad465 | 2015-09-08 14:58:54 -0700 | [diff] [blame] | 1440 | |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1441 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD |
Linus Torvalds | f55e101 | 2017-11-29 09:01:01 -0800 | [diff] [blame] | 1442 | static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1443 | { |
Linus Torvalds | f55e101 | 2017-11-29 09:01:01 -0800 | [diff] [blame] | 1444 | if (likely(vma->vm_flags & VM_WRITE)) |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1445 | pud = pud_mkwrite(pud); |
| 1446 | return pud; |
| 1447 | } |
| 1448 | |
| 1449 | static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, |
Lorenzo Stoakes | 7b806d2 | 2023-03-12 23:40:14 +0000 | [diff] [blame] | 1450 | pud_t *pud, pfn_t pfn, bool write) |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1451 | { |
| 1452 | struct mm_struct *mm = vma->vm_mm; |
Lorenzo Stoakes | 7b806d2 | 2023-03-12 23:40:14 +0000 | [diff] [blame] | 1453 | pgprot_t prot = vma->vm_page_prot; |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1454 | pud_t entry; |
| 1455 | spinlock_t *ptl; |
| 1456 | |
| 1457 | ptl = pud_lock(mm, pud); |
Aneesh Kumar K.V | c6f3c5e | 2019-04-05 18:39:10 -0700 | [diff] [blame] | 1458 | if (!pud_none(*pud)) { |
| 1459 | if (write) { |
Peter Xu | ef713ec | 2024-08-26 16:43:36 -0400 | [diff] [blame] | 1460 | if (WARN_ON_ONCE(pud_pfn(*pud) != pfn_t_to_pfn(pfn))) |
Aneesh Kumar K.V | c6f3c5e | 2019-04-05 18:39:10 -0700 | [diff] [blame] | 1461 | goto out_unlock; |
Aneesh Kumar K.V | c6f3c5e | 2019-04-05 18:39:10 -0700 | [diff] [blame] | 1462 | entry = pud_mkyoung(*pud); |
| 1463 | entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma); |
| 1464 | if (pudp_set_access_flags(vma, addr, pud, entry, 1)) |
| 1465 | update_mmu_cache_pud(vma, addr, pud); |
| 1466 | } |
| 1467 | goto out_unlock; |
| 1468 | } |
| 1469 | |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1470 | entry = pud_mkhuge(pfn_t_pud(pfn, prot)); |
| 1471 | if (pfn_t_devmap(pfn)) |
| 1472 | entry = pud_mkdevmap(entry); |
Peter Xu | 3c8e44c | 2024-08-26 16:43:37 -0400 | [diff] [blame] | 1473 | else |
| 1474 | entry = pud_mkspecial(entry); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1475 | if (write) { |
Linus Torvalds | f55e101 | 2017-11-29 09:01:01 -0800 | [diff] [blame] | 1476 | entry = pud_mkyoung(pud_mkdirty(entry)); |
| 1477 | entry = maybe_pud_mkwrite(entry, vma); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1478 | } |
| 1479 | set_pud_at(mm, addr, pud, entry); |
| 1480 | update_mmu_cache_pud(vma, addr, pud); |
Aneesh Kumar K.V | c6f3c5e | 2019-04-05 18:39:10 -0700 | [diff] [blame] | 1481 | |
| 1482 | out_unlock: |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1483 | spin_unlock(ptl); |
| 1484 | } |
| 1485 | |
Thomas Hellstrom (VMware) | 9a9731b | 2020-03-24 18:48:09 +0100 | [diff] [blame] | 1486 | /** |
Lorenzo Stoakes | 7b806d2 | 2023-03-12 23:40:14 +0000 | [diff] [blame] | 1487 | * vmf_insert_pfn_pud - insert a pud size pfn |
Thomas Hellstrom (VMware) | 9a9731b | 2020-03-24 18:48:09 +0100 | [diff] [blame] | 1488 | * @vmf: Structure describing the fault |
| 1489 | * @pfn: pfn to insert |
Thomas Hellstrom (VMware) | 9a9731b | 2020-03-24 18:48:09 +0100 | [diff] [blame] | 1490 | * @write: whether it's a write fault |
| 1491 | * |
Lorenzo Stoakes | 7b806d2 | 2023-03-12 23:40:14 +0000 | [diff] [blame] | 1492 | * Insert a pud size pfn. See vmf_insert_pfn() for additional info. |
Thomas Hellstrom (VMware) | 9a9731b | 2020-03-24 18:48:09 +0100 | [diff] [blame] | 1493 | * |
| 1494 | * Return: vm_fault_t value. |
| 1495 | */ |
Lorenzo Stoakes | 7b806d2 | 2023-03-12 23:40:14 +0000 | [diff] [blame] | 1496 | vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write) |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1497 | { |
Dan Williams | fce86ff | 2019-05-13 17:15:33 -0700 | [diff] [blame] | 1498 | unsigned long addr = vmf->address & PUD_MASK; |
| 1499 | struct vm_area_struct *vma = vmf->vma; |
Lorenzo Stoakes | 7b806d2 | 2023-03-12 23:40:14 +0000 | [diff] [blame] | 1500 | pgprot_t pgprot = vma->vm_page_prot; |
Dan Williams | fce86ff | 2019-05-13 17:15:33 -0700 | [diff] [blame] | 1501 | |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1502 | /* |
| 1503 | * If we had pud_special, we could avoid all these restrictions, |
| 1504 | * but we need to be consistent with PTEs and architectures that |
| 1505 | * can't support a 'special' bit. |
| 1506 | */ |
Dave Jiang | 62ec0d8 | 2018-09-04 15:46:16 -0700 | [diff] [blame] | 1507 | BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && |
| 1508 | !pfn_t_devmap(pfn)); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1509 | BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == |
| 1510 | (VM_PFNMAP|VM_MIXEDMAP)); |
| 1511 | BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1512 | |
| 1513 | if (addr < vma->vm_start || addr >= vma->vm_end) |
| 1514 | return VM_FAULT_SIGBUS; |
| 1515 | |
| 1516 | track_pfn_insert(vma, &pgprot, pfn); |
| 1517 | |
Lorenzo Stoakes | 7b806d2 | 2023-03-12 23:40:14 +0000 | [diff] [blame] | 1518 | insert_pfn_pud(vma, addr, vmf->pud, pfn, write); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1519 | return VM_FAULT_NOPAGE; |
| 1520 | } |
Lorenzo Stoakes | 7b806d2 | 2023-03-12 23:40:14 +0000 | [diff] [blame] | 1521 | EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1522 | #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ |
| 1523 | |
Peter Xu | 4418c52 | 2024-03-27 11:23:30 -0400 | [diff] [blame] | 1524 | void touch_pmd(struct vm_area_struct *vma, unsigned long addr, |
| 1525 | pmd_t *pmd, bool write) |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 1526 | { |
| 1527 | pmd_t _pmd; |
| 1528 | |
Kirill A. Shutemov | a8f9736 | 2017-11-27 06:21:25 +0300 | [diff] [blame] | 1529 | _pmd = pmd_mkyoung(*pmd); |
Miaohe Lin | a69e471 | 2022-07-04 21:21:50 +0800 | [diff] [blame] | 1530 | if (write) |
Kirill A. Shutemov | a8f9736 | 2017-11-27 06:21:25 +0300 | [diff] [blame] | 1531 | _pmd = pmd_mkdirty(_pmd); |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 1532 | if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, |
Miaohe Lin | a69e471 | 2022-07-04 21:21:50 +0800 | [diff] [blame] | 1533 | pmd, _pmd, write)) |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 1534 | update_mmu_cache_pmd(vma, addr, pmd); |
| 1535 | } |
| 1536 | |
| 1537 | struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, |
Keith Busch | df06b37 | 2018-10-26 15:10:28 -0700 | [diff] [blame] | 1538 | pmd_t *pmd, int flags, struct dev_pagemap **pgmap) |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 1539 | { |
| 1540 | unsigned long pfn = pmd_pfn(*pmd); |
| 1541 | struct mm_struct *mm = vma->vm_mm; |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 1542 | struct page *page; |
Logan Gunthorpe | 0f08923 | 2022-10-21 11:41:08 -0600 | [diff] [blame] | 1543 | int ret; |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 1544 | |
| 1545 | assert_spin_locked(pmd_lockptr(mm, pmd)); |
| 1546 | |
Linus Torvalds | f6f3732 | 2017-12-15 18:53:22 -0800 | [diff] [blame] | 1547 | if (flags & FOLL_WRITE && !pmd_write(*pmd)) |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 1548 | return NULL; |
| 1549 | |
| 1550 | if (pmd_present(*pmd) && pmd_devmap(*pmd)) |
| 1551 | /* pass */; |
| 1552 | else |
| 1553 | return NULL; |
| 1554 | |
| 1555 | if (flags & FOLL_TOUCH) |
Miaohe Lin | a69e471 | 2022-07-04 21:21:50 +0800 | [diff] [blame] | 1556 | touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 1557 | |
| 1558 | /* |
| 1559 | * device mapped pages can only be returned if the |
| 1560 | * caller will manage the page reference count. |
| 1561 | */ |
John Hubbard | 3faa52c | 2020-04-01 21:05:29 -0700 | [diff] [blame] | 1562 | if (!(flags & (FOLL_GET | FOLL_PIN))) |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 1563 | return ERR_PTR(-EEXIST); |
| 1564 | |
| 1565 | pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT; |
Keith Busch | df06b37 | 2018-10-26 15:10:28 -0700 | [diff] [blame] | 1566 | *pgmap = get_dev_pagemap(pfn, *pgmap); |
| 1567 | if (!*pgmap) |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 1568 | return ERR_PTR(-EFAULT); |
| 1569 | page = pfn_to_page(pfn); |
Yang Shi | f442fa6 | 2024-06-28 12:14:58 -0700 | [diff] [blame] | 1570 | ret = try_grab_folio(page_folio(page), 1, flags); |
Logan Gunthorpe | 0f08923 | 2022-10-21 11:41:08 -0600 | [diff] [blame] | 1571 | if (ret) |
| 1572 | page = ERR_PTR(ret); |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 1573 | |
| 1574 | return page; |
| 1575 | } |
| 1576 | |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1577 | int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, |
| 1578 | pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, |
Peter Xu | 8f34f1e | 2021-06-30 18:49:02 -0700 | [diff] [blame] | 1579 | struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1580 | { |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1581 | spinlock_t *dst_ptl, *src_ptl; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1582 | struct page *src_page; |
David Hildenbrand | 96c772c | 2023-12-20 23:44:59 +0100 | [diff] [blame] | 1583 | struct folio *src_folio; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1584 | pmd_t pmd; |
Matthew Wilcox | 12c9d70 | 2016-02-02 16:57:57 -0800 | [diff] [blame] | 1585 | pgtable_t pgtable = NULL; |
Kirill A. Shutemov | 628d47c | 2016-07-26 15:25:42 -0700 | [diff] [blame] | 1586 | int ret = -ENOMEM; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1587 | |
Peter Xu | bc02afb | 2024-08-26 16:43:41 -0400 | [diff] [blame] | 1588 | pmd = pmdp_get_lockless(src_pmd); |
| 1589 | if (unlikely(pmd_special(pmd))) { |
| 1590 | dst_ptl = pmd_lock(dst_mm, dst_pmd); |
| 1591 | src_ptl = pmd_lockptr(src_mm, src_pmd); |
| 1592 | spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); |
| 1593 | /* |
| 1594 | * No need to recheck the pmd, it can't change with write |
| 1595 | * mmap lock held here. |
| 1596 | * |
 | 1597 | * Meanwhile, make sure this is not a CoW VMA with a writable |
 | 1598 | * mapping; otherwise either an anon page wrongly had the special |
 | 1599 | * bit applied, or the PRIVATE mapping was wrongly made able to |
 | 1600 | * write to the backing MMIO. |
| 1601 | */ |
| 1602 | VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd)); |
| 1603 | goto set_pmd; |
| 1604 | } |
| 1605 | |
Kirill A. Shutemov | 628d47c | 2016-07-26 15:25:42 -0700 | [diff] [blame] | 1606 | /* Skip if can be re-fill on fault */ |
Peter Xu | 8f34f1e | 2021-06-30 18:49:02 -0700 | [diff] [blame] | 1607 | if (!vma_is_anonymous(dst_vma)) |
Kirill A. Shutemov | 628d47c | 2016-07-26 15:25:42 -0700 | [diff] [blame] | 1608 | return 0; |
| 1609 | |
Joel Fernandes (Google) | 4cf5892 | 2019-01-03 15:28:34 -0800 | [diff] [blame] | 1610 | pgtable = pte_alloc_one(dst_mm); |
Kirill A. Shutemov | 628d47c | 2016-07-26 15:25:42 -0700 | [diff] [blame] | 1611 | if (unlikely(!pgtable)) |
| 1612 | goto out; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1613 | |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1614 | dst_ptl = pmd_lock(dst_mm, dst_pmd); |
| 1615 | src_ptl = pmd_lockptr(src_mm, src_pmd); |
| 1616 | spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1617 | |
| 1618 | ret = -EAGAIN; |
| 1619 | pmd = *src_pmd; |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 1620 | |
| 1621 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION |
| 1622 | if (unlikely(is_swap_pmd(pmd))) { |
| 1623 | swp_entry_t entry = pmd_to_swp_entry(pmd); |
| 1624 | |
| 1625 | VM_BUG_ON(!is_pmd_migration_entry(pmd)); |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1626 | if (!is_readable_migration_entry(entry)) { |
Alistair Popple | 4dd845b | 2021-06-30 18:54:09 -0700 | [diff] [blame] | 1627 | entry = make_readable_migration_entry( |
| 1628 | swp_offset(entry)); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 1629 | pmd = swp_entry_to_pmd(entry); |
Naoya Horiguchi | ab6e3d0 | 2017-09-08 16:11:04 -0700 | [diff] [blame] | 1630 | if (pmd_swp_soft_dirty(*src_pmd)) |
| 1631 | pmd = pmd_swp_mksoft_dirty(pmd); |
Peter Xu | 8f34f1e | 2021-06-30 18:49:02 -0700 | [diff] [blame] | 1632 | if (pmd_swp_uffd_wp(*src_pmd)) |
| 1633 | pmd = pmd_swp_mkuffd_wp(pmd); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 1634 | set_pmd_at(src_mm, addr, src_pmd, pmd); |
| 1635 | } |
Zi Yan | dd8a67f | 2017-11-02 15:59:47 -0700 | [diff] [blame] | 1636 | add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); |
Kirill A. Shutemov | af5b0f6 | 2017-11-15 17:35:40 -0800 | [diff] [blame] | 1637 | mm_inc_nr_ptes(dst_mm); |
Zi Yan | dd8a67f | 2017-11-02 15:59:47 -0700 | [diff] [blame] | 1638 | pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); |
Peter Xu | 8f34f1e | 2021-06-30 18:49:02 -0700 | [diff] [blame] | 1639 | if (!userfaultfd_wp(dst_vma)) |
| 1640 | pmd = pmd_swp_clear_uffd_wp(pmd); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 1641 | set_pmd_at(dst_mm, addr, dst_pmd, pmd); |
| 1642 | ret = 0; |
| 1643 | goto out_unlock; |
| 1644 | } |
| 1645 | #endif |
| 1646 | |
Kirill A. Shutemov | 628d47c | 2016-07-26 15:25:42 -0700 | [diff] [blame] | 1647 | if (unlikely(!pmd_trans_huge(pmd))) { |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1648 | pte_free(dst_mm, pgtable); |
| 1649 | goto out_unlock; |
| 1650 | } |
Kirill A. Shutemov | fc9fe82 | 2012-12-12 13:50:51 -0800 | [diff] [blame] | 1651 | /* |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1652 | * When page table lock is held, the huge zero pmd should not be |
Kirill A. Shutemov | fc9fe82 | 2012-12-12 13:50:51 -0800 | [diff] [blame] | 1653 | * under splitting since we don't split the page itself, only the |
 | 1654 | * pmd into a page table. |
| 1655 | */ |
| 1656 | if (is_huge_zero_pmd(pmd)) { |
Kirill A. Shutemov | 97ae174 | 2012-12-12 13:51:06 -0800 | [diff] [blame] | 1657 | /* |
Matthew Wilcox (Oracle) | e28833b | 2024-03-26 20:28:26 +0000 | [diff] [blame] | 1658 | * mm_get_huge_zero_folio() will never allocate a new |
| 1659 | * folio here, since we already have a zero page to |
| 1660 | * copy. It just takes a reference. |
Kirill A. Shutemov | 97ae174 | 2012-12-12 13:51:06 -0800 | [diff] [blame] | 1661 | */ |
Matthew Wilcox (Oracle) | 5691753 | 2024-03-26 20:28:25 +0000 | [diff] [blame] | 1662 | mm_get_huge_zero_folio(dst_mm); |
Peter Xu | 5fc7a5f | 2021-06-30 18:48:59 -0700 | [diff] [blame] | 1663 | goto out_zero_page; |
Kirill A. Shutemov | fc9fe82 | 2012-12-12 13:50:51 -0800 | [diff] [blame] | 1664 | } |
Mel Gorman | de466bd | 2013-12-18 17:08:42 -0800 | [diff] [blame] | 1665 | |
Kirill A. Shutemov | 628d47c | 2016-07-26 15:25:42 -0700 | [diff] [blame] | 1666 | src_page = pmd_page(pmd); |
| 1667 | VM_BUG_ON_PAGE(!PageHead(src_page), src_page); |
David Hildenbrand | 96c772c | 2023-12-20 23:44:59 +0100 | [diff] [blame] | 1668 | src_folio = page_folio(src_page); |
Peter Xu | d042035 | 2020-09-25 18:26:00 -0400 | [diff] [blame] | 1669 | |
David Hildenbrand | 96c772c | 2023-12-20 23:44:59 +0100 | [diff] [blame] | 1670 | folio_get(src_folio); |
| 1671 | if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) { |
David Hildenbrand | fb3d824 | 2022-05-09 18:20:43 -0700 | [diff] [blame] | 1672 | /* Page maybe pinned: split and retry the fault on PTEs. */ |
David Hildenbrand | 96c772c | 2023-12-20 23:44:59 +0100 | [diff] [blame] | 1673 | folio_put(src_folio); |
Peter Xu | d042035 | 2020-09-25 18:26:00 -0400 | [diff] [blame] | 1674 | pte_free(dst_mm, pgtable); |
| 1675 | spin_unlock(src_ptl); |
| 1676 | spin_unlock(dst_ptl); |
Peter Xu | 8f34f1e | 2021-06-30 18:49:02 -0700 | [diff] [blame] | 1677 | __split_huge_pmd(src_vma, src_pmd, addr, false, NULL); |
Peter Xu | d042035 | 2020-09-25 18:26:00 -0400 | [diff] [blame] | 1678 | return -EAGAIN; |
| 1679 | } |
Kirill A. Shutemov | 628d47c | 2016-07-26 15:25:42 -0700 | [diff] [blame] | 1680 | add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); |
Peter Xu | 5fc7a5f | 2021-06-30 18:48:59 -0700 | [diff] [blame] | 1681 | out_zero_page: |
Kirill A. Shutemov | c481290 | 2017-11-15 17:35:37 -0800 | [diff] [blame] | 1682 | mm_inc_nr_ptes(dst_mm); |
Kirill A. Shutemov | 628d47c | 2016-07-26 15:25:42 -0700 | [diff] [blame] | 1683 | pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1684 | pmdp_set_wrprotect(src_mm, addr, src_pmd); |
Peter Xu | 8f34f1e | 2021-06-30 18:49:02 -0700 | [diff] [blame] | 1685 | if (!userfaultfd_wp(dst_vma)) |
| 1686 | pmd = pmd_clear_uffd_wp(pmd); |
Peter Xu | bc02afb | 2024-08-26 16:43:41 -0400 | [diff] [blame] | 1687 | pmd = pmd_wrprotect(pmd); |
| 1688 | set_pmd: |
| 1689 | pmd = pmd_mkold(pmd); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1690 | set_pmd_at(dst_mm, addr, dst_pmd, pmd); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1691 | |
| 1692 | ret = 0; |
| 1693 | out_unlock: |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1694 | spin_unlock(src_ptl); |
| 1695 | spin_unlock(dst_ptl); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1696 | out: |
| 1697 | return ret; |
| 1698 | } |
| 1699 | |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1700 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD |
Peter Xu | 1b16761 | 2024-03-27 11:23:29 -0400 | [diff] [blame] | 1701 | void touch_pud(struct vm_area_struct *vma, unsigned long addr, |
| 1702 | pud_t *pud, bool write) |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1703 | { |
| 1704 | pud_t _pud; |
| 1705 | |
Kirill A. Shutemov | a8f9736 | 2017-11-27 06:21:25 +0300 | [diff] [blame] | 1706 | _pud = pud_mkyoung(*pud); |
Miaohe Lin | 5fe653e | 2022-07-04 21:21:49 +0800 | [diff] [blame] | 1707 | if (write) |
Kirill A. Shutemov | a8f9736 | 2017-11-27 06:21:25 +0300 | [diff] [blame] | 1708 | _pud = pud_mkdirty(_pud); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1709 | if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, |
Miaohe Lin | 5fe653e | 2022-07-04 21:21:49 +0800 | [diff] [blame] | 1710 | pud, _pud, write)) |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1711 | update_mmu_cache_pud(vma, addr, pud); |
| 1712 | } |
| 1713 | |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1714 | int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, |
| 1715 | pud_t *dst_pud, pud_t *src_pud, unsigned long addr, |
| 1716 | struct vm_area_struct *vma) |
| 1717 | { |
| 1718 | spinlock_t *dst_ptl, *src_ptl; |
| 1719 | pud_t pud; |
| 1720 | int ret; |
| 1721 | |
| 1722 | dst_ptl = pud_lock(dst_mm, dst_pud); |
| 1723 | src_ptl = pud_lockptr(src_mm, src_pud); |
| 1724 | spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); |
| 1725 | |
| 1726 | ret = -EAGAIN; |
| 1727 | pud = *src_pud; |
| 1728 | if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud))) |
| 1729 | goto out_unlock; |
| 1730 | |
| 1731 | /* |
David Hildenbrand | 96c772c | 2023-12-20 23:44:59 +0100 | [diff] [blame] | 1732 | * TODO: once we support anonymous pages, use |
| 1733 | * folio_try_dup_anon_rmap_*() and split if duplicating fails. |
David Hildenbrand | fb3d824 | 2022-05-09 18:20:43 -0700 | [diff] [blame] | 1734 | */ |
Peter Xu | bc02afb | 2024-08-26 16:43:41 -0400 | [diff] [blame] | 1735 | if (is_cow_mapping(vma->vm_flags) && pud_write(pud)) { |
| 1736 | pudp_set_wrprotect(src_mm, addr, src_pud); |
| 1737 | pud = pud_wrprotect(pud); |
| 1738 | } |
| 1739 | pud = pud_mkold(pud); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1740 | set_pud_at(dst_mm, addr, dst_pud, pud); |
| 1741 | |
| 1742 | ret = 0; |
| 1743 | out_unlock: |
| 1744 | spin_unlock(src_ptl); |
| 1745 | spin_unlock(dst_ptl); |
| 1746 | return ret; |
| 1747 | } |
| 1748 | |
| 1749 | void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) |
| 1750 | { |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1751 | bool write = vmf->flags & FAULT_FLAG_WRITE; |
| 1752 | |
| 1753 | vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); |
| 1754 | if (unlikely(!pud_same(*vmf->pud, orig_pud))) |
| 1755 | goto unlock; |
| 1756 | |
Miaohe Lin | 5fe653e | 2022-07-04 21:21:49 +0800 | [diff] [blame] | 1757 | touch_pud(vmf->vma, vmf->address, vmf->pud, write); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1758 | unlock: |
| 1759 | spin_unlock(vmf->ptl); |
| 1760 | } |
| 1761 | #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ |
| 1762 | |
Yang Shi | 5db4f15 | 2021-06-30 18:51:35 -0700 | [diff] [blame] | 1763 | void huge_pmd_set_accessed(struct vm_fault *vmf) |
Will Deacon | a1dd450 | 2012-12-11 16:01:27 -0800 | [diff] [blame] | 1764 | { |
Minchan Kim | 20f664a | 2017-01-10 16:57:51 -0800 | [diff] [blame] | 1765 | bool write = vmf->flags & FAULT_FLAG_WRITE; |
Will Deacon | a1dd450 | 2012-12-11 16:01:27 -0800 | [diff] [blame] | 1766 | |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1767 | vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); |
Miaohe Lin | a69e471 | 2022-07-04 21:21:50 +0800 | [diff] [blame] | 1768 | if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) |
Will Deacon | a1dd450 | 2012-12-11 16:01:27 -0800 | [diff] [blame] | 1769 | goto unlock; |
| 1770 | |
Miaohe Lin | a69e471 | 2022-07-04 21:21:50 +0800 | [diff] [blame] | 1771 | touch_pmd(vmf->vma, vmf->address, vmf->pmd, write); |
Will Deacon | a1dd450 | 2012-12-11 16:01:27 -0800 | [diff] [blame] | 1772 | |
| 1773 | unlock: |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1774 | spin_unlock(vmf->ptl); |
Will Deacon | a1dd450 | 2012-12-11 16:01:27 -0800 | [diff] [blame] | 1775 | } |
| 1776 | |
Yang Shi | 5db4f15 | 2021-06-30 18:51:35 -0700 | [diff] [blame] | 1777 | vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1778 | { |
David Hildenbrand | c89357e | 2022-05-09 18:20:45 -0700 | [diff] [blame] | 1779 | const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1780 | struct vm_area_struct *vma = vmf->vma; |
Matthew Wilcox (Oracle) | 2fad3d1 | 2022-09-02 20:46:38 +0100 | [diff] [blame] | 1781 | struct folio *folio; |
Kirill A. Shutemov | 3917c80 | 2020-06-03 16:00:27 -0700 | [diff] [blame] | 1782 | struct page *page; |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1783 | unsigned long haddr = vmf->address & HPAGE_PMD_MASK; |
Yang Shi | 5db4f15 | 2021-06-30 18:51:35 -0700 | [diff] [blame] | 1784 | pmd_t orig_pmd = vmf->orig_pmd; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1785 | |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1786 | vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); |
Sasha Levin | 81d1b09 | 2014-10-09 15:28:10 -0700 | [diff] [blame] | 1787 | VM_BUG_ON_VMA(!vma->anon_vma, vma); |
Kirill A. Shutemov | 3917c80 | 2020-06-03 16:00:27 -0700 | [diff] [blame] | 1788 | |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1789 | if (is_huge_zero_pmd(orig_pmd)) |
Kirill A. Shutemov | 3917c80 | 2020-06-03 16:00:27 -0700 | [diff] [blame] | 1790 | goto fallback; |
| 1791 | |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1792 | spin_lock(vmf->ptl); |
Kirill A. Shutemov | 3917c80 | 2020-06-03 16:00:27 -0700 | [diff] [blame] | 1793 | |
| 1794 | if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { |
| 1795 | spin_unlock(vmf->ptl); |
| 1796 | return 0; |
| 1797 | } |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1798 | |
| 1799 | page = pmd_page(orig_pmd); |
Matthew Wilcox (Oracle) | 2fad3d1 | 2022-09-02 20:46:38 +0100 | [diff] [blame] | 1800 | folio = page_folio(page); |
Miaohe Lin | f6004e7 | 2021-05-04 18:34:02 -0700 | [diff] [blame] | 1801 | VM_BUG_ON_PAGE(!PageHead(page), page); |
Kirill A. Shutemov | 3917c80 | 2020-06-03 16:00:27 -0700 | [diff] [blame] | 1802 | |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1803 | /* Early check when only holding the PT lock. */ |
| 1804 | if (PageAnonExclusive(page)) |
| 1805 | goto reuse; |
| 1806 | |
Matthew Wilcox (Oracle) | 2fad3d1 | 2022-09-02 20:46:38 +0100 | [diff] [blame] | 1807 | if (!folio_trylock(folio)) { |
| 1808 | folio_get(folio); |
Huang Ying | ba3c4ce | 2017-09-06 16:22:19 -0700 | [diff] [blame] | 1809 | spin_unlock(vmf->ptl); |
Matthew Wilcox (Oracle) | 2fad3d1 | 2022-09-02 20:46:38 +0100 | [diff] [blame] | 1810 | folio_lock(folio); |
Huang Ying | ba3c4ce | 2017-09-06 16:22:19 -0700 | [diff] [blame] | 1811 | spin_lock(vmf->ptl); |
| 1812 | if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { |
Kirill A. Shutemov | 3917c80 | 2020-06-03 16:00:27 -0700 | [diff] [blame] | 1813 | spin_unlock(vmf->ptl); |
Matthew Wilcox (Oracle) | 2fad3d1 | 2022-09-02 20:46:38 +0100 | [diff] [blame] | 1814 | folio_unlock(folio); |
| 1815 | folio_put(folio); |
Kirill A. Shutemov | 3917c80 | 2020-06-03 16:00:27 -0700 | [diff] [blame] | 1816 | return 0; |
Huang Ying | ba3c4ce | 2017-09-06 16:22:19 -0700 | [diff] [blame] | 1817 | } |
Matthew Wilcox (Oracle) | 2fad3d1 | 2022-09-02 20:46:38 +0100 | [diff] [blame] | 1818 | folio_put(folio); |
Huang Ying | ba3c4ce | 2017-09-06 16:22:19 -0700 | [diff] [blame] | 1819 | } |
Kirill A. Shutemov | 3917c80 | 2020-06-03 16:00:27 -0700 | [diff] [blame] | 1820 | |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1821 | /* Recheck after temporarily dropping the PT lock. */ |
| 1822 | if (PageAnonExclusive(page)) { |
Matthew Wilcox (Oracle) | 2fad3d1 | 2022-09-02 20:46:38 +0100 | [diff] [blame] | 1823 | folio_unlock(folio); |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1824 | goto reuse; |
| 1825 | } |
| 1826 | |
Kirill A. Shutemov | 3917c80 | 2020-06-03 16:00:27 -0700 | [diff] [blame] | 1827 | /* |
Matthew Wilcox (Oracle) | 2fad3d1 | 2022-09-02 20:46:38 +0100 | [diff] [blame] | 1828 | * See do_wp_page(): we can only reuse the folio exclusively if |
| 1829 | * there are no additional references. Note that we always drain |
Matthew Wilcox (Oracle) | 1fec6890 | 2023-06-21 17:45:56 +0100 | [diff] [blame] | 1830 | * the LRU cache immediately after adding a THP. |
Kirill A. Shutemov | 3917c80 | 2020-06-03 16:00:27 -0700 | [diff] [blame] | 1831 | */ |
Matthew Wilcox (Oracle) | 2fad3d1 | 2022-09-02 20:46:38 +0100 | [diff] [blame] | 1832 | if (folio_ref_count(folio) > |
| 1833 | 1 + folio_test_swapcache(folio) * folio_nr_pages(folio)) |
David Hildenbrand | 3bff7e3 | 2022-03-24 18:13:43 -0700 | [diff] [blame] | 1834 | goto unlock_fallback; |
Matthew Wilcox (Oracle) | 2fad3d1 | 2022-09-02 20:46:38 +0100 | [diff] [blame] | 1835 | if (folio_test_swapcache(folio)) |
| 1836 | folio_free_swap(folio); |
| 1837 | if (folio_ref_count(folio) == 1) { |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1838 | pmd_t entry; |
David Hildenbrand | 6c54dc6 | 2022-05-09 18:20:43 -0700 | [diff] [blame] | 1839 | |
David Hildenbrand | 0696862 | 2023-10-02 16:29:48 +0200 | [diff] [blame] | 1840 | folio_move_anon_rmap(folio, vma); |
David Hildenbrand | 5ca4328 | 2023-10-02 16:29:47 +0200 | [diff] [blame] | 1841 | SetPageAnonExclusive(page); |
Matthew Wilcox (Oracle) | 2fad3d1 | 2022-09-02 20:46:38 +0100 | [diff] [blame] | 1842 | folio_unlock(folio); |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 1843 | reuse: |
David Hildenbrand | c89357e | 2022-05-09 18:20:45 -0700 | [diff] [blame] | 1844 | if (unlikely(unshare)) { |
| 1845 | spin_unlock(vmf->ptl); |
| 1846 | return 0; |
| 1847 | } |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1848 | entry = pmd_mkyoung(orig_pmd); |
Linus Torvalds | f55e101 | 2017-11-29 09:01:01 -0800 | [diff] [blame] | 1849 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); |
Kirill A. Shutemov | 3917c80 | 2020-06-03 16:00:27 -0700 | [diff] [blame] | 1850 | if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1851 | update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1852 | spin_unlock(vmf->ptl); |
David Hildenbrand | cb8d863 | 2022-10-21 12:11:35 +0200 | [diff] [blame] | 1853 | return 0; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1854 | } |
Kirill A. Shutemov | 3917c80 | 2020-06-03 16:00:27 -0700 | [diff] [blame] | 1855 | |
David Hildenbrand | 3bff7e3 | 2022-03-24 18:13:43 -0700 | [diff] [blame] | 1856 | unlock_fallback: |
Matthew Wilcox (Oracle) | 2fad3d1 | 2022-09-02 20:46:38 +0100 | [diff] [blame] | 1857 | folio_unlock(folio); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1858 | spin_unlock(vmf->ptl); |
Kirill A. Shutemov | 3917c80 | 2020-06-03 16:00:27 -0700 | [diff] [blame] | 1859 | fallback: |
| 1860 | __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); |
| 1861 | return VM_FAULT_FALLBACK; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1862 | } |
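| | |
| | /* |
| |  * In short (a summary of the paths above): the huge zero page and any |
| |  * folio with extra references fall back to __split_huge_pmd() so the |
| |  * fault is retried at the pte level; an exclusively owned anonymous |
| |  * folio is reused in place, with the pmd made young, dirty and writable |
| |  * for a write fault, while an unshare fault simply returns. |
| |  */ |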
| 1863 | |
David Hildenbrand | c27f479 | 2022-11-08 18:46:48 +0100 | [diff] [blame] | 1864 | static inline bool can_change_pmd_writable(struct vm_area_struct *vma, |
| 1865 | unsigned long addr, pmd_t pmd) |
| 1866 | { |
| 1867 | struct page *page; |
| 1868 | |
| 1869 | if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE))) |
| 1870 | return false; |
| 1871 | |
| 1872 | /* Don't touch entries that are not even readable (NUMA hinting). */ |
| 1873 | if (pmd_protnone(pmd)) |
| 1874 | return false; |
| 1875 | |
| 1876 | /* Do we need write faults for softdirty tracking? */ |
Barry Song | f38ee28 | 2024-06-08 09:13:57 +1200 | [diff] [blame] | 1877 | if (pmd_needs_soft_dirty_wp(vma, pmd)) |
David Hildenbrand | c27f479 | 2022-11-08 18:46:48 +0100 | [diff] [blame] | 1878 | return false; |
| 1879 | |
| 1880 | /* Do we need write faults for uffd-wp tracking? */ |
| 1881 | if (userfaultfd_huge_pmd_wp(vma, pmd)) |
| 1882 | return false; |
| 1883 | |
| 1884 | if (!(vma->vm_flags & VM_SHARED)) { |
| 1885 | /* See can_change_pte_writable(). */ |
| 1886 | page = vm_normal_page_pmd(vma, addr, pmd); |
| 1887 | return page && PageAnon(page) && PageAnonExclusive(page); |
| 1888 | } |
| 1889 | |
| 1890 | /* See can_change_pte_writable(). */ |
| 1891 | return pmd_dirty(pmd); |
| 1892 | } |
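| | |
| | /* |
| |  * Illustrative call pattern (a sketch of how do_huge_pmd_numa_page() and |
| |  * change_huge_pmd() below use this helper, with the PT lock held): |
| |  * |
| |  *	if (!pmd_write(pmd) && vma_wants_manual_pte_write_upgrade(vma) && |
| |  *	    can_change_pmd_writable(vma, addr, pmd)) |
| |  *		pmd = pmd_mkwrite(pmd, vma); |
| |  */ |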
| 1893 | |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1894 | /* NUMA hinting page fault entry point for trans huge pmds */ |
Yang Shi | 5db4f15 | 2021-06-30 18:51:35 -0700 | [diff] [blame] | 1895 | vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1896 | { |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1897 | struct vm_area_struct *vma = vmf->vma; |
Kefeng Wang | 667ffc3 | 2023-09-21 15:44:13 +0800 | [diff] [blame] | 1898 | struct folio *folio; |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1899 | unsigned long haddr = vmf->address & HPAGE_PMD_MASK; |
Kefeng Wang | 667ffc3 | 2023-09-21 15:44:13 +0800 | [diff] [blame] | 1900 | int nid = NUMA_NO_NODE; |
Zi Yan | 727d50a7 | 2024-08-09 10:59:06 -0400 | [diff] [blame] | 1901 | int target_nid, last_cpupid; |
| 1902 | pmd_t pmd, old_pmd; |
David Hildenbrand | 4b88c23 | 2024-06-20 23:29:34 +0200 | [diff] [blame] | 1903 | bool writable = false; |
Peter Zijlstra | 6688cc0 | 2013-10-07 11:29:24 +0100 | [diff] [blame] | 1904 | int flags = 0; |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1905 | |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1906 | vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); |
Zi Yan | 727d50a7 | 2024-08-09 10:59:06 -0400 | [diff] [blame] | 1907 | old_pmd = pmdp_get(vmf->pmd); |
| 1908 | |
| 1909 | if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) { |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1910 | spin_unlock(vmf->ptl); |
Zi Yan | fd8c35a | 2024-08-09 10:59:05 -0400 | [diff] [blame] | 1911 | return 0; |
Mel Gorman | de466bd | 2013-12-18 17:08:42 -0800 | [diff] [blame] | 1912 | } |
| 1913 | |
Zi Yan | 727d50a7 | 2024-08-09 10:59:06 -0400 | [diff] [blame] | 1914 | pmd = pmd_modify(old_pmd, vma->vm_page_prot); |
David Hildenbrand | 6a56ccb | 2022-11-08 18:46:50 +0100 | [diff] [blame] | 1915 | |
| 1916 | /* |
| 1917 | * Detect now whether the PMD could be writable; this information |
| 1918 | * is only valid while holding the PT lock. |
| 1919 | */ |
| 1920 | writable = pmd_write(pmd); |
| 1921 | if (!writable && vma_wants_manual_pte_write_upgrade(vma) && |
| 1922 | can_change_pmd_writable(vma, vmf->address, pmd)) |
| 1923 | writable = true; |
| 1924 | |
Kefeng Wang | 667ffc3 | 2023-09-21 15:44:13 +0800 | [diff] [blame] | 1925 | folio = vm_normal_folio_pmd(vma, haddr, pmd); |
| 1926 | if (!folio) |
Yang Shi | c5b5a3d | 2021-06-30 18:51:42 -0700 | [diff] [blame] | 1927 | goto out_map; |
| 1928 | |
Kefeng Wang | 667ffc3 | 2023-09-21 15:44:13 +0800 | [diff] [blame] | 1929 | nid = folio_nid(folio); |
Zi Yan | 727d50a7 | 2024-08-09 10:59:06 -0400 | [diff] [blame] | 1930 | |
| 1931 | target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable, |
| 1932 | &last_cpupid); |
David Hildenbrand | ee86814 | 2024-06-20 23:29:35 +0200 | [diff] [blame] | 1933 | if (target_nid == NUMA_NO_NODE) |
| 1934 | goto out_map; |
| 1935 | if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) { |
| 1936 | flags |= TNF_MIGRATE_FAIL; |
Yang Shi | c5b5a3d | 2021-06-30 18:51:42 -0700 | [diff] [blame] | 1937 | goto out_map; |
| 1938 | } |
David Hildenbrand | ee86814 | 2024-06-20 23:29:35 +0200 | [diff] [blame] | 1939 | /* The folio is isolated and the isolation code holds a folio reference. */ |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1940 | spin_unlock(vmf->ptl); |
David Hildenbrand | 6a56ccb | 2022-11-08 18:46:50 +0100 | [diff] [blame] | 1941 | writable = false; |
Peter Zijlstra | 8b1b436 | 2017-06-07 18:05:07 +0200 | [diff] [blame] | 1942 | |
David Hildenbrand | 4b88c23 | 2024-06-20 23:29:34 +0200 | [diff] [blame] | 1943 | if (!migrate_misplaced_folio(folio, vma, target_nid)) { |
Peter Zijlstra | 6688cc0 | 2013-10-07 11:29:24 +0100 | [diff] [blame] | 1944 | flags |= TNF_MIGRATED; |
Kefeng Wang | 667ffc3 | 2023-09-21 15:44:13 +0800 | [diff] [blame] | 1945 | nid = target_nid; |
Zi Yan | fd8c35a | 2024-08-09 10:59:05 -0400 | [diff] [blame] | 1946 | task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags); |
| 1947 | return 0; |
Yang Shi | c5b5a3d | 2021-06-30 18:51:42 -0700 | [diff] [blame] | 1948 | } |
Mel Gorman | b891663 | 2013-10-07 11:28:44 +0100 | [diff] [blame] | 1949 | |
Zi Yan | fd8c35a | 2024-08-09 10:59:05 -0400 | [diff] [blame] | 1950 | flags |= TNF_MIGRATE_FAIL; |
| 1951 | vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); |
Zi Yan | 727d50a7 | 2024-08-09 10:59:06 -0400 | [diff] [blame] | 1952 | if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) { |
Zi Yan | fd8c35a | 2024-08-09 10:59:05 -0400 | [diff] [blame] | 1953 | spin_unlock(vmf->ptl); |
| 1954 | return 0; |
| 1955 | } |
Yang Shi | c5b5a3d | 2021-06-30 18:51:42 -0700 | [diff] [blame] | 1956 | out_map: |
| 1957 | /* Restore the PMD */ |
Zi Yan | 727d50a7 | 2024-08-09 10:59:06 -0400 | [diff] [blame] | 1958 | pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot); |
Yang Shi | c5b5a3d | 2021-06-30 18:51:42 -0700 | [diff] [blame] | 1959 | pmd = pmd_mkyoung(pmd); |
David Hildenbrand | 6a56ccb | 2022-11-08 18:46:50 +0100 | [diff] [blame] | 1960 | if (writable) |
Rick Edgecombe | 161e393 | 2023-06-12 17:10:29 -0700 | [diff] [blame] | 1961 | pmd = pmd_mkwrite(pmd, vma); |
Yang Shi | c5b5a3d | 2021-06-30 18:51:42 -0700 | [diff] [blame] | 1962 | set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); |
| 1963 | update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); |
| 1964 | spin_unlock(vmf->ptl); |
Zi Yan | fd8c35a | 2024-08-09 10:59:05 -0400 | [diff] [blame] | 1965 | |
| 1966 | if (nid != NUMA_NO_NODE) |
| 1967 | task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags); |
| 1968 | return 0; |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1969 | } |
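| | |
| | /* |
| |  * Summary of the outcomes above: a successful migration accounts the NUMA |
| |  * fault against the target node and returns; a failed isolation or |
| |  * migration sets TNF_MIGRATE_FAIL and re-establishes the PMD under the PT |
| |  * lock at out_map; a PMD that changed under us returns 0 so the fault can |
| |  * be retried. |
| |  */ |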
| 1970 | |
Huang Ying | 319904a | 2016-07-28 15:48:03 -0700 | [diff] [blame] | 1971 | /* |
| 1972 | * Return true if we do MADV_FREE successfully on entire pmd page. |
| 1973 | * Otherwise, return false. |
| 1974 | */ |
| 1975 | bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1976 | pmd_t *pmd, unsigned long addr, unsigned long next) |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1977 | { |
| 1978 | spinlock_t *ptl; |
| 1979 | pmd_t orig_pmd; |
Kefeng Wang | fc986a3 | 2022-12-07 10:34:30 +0800 | [diff] [blame] | 1980 | struct folio *folio; |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1981 | struct mm_struct *mm = tlb->mm; |
Huang Ying | 319904a | 2016-07-28 15:48:03 -0700 | [diff] [blame] | 1982 | bool ret = false; |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1983 | |
Peter Zijlstra | ed6a793 | 2018-08-31 14:46:08 +0200 | [diff] [blame] | 1984 | tlb_change_page_size(tlb, HPAGE_PMD_SIZE); |
Aneesh Kumar K.V | 07e3266 | 2016-12-12 16:42:40 -0800 | [diff] [blame] | 1985 | |
Kirill A. Shutemov | b6ec57f | 2016-01-21 16:40:25 -0800 | [diff] [blame] | 1986 | ptl = pmd_trans_huge_lock(pmd, vma); |
| 1987 | if (!ptl) |
Linus Torvalds | 25eedab | 2016-01-17 18:33:15 -0800 | [diff] [blame] | 1988 | goto out_unlocked; |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1989 | |
| 1990 | orig_pmd = *pmd; |
Huang Ying | 319904a | 2016-07-28 15:48:03 -0700 | [diff] [blame] | 1991 | if (is_huge_zero_pmd(orig_pmd)) |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1992 | goto out; |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1993 | |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 1994 | if (unlikely(!pmd_present(orig_pmd))) { |
| 1995 | VM_BUG_ON(thp_migration_supported() && |
| 1996 | !is_pmd_migration_entry(orig_pmd)); |
| 1997 | goto out; |
| 1998 | } |
| 1999 | |
Matthew Wilcox (Oracle) | e06d03d | 2024-03-26 20:28:23 +0000 | [diff] [blame] | 2000 | folio = pmd_folio(orig_pmd); |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 2001 | /* |
Kefeng Wang | fc986a3 | 2022-12-07 10:34:30 +0800 | [diff] [blame] | 2002 | * If other processes are mapping this folio, we can't discard |
| 2003 | * it unless they all do MADV_FREE, so let's skip the folio. |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 2004 | */ |
David Hildenbrand | ebb34f7 | 2024-02-27 21:15:48 +0100 | [diff] [blame] | 2005 | if (folio_likely_mapped_shared(folio)) |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 2006 | goto out; |
| 2007 | |
Kefeng Wang | fc986a3 | 2022-12-07 10:34:30 +0800 | [diff] [blame] | 2008 | if (!folio_trylock(folio)) |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 2009 | goto out; |
| 2010 | |
| 2011 | /* |
| 2012 | * If the user wants to discard only part of the THP's pages, split it so |
| 2013 | * MADV_FREE will deactivate only them. |
| 2014 | */ |
| 2015 | if (next - addr != HPAGE_PMD_SIZE) { |
Kefeng Wang | fc986a3 | 2022-12-07 10:34:30 +0800 | [diff] [blame] | 2016 | folio_get(folio); |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 2017 | spin_unlock(ptl); |
Kefeng Wang | fc986a3 | 2022-12-07 10:34:30 +0800 | [diff] [blame] | 2018 | split_folio(folio); |
| 2019 | folio_unlock(folio); |
| 2020 | folio_put(folio); |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 2021 | goto out_unlocked; |
| 2022 | } |
| 2023 | |
Kefeng Wang | fc986a3 | 2022-12-07 10:34:30 +0800 | [diff] [blame] | 2024 | if (folio_test_dirty(folio)) |
| 2025 | folio_clear_dirty(folio); |
| 2026 | folio_unlock(folio); |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 2027 | |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 2028 | if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { |
Kirill A. Shutemov | 58ceeb6 | 2017-04-13 14:56:26 -0700 | [diff] [blame] | 2029 | pmdp_invalidate(vma, addr, pmd); |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 2030 | orig_pmd = pmd_mkold(orig_pmd); |
| 2031 | orig_pmd = pmd_mkclean(orig_pmd); |
| 2032 | |
| 2033 | set_pmd_at(mm, addr, pmd, orig_pmd); |
| 2034 | tlb_remove_pmd_tlb_entry(tlb, pmd, addr); |
| 2035 | } |
Shaohua Li | 802a3a9 | 2017-05-03 14:52:32 -0700 | [diff] [blame] | 2036 | |
Kefeng Wang | 6a6fe9e | 2022-12-09 10:06:18 +0800 | [diff] [blame] | 2037 | folio_mark_lazyfree(folio); |
Huang Ying | 319904a | 2016-07-28 15:48:03 -0700 | [diff] [blame] | 2038 | ret = true; |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 2039 | out: |
| 2040 | spin_unlock(ptl); |
| 2041 | out_unlocked: |
| 2042 | return ret; |
| 2043 | } |
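| | |
| | /* |
| |  * In short: the huge zero page, non-present entries and folios mapped by |
| |  * other processes are skipped; a partial-range request splits the folio |
| |  * and leaves the work to the pte level; otherwise the folio is cleaned, |
| |  * the pmd is made old and clean, and the folio is marked lazyfree. |
| |  */ |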
| 2044 | |
Aneesh Kumar K.V | 953c66c | 2016-12-12 16:44:32 -0800 | [diff] [blame] | 2045 | static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) |
| 2046 | { |
| 2047 | pgtable_t pgtable; |
| 2048 | |
| 2049 | pgtable = pgtable_trans_huge_withdraw(mm, pmd); |
| 2050 | pte_free(mm, pgtable); |
Kirill A. Shutemov | c481290 | 2017-11-15 17:35:37 -0800 | [diff] [blame] | 2051 | mm_dec_nr_ptes(mm); |
Aneesh Kumar K.V | 953c66c | 2016-12-12 16:44:32 -0800 | [diff] [blame] | 2052 | } |
| 2053 | |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 2054 | int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, |
Shaohua Li | f21760b | 2012-01-12 17:19:16 -0800 | [diff] [blame] | 2055 | pmd_t *pmd, unsigned long addr) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 2056 | { |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 2057 | pmd_t orig_pmd; |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 2058 | spinlock_t *ptl; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 2059 | |
Peter Zijlstra | ed6a793 | 2018-08-31 14:46:08 +0200 | [diff] [blame] | 2060 | tlb_change_page_size(tlb, HPAGE_PMD_SIZE); |
Aneesh Kumar K.V | 07e3266 | 2016-12-12 16:42:40 -0800 | [diff] [blame] | 2061 | |
Kirill A. Shutemov | b6ec57f | 2016-01-21 16:40:25 -0800 | [diff] [blame] | 2062 | ptl = __pmd_trans_huge_lock(pmd, vma); |
| 2063 | if (!ptl) |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 2064 | return 0; |
| 2065 | /* |
| 2066 | * For architectures like ppc64 we look at the deposited pgtable |
| 2067 | * when calling pmdp_huge_get_and_clear, so do the |
| 2068 | * pgtable_trans_huge_withdraw after finishing the pmdp-related |
| 2069 | * operations. |
| 2070 | */ |
Aneesh Kumar K.V | 93a9869 | 2020-05-05 12:47:28 +0530 | [diff] [blame] | 2071 | orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, |
| 2072 | tlb->fullmm); |
Rick Edgecombe | e5136e8 | 2023-06-12 17:10:43 -0700 | [diff] [blame] | 2073 | arch_check_zapped_pmd(vma, orig_pmd); |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 2074 | tlb_remove_pmd_tlb_entry(tlb, pmd, addr); |
Thomas Hellstrom (VMware) | 2484ca9 | 2020-03-24 18:47:17 +0100 | [diff] [blame] | 2075 | if (vma_is_special_huge(vma)) { |
Oliver O'Halloran | 3b6521f | 2017-05-08 15:59:43 -0700 | [diff] [blame] | 2076 | if (arch_needs_pgtable_deposit()) |
| 2077 | zap_deposited_table(tlb->mm, pmd); |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 2078 | spin_unlock(ptl); |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 2079 | } else if (is_huge_zero_pmd(orig_pmd)) { |
Oliver O'Halloran | c14a6eb | 2017-05-08 15:59:40 -0700 | [diff] [blame] | 2080 | zap_deposited_table(tlb->mm, pmd); |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 2081 | spin_unlock(ptl); |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 2082 | } else { |
Kefeng Wang | 0103b27 | 2024-01-11 15:24:25 +0000 | [diff] [blame] | 2083 | struct folio *folio = NULL; |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 2084 | int flush_needed = 1; |
| 2085 | |
| 2086 | if (pmd_present(orig_pmd)) { |
Kefeng Wang | 0103b27 | 2024-01-11 15:24:25 +0000 | [diff] [blame] | 2087 | struct page *page = pmd_page(orig_pmd); |
| 2088 | |
| 2089 | folio = page_folio(page); |
| 2090 | folio_remove_rmap_pmd(folio, page, vma); |
David Hildenbrand | 0a7bda4 | 2024-04-09 21:22:51 +0200 | [diff] [blame] | 2091 | WARN_ON_ONCE(folio_mapcount(folio) < 0); |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 2092 | VM_BUG_ON_PAGE(!PageHead(page), page); |
| 2093 | } else if (thp_migration_supported()) { |
| 2094 | swp_entry_t entry; |
| 2095 | |
| 2096 | VM_BUG_ON(!is_pmd_migration_entry(orig_pmd)); |
| 2097 | entry = pmd_to_swp_entry(orig_pmd); |
Kefeng Wang | 0103b27 | 2024-01-11 15:24:25 +0000 | [diff] [blame] | 2098 | folio = pfn_swap_entry_folio(entry); |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 2099 | flush_needed = 0; |
| 2100 | } else |
| 2101 | WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); |
| 2102 | |
Kefeng Wang | 0103b27 | 2024-01-11 15:24:25 +0000 | [diff] [blame] | 2103 | if (folio_test_anon(folio)) { |
Oliver O'Halloran | c14a6eb | 2017-05-08 15:59:40 -0700 | [diff] [blame] | 2104 | zap_deposited_table(tlb->mm, pmd); |
Kirill A. Shutemov | b507238 | 2016-07-26 15:25:34 -0700 | [diff] [blame] | 2105 | add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); |
| 2106 | } else { |
Aneesh Kumar K.V | 953c66c | 2016-12-12 16:44:32 -0800 | [diff] [blame] | 2107 | if (arch_needs_pgtable_deposit()) |
| 2108 | zap_deposited_table(tlb->mm, pmd); |
Kefeng Wang | 6b27cc6c | 2024-01-11 15:24:29 +0000 | [diff] [blame] | 2109 | add_mm_counter(tlb->mm, mm_counter_file(folio), |
Kefeng Wang | 0103b27 | 2024-01-11 15:24:25 +0000 | [diff] [blame] | 2110 | -HPAGE_PMD_NR); |
Kirill A. Shutemov | b507238 | 2016-07-26 15:25:34 -0700 | [diff] [blame] | 2111 | } |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 2112 | |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 2113 | spin_unlock(ptl); |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 2114 | if (flush_needed) |
Kefeng Wang | 0103b27 | 2024-01-11 15:24:25 +0000 | [diff] [blame] | 2115 | tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE); |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 2116 | } |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 2117 | return 1; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 2118 | } |
| 2119 | |
Aneesh Kumar K.V | 1dd38b6 | 2016-12-12 16:44:29 -0800 | [diff] [blame] | 2120 | #ifndef pmd_move_must_withdraw |
| 2121 | static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, |
| 2122 | spinlock_t *old_pmd_ptl, |
| 2123 | struct vm_area_struct *vma) |
| 2124 | { |
| 2125 | /* |
| 2126 | * With the split pmd lock we also need to move the preallocated |
| 2127 | * PTE page table if new_pmd is on a different PMD page table. |
| 2128 | * |
| 2129 | * We also don't deposit and withdraw tables for file pages. |
| 2130 | */ |
| 2131 | return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); |
| 2132 | } |
| 2133 | #endif |
| 2134 | |
Naoya Horiguchi | ab6e3d0 | 2017-09-08 16:11:04 -0700 | [diff] [blame] | 2135 | static pmd_t move_soft_dirty_pmd(pmd_t pmd) |
| 2136 | { |
| 2137 | #ifdef CONFIG_MEM_SOFT_DIRTY |
| 2138 | if (unlikely(is_pmd_migration_entry(pmd))) |
| 2139 | pmd = pmd_swp_mksoft_dirty(pmd); |
| 2140 | else if (pmd_present(pmd)) |
| 2141 | pmd = pmd_mksoft_dirty(pmd); |
| 2142 | #endif |
| 2143 | return pmd; |
| 2144 | } |
| 2145 | |
Hugh Dickins | bf8616d | 2016-05-19 17:12:54 -0700 | [diff] [blame] | 2146 | bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, |
Wei Yang | b8aa9d9 | 2020-08-06 23:23:40 -0700 | [diff] [blame] | 2147 | unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 2148 | { |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 2149 | spinlock_t *old_ptl, *new_ptl; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 2150 | pmd_t pmd; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 2151 | struct mm_struct *mm = vma->vm_mm; |
Aaron Lu | 5d19042 | 2016-11-10 17:16:33 +0800 | [diff] [blame] | 2152 | bool force_flush = false; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 2153 | |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 2154 | /* |
| 2155 | * The destination pmd shouldn't be established; free_pgtables() |
Hugh Dickins | a5be621 | 2023-06-08 18:32:47 -0700 | [diff] [blame] | 2156 | * should have released it, but move_page_tables() might have already |
| 2157 | * inserted a page table if racing against shmem/file collapse. |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 2158 | */ |
Hugh Dickins | a5be621 | 2023-06-08 18:32:47 -0700 | [diff] [blame] | 2159 | if (!pmd_none(*new_pmd)) { |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 2160 | VM_BUG_ON(pmd_trans_huge(*new_pmd)); |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 2161 | return false; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 2162 | } |
| 2163 | |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 2164 | /* |
| 2165 | * We don't have to worry about the ordering of src and dst |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 2166 | * ptlocks because exclusive mmap_lock prevents deadlock. |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 2167 | */ |
Kirill A. Shutemov | b6ec57f | 2016-01-21 16:40:25 -0800 | [diff] [blame] | 2168 | old_ptl = __pmd_trans_huge_lock(old_pmd, vma); |
| 2169 | if (old_ptl) { |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 2170 | new_ptl = pmd_lockptr(mm, new_pmd); |
| 2171 | if (new_ptl != old_ptl) |
| 2172 | spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); |
Aneesh Kumar K.V | 8809aa2 | 2015-06-24 16:57:44 -0700 | [diff] [blame] | 2173 | pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); |
Linus Torvalds | eb66ae0 | 2018-10-12 15:22:59 -0700 | [diff] [blame] | 2174 | if (pmd_present(pmd)) |
Aaron Lu | a2ce266 | 2016-11-29 13:27:31 +0800 | [diff] [blame] | 2175 | force_flush = true; |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 2176 | VM_BUG_ON(!pmd_none(*new_pmd)); |
Kirill A. Shutemov | 3592806 | 2013-12-12 17:12:33 -0800 | [diff] [blame] | 2177 | |
Aneesh Kumar K.V | 1dd38b6 | 2016-12-12 16:44:29 -0800 | [diff] [blame] | 2178 | if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { |
Aneesh Kumar K.V | b3084f4 | 2014-01-13 11:34:24 +0530 | [diff] [blame] | 2179 | pgtable_t pgtable; |
Kirill A. Shutemov | 3592806 | 2013-12-12 17:12:33 -0800 | [diff] [blame] | 2180 | pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); |
| 2181 | pgtable_trans_huge_deposit(mm, new_pmd, pgtable); |
Kirill A. Shutemov | 3592806 | 2013-12-12 17:12:33 -0800 | [diff] [blame] | 2182 | } |
Naoya Horiguchi | ab6e3d0 | 2017-09-08 16:11:04 -0700 | [diff] [blame] | 2183 | pmd = move_soft_dirty_pmd(pmd); |
| 2184 | set_pmd_at(mm, new_addr, new_pmd, pmd); |
Aaron Lu | 5d19042 | 2016-11-10 17:16:33 +0800 | [diff] [blame] | 2185 | if (force_flush) |
Miaohe Lin | 7c38f18 | 2022-07-04 21:21:46 +0800 | [diff] [blame] | 2186 | flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE); |
Linus Torvalds | eb66ae0 | 2018-10-12 15:22:59 -0700 | [diff] [blame] | 2187 | if (new_ptl != old_ptl) |
| 2188 | spin_unlock(new_ptl); |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 2189 | spin_unlock(old_ptl); |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 2190 | return true; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 2191 | } |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 2192 | return false; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 2193 | } |
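| | |
| | /* |
| |  * Illustrative caller sketch (an assumption, simplified from the mremap |
| |  * path in mm/mremap.c; the real call site handles more cases): the |
| |  * PMD-level move is tried first and the caller only falls back to moving |
| |  * individual ptes when it fails: |
| |  * |
| |  *	if (pmd_trans_huge(*old_pmd) && |
| |  *	    move_huge_pmd(vma, old_addr, new_addr, old_pmd, new_pmd)) |
| |  *		continue;		// whole huge pmd moved |
| |  *	// otherwise split the pmd and move pte by pte |
| |  */ |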
| 2194 | |
Mel Gorman | f123d74 | 2013-10-07 11:28:49 +0100 | [diff] [blame] | 2195 | /* |
| 2196 | * Returns: |
| 2197 | * - 0 if PMD could not be locked |
Ingo Molnar | f0953a1 | 2021-05-06 18:06:47 -0700 | [diff] [blame] | 2198 | * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary |
Yang Shi | e346e66 | 2021-06-30 18:51:55 -0700 | [diff] [blame] | 2199 | * or if prot_numa but THP migration is not supported |
Ingo Molnar | f0953a1 | 2021-05-06 18:06:47 -0700 | [diff] [blame] | 2200 | * - HPAGE_PMD_NR if protections changed and TLB flush necessary |
Mel Gorman | f123d74 | 2013-10-07 11:28:49 +0100 | [diff] [blame] | 2201 | */ |
Nadav Amit | 4a18419 | 2022-05-09 18:20:50 -0700 | [diff] [blame] | 2202 | int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, |
| 2203 | pmd_t *pmd, unsigned long addr, pgprot_t newprot, |
| 2204 | unsigned long cp_flags) |
Johannes Weiner | cd7548a | 2011-01-13 15:47:04 -0800 | [diff] [blame] | 2205 | { |
| 2206 | struct mm_struct *mm = vma->vm_mm; |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 2207 | spinlock_t *ptl; |
Nadav Amit | c9fe665 | 2022-05-09 18:20:50 -0700 | [diff] [blame] | 2208 | pmd_t oldpmd, entry; |
Peter Xu | 5870544 | 2020-04-06 20:05:45 -0700 | [diff] [blame] | 2209 | bool prot_numa = cp_flags & MM_CP_PROT_NUMA; |
Peter Xu | 292924b | 2020-04-06 20:05:49 -0700 | [diff] [blame] | 2210 | bool uffd_wp = cp_flags & MM_CP_UFFD_WP; |
| 2211 | bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; |
David Hildenbrand | 6a56ccb | 2022-11-08 18:46:50 +0100 | [diff] [blame] | 2212 | int ret = 1; |
Johannes Weiner | cd7548a | 2011-01-13 15:47:04 -0800 | [diff] [blame] | 2213 | |
Nadav Amit | 4a18419 | 2022-05-09 18:20:50 -0700 | [diff] [blame] | 2214 | tlb_change_page_size(tlb, HPAGE_PMD_SIZE); |
| 2215 | |
Yang Shi | e346e66 | 2021-06-30 18:51:55 -0700 | [diff] [blame] | 2216 | if (prot_numa && !thp_migration_supported()) |
| 2217 | return 1; |
| 2218 | |
Kirill A. Shutemov | b6ec57f | 2016-01-21 16:40:25 -0800 | [diff] [blame] | 2219 | ptl = __pmd_trans_huge_lock(pmd, vma); |
Kirill A. Shutemov | 0a85e51d | 2017-04-13 14:56:17 -0700 | [diff] [blame] | 2220 | if (!ptl) |
| 2221 | return 0; |
Mel Gorman | e944fd6 | 2015-02-12 14:58:35 -0800 | [diff] [blame] | 2222 | |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 2223 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION |
| 2224 | if (is_swap_pmd(*pmd)) { |
| 2225 | swp_entry_t entry = pmd_to_swp_entry(*pmd); |
Matthew Wilcox (Oracle) | 5662400 | 2024-01-11 15:24:20 +0000 | [diff] [blame] | 2226 | struct folio *folio = pfn_swap_entry_folio(entry); |
David Hildenbrand | 24bf08c | 2023-04-05 18:02:35 +0200 | [diff] [blame] | 2227 | pmd_t newpmd; |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 2228 | |
| 2229 | VM_BUG_ON(!is_pmd_migration_entry(*pmd)); |
Alistair Popple | 4dd845b | 2021-06-30 18:54:09 -0700 | [diff] [blame] | 2230 | if (is_writable_migration_entry(entry)) { |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 2231 | /* |
| 2232 | * A protection check is difficult here, so |
| 2233 | * just be safe and disable the write bit |
| 2234 | */ |
Kefeng Wang | d986ba2 | 2023-10-18 22:07:57 +0800 | [diff] [blame] | 2235 | if (folio_test_anon(folio)) |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 2236 | entry = make_readable_exclusive_migration_entry(swp_offset(entry)); |
| 2237 | else |
| 2238 | entry = make_readable_migration_entry(swp_offset(entry)); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 2239 | newpmd = swp_entry_to_pmd(entry); |
Naoya Horiguchi | ab6e3d0 | 2017-09-08 16:11:04 -0700 | [diff] [blame] | 2240 | if (pmd_swp_soft_dirty(*pmd)) |
| 2241 | newpmd = pmd_swp_mksoft_dirty(newpmd); |
David Hildenbrand | 24bf08c | 2023-04-05 18:02:35 +0200 | [diff] [blame] | 2242 | } else { |
| 2243 | newpmd = *pmd; |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 2244 | } |
David Hildenbrand | 24bf08c | 2023-04-05 18:02:35 +0200 | [diff] [blame] | 2245 | |
| 2246 | if (uffd_wp) |
| 2247 | newpmd = pmd_swp_mkuffd_wp(newpmd); |
| 2248 | else if (uffd_wp_resolve) |
| 2249 | newpmd = pmd_swp_clear_uffd_wp(newpmd); |
| 2250 | if (!pmd_same(*pmd, newpmd)) |
| 2251 | set_pmd_at(mm, addr, pmd, newpmd); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 2252 | goto unlock; |
| 2253 | } |
| 2254 | #endif |
| 2255 | |
Huang Ying | a1a3a2f | 2022-03-22 14:46:27 -0700 | [diff] [blame] | 2256 | if (prot_numa) { |
Kefeng Wang | d986ba2 | 2023-10-18 22:07:57 +0800 | [diff] [blame] | 2257 | struct folio *folio; |
Huang Ying | 3302453 | 2022-07-13 16:39:51 +0800 | [diff] [blame] | 2258 | bool toptier; |
Huang Ying | a1a3a2f | 2022-03-22 14:46:27 -0700 | [diff] [blame] | 2259 | /* |
| 2260 | * Avoid trapping faults against the zero page. The read-only |
| 2261 | * data is likely to be read-cached on the local CPU and |
| 2262 | * local/remote hits to the zero page are not interesting. |
| 2263 | */ |
| 2264 | if (is_huge_zero_pmd(*pmd)) |
| 2265 | goto unlock; |
Johannes Weiner | cd7548a | 2011-01-13 15:47:04 -0800 | [diff] [blame] | 2266 | |
Huang Ying | a1a3a2f | 2022-03-22 14:46:27 -0700 | [diff] [blame] | 2267 | if (pmd_protnone(*pmd)) |
| 2268 | goto unlock; |
Kirill A. Shutemov | 0a85e51d | 2017-04-13 14:56:17 -0700 | [diff] [blame] | 2269 | |
Matthew Wilcox (Oracle) | e06d03d | 2024-03-26 20:28:23 +0000 | [diff] [blame] | 2270 | folio = pmd_folio(*pmd); |
Kefeng Wang | d986ba2 | 2023-10-18 22:07:57 +0800 | [diff] [blame] | 2271 | toptier = node_is_toptier(folio_nid(folio)); |
Huang Ying | a1a3a2f | 2022-03-22 14:46:27 -0700 | [diff] [blame] | 2272 | /* |
| 2273 | * Skip scanning the top-tier node if normal NUMA |
| 2274 | * balancing is disabled |
| 2275 | */ |
| 2276 | if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) && |
Huang Ying | 3302453 | 2022-07-13 16:39:51 +0800 | [diff] [blame] | 2277 | toptier) |
Huang Ying | a1a3a2f | 2022-03-22 14:46:27 -0700 | [diff] [blame] | 2278 | goto unlock; |
Huang Ying | 3302453 | 2022-07-13 16:39:51 +0800 | [diff] [blame] | 2279 | |
Zi Yan | 2a28713 | 2024-07-24 09:01:14 -0400 | [diff] [blame] | 2280 | if (folio_use_access_time(folio)) |
Kefeng Wang | d986ba2 | 2023-10-18 22:07:57 +0800 | [diff] [blame] | 2281 | folio_xchg_access_time(folio, |
| 2282 | jiffies_to_msecs(jiffies)); |
Huang Ying | a1a3a2f | 2022-03-22 14:46:27 -0700 | [diff] [blame] | 2283 | } |
Kirill A. Shutemov | ced1080 | 2017-04-13 14:56:20 -0700 | [diff] [blame] | 2284 | /* |
Michel Lespinasse | 3e4e28c | 2020-06-08 21:33:51 -0700 | [diff] [blame] | 2285 | * In case prot_numa, we are under mmap_read_lock(mm). It's critical |
Kirill A. Shutemov | ced1080 | 2017-04-13 14:56:20 -0700 | [diff] [blame] | 2286 | * to not clear pmd intermittently to avoid race with MADV_DONTNEED |
Michel Lespinasse | 3e4e28c | 2020-06-08 21:33:51 -0700 | [diff] [blame] | 2287 | * which is also under mmap_read_lock(mm): |
Kirill A. Shutemov | ced1080 | 2017-04-13 14:56:20 -0700 | [diff] [blame] | 2288 | * |
| 2289 | * CPU0: CPU1: |
| 2290 | * change_huge_pmd(prot_numa=1) |
| 2291 | * pmdp_huge_get_and_clear_notify() |
| 2292 | * madvise_dontneed() |
| 2293 | * zap_pmd_range() |
| 2294 | * pmd_trans_huge(*pmd) == 0 (without ptl) |
| 2295 | * // skip the pmd |
| 2296 | * set_pmd_at(); |
| 2297 | * // pmd is re-established |
| 2298 | * |
| 2299 | * The race makes MADV_DONTNEED miss the huge pmd and don't clear it |
| 2300 | * which may break userspace. |
| 2301 | * |
Nadav Amit | 4f83145 | 2022-05-09 18:20:50 -0700 | [diff] [blame] | 2302 | * pmdp_invalidate_ad() is required to make sure we don't miss |
Kirill A. Shutemov | ced1080 | 2017-04-13 14:56:20 -0700 | [diff] [blame] | 2303 | * dirty/young flags set by hardware. |
| 2304 | */ |
Nadav Amit | 4f83145 | 2022-05-09 18:20:50 -0700 | [diff] [blame] | 2305 | oldpmd = pmdp_invalidate_ad(vma, addr, pmd); |
Kirill A. Shutemov | ced1080 | 2017-04-13 14:56:20 -0700 | [diff] [blame] | 2306 | |
Nadav Amit | c9fe665 | 2022-05-09 18:20:50 -0700 | [diff] [blame] | 2307 | entry = pmd_modify(oldpmd, newprot); |
Peter Xu | f1eb1ba | 2022-12-14 15:15:33 -0500 | [diff] [blame] | 2308 | if (uffd_wp) |
Peter Xu | 292924b | 2020-04-06 20:05:49 -0700 | [diff] [blame] | 2309 | entry = pmd_mkuffd_wp(entry); |
Peter Xu | f1eb1ba | 2022-12-14 15:15:33 -0500 | [diff] [blame] | 2310 | else if (uffd_wp_resolve) |
Peter Xu | 292924b | 2020-04-06 20:05:49 -0700 | [diff] [blame] | 2311 | /* |
| 2312 | * Leave the write bit to be handled by the page fault |
| 2313 | * handler, so that things like COW can be handled |
| 2314 | * properly. |
| 2315 | */ |
| 2316 | entry = pmd_clear_uffd_wp(entry); |
David Hildenbrand | c27f479 | 2022-11-08 18:46:48 +0100 | [diff] [blame] | 2317 | |
| 2318 | /* See change_pte_range(). */ |
| 2319 | if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) && |
| 2320 | can_change_pmd_writable(vma, addr, entry)) |
Rick Edgecombe | 161e393 | 2023-06-12 17:10:29 -0700 | [diff] [blame] | 2321 | entry = pmd_mkwrite(entry, vma); |
David Hildenbrand | c27f479 | 2022-11-08 18:46:48 +0100 | [diff] [blame] | 2322 | |
Kirill A. Shutemov | 0a85e51d | 2017-04-13 14:56:17 -0700 | [diff] [blame] | 2323 | ret = HPAGE_PMD_NR; |
| 2324 | set_pmd_at(mm, addr, pmd, entry); |
Nadav Amit | 4a18419 | 2022-05-09 18:20:50 -0700 | [diff] [blame] | 2325 | |
Nadav Amit | c9fe665 | 2022-05-09 18:20:50 -0700 | [diff] [blame] | 2326 | if (huge_pmd_needs_flush(oldpmd, entry)) |
| 2327 | tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE); |
Kirill A. Shutemov | 0a85e51d | 2017-04-13 14:56:17 -0700 | [diff] [blame] | 2328 | unlock: |
| 2329 | spin_unlock(ptl); |
Johannes Weiner | cd7548a | 2011-01-13 15:47:04 -0800 | [diff] [blame] | 2330 | return ret; |
| 2331 | } |
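| | |
| | /* |
| |  * Illustrative interpretation of the return value (an assumption, |
| |  * simplified from the caller in mm/mprotect.c): |
| |  * |
| |  *	ret = change_huge_pmd(tlb, vma, pmd, addr, newprot, cp_flags); |
| |  *	if (ret) { |
| |  *		if (ret == HPAGE_PMD_NR) |
| |  *			pages += HPAGE_PMD_NR;	// whole THP updated |
| |  *		// ret == 1: nothing to do for this pmd |
| |  *	} else { |
| |  *		// ret == 0: lost a race, fall back to the pte level |
| |  *	} |
| |  */ |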
| 2332 | |
Peter Xu | cb0f01b | 2024-08-12 14:12:25 -0400 | [diff] [blame] | 2333 | /* |
| 2334 | * Returns: |
| 2335 | * |
| 2336 | * - 0: if pud leaf changed from under us |
| 2337 | * - 1: if pud can be skipped |
| 2338 | * - HPAGE_PUD_NR: if pud was successfully processed |
| 2339 | */ |
| 2340 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD |
| 2341 | int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, |
| 2342 | pud_t *pudp, unsigned long addr, pgprot_t newprot, |
| 2343 | unsigned long cp_flags) |
| 2344 | { |
| 2345 | struct mm_struct *mm = vma->vm_mm; |
| 2346 | pud_t oldpud, entry; |
| 2347 | spinlock_t *ptl; |
| 2348 | |
| 2349 | tlb_change_page_size(tlb, HPAGE_PUD_SIZE); |
| 2350 | |
| 2351 | /* NUMA balancing doesn't apply to dax */ |
| 2352 | if (cp_flags & MM_CP_PROT_NUMA) |
| 2353 | return 1; |
| 2354 | |
| 2355 | /* |
| 2356 | * Huge entries on userfault-wp only work with anonymous memory, while we |
| 2357 | * don't have anonymous PUDs yet. |
| 2358 | */ |
| 2359 | if (WARN_ON_ONCE(cp_flags & MM_CP_UFFD_WP_ALL)) |
| 2360 | return 1; |
| 2361 | |
| 2362 | ptl = __pud_trans_huge_lock(pudp, vma); |
| 2363 | if (!ptl) |
| 2364 | return 0; |
| 2365 | |
| 2366 | /* |
| 2367 | * Can't clear PUD or it can race with concurrent zapping. See |
| 2368 | * change_huge_pmd(). |
| 2369 | */ |
| 2370 | oldpud = pudp_invalidate(vma, addr, pudp); |
| 2371 | entry = pud_modify(oldpud, newprot); |
| 2372 | set_pud_at(mm, addr, pudp, entry); |
| 2373 | tlb_flush_pud_range(tlb, addr, HPAGE_PUD_SIZE); |
| 2374 | |
| 2375 | spin_unlock(ptl); |
| 2376 | return HPAGE_PUD_NR; |
| 2377 | } |
| 2378 | #endif |
| 2379 | |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 2380 | #ifdef CONFIG_USERFAULTFD |
| 2381 | /* |
Lokesh Gidra | 867a43a | 2024-02-15 10:27:56 -0800 | [diff] [blame] | 2382 | * The PT lock for src_pmd and the vma locks of dst_vma/src_vma (for reading) |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 2383 | * are held by the caller, but it must return after releasing the page_table_lock. |
| 2384 | * Just move the page from src_pmd to dst_pmd if possible. |
| 2385 | * Return zero if the page was moved successfully, -EAGAIN if it needs to be |
| 2386 | * repeated by the caller, or another error code in case of failure. |
| 2387 | */ |
| 2388 | int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval, |
| 2389 | struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, |
| 2390 | unsigned long dst_addr, unsigned long src_addr) |
| 2391 | { |
| 2392 | pmd_t _dst_pmd, src_pmdval; |
| 2393 | struct page *src_page; |
| 2394 | struct folio *src_folio; |
| 2395 | struct anon_vma *src_anon_vma; |
| 2396 | spinlock_t *src_ptl, *dst_ptl; |
| 2397 | pgtable_t src_pgtable; |
| 2398 | struct mmu_notifier_range range; |
| 2399 | int err = 0; |
| 2400 | |
| 2401 | src_pmdval = *src_pmd; |
| 2402 | src_ptl = pmd_lockptr(mm, src_pmd); |
| 2403 | |
| 2404 | lockdep_assert_held(src_ptl); |
Lokesh Gidra | 867a43a | 2024-02-15 10:27:56 -0800 | [diff] [blame] | 2405 | vma_assert_locked(src_vma); |
| 2406 | vma_assert_locked(dst_vma); |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 2407 | |
| 2408 | /* Sanity checks before the operation */ |
| 2409 | if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) || |
| 2410 | WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) { |
| 2411 | spin_unlock(src_ptl); |
| 2412 | return -EINVAL; |
| 2413 | } |
| 2414 | |
| 2415 | if (!pmd_trans_huge(src_pmdval)) { |
| 2416 | spin_unlock(src_ptl); |
| 2417 | if (is_pmd_migration_entry(src_pmdval)) { |
| 2418 | pmd_migration_entry_wait(mm, &src_pmdval); |
| 2419 | return -EAGAIN; |
| 2420 | } |
| 2421 | return -ENOENT; |
| 2422 | } |
| 2423 | |
| 2424 | src_page = pmd_page(src_pmdval); |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 2425 | |
Suren Baghdasaryan | eb1521d | 2024-01-31 09:56:18 -0800 | [diff] [blame] | 2426 | if (!is_huge_zero_pmd(src_pmdval)) { |
| 2427 | if (unlikely(!PageAnonExclusive(src_page))) { |
| 2428 | spin_unlock(src_ptl); |
| 2429 | return -EBUSY; |
| 2430 | } |
| 2431 | |
| 2432 | src_folio = page_folio(src_page); |
| 2433 | folio_get(src_folio); |
| 2434 | } else |
| 2435 | src_folio = NULL; |
| 2436 | |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 2437 | spin_unlock(src_ptl); |
| 2438 | |
| 2439 | flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE); |
| 2440 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr, |
| 2441 | src_addr + HPAGE_PMD_SIZE); |
| 2442 | mmu_notifier_invalidate_range_start(&range); |
| 2443 | |
Suren Baghdasaryan | eb1521d | 2024-01-31 09:56:18 -0800 | [diff] [blame] | 2444 | if (src_folio) { |
| 2445 | folio_lock(src_folio); |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 2446 | |
Suren Baghdasaryan | eb1521d | 2024-01-31 09:56:18 -0800 | [diff] [blame] | 2447 | /* |
| 2448 | * split_huge_page walks the anon_vma chain without the page |
| 2449 | * lock. Serialize against it with the anon_vma lock; the page |
| 2450 | * lock is not enough. |
| 2451 | */ |
| 2452 | src_anon_vma = folio_get_anon_vma(src_folio); |
| 2453 | if (!src_anon_vma) { |
| 2454 | err = -EAGAIN; |
| 2455 | goto unlock_folio; |
| 2456 | } |
| 2457 | anon_vma_lock_write(src_anon_vma); |
| 2458 | } else |
| 2459 | src_anon_vma = NULL; |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 2460 | |
| 2461 | dst_ptl = pmd_lockptr(mm, dst_pmd); |
| 2462 | double_pt_lock(src_ptl, dst_ptl); |
| 2463 | if (unlikely(!pmd_same(*src_pmd, src_pmdval) || |
| 2464 | !pmd_same(*dst_pmd, dst_pmdval))) { |
| 2465 | err = -EAGAIN; |
| 2466 | goto unlock_ptls; |
| 2467 | } |
Suren Baghdasaryan | eb1521d | 2024-01-31 09:56:18 -0800 | [diff] [blame] | 2468 | if (src_folio) { |
| 2469 | if (folio_maybe_dma_pinned(src_folio) || |
| 2470 | !PageAnonExclusive(&src_folio->page)) { |
| 2471 | err = -EBUSY; |
| 2472 | goto unlock_ptls; |
| 2473 | } |
| 2474 | |
| 2475 | if (WARN_ON_ONCE(!folio_test_head(src_folio)) || |
| 2476 | WARN_ON_ONCE(!folio_test_anon(src_folio))) { |
| 2477 | err = -EBUSY; |
| 2478 | goto unlock_ptls; |
| 2479 | } |
| 2480 | |
Suren Baghdasaryan | eb1521d | 2024-01-31 09:56:18 -0800 | [diff] [blame] | 2481 | src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd); |
| 2482 | /* Folio got pinned from under us. Put it back and fail the move. */ |
| 2483 | if (folio_maybe_dma_pinned(src_folio)) { |
| 2484 | set_pmd_at(mm, src_addr, src_pmd, src_pmdval); |
| 2485 | err = -EBUSY; |
| 2486 | goto unlock_ptls; |
| 2487 | } |
| 2488 | |
Lokesh Gidra | c0205ea | 2024-04-04 10:17:26 -0700 | [diff] [blame] | 2489 | folio_move_anon_rmap(src_folio, dst_vma); |
Suren Baghdasaryan | b5ba3a6 | 2024-04-14 19:08:21 -0700 | [diff] [blame] | 2490 | src_folio->index = linear_page_index(dst_vma, dst_addr); |
Lokesh Gidra | c0205ea | 2024-04-04 10:17:26 -0700 | [diff] [blame] | 2491 | |
Suren Baghdasaryan | eb1521d | 2024-01-31 09:56:18 -0800 | [diff] [blame] | 2492 | _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot); |
| 2493 | /* Follow mremap() behavior and treat the entry as dirty after the move */ |
| 2494 | _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma); |
| 2495 | } else { |
| 2496 | src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd); |
| 2497 | _dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot); |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 2498 | } |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 2499 | set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd); |
| 2500 | |
| 2501 | src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd); |
| 2502 | pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable); |
| 2503 | unlock_ptls: |
| 2504 | double_pt_unlock(src_ptl, dst_ptl); |
Suren Baghdasaryan | eb1521d | 2024-01-31 09:56:18 -0800 | [diff] [blame] | 2505 | if (src_anon_vma) { |
| 2506 | anon_vma_unlock_write(src_anon_vma); |
| 2507 | put_anon_vma(src_anon_vma); |
| 2508 | } |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 2509 | unlock_folio: |
| 2510 | /* unblock rmap walks */ |
Suren Baghdasaryan | eb1521d | 2024-01-31 09:56:18 -0800 | [diff] [blame] | 2511 | if (src_folio) |
| 2512 | folio_unlock(src_folio); |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 2513 | mmu_notifier_invalidate_range_end(&range); |
Suren Baghdasaryan | eb1521d | 2024-01-31 09:56:18 -0800 | [diff] [blame] | 2514 | if (src_folio) |
| 2515 | folio_put(src_folio); |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 2516 | return err; |
| 2517 | } |
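| | |
| | /* |
| |  * In short (derived from the code above): on success the source PMD is |
| |  * cleared, the folio's anon_vma and index are switched over to dst_vma, |
| |  * and a fresh PMD (dirty and writable, matching mremap behaviour) is |
| |  * installed at dst_addr together with the deposited page table; the huge |
| |  * zero page is simply remapped with no folio state to update. |
| |  */ |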
| 2518 | #endif /* CONFIG_USERFAULTFD */ |
| 2519 | |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 2520 | /* |
Huang Ying | 8f19b0c | 2016-07-26 15:27:04 -0700 | [diff] [blame] | 2521 | * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 2522 | * |
Huang Ying | 8f19b0c | 2016-07-26 15:27:04 -0700 | [diff] [blame] | 2523 | * Note that if it returns the page table lock pointer, this routine returns |
| 2524 | * without unlocking the page table lock, so callers must unlock it. |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 2525 | */ |
Kirill A. Shutemov | b6ec57f | 2016-01-21 16:40:25 -0800 | [diff] [blame] | 2526 | spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 2527 | { |
Kirill A. Shutemov | b6ec57f | 2016-01-21 16:40:25 -0800 | [diff] [blame] | 2528 | spinlock_t *ptl; |
| 2529 | ptl = pmd_lock(vma->vm_mm, pmd); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 2530 | if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || |
| 2531 | pmd_devmap(*pmd))) |
Kirill A. Shutemov | b6ec57f | 2016-01-21 16:40:25 -0800 | [diff] [blame] | 2532 | return ptl; |
| 2533 | spin_unlock(ptl); |
| 2534 | return NULL; |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 2535 | } |
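| | |
| | /* |
| |  * Typical usage pattern (as in zap_huge_pmd() and madvise_free_huge_pmd() |
| |  * above): bail out to the pte level when NULL is returned, otherwise |
| |  * operate on the pmd and drop the lock when done: |
| |  * |
| |  *	ptl = __pmd_trans_huge_lock(pmd, vma); |
| |  *	if (!ptl) |
| |  *		return 0;	// not a huge pmd, handled elsewhere |
| |  *	... work on *pmd under ptl ... |
| |  *	spin_unlock(ptl); |
| |  */ |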
| 2536 | |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 2537 | /* |
Miaohe Lin | d965e39 | 2022-07-04 21:21:48 +0800 | [diff] [blame] | 2538 | * Returns page table lock pointer if a given pud maps a thp, NULL otherwise. |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 2539 | * |
Miaohe Lin | d965e39 | 2022-07-04 21:21:48 +0800 | [diff] [blame] | 2540 | * Note that if it returns the page table lock pointer, this routine returns |
| 2541 | * without unlocking the page table lock, so callers must unlock it. |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 2542 | */ |
| 2543 | spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) |
| 2544 | { |
| 2545 | spinlock_t *ptl; |
| 2546 | |
| 2547 | ptl = pud_lock(vma->vm_mm, pud); |
| 2548 | if (likely(pud_trans_huge(*pud) || pud_devmap(*pud))) |
| 2549 | return ptl; |
| 2550 | spin_unlock(ptl); |
| 2551 | return NULL; |
| 2552 | } |
| 2553 | |
| 2554 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD |
| 2555 | int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, |
| 2556 | pud_t *pud, unsigned long addr) |
| 2557 | { |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 2558 | spinlock_t *ptl; |
Peter Xu | 1c399e7 | 2024-08-12 14:12:23 -0400 | [diff] [blame] | 2559 | pud_t orig_pud; |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 2560 | |
| 2561 | ptl = __pud_trans_huge_lock(pud, vma); |
| 2562 | if (!ptl) |
| 2563 | return 0; |
Miaohe Lin | 74929079 | 2022-07-04 21:21:54 +0800 | [diff] [blame] | 2564 | |
Peter Xu | 1c399e7 | 2024-08-12 14:12:23 -0400 | [diff] [blame] | 2565 | orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm); |
| 2566 | arch_check_zapped_pud(vma, orig_pud); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 2567 | tlb_remove_pud_tlb_entry(tlb, pud, addr); |
Thomas Hellstrom (VMware) | 2484ca9 | 2020-03-24 18:47:17 +0100 | [diff] [blame] | 2568 | if (vma_is_special_huge(vma)) { |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 2569 | spin_unlock(ptl); |
| 2570 | /* No zero page support yet */ |
| 2571 | } else { |
| 2572 | /* No support for anonymous PUD pages yet */ |
| 2573 | BUG(); |
| 2574 | } |
| 2575 | return 1; |
| 2576 | } |
| 2577 | |
| 2578 | static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, |
| 2579 | unsigned long haddr) |
| 2580 | { |
| 2581 | VM_BUG_ON(haddr & ~HPAGE_PUD_MASK); |
| 2582 | VM_BUG_ON_VMA(vma->vm_start > haddr, vma); |
| 2583 | VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); |
| 2584 | VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud)); |
| 2585 | |
Yisheng Xie | ce9311c | 2017-03-09 16:17:00 -0800 | [diff] [blame] | 2586 | count_vm_event(THP_SPLIT_PUD); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 2587 | |
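| | /* |
| | * There are no anonymous huge PUD pages yet, so "splitting" a huge PUD |
| | * simply clears it; the special (e.g. DAX) mapping is re-established at |
| | * a smaller size on the next fault. |
| | */ |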
Alistair Popple | ec8832d | 2023-07-25 23:42:06 +1000 | [diff] [blame] | 2588 | pudp_huge_clear_flush(vma, haddr, pud); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 2589 | } |
| 2590 | |
| 2591 | void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, |
| 2592 | unsigned long address) |
| 2593 | { |
| 2594 | spinlock_t *ptl; |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 2595 | struct mmu_notifier_range range; |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 2596 | |
Alistair Popple | 7d4a8be | 2023-01-10 13:57:22 +1100 | [diff] [blame] | 2597 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, |
Jérôme Glisse | 6f4f13e | 2019-05-13 17:20:49 -0700 | [diff] [blame] | 2598 | address & HPAGE_PUD_MASK, |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 2599 | (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE); |
| 2600 | mmu_notifier_invalidate_range_start(&range); |
| 2601 | ptl = pud_lock(vma->vm_mm, pud); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 2602 | if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) |
| 2603 | goto out; |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 2604 | __split_huge_pud_locked(vma, pud, range.start); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 2605 | |
| 2606 | out: |
| 2607 | spin_unlock(ptl); |
Alistair Popple | ec8832d | 2023-07-25 23:42:06 +1000 | [diff] [blame] | 2608 | mmu_notifier_invalidate_range_end(&range); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 2609 | } |
Peter Xu | cb0f01b | 2024-08-12 14:12:25 -0400 | [diff] [blame] | 2610 | #else |
| 2611 | void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, |
| 2612 | unsigned long address) |
| 2613 | { |
| 2614 | } |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 2615 | #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ |
| 2616 | |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2617 | static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, |
| 2618 | unsigned long haddr, pmd_t *pmd) |
| 2619 | { |
| 2620 | struct mm_struct *mm = vma->vm_mm; |
| 2621 | pgtable_t pgtable; |
David Hildenbrand | 42b2af2 | 2023-03-02 18:54:23 +0100 | [diff] [blame] | 2622 | pmd_t _pmd, old_pmd; |
Hugh Dickins | c9c1ee2 | 2023-06-08 18:41:31 -0700 | [diff] [blame] | 2623 | unsigned long addr; |
| 2624 | pte_t *pte; |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2625 | int i; |
| 2626 | |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 2627 | /* |
| 2628 | * Leave the pmd empty until the ptes are filled. Note that it is fine |
| 2629 | * to delay the notification until mmu_notifier_invalidate_range_end() |
| 2630 | * as we are replacing a write-protected zero page mapped by a pmd with |
| 2631 | * write-protected zero pages mapped by ptes. |
| 2632 | * |
Mike Rapoport | ee65728 | 2022-06-27 09:00:26 +0300 | [diff] [blame] | 2633 | * See Documentation/mm/mmu_notifier.rst |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 2634 | */ |
David Hildenbrand | 42b2af2 | 2023-03-02 18:54:23 +0100 | [diff] [blame] | 2635 | old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2636 | |
| 2637 | pgtable = pgtable_trans_huge_withdraw(mm, pmd); |
| 2638 | pmd_populate(mm, &_pmd, pgtable); |
| 2639 | |
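| | /* |
| | * Fill the ptes through the detached _pmd first; the real pmd is only |
| | * repopulated below once every pte points at the zero page. |
| | */ |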
Hugh Dickins | c9c1ee2 | 2023-06-08 18:41:31 -0700 | [diff] [blame] | 2640 | pte = pte_offset_map(&_pmd, haddr); |
| 2641 | VM_BUG_ON(!pte); |
| 2642 | for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { |
| 2643 | pte_t entry; |
| 2644 | |
| 2645 | entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2646 | entry = pte_mkspecial(entry); |
David Hildenbrand | 42b2af2 | 2023-03-02 18:54:23 +0100 | [diff] [blame] | 2647 | if (pmd_uffd_wp(old_pmd)) |
| 2648 | entry = pte_mkuffd_wp(entry); |
Ryan Roberts | c33c794 | 2023-06-12 16:15:45 +0100 | [diff] [blame] | 2649 | VM_BUG_ON(!pte_none(ptep_get(pte))); |
Hugh Dickins | c9c1ee2 | 2023-06-08 18:41:31 -0700 | [diff] [blame] | 2650 | set_pte_at(mm, addr, pte, entry); |
| 2651 | pte++; |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2652 | } |
Hugh Dickins | c9c1ee2 | 2023-06-08 18:41:31 -0700 | [diff] [blame] | 2653 | pte_unmap(pte - 1); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2654 | smp_wmb(); /* make pte visible before pmd */ |
| 2655 | pmd_populate(mm, pmd, pgtable); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2656 | } |
| 2657 | |
| 2658 | static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, |
Kirill A. Shutemov | ba98828 | 2016-01-15 16:53:56 -0800 | [diff] [blame] | 2659 | unsigned long haddr, bool freeze) |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2660 | { |
| 2661 | struct mm_struct *mm = vma->vm_mm; |
David Hildenbrand | 91b2978 | 2023-12-20 23:44:39 +0100 | [diff] [blame] | 2662 | struct folio *folio; |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2663 | struct page *page; |
| 2664 | pgtable_t pgtable; |
Aneesh Kumar K.V | 423ac9a | 2018-01-31 16:18:24 -0800 | [diff] [blame] | 2665 | pmd_t old_pmd, _pmd; |
Peter Xu | 292924b | 2020-04-06 20:05:49 -0700 | [diff] [blame] | 2666 | bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false; |
Peter Xu | 0ccf7f1 | 2022-08-11 12:13:28 -0400 | [diff] [blame] | 2667 | bool anon_exclusive = false, dirty = false; |
Kirill A. Shutemov | 2ac015e | 2016-02-24 18:58:03 +0300 | [diff] [blame] | 2668 | unsigned long addr; |
Hugh Dickins | c9c1ee2 | 2023-06-08 18:41:31 -0700 | [diff] [blame] | 2669 | pte_t *pte; |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2670 | int i; |
| 2671 | |
| 2672 | VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); |
| 2673 | VM_BUG_ON_VMA(vma->vm_start > haddr, vma); |
| 2674 | VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 2675 | VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) |
| 2676 | && !pmd_devmap(*pmd)); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2677 | |
| 2678 | count_vm_event(THP_SPLIT_PMD); |
| 2679 | |
Kirill A. Shutemov | d21b9e5 | 2016-07-26 15:25:37 -0700 | [diff] [blame] | 2680 | if (!vma_is_anonymous(vma)) { |
Alistair Popple | ec8832d | 2023-07-25 23:42:06 +1000 | [diff] [blame] | 2681 | old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); |
Aneesh Kumar K.V | 953c66c | 2016-12-12 16:44:32 -0800 | [diff] [blame] | 2682 | /* |
| 2683 | * We are going to unmap this huge page. So |
| 2684 | * just go ahead and zap it |
| 2685 | */ |
| 2686 | if (arch_needs_pgtable_deposit()) |
| 2687 | zap_deposited_table(mm, pmd); |
Thomas Hellstrom (VMware) | 2484ca9 | 2020-03-24 18:47:17 +0100 | [diff] [blame] | 2688 | if (vma_is_special_huge(vma)) |
Kirill A. Shutemov | d21b9e5 | 2016-07-26 15:25:37 -0700 | [diff] [blame] | 2689 | return; |
Hugh Dickins | 99fa8a4 | 2021-06-15 18:23:45 -0700 | [diff] [blame] | 2690 | if (unlikely(is_pmd_migration_entry(old_pmd))) { |
| 2691 | swp_entry_t entry; |
| 2692 | |
| 2693 | entry = pmd_to_swp_entry(old_pmd); |
Kefeng Wang | 439992f | 2024-01-11 15:24:24 +0000 | [diff] [blame] | 2694 | folio = pfn_swap_entry_folio(entry); |
Hugh Dickins | 99fa8a4 | 2021-06-15 18:23:45 -0700 | [diff] [blame] | 2695 | } else { |
| 2696 | page = pmd_page(old_pmd); |
David Hildenbrand | a8e61d5 | 2023-12-20 23:44:49 +0100 | [diff] [blame] | 2697 | folio = page_folio(page); |
| 2698 | if (!folio_test_dirty(folio) && pmd_dirty(old_pmd)) |
David Hildenbrand | db44c65 | 2024-01-22 18:54:07 +0100 | [diff] [blame] | 2699 | folio_mark_dirty(folio); |
David Hildenbrand | a8e61d5 | 2023-12-20 23:44:49 +0100 | [diff] [blame] | 2700 | if (!folio_test_referenced(folio) && pmd_young(old_pmd)) |
| 2701 | folio_set_referenced(folio); |
| 2702 | folio_remove_rmap_pmd(folio, page, vma); |
| 2703 | folio_put(folio); |
Hugh Dickins | 99fa8a4 | 2021-06-15 18:23:45 -0700 | [diff] [blame] | 2704 | } |
Kefeng Wang | 6b27cc6c | 2024-01-11 15:24:29 +0000 | [diff] [blame] | 2705 | add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2706 | return; |
Hugh Dickins | 99fa8a4 | 2021-06-15 18:23:45 -0700 | [diff] [blame] | 2707 | } |
| 2708 | |
Hugh Dickins | 3b77e8c | 2021-06-15 18:23:49 -0700 | [diff] [blame] | 2709 | if (is_huge_zero_pmd(*pmd)) { |
Jérôme Glisse | 4645b9f | 2017-11-15 17:34:11 -0800 | [diff] [blame] | 2710 | /* |
| 2711 | * FIXME: Do we want to invalidate the secondary mmu by calling |
Alistair Popple | 1af5a81 | 2023-07-25 23:42:07 +1000 | [diff] [blame] | 2712 | * mmu_notifier_arch_invalidate_secondary_tlbs()? See the comments |
| 2713 | * below inside __split_huge_pmd(). |
Jérôme Glisse | 4645b9f | 2017-11-15 17:34:11 -0800 | [diff] [blame] | 2714 | * |
| 2715 | * We are going from a write-protected huge zero page to |
| 2716 | * write-protected small zero pages, so it does not seem useful |
| 2717 | * to invalidate the secondary mmu at this time. |
| 2718 | */ |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2719 | return __split_huge_zero_page_pmd(vma, haddr, pmd); |
| 2720 | } |
| 2721 | |
Ryan Roberts | 3a5a8d3 | 2024-05-01 15:33:10 +0100 | [diff] [blame] | 2722 | pmd_migration = is_pmd_migration_entry(*pmd); |
Peter Xu | 2e83ee1 | 2018-12-21 14:30:50 -0800 | [diff] [blame] | 2723 | if (unlikely(pmd_migration)) { |
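| | /* |
| | * The pmd holds a migration entry: recover the page and its |
| | * attributes (write, exclusive, young, dirty, ...) from the |
| | * migration entry and the swap pmd bits. |
| | */ |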
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 2724 | swp_entry_t entry; |
| 2725 | |
Ryan Roberts | 3a5a8d3 | 2024-05-01 15:33:10 +0100 | [diff] [blame] | 2726 | old_pmd = *pmd; |
Aneesh Kumar K.V | 423ac9a | 2018-01-31 16:18:24 -0800 | [diff] [blame] | 2727 | entry = pmd_to_swp_entry(old_pmd); |
Alistair Popple | af5cdaf | 2021-06-30 18:54:06 -0700 | [diff] [blame] | 2728 | page = pfn_swap_entry_to_page(entry); |
Alistair Popple | 4dd845b | 2021-06-30 18:54:09 -0700 | [diff] [blame] | 2729 | write = is_writable_migration_entry(entry); |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 2730 | if (PageAnon(page)) |
| 2731 | anon_exclusive = is_readable_exclusive_migration_entry(entry); |
Peter Xu | 2e34687 | 2022-08-11 12:13:29 -0400 | [diff] [blame] | 2732 | young = is_migration_entry_young(entry); |
| 2733 | dirty = is_migration_entry_dirty(entry); |
Peter Xu | 2e83ee1 | 2018-12-21 14:30:50 -0800 | [diff] [blame] | 2734 | soft_dirty = pmd_swp_soft_dirty(old_pmd); |
Peter Xu | f45ec5f | 2020-04-06 20:06:01 -0700 | [diff] [blame] | 2735 | uffd_wp = pmd_swp_uffd_wp(old_pmd); |
Peter Xu | 2e83ee1 | 2018-12-21 14:30:50 -0800 | [diff] [blame] | 2736 | } else { |
Ryan Roberts | 3a5a8d3 | 2024-05-01 15:33:10 +0100 | [diff] [blame] | 2737 | /* |
| 2738 | * Up to this point the pmd is present and huge and userland has |
| 2739 | * full access to the hugepage during the split (which |
| 2740 | * happens in place). If we overwrite the pmd with the not-huge |
| 2741 | * version pointing to the pte here (which of course we could if |
| 2742 | * all CPUs were bug free), userland could trigger a small page |
| 2743 | * size TLB miss on the small sized TLB while the hugepage TLB |
| 2744 | * entry is still established in the huge TLB. Some CPUs don't |
| 2745 | * like that. See |
| 2746 | * http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum |
| 2747 | * 383 on page 105. Intel should be safe, but it also warns that |
| 2748 | * it's only safe if the permission and cache attributes of the |
| 2749 | * two entries loaded in the two TLBs are identical (which should |
| 2750 | * be the case here). But it is generally safer to never allow |
| 2751 | * small and huge TLB entries for the same virtual address to be |
| 2752 | * loaded simultaneously. So instead of doing "pmd_populate(); |
| 2753 | * flush_pmd_tlb_range();" we first mark the current pmd |
| 2754 | * not present (atomically, because here the pmd_trans_huge must |
| 2755 | * remain set at all times on the pmd until the split is |
| 2756 | * complete for this pmd), then we flush the SMP TLB and finally |
| 2757 | * we write the non-huge version of the pmd entry with |
| 2758 | * pmd_populate. |
| 2759 | */ |
| 2760 | old_pmd = pmdp_invalidate(vma, haddr, pmd); |
Aneesh Kumar K.V | 423ac9a | 2018-01-31 16:18:24 -0800 | [diff] [blame] | 2761 | page = pmd_page(old_pmd); |
David Hildenbrand | 91b2978 | 2023-12-20 23:44:39 +0100 | [diff] [blame] | 2762 | folio = page_folio(page); |
Peter Xu | 0ccf7f1 | 2022-08-11 12:13:28 -0400 | [diff] [blame] | 2763 | if (pmd_dirty(old_pmd)) { |
| 2764 | dirty = true; |
David Hildenbrand | 91b2978 | 2023-12-20 23:44:39 +0100 | [diff] [blame] | 2765 | folio_set_dirty(folio); |
Peter Xu | 0ccf7f1 | 2022-08-11 12:13:28 -0400 | [diff] [blame] | 2766 | } |
Peter Xu | 2e83ee1 | 2018-12-21 14:30:50 -0800 | [diff] [blame] | 2767 | write = pmd_write(old_pmd); |
| 2768 | young = pmd_young(old_pmd); |
| 2769 | soft_dirty = pmd_soft_dirty(old_pmd); |
Peter Xu | 292924b | 2020-04-06 20:05:49 -0700 | [diff] [blame] | 2770 | uffd_wp = pmd_uffd_wp(old_pmd); |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 2771 | |
David Hildenbrand | 91b2978 | 2023-12-20 23:44:39 +0100 | [diff] [blame] | 2772 | VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio); |
| 2773 | VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 2774 | |
| 2775 | /* |
| 2776 | * Without "freeze", we'll simply split the PMD, propagating the |
| 2777 | * PageAnonExclusive() flag for each PTE by setting it for |
| 2778 | * each subpage -- no need to (temporarily) clear. |
| 2779 | * |
| 2780 | * With "freeze" we want to replace mapped pages by |
| 2781 | * migration entries right away. This is only possible if we |
| 2782 | * managed to clear PageAnonExclusive() -- see |
| 2783 | * set_pmd_migration_entry(). |
| 2784 | * |
| 2785 | * In case we cannot clear PageAnonExclusive(), split the PMD |
| 2786 | * only and let try_to_migrate_one() fail later. |
David Hildenbrand | 088b8aa | 2022-09-01 10:35:59 +0200 | [diff] [blame] | 2787 | * |
David Hildenbrand | e3b4b13 | 2023-12-20 23:45:02 +0100 | [diff] [blame] | 2788 | * See folio_try_share_anon_rmap_pmd(): invalidate PMD first. |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 2789 | */ |
David Hildenbrand | 91b2978 | 2023-12-20 23:44:39 +0100 | [diff] [blame] | 2790 | anon_exclusive = PageAnonExclusive(page); |
David Hildenbrand | e3b4b13 | 2023-12-20 23:45:02 +0100 | [diff] [blame] | 2791 | if (freeze && anon_exclusive && |
| 2792 | folio_try_share_anon_rmap_pmd(folio, page)) |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 2793 | freeze = false; |
David Hildenbrand | 91b2978 | 2023-12-20 23:44:39 +0100 | [diff] [blame] | 2794 | if (!freeze) { |
| 2795 | rmap_t rmap_flags = RMAP_NONE; |
| 2796 | |
| 2797 | folio_ref_add(folio, HPAGE_PMD_NR - 1); |
| 2798 | if (anon_exclusive) |
| 2799 | rmap_flags |= RMAP_EXCLUSIVE; |
| 2800 | folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR, |
| 2801 | vma, haddr, rmap_flags); |
| 2802 | } |
Peter Xu | 2e83ee1 | 2018-12-21 14:30:50 -0800 | [diff] [blame] | 2803 | } |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2804 | |
Aneesh Kumar K.V | 423ac9a | 2018-01-31 16:18:24 -0800 | [diff] [blame] | 2805 | /* |
| 2806 | * Withdraw the table only after we mark the pmd entry invalid. |
| 2807 | * This is critical for some architectures (Power). |
| 2808 | */ |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2809 | pgtable = pgtable_trans_huge_withdraw(mm, pmd); |
| 2810 | pmd_populate(mm, &_pmd, pgtable); |
| 2811 | |
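| | /* |
| | * As in __split_huge_zero_page_pmd(): fill the ptes through the |
| | * detached _pmd and only install the page table into the real pmd |
| | * once all ptes have been set up. |
| | */ |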
Hugh Dickins | c9c1ee2 | 2023-06-08 18:41:31 -0700 | [diff] [blame] | 2812 | pte = pte_offset_map(&_pmd, haddr); |
| 2813 | VM_BUG_ON(!pte); |
Ryan Roberts | 2bdba98 | 2024-02-15 10:31:49 +0000 | [diff] [blame] | 2814 | |
| 2815 | /* |
| 2816 | * Note that NUMA hinting access restrictions are not transferred to |
| 2817 | * avoid any possibility of altering permissions across VMAs. |
| 2818 | */ |
| 2819 | if (freeze || pmd_migration) { |
| 2820 | for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { |
| 2821 | pte_t entry; |
Kirill A. Shutemov | ba98828 | 2016-01-15 16:53:56 -0800 | [diff] [blame] | 2822 | swp_entry_t swp_entry; |
Ryan Roberts | 2bdba98 | 2024-02-15 10:31:49 +0000 | [diff] [blame] | 2823 | |
Alistair Popple | 4dd845b | 2021-06-30 18:54:09 -0700 | [diff] [blame] | 2824 | if (write) |
| 2825 | swp_entry = make_writable_migration_entry( |
| 2826 | page_to_pfn(page + i)); |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 2827 | else if (anon_exclusive) |
| 2828 | swp_entry = make_readable_exclusive_migration_entry( |
| 2829 | page_to_pfn(page + i)); |
Alistair Popple | 4dd845b | 2021-06-30 18:54:09 -0700 | [diff] [blame] | 2830 | else |
| 2831 | swp_entry = make_readable_migration_entry( |
| 2832 | page_to_pfn(page + i)); |
Peter Xu | 2e34687 | 2022-08-11 12:13:29 -0400 | [diff] [blame] | 2833 | if (young) |
| 2834 | swp_entry = make_migration_entry_young(swp_entry); |
| 2835 | if (dirty) |
| 2836 | swp_entry = make_migration_entry_dirty(swp_entry); |
Kirill A. Shutemov | ba98828 | 2016-01-15 16:53:56 -0800 | [diff] [blame] | 2837 | entry = swp_entry_to_pte(swp_entry); |
Andrea Arcangeli | 804dd15 | 2016-08-25 15:16:57 -0700 | [diff] [blame] | 2838 | if (soft_dirty) |
| 2839 | entry = pte_swp_mksoft_dirty(entry); |
Peter Xu | f45ec5f | 2020-04-06 20:06:01 -0700 | [diff] [blame] | 2840 | if (uffd_wp) |
| 2841 | entry = pte_swp_mkuffd_wp(entry); |
Ryan Roberts | 2bdba98 | 2024-02-15 10:31:49 +0000 | [diff] [blame] | 2842 | |
| 2843 | VM_WARN_ON(!pte_none(ptep_get(pte + i))); |
| 2844 | set_pte_at(mm, addr, pte + i, entry); |
Kirill A. Shutemov | ba98828 | 2016-01-15 16:53:56 -0800 | [diff] [blame] | 2845 | } |
Ryan Roberts | 2bdba98 | 2024-02-15 10:31:49 +0000 | [diff] [blame] | 2846 | } else { |
| 2847 | pte_t entry; |
| 2848 | |
| 2849 | entry = mk_pte(page, READ_ONCE(vma->vm_page_prot)); |
| 2850 | if (write) |
| 2851 | entry = pte_mkwrite(entry, vma); |
| 2852 | if (!young) |
| 2853 | entry = pte_mkold(entry); |
| 2854 | /* NOTE: this may set soft-dirty too on some archs */ |
| 2855 | if (dirty) |
| 2856 | entry = pte_mkdirty(entry); |
| 2857 | if (soft_dirty) |
| 2858 | entry = pte_mksoft_dirty(entry); |
| 2859 | if (uffd_wp) |
| 2860 | entry = pte_mkuffd_wp(entry); |
| 2861 | |
| 2862 | for (i = 0; i < HPAGE_PMD_NR; i++) |
| 2863 | VM_WARN_ON(!pte_none(ptep_get(pte + i))); |
| 2864 | |
| 2865 | set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2866 | } |
Ryan Roberts | 2bdba98 | 2024-02-15 10:31:49 +0000 | [diff] [blame] | 2867 | pte_unmap(pte); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2868 | |
Hugh Dickins | cb67f42 | 2022-11-02 18:51:38 -0700 | [diff] [blame] | 2869 | if (!pmd_migration) |
David Hildenbrand | a8e61d5 | 2023-12-20 23:44:49 +0100 | [diff] [blame] | 2870 | folio_remove_rmap_pmd(folio, page, vma); |
Hugh Dickins | 96d82de | 2022-11-22 01:51:50 -0800 | [diff] [blame] | 2871 | if (freeze) |
| 2872 | put_page(page); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2873 | |
| 2874 | smp_wmb(); /* make pte visible before pmd */ |
| 2875 | pmd_populate(mm, pmd, pgtable); |
| 2876 | } |
| 2877 | |
Lance Yang | 29e847d | 2024-06-14 09:51:37 +0800 | [diff] [blame] | 2878 | void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address, |
| 2879 | pmd_t *pmd, bool freeze, struct folio *folio) |
| 2880 | { |
| 2881 | VM_WARN_ON_ONCE(folio && !folio_test_pmd_mappable(folio)); |
| 2882 | VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE)); |
| 2883 | VM_WARN_ON_ONCE(folio && !folio_test_locked(folio)); |
| 2884 | VM_BUG_ON(freeze && !folio); |
| 2885 | |
| 2886 | /* |
| 2887 | * When the caller requests to set up a migration entry, we |
| 2888 | * require a folio to check the PMD against. Otherwise, there |
| 2889 | * is a risk of replacing the wrong folio. |
| 2890 | */ |
| 2891 | if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || |
| 2892 | is_pmd_migration_entry(*pmd)) { |
| 2893 | if (folio && folio != pmd_folio(*pmd)) |
| 2894 | return; |
| 2895 | __split_huge_pmd_locked(vma, pmd, address, freeze); |
| 2896 | } |
| 2897 | } |
| 2898 | |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2899 | void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
Matthew Wilcox (Oracle) | af28a98 | 2022-01-21 10:44:52 -0500 | [diff] [blame] | 2900 | unsigned long address, bool freeze, struct folio *folio) |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2901 | { |
| 2902 | spinlock_t *ptl; |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 2903 | struct mmu_notifier_range range; |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2904 | |
Alistair Popple | 7d4a8be | 2023-01-10 13:57:22 +1100 | [diff] [blame] | 2905 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, |
Jérôme Glisse | 6f4f13e | 2019-05-13 17:20:49 -0700 | [diff] [blame] | 2906 | address & HPAGE_PMD_MASK, |
Jérôme Glisse | ac46d4f | 2018-12-28 00:38:09 -0800 | [diff] [blame] | 2907 | (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE); |
| 2908 | mmu_notifier_invalidate_range_start(&range); |
| 2909 | ptl = pmd_lock(vma->vm_mm, pmd); |
Lance Yang | 29e847d | 2024-06-14 09:51:37 +0800 | [diff] [blame] | 2910 | split_huge_pmd_locked(vma, range.start, pmd, freeze, folio); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2911 | spin_unlock(ptl); |
Alistair Popple | ec8832d | 2023-07-25 23:42:06 +1000 | [diff] [blame] | 2912 | mmu_notifier_invalidate_range_end(&range); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2913 | } |
| 2914 | |
Kirill A. Shutemov | fec89c1 | 2016-03-17 14:20:10 -0700 | [diff] [blame] | 2915 | void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, |
Matthew Wilcox (Oracle) | af28a98 | 2022-01-21 10:44:52 -0500 | [diff] [blame] | 2916 | bool freeze, struct folio *folio) |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2917 | { |
Zach O'Keefe | 5072280 | 2022-07-06 16:59:26 -0700 | [diff] [blame] | 2918 | pmd_t *pmd = mm_find_pmd(vma->vm_mm, address); |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2919 | |
Zach O'Keefe | 5072280 | 2022-07-06 16:59:26 -0700 | [diff] [blame] | 2920 | if (!pmd) |
Hugh Dickins | f72e7dc | 2014-06-23 13:22:05 -0700 | [diff] [blame] | 2921 | return; |
| 2922 | |
Matthew Wilcox (Oracle) | af28a98 | 2022-01-21 10:44:52 -0500 | [diff] [blame] | 2923 | __split_huge_pmd(vma, pmd, address, freeze, folio); |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2924 | } |
| 2925 | |
Miaohe Lin | 71f9e58 | 2021-05-04 18:33:52 -0700 | [diff] [blame] | 2926 | static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address) |
| 2927 | { |
| 2928 | /* |
| 2929 | * If the new address isn't hpage aligned and it could previously |
| 2930 | * contain a hugepage: check if we need to split a huge pmd. |
| 2931 | */ |
| 2932 | if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) && |
| 2933 | range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE), |
| 2934 | ALIGN(address, HPAGE_PMD_SIZE))) |
| 2935 | split_huge_pmd_address(vma, address, false, NULL); |
| 2936 | } |
| 2937 | |
Kirill A. Shutemov | e1b9996 | 2015-09-08 14:58:37 -0700 | [diff] [blame] | 2938 | void vma_adjust_trans_huge(struct vm_area_struct *vma, |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2939 | unsigned long start, |
| 2940 | unsigned long end, |
| 2941 | long adjust_next) |
| 2942 | { |
Miaohe Lin | 71f9e58 | 2021-05-04 18:33:52 -0700 | [diff] [blame] | 2943 | /* Check if we need to split start first. */ |
| 2944 | split_huge_pmd_if_needed(vma, start); |
| 2945 | |
| 2946 | /* Check if we need to split end next. */ |
| 2947 | split_huge_pmd_if_needed(vma, end); |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2948 | |
| 2949 | /* |
Matthew Wilcox (Oracle) | 6854050 | 2022-09-06 19:49:00 +0000 | [diff] [blame] | 2950 | * If we're also updating the next vma vm_start, |
Miaohe Lin | 71f9e58 | 2021-05-04 18:33:52 -0700 | [diff] [blame] | 2951 | * check if we need to split it. |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2952 | */ |
| 2953 | if (adjust_next > 0) { |
Matthew Wilcox (Oracle) | 6854050 | 2022-09-06 19:49:00 +0000 | [diff] [blame] | 2954 | struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end); |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2955 | unsigned long nstart = next->vm_start; |
Wei Yang | f9d86a6 | 2020-10-13 16:53:57 -0700 | [diff] [blame] | 2956 | nstart += adjust_next; |
Miaohe Lin | 71f9e58 | 2021-05-04 18:33:52 -0700 | [diff] [blame] | 2957 | split_huge_pmd_if_needed(next, nstart); |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2958 | } |
| 2959 | } |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2960 | |
Matthew Wilcox (Oracle) | 684555a | 2022-09-02 20:46:49 +0100 | [diff] [blame] | 2961 | static void unmap_folio(struct folio *folio) |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2962 | { |
Zi Yan | 319a624 | 2024-02-26 15:55:27 -0500 | [diff] [blame] | 2963 | enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC | |
| 2964 | TTU_BATCH_FLUSH; |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2965 | |
Matthew Wilcox (Oracle) | 684555a | 2022-09-02 20:46:49 +0100 | [diff] [blame] | 2966 | VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2967 | |
Zi Yan | 319a624 | 2024-02-26 15:55:27 -0500 | [diff] [blame] | 2968 | if (folio_test_pmd_mappable(folio)) |
| 2969 | ttu_flags |= TTU_SPLIT_HUGE_PMD; |
| 2970 | |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2971 | /* |
| 2972 | * Anon pages need migration entries to preserve them, but file |
| 2973 | * pages can simply be left unmapped, then faulted back on demand. |
| 2974 | * If that is ever changed (perhaps for mlock), update remap_page(). |
| 2975 | */ |
Matthew Wilcox (Oracle) | 4b8554c | 2022-01-28 14:29:43 -0500 | [diff] [blame] | 2976 | if (folio_test_anon(folio)) |
| 2977 | try_to_migrate(folio, ttu_flags); |
Alistair Popple | a98a2f0 | 2021-06-30 18:54:16 -0700 | [diff] [blame] | 2978 | else |
Matthew Wilcox (Oracle) | 869f7ee | 2022-02-15 09:28:49 -0500 | [diff] [blame] | 2979 | try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK); |
Baolin Wang | 3027c6f | 2023-10-30 09:11:47 +0800 | [diff] [blame] | 2980 | |
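| | /* TTU_BATCH_FLUSH deferred the TLB flushes above; issue them now. */ |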
| 2981 | try_to_unmap_flush(); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2982 | } |
| 2983 | |
Lance Yang | 735ecdf | 2024-06-14 09:51:38 +0800 | [diff] [blame] | 2984 | static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma, |
| 2985 | unsigned long addr, pmd_t *pmdp, |
| 2986 | struct folio *folio) |
| 2987 | { |
| 2988 | struct mm_struct *mm = vma->vm_mm; |
| 2989 | int ref_count, map_count; |
| 2990 | pmd_t orig_pmd = *pmdp; |
Lance Yang | 735ecdf | 2024-06-14 09:51:38 +0800 | [diff] [blame] | 2991 | |
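| | /* A folio that was redirtied after MADV_FREE must not be discarded. */ |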
| 2992 | if (folio_test_dirty(folio) || pmd_dirty(orig_pmd)) |
| 2993 | return false; |
| 2994 | |
| 2995 | orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp); |
| 2996 | |
| 2997 | /* |
| 2998 | * Syncing against concurrent GUP-fast: |
| 2999 | * - clear PMD; barrier; read refcount |
| 3000 | * - inc refcount; barrier; read PMD |
| 3001 | */ |
| 3002 | smp_mb(); |
| 3003 | |
| 3004 | ref_count = folio_ref_count(folio); |
| 3005 | map_count = folio_mapcount(folio); |
| 3006 | |
| 3007 | /* |
| 3008 | * Order reads for folio refcount and dirty flag |
| 3009 | * (see comments in __remove_mapping()). |
| 3010 | */ |
| 3011 | smp_rmb(); |
| 3012 | |
| 3013 | /* |
| 3014 | * If the folio or its PMD is redirtied at this point, or if there |
| 3015 | * are unexpected references, we give up on discarding this folio |
| 3016 | * and remap it instead. |
| 3017 | * |
| 3018 | * The only folio refs must be the one from isolation plus the rmap(s). |
| 3019 | */ |
| 3020 | if (folio_test_dirty(folio) || pmd_dirty(orig_pmd) || |
| 3021 | ref_count != map_count + 1) { |
| 3022 | set_pmd_at(mm, addr, pmdp, orig_pmd); |
| 3023 | return false; |
| 3024 | } |
| 3025 | |
Andrew Morton | d40f74a | 2024-06-25 14:51:36 -0700 | [diff] [blame] | 3026 | folio_remove_rmap_pmd(folio, pmd_page(orig_pmd), vma); |
Lance Yang | 735ecdf | 2024-06-14 09:51:38 +0800 | [diff] [blame] | 3027 | zap_deposited_table(mm, pmdp); |
| 3028 | add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR); |
| 3029 | if (vma->vm_flags & VM_LOCKED) |
| 3030 | mlock_drain_local(); |
| 3031 | folio_put(folio); |
| 3032 | |
| 3033 | return true; |
| 3034 | } |
| 3035 | |
| 3036 | bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr, |
| 3037 | pmd_t *pmdp, struct folio *folio) |
| 3038 | { |
| 3039 | VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio); |
| 3040 | VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); |
| 3041 | VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE)); |
| 3042 | |
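| | /* |
| | * Only lazyfree folios (anonymous and not swap-backed, i.e. MADV_FREE'd) |
| | * are candidates for being discarded instead of split. |
| | */ |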
| 3043 | if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) |
| 3044 | return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio); |
| 3045 | |
| 3046 | return false; |
| 3047 | } |
| 3048 | |
Yu Zhao | b1f2020 | 2024-08-30 11:03:36 +0100 | [diff] [blame] | 3049 | static void remap_page(struct folio *folio, unsigned long nr, int flags) |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3050 | { |
Matthew Wilcox (Oracle) | 4eecb8b | 2022-01-28 23:32:59 -0500 | [diff] [blame] | 3051 | int i = 0; |
Hugh Dickins | ab02c25 | 2021-06-30 18:52:04 -0700 | [diff] [blame] | 3052 | |
Matthew Wilcox (Oracle) | 684555a | 2022-09-02 20:46:49 +0100 | [diff] [blame] | 3053 | /* If unmap_folio() uses try_to_migrate() on file, remove this check */ |
Matthew Wilcox (Oracle) | 4eecb8b | 2022-01-28 23:32:59 -0500 | [diff] [blame] | 3054 | if (!folio_test_anon(folio)) |
Hugh Dickins | ab02c25 | 2021-06-30 18:52:04 -0700 | [diff] [blame] | 3055 | return; |
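| | /* Walk every folio produced by the split and restore its migration ptes. */ |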
Matthew Wilcox (Oracle) | 4eecb8b | 2022-01-28 23:32:59 -0500 | [diff] [blame] | 3056 | for (;;) { |
Yu Zhao | b1f2020 | 2024-08-30 11:03:36 +0100 | [diff] [blame] | 3057 | remove_migration_ptes(folio, folio, RMP_LOCKED | flags); |
Matthew Wilcox (Oracle) | 4eecb8b | 2022-01-28 23:32:59 -0500 | [diff] [blame] | 3058 | i += folio_nr_pages(folio); |
| 3059 | if (i >= nr) |
| 3060 | break; |
| 3061 | folio = folio_next(folio); |
Kirill A. Shutemov | ace71a1 | 2017-02-24 14:57:45 -0800 | [diff] [blame] | 3062 | } |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3063 | } |
| 3064 | |
Matthew Wilcox (Oracle) | cb29e79 | 2024-08-21 20:34:38 +0100 | [diff] [blame] | 3065 | static void lru_add_page_tail(struct folio *folio, struct page *tail, |
Alex Shi | 88dcb9a | 2020-12-15 12:33:20 -0800 | [diff] [blame] | 3066 | struct lruvec *lruvec, struct list_head *list) |
| 3067 | { |
Matthew Wilcox (Oracle) | cb29e79 | 2024-08-21 20:34:38 +0100 | [diff] [blame] | 3068 | VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); |
| 3069 | VM_BUG_ON_FOLIO(PageLRU(tail), folio); |
Alex Shi | 6168d0d | 2020-12-15 12:34:29 -0800 | [diff] [blame] | 3070 | lockdep_assert_held(&lruvec->lru_lock); |
Alex Shi | 88dcb9a | 2020-12-15 12:33:20 -0800 | [diff] [blame] | 3071 | |
Alex Shi | 6dbb574 | 2020-12-15 12:33:29 -0800 | [diff] [blame] | 3072 | if (list) { |
Alex Shi | 88dcb9a | 2020-12-15 12:33:20 -0800 | [diff] [blame] | 3073 | /* page reclaim is reclaiming a huge page */ |
Matthew Wilcox (Oracle) | cb29e79 | 2024-08-21 20:34:38 +0100 | [diff] [blame] | 3074 | VM_WARN_ON(folio_test_lru(folio)); |
Alex Shi | 9486663 | 2020-12-15 12:33:24 -0800 | [diff] [blame] | 3075 | get_page(tail); |
| 3076 | list_add_tail(&tail->lru, list); |
Alex Shi | 88dcb9a | 2020-12-15 12:33:20 -0800 | [diff] [blame] | 3077 | } else { |
Alex Shi | 6dbb574 | 2020-12-15 12:33:29 -0800 | [diff] [blame] | 3078 | /* head is still on lru (and we have it frozen) */ |
Matthew Wilcox (Oracle) | cb29e79 | 2024-08-21 20:34:38 +0100 | [diff] [blame] | 3079 | VM_WARN_ON(!folio_test_lru(folio)); |
| 3080 | if (folio_test_unevictable(folio)) |
Hugh Dickins | 07ca760 | 2022-02-14 18:29:54 -0800 | [diff] [blame] | 3081 | tail->mlock_count = 0; |
| 3082 | else |
Matthew Wilcox (Oracle) | cb29e79 | 2024-08-21 20:34:38 +0100 | [diff] [blame] | 3083 | list_add_tail(&tail->lru, &folio->lru); |
Alex Shi | 6dbb574 | 2020-12-15 12:33:29 -0800 | [diff] [blame] | 3084 | SetPageLRU(tail); |
Alex Shi | 88dcb9a | 2020-12-15 12:33:20 -0800 | [diff] [blame] | 3085 | } |
| 3086 | } |
| 3087 | |
David Hildenbrand | 07e09c4 | 2023-08-21 18:08:49 +0200 | [diff] [blame] | 3088 | static void __split_huge_page_tail(struct folio *folio, int tail, |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3089 | struct lruvec *lruvec, struct list_head *list, |
| 3090 | unsigned int new_order) |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3091 | { |
David Hildenbrand | 07e09c4 | 2023-08-21 18:08:49 +0200 | [diff] [blame] | 3092 | struct page *head = &folio->page; |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3093 | struct page *page_tail = head + tail; |
David Hildenbrand | 07e09c4 | 2023-08-21 18:08:49 +0200 | [diff] [blame] | 3094 | /* |
| 3095 | * Careful: new_folio is not a "real" folio until we have cleared PageTail. |
| 3096 | * Don't pass it around before clear_compound_head(). |
| 3097 | */ |
| 3098 | struct folio *new_folio = (struct folio *)page_tail; |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3099 | |
Kirill A. Shutemov | 8df651c | 2016-03-15 14:57:30 -0700 | [diff] [blame] | 3100 | VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3101 | |
| 3102 | /* |
Konstantin Khlebnikov | 605ca5e | 2018-04-05 16:23:28 -0700 | [diff] [blame] | 3103 | * Clone page flags before unfreezing refcount. |
| 3104 | * |
| 3105 | * A successful get_page_unless_zero() might be followed by a flags change, |
Haitao Shi | 8958b24 | 2020-12-15 20:47:26 -0800 | [diff] [blame] | 3106 | * for example lock_page() which sets PG_waiters. |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 3107 | * |
| 3108 | * Note that for mapped sub-pages of an anonymous THP, |
Matthew Wilcox (Oracle) | 684555a | 2022-09-02 20:46:49 +0100 | [diff] [blame] | 3109 | * PG_anon_exclusive has been cleared in unmap_folio() and is stored in |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 3110 | * the migration entry instead from where remap_page() will restore it. |
| 3111 | * We can still have PG_anon_exclusive set on effectively unmapped and |
| 3112 | * unreferenced sub-pages of an anonymous THP: we can simply drop |
| 3113 | * PG_anon_exclusive (-> PG_mappedtodisk) for these here. |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3114 | */ |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3115 | page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; |
| 3116 | page_tail->flags |= (head->flags & |
| 3117 | ((1L << PG_referenced) | |
| 3118 | (1L << PG_swapbacked) | |
Huang Ying | 38d8b4e | 2017-07-06 15:37:18 -0700 | [diff] [blame] | 3119 | (1L << PG_swapcache) | |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3120 | (1L << PG_mlocked) | |
| 3121 | (1L << PG_uptodate) | |
| 3122 | (1L << PG_active) | |
Johannes Weiner | 1899ad1 | 2018-10-26 15:06:04 -0700 | [diff] [blame] | 3123 | (1L << PG_workingset) | |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3124 | (1L << PG_locked) | |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 3125 | (1L << PG_unevictable) | |
Matthew Wilcox (Oracle) | 7a87225 | 2024-08-21 20:34:43 +0100 | [diff] [blame] | 3126 | #ifdef CONFIG_ARCH_USES_PG_ARCH_2 |
Catalin Marinas | 72e6afa | 2020-07-02 10:19:30 +0100 | [diff] [blame] | 3127 | (1L << PG_arch_2) | |
Matthew Wilcox (Oracle) | 7a87225 | 2024-08-21 20:34:43 +0100 | [diff] [blame] | 3128 | #endif |
| 3129 | #ifdef CONFIG_ARCH_USES_PG_ARCH_3 |
Peter Collingbourne | ef6458b | 2022-11-03 18:10:37 -0700 | [diff] [blame] | 3130 | (1L << PG_arch_3) | |
Catalin Marinas | 72e6afa | 2020-07-02 10:19:30 +0100 | [diff] [blame] | 3131 | #endif |
Yu Zhao | ec1c86b | 2022-09-18 02:00:02 -0600 | [diff] [blame] | 3132 | (1L << PG_dirty) | |
| 3133 | LRU_GEN_MASK | LRU_REFS_MASK)); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3134 | |
Hugh Dickins | cb67f42 | 2022-11-02 18:51:38 -0700 | [diff] [blame] | 3135 | /* ->mapping in first and second tail page is replaced by other uses */ |
Hugh Dickins | 173d9d9 | 2018-11-30 14:10:16 -0800 | [diff] [blame] | 3136 | VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, |
| 3137 | page_tail); |
| 3138 | page_tail->mapping = head->mapping; |
| 3139 | page_tail->index = head->index + tail; |
Mel Gorman | 71e2d66 | 2022-10-19 14:41:56 +0100 | [diff] [blame] | 3140 | |
| 3141 | /* |
David Hildenbrand | cfeed8f | 2023-08-21 18:08:46 +0200 | [diff] [blame] | 3142 | * page->private should not be set in tail pages. Fix up and warn once |
| 3143 | * if private is unexpectedly set. |
Mel Gorman | 71e2d66 | 2022-10-19 14:41:56 +0100 | [diff] [blame] | 3144 | */ |
David Hildenbrand | cfeed8f | 2023-08-21 18:08:46 +0200 | [diff] [blame] | 3145 | if (unlikely(page_tail->private)) { |
| 3146 | VM_WARN_ON_ONCE_PAGE(true, page_tail); |
Mel Gorman | 71e2d66 | 2022-10-19 14:41:56 +0100 | [diff] [blame] | 3147 | page_tail->private = 0; |
| 3148 | } |
David Hildenbrand | 07e09c4 | 2023-08-21 18:08:49 +0200 | [diff] [blame] | 3149 | if (folio_test_swapcache(folio)) |
| 3150 | new_folio->swap.val = folio->swap.val + tail; |
Hugh Dickins | 173d9d9 | 2018-11-30 14:10:16 -0800 | [diff] [blame] | 3151 | |
Konstantin Khlebnikov | 605ca5e | 2018-04-05 16:23:28 -0700 | [diff] [blame] | 3152 | /* Page flags must be visible before we make the page non-compound. */ |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3153 | smp_wmb(); |
| 3154 | |
Konstantin Khlebnikov | 605ca5e | 2018-04-05 16:23:28 -0700 | [diff] [blame] | 3155 | /* |
| 3156 | * Clear PageTail before unfreezing page refcount. |
| 3157 | * |
| 3158 | * A successful get_page_unless_zero() might be followed by put_page(), |
| 3159 | * which needs a correct compound_head(). |
| 3160 | */ |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3161 | clear_compound_head(page_tail); |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3162 | if (new_order) { |
| 3163 | prep_compound_page(page_tail, new_order); |
Matthew Wilcox (Oracle) | 85edc15 | 2024-03-21 14:24:41 +0000 | [diff] [blame] | 3164 | folio_set_large_rmappable(new_folio); |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3165 | } |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3166 | |
Konstantin Khlebnikov | 605ca5e | 2018-04-05 16:23:28 -0700 | [diff] [blame] | 3167 | /* Finally unfreeze refcount. Additional reference from page cache. */ |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3168 | page_ref_unfreeze(page_tail, |
| 3169 | 1 + ((!folio_test_anon(folio) || folio_test_swapcache(folio)) ? |
| 3170 | folio_nr_pages(new_folio) : 0)); |
Konstantin Khlebnikov | 605ca5e | 2018-04-05 16:23:28 -0700 | [diff] [blame] | 3171 | |
Kefeng Wang | b754276 | 2023-11-10 11:33:21 +0800 | [diff] [blame] | 3172 | if (folio_test_young(folio)) |
| 3173 | folio_set_young(new_folio); |
| 3174 | if (folio_test_idle(folio)) |
| 3175 | folio_set_idle(new_folio); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3176 | |
Kefeng Wang | c825301 | 2023-10-18 22:08:02 +0800 | [diff] [blame] | 3177 | folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio)); |
Michal Hocko | 94723aa | 2018-04-10 16:30:07 -0700 | [diff] [blame] | 3178 | |
| 3179 | /* |
| 3180 | * Always add to the tail because some iterators expect new |
| 3181 | * pages to show up after the currently processed elements - e.g. |
| 3182 | * migrate_pages(). |
| 3183 | */ |
Matthew Wilcox (Oracle) | cb29e79 | 2024-08-21 20:34:38 +0100 | [diff] [blame] | 3184 | lru_add_page_tail(folio, page_tail, lruvec, list); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3185 | } |
| 3186 | |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3187 | static void __split_huge_page(struct page *page, struct list_head *list, |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3188 | pgoff_t end, unsigned int new_order) |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3189 | { |
Matthew Wilcox (Oracle) | e809c3f | 2021-06-28 21:59:47 -0400 | [diff] [blame] | 3190 | struct folio *folio = page_folio(page); |
| 3191 | struct page *head = &folio->page; |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3192 | struct lruvec *lruvec; |
Matthew Wilcox (Oracle) | 4101196 | 2019-09-23 15:34:52 -0700 | [diff] [blame] | 3193 | struct address_space *swap_cache = NULL; |
| 3194 | unsigned long offset = 0; |
Hugh Dickins | 509f006 | 2023-07-25 16:45:10 +0200 | [diff] [blame] | 3195 | int i, nr_dropped = 0; |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3196 | unsigned int new_nr = 1 << new_order; |
Zi Yan | 502003b | 2024-02-26 15:55:29 -0500 | [diff] [blame] | 3197 | int order = folio_order(folio); |
| 3198 | unsigned int nr = 1 << order; |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3199 | |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3200 | /* complete memcg works before add pages to LRU */ |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3201 | split_page_memcg(head, order, new_order); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3202 | |
David Hildenbrand | 07e09c4 | 2023-08-21 18:08:49 +0200 | [diff] [blame] | 3203 | if (folio_test_anon(folio) && folio_test_swapcache(folio)) { |
Kairui Song | 7aad25b | 2024-05-22 01:58:53 +0800 | [diff] [blame] | 3204 | offset = swap_cache_index(folio->swap); |
David Hildenbrand | 07e09c4 | 2023-08-21 18:08:49 +0200 | [diff] [blame] | 3205 | swap_cache = swap_address_space(folio->swap); |
Matthew Wilcox (Oracle) | 4101196 | 2019-09-23 15:34:52 -0700 | [diff] [blame] | 3206 | xa_lock(&swap_cache->i_pages); |
| 3207 | } |
| 3208 | |
Ingo Molnar | f0953a1 | 2021-05-06 18:06:47 -0700 | [diff] [blame] | 3209 | /* lock lru list/PageCompound, ref frozen by page_ref_freeze */ |
Matthew Wilcox (Oracle) | e809c3f | 2021-06-28 21:59:47 -0400 | [diff] [blame] | 3210 | lruvec = folio_lruvec_lock(folio); |
Alex Shi | b676983 | 2020-12-15 12:33:33 -0800 | [diff] [blame] | 3211 | |
Yang Shi | eac96c3 | 2021-10-28 14:36:11 -0700 | [diff] [blame] | 3212 | ClearPageHasHWPoisoned(head); |
| 3213 | |
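| | /* |
| | * Carve the tail pages off from the end, new_nr pages at a time; the |
| | * first new_nr pages remain part of @head. |
| | */ |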
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3214 | for (i = nr - new_nr; i >= new_nr; i -= new_nr) { |
| 3215 | __split_huge_page_tail(folio, i, lruvec, list, new_order); |
Hugh Dickins | d144bf6 | 2021-09-02 14:54:21 -0700 | [diff] [blame] | 3216 | /* Some pages can be beyond EOF: drop them from page cache */ |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3217 | if (head[i].index >= end) { |
Matthew Wilcox (Oracle) | fb5c202 | 2022-06-28 20:15:29 -0400 | [diff] [blame] | 3218 | struct folio *tail = page_folio(head + i); |
| 3219 | |
Matthew Wilcox (Oracle) | 435a755 | 2024-02-28 16:42:36 +0000 | [diff] [blame] | 3220 | if (shmem_mapping(folio->mapping)) |
Hugh Dickins | 509f006 | 2023-07-25 16:45:10 +0200 | [diff] [blame] | 3221 | nr_dropped++; |
Matthew Wilcox (Oracle) | fb5c202 | 2022-06-28 20:15:29 -0400 | [diff] [blame] | 3222 | else if (folio_test_clear_dirty(tail)) |
| 3223 | folio_account_cleaned(tail, |
| 3224 | inode_to_wb(folio->mapping->host)); |
| 3225 | __filemap_remove_folio(tail, NULL); |
| 3226 | folio_put(tail); |
Matthew Wilcox (Oracle) | 4101196 | 2019-09-23 15:34:52 -0700 | [diff] [blame] | 3227 | } else if (!PageAnon(page)) { |
Matthew Wilcox (Oracle) | 435a755 | 2024-02-28 16:42:36 +0000 | [diff] [blame] | 3228 | __xa_store(&folio->mapping->i_pages, head[i].index, |
Matthew Wilcox (Oracle) | 4101196 | 2019-09-23 15:34:52 -0700 | [diff] [blame] | 3229 | head + i, 0); |
| 3230 | } else if (swap_cache) { |
| 3231 | __xa_store(&swap_cache->i_pages, offset + i, |
| 3232 | head + i, 0); |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3233 | } |
| 3234 | } |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3235 | |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3236 | if (!new_order) |
| 3237 | ClearPageCompound(head); |
| 3238 | else { |
| 3239 | struct folio *new_folio = (struct folio *)head; |
| 3240 | |
| 3241 | folio_set_order(new_folio, new_order); |
| 3242 | } |
Alex Shi | 6168d0d | 2020-12-15 12:34:29 -0800 | [diff] [blame] | 3243 | unlock_page_lruvec(lruvec); |
Alex Shi | b676983 | 2020-12-15 12:33:33 -0800 | [diff] [blame] | 3244 | /* Caller disabled irqs, so they are still disabled here */ |
Vlastimil Babka | f7da677 | 2019-08-24 17:54:59 -0700 | [diff] [blame] | 3245 | |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3246 | split_page_owner(head, order, new_order); |
Yu Zhao | 95599ef | 2024-09-05 22:21:07 -0600 | [diff] [blame] | 3247 | pgalloc_tag_split(folio, order, new_order); |
Vlastimil Babka | f7da677 | 2019-08-24 17:54:59 -0700 | [diff] [blame] | 3248 | |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3249 | /* See comment in __split_huge_page_tail() */ |
Matthew Wilcox (Oracle) | 435a755 | 2024-02-28 16:42:36 +0000 | [diff] [blame] | 3250 | if (folio_test_anon(folio)) { |
Matthew Wilcox | aa5dc07 | 2017-12-04 10:16:10 -0500 | [diff] [blame] | 3251 | /* Additional pin to swap cache */ |
Matthew Wilcox (Oracle) | 435a755 | 2024-02-28 16:42:36 +0000 | [diff] [blame] | 3252 | if (folio_test_swapcache(folio)) { |
| 3253 | folio_ref_add(folio, 1 + new_nr); |
Matthew Wilcox (Oracle) | 4101196 | 2019-09-23 15:34:52 -0700 | [diff] [blame] | 3254 | xa_unlock(&swap_cache->i_pages); |
| 3255 | } else { |
Matthew Wilcox (Oracle) | 435a755 | 2024-02-28 16:42:36 +0000 | [diff] [blame] | 3256 | folio_ref_inc(folio); |
Matthew Wilcox (Oracle) | 4101196 | 2019-09-23 15:34:52 -0700 | [diff] [blame] | 3257 | } |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3258 | } else { |
Matthew Wilcox | aa5dc07 | 2017-12-04 10:16:10 -0500 | [diff] [blame] | 3259 | /* Additional pin to page cache */ |
Matthew Wilcox (Oracle) | 435a755 | 2024-02-28 16:42:36 +0000 | [diff] [blame] | 3260 | folio_ref_add(folio, 1 + new_nr); |
| 3261 | xa_unlock(&folio->mapping->i_pages); |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3262 | } |
Alex Shi | b676983 | 2020-12-15 12:33:33 -0800 | [diff] [blame] | 3263 | local_irq_enable(); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3264 | |
Hugh Dickins | 509f006 | 2023-07-25 16:45:10 +0200 | [diff] [blame] | 3265 | if (nr_dropped) |
Matthew Wilcox (Oracle) | 435a755 | 2024-02-28 16:42:36 +0000 | [diff] [blame] | 3266 | shmem_uncharge(folio->mapping->host, nr_dropped); |
Yu Zhao | b1f2020 | 2024-08-30 11:03:36 +0100 | [diff] [blame] | 3267 | remap_page(folio, nr, PageAnon(head) ? RMP_USE_SHARED_ZEROPAGE : 0); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3268 | |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3269 | /* |
| 3270 | * set page to its compound_head when split to non order-0 pages, so |
| 3271 | * we can skip unlocking it below, since PG_locked is transferred to |
| 3272 | * the compound_head of the page and the caller will unlock it. |
| 3273 | */ |
| 3274 | if (new_order) |
| 3275 | page = compound_head(page); |
| 3276 | |
| 3277 | for (i = 0; i < nr; i += new_nr) { |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3278 | struct page *subpage = head + i; |
Matthew Wilcox (Oracle) | 435a755 | 2024-02-28 16:42:36 +0000 | [diff] [blame] | 3279 | struct folio *new_folio = page_folio(subpage); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3280 | if (subpage == page) |
| 3281 | continue; |
Matthew Wilcox (Oracle) | 435a755 | 2024-02-28 16:42:36 +0000 | [diff] [blame] | 3282 | folio_unlock(new_folio); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3283 | |
| 3284 | /* |
| 3285 | * Subpages may be freed if there wasn't any mapping, |
| 3286 | * e.g. if add_to_swap() is running on an lru page that |
| 3287 | * had its mapping zapped. Freeing these pages requires |
| 3288 | * taking the lru_lock, so we do the put_page of the |
| 3289 | * tail pages after the split is complete. |
| 3290 | */ |
Miaohe Lin | 0b17546 | 2022-07-04 21:21:56 +0800 | [diff] [blame] | 3291 | free_page_and_swap_cache(subpage); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3292 | } |
| 3293 | } |
| 3294 | |
Huang Ying | b8f593c | 2017-07-06 15:37:28 -0700 | [diff] [blame] | 3295 | /* Racy check whether the huge page can be split */ |
David Hildenbrand | 8710f6e | 2024-08-02 17:55:20 +0200 | [diff] [blame] | 3296 | bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins) |
Huang Ying | b8f593c | 2017-07-06 15:37:28 -0700 | [diff] [blame] | 3297 | { |
| 3298 | int extra_pins; |
| 3299 | |
Matthew Wilcox | aa5dc07 | 2017-12-04 10:16:10 -0500 | [diff] [blame] | 3300 | /* Additional pins from page cache */ |
Matthew Wilcox (Oracle) | d4b4084 | 2022-02-04 14:13:31 -0500 | [diff] [blame] | 3301 | if (folio_test_anon(folio)) |
| 3302 | extra_pins = folio_test_swapcache(folio) ? |
| 3303 | folio_nr_pages(folio) : 0; |
Huang Ying | b8f593c | 2017-07-06 15:37:28 -0700 | [diff] [blame] | 3304 | else |
Matthew Wilcox (Oracle) | d4b4084 | 2022-02-04 14:13:31 -0500 | [diff] [blame] | 3305 | extra_pins = folio_nr_pages(folio); |
Huang Ying | b8f593c | 2017-07-06 15:37:28 -0700 | [diff] [blame] | 3306 | if (pextra_pins) |
| 3307 | *pextra_pins = extra_pins; |
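| | /* |
| | * The folio can only be split if every reference is accounted for: the |
| | * mappings (folio_mapcount), the page/swap cache pins computed above, |
| | * and the references held by the caller. |
| | */ |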
David Hildenbrand | 8710f6e | 2024-08-02 17:55:20 +0200 | [diff] [blame] | 3308 | return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - |
| 3309 | caller_pins; |
Huang Ying | b8f593c | 2017-07-06 15:37:28 -0700 | [diff] [blame] | 3310 | } |
| 3311 | |
Andrea Arcangeli | 6d0a07e | 2016-05-12 15:42:25 -0700 | [diff] [blame] | 3312 | /* |
John Hubbard | a8353dc | 2024-03-24 21:44:52 -0700 | [diff] [blame] | 3313 | * This function splits a large folio into smaller folios of order @new_order. |
| 3314 | * @page can point to any page of the large folio to split. The split operation |
| 3315 | * does not change the position of @page. |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3316 | * |
John Hubbard | a8353dc | 2024-03-24 21:44:52 -0700 | [diff] [blame] | 3317 | * Prerequisites: |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3318 | * |
John Hubbard | a8353dc | 2024-03-24 21:44:52 -0700 | [diff] [blame] | 3319 | * 1) The caller must hold a reference on the @page's owning folio, also known |
| 3320 | * as the large folio. |
| 3321 | * |
| 3322 | * 2) The large folio must be locked. |
| 3323 | * |
| 3324 | * 3) The folio must not be pinned. Any unexpected folio references, including |
| 3325 | * GUP pins, will result in the folio not getting split; instead, the caller |
David Hildenbrand | d21f996 | 2024-04-18 17:18:34 +0200 | [diff] [blame] | 3326 | * will receive an -EAGAIN. |
John Hubbard | a8353dc | 2024-03-24 21:44:52 -0700 | [diff] [blame] | 3327 | * |
| 3328 | * 4) @new_order > 1, usually. Splitting to order-1 is not supported for |
| 3329 | * anonymous folios, because folio->_deferred_list, which |
| 3330 | * is used by partially mapped folios, is stored in subpage 2, but an order-1 |
| 3331 | * folio only has subpages 0 and 1. File-backed order-1 folios are supported, |
| 3332 | * since they do not use _deferred_list. |
| 3333 | * |
| 3334 | * After splitting, the caller's folio reference will be transferred to @page, |
| 3335 | * resulting in a raised refcount of @page after this call. The other pages may |
| 3336 | * be freed if they are not mapped. |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3337 | * |
| 3338 | * If @list is null, tail pages will be added to LRU list, otherwise, to @list. |
| 3339 | * |
John Hubbard | a8353dc | 2024-03-24 21:44:52 -0700 | [diff] [blame] | 3340 | * Pages in @new_order will inherit the mapping, flags, and so on from the |
| 3341 | * huge page. |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3342 | * |
John Hubbard | a8353dc | 2024-03-24 21:44:52 -0700 | [diff] [blame] | 3343 | * Returns 0 if the huge page was split successfully. |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3344 | * |
David Hildenbrand | d21f996 | 2024-04-18 17:18:34 +0200 | [diff] [blame] | 3345 | * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if |
| 3346 | * the folio was concurrently removed from the page cache. |
| 3347 | * |
| 3348 | * Returns -EBUSY when trying to split the huge zeropage, if the folio is |
| 3349 | * under writeback, if fs-specific folio metadata cannot currently be |
| 3350 | * released, or if some unexpected race happened (e.g., anon VMA disappeared, |
| 3351 | * truncation). |
| 3352 | * |
Luis Chamberlain | e220917 | 2024-08-22 15:50:12 +0200 | [diff] [blame] | 3353 | * Callers should ensure that the order respects the address space mapping |
| 3354 | * min-order if one is set for non-anonymous folios. |
| 3355 | * |
David Hildenbrand | d21f996 | 2024-04-18 17:18:34 +0200 | [diff] [blame] | 3356 | * Returns -EINVAL when trying to split to an order that is incompatible |
| 3357 | * with the folio. Splitting to order 0 is compatible with all folios. |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3358 | */ |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3359 | int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, |
| 3360 | unsigned int new_order) |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3361 | { |
Matthew Wilcox (Oracle) | 4eecb8b | 2022-01-28 23:32:59 -0500 | [diff] [blame] | 3362 | struct folio *folio = page_folio(page); |
Matthew Wilcox (Oracle) | f8baa6b | 2023-01-11 14:29:12 +0000 | [diff] [blame] | 3363 | struct deferred_split *ds_queue = get_deferred_split_queue(folio); |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3364 | /* reset xarray order to new order after split */ |
| 3365 | XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order); |
Barry Song | 5d65c8d | 2024-08-24 13:04:40 +1200 | [diff] [blame] | 3366 | bool is_anon = folio_test_anon(folio); |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3367 | struct address_space *mapping = NULL; |
Barry Song | 5d65c8d | 2024-08-24 13:04:40 +1200 | [diff] [blame] | 3368 | struct anon_vma *anon_vma = NULL; |
Lance Yang | f216c84 | 2024-06-28 21:07:49 +0800 | [diff] [blame] | 3369 | int order = folio_order(folio); |
Yang Shi | 504e070 | 2021-06-15 18:24:07 -0700 | [diff] [blame] | 3370 | int extra_pins, ret; |
Hugh Dickins | 006d3ff | 2018-11-30 14:10:21 -0800 | [diff] [blame] | 3371 | pgoff_t end; |
Xu Yu | 478d134 | 2022-04-28 23:14:43 -0700 | [diff] [blame] | 3372 | bool is_hzp; |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3373 | |
Matthew Wilcox (Oracle) | 3e9a13d | 2022-09-02 20:46:48 +0100 | [diff] [blame] | 3374 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
| 3375 | VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3376 | |
Zi Yan | 1412ecb | 2024-03-07 13:18:53 -0500 | [diff] [blame] | 3377 | if (new_order >= folio_order(folio)) |
| 3378 | return -EINVAL; |
| 3379 | |
Barry Song | 5d65c8d | 2024-08-24 13:04:40 +1200 | [diff] [blame] | 3380 | if (is_anon) { |
Ran Xiaokai | 6a50c9b | 2024-06-07 17:40:48 +0800 | [diff] [blame] | 3381 | /* order-1 is not supported for anonymous THP. */ |
| 3382 | if (new_order == 1) { |
| 3383 | VM_WARN_ONCE(1, "Cannot split to order-1 folio"); |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3384 | return -EINVAL; |
Ran Xiaokai | 6a50c9b | 2024-06-07 17:40:48 +0800 | [diff] [blame] | 3385 | } |
| 3386 | } else if (new_order) { |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3387 | /* Split shmem folio to non-zero order not supported */ |
| 3388 | if (shmem_mapping(folio->mapping)) { |
| 3389 | VM_WARN_ONCE(1, |
| 3390 | "Cannot split shmem folio to non-0 order"); |
| 3391 | return -EINVAL; |
| 3392 | } |
Ran Xiaokai | 6a50c9b | 2024-06-07 17:40:48 +0800 | [diff] [blame] | 3393 | /* |
 | 3394 | * No split if the file system does not support large folios.
| 3395 | * Note that we might still have THPs in such mappings due to |
| 3396 | * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping |
| 3397 | * does not actually support large folios properly. |
| 3398 | */ |
| 3399 | if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && |
| 3400 | !mapping_large_folio_support(folio->mapping)) { |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3401 | VM_WARN_ONCE(1, |
| 3402 | "Cannot split file folio to non-0 order"); |
| 3403 | return -EINVAL; |
| 3404 | } |
| 3405 | } |
| 3406 | |
Ran Xiaokai | 6a50c9b | 2024-06-07 17:40:48 +0800 | [diff] [blame] | 3407 | /* Only swapping a whole PMD-mapped folio is supported */ |
| 3408 | if (folio_test_swapcache(folio) && new_order) |
| 3409 | return -EINVAL; |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3410 | |
Matthew Wilcox (Oracle) | 5beaee5 | 2024-03-26 20:28:22 +0000 | [diff] [blame] | 3411 | is_hzp = is_huge_zero_folio(folio); |
Naoya Horiguchi | 4737edb | 2023-04-06 17:20:04 +0900 | [diff] [blame] | 3412 | if (is_hzp) { |
| 3413 | pr_warn_ratelimited("Called split_huge_page for huge zero page\n"); |
Xu Yu | 478d134 | 2022-04-28 23:14:43 -0700 | [diff] [blame] | 3414 | return -EBUSY; |
Naoya Horiguchi | 4737edb | 2023-04-06 17:20:04 +0900 | [diff] [blame] | 3415 | } |
Xu Yu | 478d134 | 2022-04-28 23:14:43 -0700 | [diff] [blame] | 3416 | |
Matthew Wilcox (Oracle) | 3e9a13d | 2022-09-02 20:46:48 +0100 | [diff] [blame] | 3417 | if (folio_test_writeback(folio)) |
Huang Ying | 5980768 | 2017-09-06 16:22:34 -0700 | [diff] [blame] | 3418 | return -EBUSY; |
| 3419 | |
Barry Song | 5d65c8d | 2024-08-24 13:04:40 +1200 | [diff] [blame] | 3420 | if (is_anon) { |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3421 | /* |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 3422 | * The caller does not necessarily hold an mmap_lock that would |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3423 | * prevent the anon_vma disappearing so we first take a
| 3424 | * reference to it and then lock the anon_vma for write. This |
Matthew Wilcox (Oracle) | 2f031c6 | 2022-01-29 16:06:53 -0500 | [diff] [blame] | 3425 | * is similar to folio_lock_anon_vma_read except the write lock |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3426 | * is taken to serialise against parallel split or collapse |
| 3427 | * operations. |
| 3428 | */ |
Matthew Wilcox (Oracle) | 29eea9b | 2022-09-02 20:46:50 +0100 | [diff] [blame] | 3429 | anon_vma = folio_get_anon_vma(folio); |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3430 | if (!anon_vma) { |
| 3431 | ret = -EBUSY; |
| 3432 | goto out; |
| 3433 | } |
Hugh Dickins | 006d3ff | 2018-11-30 14:10:21 -0800 | [diff] [blame] | 3434 | end = -1; |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3435 | mapping = NULL; |
| 3436 | anon_vma_lock_write(anon_vma); |
| 3437 | } else { |
Luis Chamberlain | e220917 | 2024-08-22 15:50:12 +0200 | [diff] [blame] | 3438 | unsigned int min_order; |
Yin Fengwei | 6a3edd29 | 2022-08-10 14:49:07 +0800 | [diff] [blame] | 3439 | gfp_t gfp; |
| 3440 | |
Matthew Wilcox (Oracle) | 3e9a13d | 2022-09-02 20:46:48 +0100 | [diff] [blame] | 3441 | mapping = folio->mapping; |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3442 | |
| 3443 | /* Truncated ? */ |
| 3444 | if (!mapping) { |
| 3445 | ret = -EBUSY; |
| 3446 | goto out; |
| 3447 | } |
| 3448 | |
Luis Chamberlain | e220917 | 2024-08-22 15:50:12 +0200 | [diff] [blame] | 3449 | min_order = mapping_min_folio_order(folio->mapping); |
| 3450 | if (new_order < min_order) { |
| 3451 | VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u", |
| 3452 | min_order); |
| 3453 | ret = -EINVAL; |
| 3454 | goto out; |
| 3455 | } |
| 3456 | |
Yin Fengwei | 6a3edd29 | 2022-08-10 14:49:07 +0800 | [diff] [blame] | 3457 | gfp = current_gfp_context(mapping_gfp_mask(mapping) & |
| 3458 | GFP_RECLAIM_MASK); |
| 3459 | |
David Howells | 0201ebf | 2023-06-28 11:48:51 +0100 | [diff] [blame] | 3460 | if (!filemap_release_folio(folio, gfp)) { |
Yin Fengwei | 6a3edd29 | 2022-08-10 14:49:07 +0800 | [diff] [blame] | 3461 | ret = -EBUSY; |
| 3462 | goto out; |
| 3463 | } |
| 3464 | |
Matthew Wilcox (Oracle) | 3e9a13d | 2022-09-02 20:46:48 +0100 | [diff] [blame] | 3465 | xas_split_alloc(&xas, folio, folio_order(folio), gfp); |
Matthew Wilcox (Oracle) | 6b24ca4 | 2020-06-27 22:19:08 -0400 | [diff] [blame] | 3466 | if (xas_error(&xas)) { |
| 3467 | ret = xas_error(&xas); |
| 3468 | goto out; |
| 3469 | } |
| 3470 | |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3471 | anon_vma = NULL; |
| 3472 | i_mmap_lock_read(mapping); |
Hugh Dickins | 006d3ff | 2018-11-30 14:10:21 -0800 | [diff] [blame] | 3473 | |
| 3474 | /* |
 | 3475 | * __split_huge_page() may need to trim off pages beyond EOF:
| 3476 | * but on 32-bit, i_size_read() takes an irq-unsafe seqlock, |
| 3477 | * which cannot be nested inside the page tree lock. So note |
| 3478 | * end now: i_size itself may be changed at any moment, but |
Matthew Wilcox (Oracle) | 3e9a13d | 2022-09-02 20:46:48 +0100 | [diff] [blame] | 3479 | * folio lock is good enough to serialize the trimming. |
Hugh Dickins | 006d3ff | 2018-11-30 14:10:21 -0800 | [diff] [blame] | 3480 | */ |
| 3481 | end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); |
Hugh Dickins | d144bf6 | 2021-09-02 14:54:21 -0700 | [diff] [blame] | 3482 | if (shmem_mapping(mapping)) |
| 3483 | end = shmem_fallocend(mapping->host, end); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3484 | } |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3485 | |
| 3486 | /* |
Matthew Wilcox (Oracle) | 684555a | 2022-09-02 20:46:49 +0100 | [diff] [blame] | 3487 | * Racy check whether we can split the folio, before unmap_folio()
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3488 | * splits the PMDs
| 3489 | */ |
David Hildenbrand | 8710f6e | 2024-08-02 17:55:20 +0200 | [diff] [blame] | 3490 | if (!can_split_folio(folio, 1, &extra_pins)) { |
Baolin Wang | fd4a7ac | 2022-10-24 16:34:22 +0800 | [diff] [blame] | 3491 | ret = -EAGAIN; |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3492 | goto out_unlock; |
| 3493 | } |
| 3494 | |
Matthew Wilcox (Oracle) | 684555a | 2022-09-02 20:46:49 +0100 | [diff] [blame] | 3495 | unmap_folio(folio); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3496 | |
Alex Shi | b676983 | 2020-12-15 12:33:33 -0800 | [diff] [blame] | 3497 | /* block interrupt reentry in xa_lock and spinlock */ |
| 3498 | local_irq_disable(); |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3499 | if (mapping) { |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3500 | /* |
Matthew Wilcox (Oracle) | 3e9a13d | 2022-09-02 20:46:48 +0100 | [diff] [blame] | 3501 | * Check if the folio is present in the page cache.
 | 3502 | * We assume all tail pages are present too, if the folio is there.
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3503 | */ |
Matthew Wilcox (Oracle) | 6b24ca4 | 2020-06-27 22:19:08 -0400 | [diff] [blame] | 3504 | xas_lock(&xas); |
| 3505 | xas_reset(&xas); |
Matthew Wilcox (Oracle) | 3e9a13d | 2022-09-02 20:46:48 +0100 | [diff] [blame] | 3506 | if (xas_load(&xas) != folio) |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3507 | goto fail; |
| 3508 | } |
| 3509 | |
Joonsoo Kim | 0139aa7 | 2016-05-19 17:10:49 -0700 | [diff] [blame] | 3510 | /* Prevent deferred_split_scan() touching ->_refcount */ |
Yang Shi | 364c1ee | 2019-09-23 15:38:06 -0700 | [diff] [blame] | 3511 | spin_lock(&ds_queue->split_queue_lock); |
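	/*
	 * The expected refcount is the caller's reference plus the page/swap
	 * cache references counted into extra_pins by can_split_folio()
	 * above; any additional pin (e.g. GUP) makes the freeze fail and the
	 * split bail out with -EAGAIN.
	 */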
Matthew Wilcox (Oracle) | 3e9a13d | 2022-09-02 20:46:48 +0100 | [diff] [blame] | 3512 | if (folio_ref_freeze(folio, 1 + extra_pins)) { |
Matthew Wilcox (Oracle) | 8897277 | 2024-02-26 15:55:28 -0500 | [diff] [blame] | 3513 | if (folio_order(folio) > 1 && |
| 3514 | !list_empty(&folio->_deferred_list)) { |
Yang Shi | 364c1ee | 2019-09-23 15:38:06 -0700 | [diff] [blame] | 3515 | ds_queue->split_queue_len--; |
Usama Arif | 8422acd | 2024-08-30 11:03:38 +0100 | [diff] [blame] | 3516 | if (folio_test_partially_mapped(folio)) { |
| 3517 | __folio_clear_partially_mapped(folio); |
| 3518 | mod_mthp_stat(folio_order(folio), |
| 3519 | MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); |
| 3520 | } |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3521 | /* |
| 3522 | * Reinitialize page_deferred_list after removing the |
| 3523 | * page from the split_queue, otherwise a subsequent |
| 3524 | * split will see list corruption when checking the |
| 3525 | * page_deferred_list. |
| 3526 | */ |
| 3527 | list_del_init(&folio->_deferred_list); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3528 | } |
Wei Yang | afb9717 | 2020-01-30 22:14:35 -0800 | [diff] [blame] | 3529 | spin_unlock(&ds_queue->split_queue_lock); |
Kirill A. Shutemov | 06d3eff | 2019-10-18 20:20:30 -0700 | [diff] [blame] | 3530 | if (mapping) { |
Matthew Wilcox (Oracle) | 3e9a13d | 2022-09-02 20:46:48 +0100 | [diff] [blame] | 3531 | int nr = folio_nr_pages(folio); |
Muchun Song | bf9ecea | 2021-02-24 12:03:27 -0800 | [diff] [blame] | 3532 | |
Matthew Wilcox (Oracle) | 3e9a13d | 2022-09-02 20:46:48 +0100 | [diff] [blame] | 3533 | xas_split(&xas, folio, folio_order(folio)); |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3534 | if (folio_test_pmd_mappable(folio) && |
| 3535 | new_order < HPAGE_PMD_ORDER) { |
Stefan Roesch | a48d5bd | 2023-11-06 10:19:18 -0800 | [diff] [blame] | 3536 | if (folio_test_swapbacked(folio)) { |
| 3537 | __lruvec_stat_mod_folio(folio, |
| 3538 | NR_SHMEM_THPS, -nr); |
| 3539 | } else { |
| 3540 | __lruvec_stat_mod_folio(folio, |
| 3541 | NR_FILE_THPS, -nr); |
| 3542 | filemap_nr_thps_dec(mapping); |
| 3543 | } |
Marek Szyprowski | 1ca7554 | 2021-10-18 15:16:19 -0700 | [diff] [blame] | 3544 | } |
Kirill A. Shutemov | 06d3eff | 2019-10-18 20:20:30 -0700 | [diff] [blame] | 3545 | } |
| 3546 | |
Barry Song | 5d65c8d | 2024-08-24 13:04:40 +1200 | [diff] [blame] | 3547 | if (is_anon) { |
| 3548 | mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); |
| 3549 | mod_mthp_stat(new_order, MTHP_STAT_NR_ANON, 1 << (order - new_order)); |
| 3550 | } |
Zi Yan | c010d47 | 2024-02-26 15:55:33 -0500 | [diff] [blame] | 3551 | __split_huge_page(page, list, end, new_order); |
Huang Ying | c4f9c70 | 2020-10-15 20:06:07 -0700 | [diff] [blame] | 3552 | ret = 0; |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3553 | } else { |
Yang Shi | 364c1ee | 2019-09-23 15:38:06 -0700 | [diff] [blame] | 3554 | spin_unlock(&ds_queue->split_queue_lock); |
Yang Shi | 504e070 | 2021-06-15 18:24:07 -0700 | [diff] [blame] | 3555 | fail: |
| 3556 | if (mapping) |
Matthew Wilcox (Oracle) | 6b24ca4 | 2020-06-27 22:19:08 -0400 | [diff] [blame] | 3557 | xas_unlock(&xas); |
Alex Shi | b676983 | 2020-12-15 12:33:33 -0800 | [diff] [blame] | 3558 | local_irq_enable(); |
Yu Zhao | b1f2020 | 2024-08-30 11:03:36 +0100 | [diff] [blame] | 3559 | remap_page(folio, folio_nr_pages(folio), 0); |
Baolin Wang | fd4a7ac | 2022-10-24 16:34:22 +0800 | [diff] [blame] | 3560 | ret = -EAGAIN; |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3561 | } |
| 3562 | |
| 3563 | out_unlock: |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 3564 | if (anon_vma) { |
| 3565 | anon_vma_unlock_write(anon_vma); |
| 3566 | put_anon_vma(anon_vma); |
| 3567 | } |
| 3568 | if (mapping) |
| 3569 | i_mmap_unlock_read(mapping); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3570 | out: |
Matthew Wilcox (Oracle) | 69a37a8 | 2022-06-08 15:18:34 -0400 | [diff] [blame] | 3571 | xas_destroy(&xas); |
Lance Yang | f216c84 | 2024-06-28 21:07:49 +0800 | [diff] [blame] | 3572 | if (order == HPAGE_PMD_ORDER) |
Baolin Wang | 835c3a2 | 2024-03-29 14:59:33 +0800 | [diff] [blame] | 3573 | count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); |
Lance Yang | f216c84 | 2024-06-28 21:07:49 +0800 | [diff] [blame] | 3574 | count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3575 | return ret; |
| 3576 | } |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3577 | |
Luis Chamberlain | e220917 | 2024-08-22 15:50:12 +0200 | [diff] [blame] | 3578 | int min_order_for_split(struct folio *folio) |
| 3579 | { |
| 3580 | if (folio_test_anon(folio)) |
| 3581 | return 0; |
| 3582 | |
| 3583 | if (!folio->mapping) { |
| 3584 | if (folio_test_pmd_mappable(folio)) |
| 3585 | count_vm_event(THP_SPLIT_PAGE_FAILED); |
| 3586 | return -EBUSY; |
| 3587 | } |
| 3588 | |
| 3589 | return mapping_min_folio_order(folio->mapping); |
| 3590 | } |
| 3591 | |
| 3592 | int split_folio_to_list(struct folio *folio, struct list_head *list) |
| 3593 | { |
| 3594 | int ret = min_order_for_split(folio); |
| 3595 | |
| 3596 | if (ret < 0) |
| 3597 | return ret; |
| 3598 | |
| 3599 | return split_huge_page_to_list_to_order(&folio->page, list, ret); |
| 3600 | } |
| 3601 | |
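/*
 * Example (a minimal sketch, not a helper the callers above rely on): a
 * typical user of the split API holds a folio reference, honours the
 * mapping's minimum order via min_order_for_split(), takes the folio lock
 * around the split, and treats -EAGAIN/-EBUSY as "try again later". The
 * function name below is purely illustrative.
 */
static int __maybe_unused example_try_split_folio(struct folio *folio,
						  struct list_head *list)
{
	int order, ret;

	/* Nothing to do for small folios. */
	if (!folio_test_large(folio))
		return 0;

	order = min_order_for_split(folio);
	if (order < 0)
		return order;	/* e.g. -EBUSY: the folio was truncated */

	if (!folio_trylock(folio))
		return -EAGAIN;	/* contended; the caller may retry later */

	/* On success the caller's reference now pins the (smaller) head. */
	ret = split_huge_page_to_list_to_order(&folio->page, list, order);
	folio_unlock(folio);

	return ret;
}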
Kefeng Wang | 593a10d | 2024-05-21 21:03:15 +0800 | [diff] [blame] | 3602 | void __folio_undo_large_rmappable(struct folio *folio) |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3603 | { |
Matthew Wilcox (Oracle) | 8dc4a8f | 2023-08-16 16:11:52 +0100 | [diff] [blame] | 3604 | struct deferred_split *ds_queue; |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3605 | unsigned long flags; |
| 3606 | |
Matthew Wilcox (Oracle) | 8dc4a8f | 2023-08-16 16:11:52 +0100 | [diff] [blame] | 3607 | ds_queue = get_deferred_split_queue(folio); |
| 3608 | spin_lock_irqsave(&ds_queue->split_queue_lock, flags); |
| 3609 | if (!list_empty(&folio->_deferred_list)) { |
| 3610 | ds_queue->split_queue_len--; |
Usama Arif | 8422acd | 2024-08-30 11:03:38 +0100 | [diff] [blame] | 3611 | if (folio_test_partially_mapped(folio)) { |
| 3612 | __folio_clear_partially_mapped(folio); |
| 3613 | mod_mthp_stat(folio_order(folio), |
| 3614 | MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); |
| 3615 | } |
Baolin Wang | 9bcef59 | 2023-12-20 14:51:40 +0800 | [diff] [blame] | 3616 | list_del_init(&folio->_deferred_list); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3617 | } |
Matthew Wilcox (Oracle) | 8dc4a8f | 2023-08-16 16:11:52 +0100 | [diff] [blame] | 3618 | spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3619 | } |
| 3620 | |
Usama Arif | 8422acd | 2024-08-30 11:03:38 +0100 | [diff] [blame] | 3621 | /* partially_mapped=false won't clear PG_partially_mapped folio flag */ |
| 3622 | void deferred_split_folio(struct folio *folio, bool partially_mapped) |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3623 | { |
Matthew Wilcox (Oracle) | f8baa6b | 2023-01-11 14:29:12 +0000 | [diff] [blame] | 3624 | struct deferred_split *ds_queue = get_deferred_split_queue(folio); |
Yang Shi | 87eaceb | 2019-09-23 15:38:15 -0700 | [diff] [blame] | 3625 | #ifdef CONFIG_MEMCG |
Matthew Wilcox (Oracle) | 8991de9 | 2023-01-11 14:29:11 +0000 | [diff] [blame] | 3626 | struct mem_cgroup *memcg = folio_memcg(folio); |
Yang Shi | 87eaceb | 2019-09-23 15:38:15 -0700 | [diff] [blame] | 3627 | #endif |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3628 | unsigned long flags; |
| 3629 | |
Matthew Wilcox (Oracle) | 8897277 | 2024-02-26 15:55:28 -0500 | [diff] [blame] | 3630 | /* |
| 3631 | * Order 1 folios have no space for a deferred list, but we also |
| 3632 | * won't waste much memory by not adding them to the deferred list. |
| 3633 | */ |
| 3634 | if (folio_order(folio) <= 1) |
| 3635 | return; |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3636 | |
Usama Arif | 81d3ff3 | 2024-08-30 11:03:40 +0100 | [diff] [blame] | 3637 | if (!partially_mapped && !split_underused_thp) |
| 3638 | return; |
| 3639 | |
Yang Shi | 87eaceb | 2019-09-23 15:38:15 -0700 | [diff] [blame] | 3640 | /* |
 | 3641 | * The try_to_unmap() in the page reclaim path might reach here too;
 | 3642 | * this may cause a race condition that corrupts the deferred split queue.
Matthew Wilcox (Oracle) | 8991de9 | 2023-01-11 14:29:11 +0000 | [diff] [blame] | 3643 | * And, if page reclaim is already handling the same folio, it is
Yang Shi | 87eaceb | 2019-09-23 15:38:15 -0700 | [diff] [blame] | 3644 | * unnecessary to handle it again in the shrinker.
| 3645 | * |
Matthew Wilcox (Oracle) | 8991de9 | 2023-01-11 14:29:11 +0000 | [diff] [blame] | 3646 | * Check the swapcache flag to determine if the folio is being |
| 3647 | * handled by page reclaim since THP swap would add the folio into |
Yang Shi | 87eaceb | 2019-09-23 15:38:15 -0700 | [diff] [blame] | 3648 | * swap cache before calling try_to_unmap(). |
| 3649 | */ |
Matthew Wilcox (Oracle) | 8991de9 | 2023-01-11 14:29:11 +0000 | [diff] [blame] | 3650 | if (folio_test_swapcache(folio)) |
Yang Shi | 87eaceb | 2019-09-23 15:38:15 -0700 | [diff] [blame] | 3651 | return; |
| 3652 | |
Yang Shi | 364c1ee | 2019-09-23 15:38:06 -0700 | [diff] [blame] | 3653 | spin_lock_irqsave(&ds_queue->split_queue_lock, flags); |
Usama Arif | 8422acd | 2024-08-30 11:03:38 +0100 | [diff] [blame] | 3654 | if (partially_mapped) { |
| 3655 | if (!folio_test_partially_mapped(folio)) { |
| 3656 | __folio_set_partially_mapped(folio); |
| 3657 | if (folio_test_pmd_mappable(folio)) |
| 3658 | count_vm_event(THP_DEFERRED_SPLIT_PAGE); |
| 3659 | count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED); |
| 3660 | mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1); |
| 3661 | |
| 3662 | } |
| 3663 | } else { |
| 3664 | /* partially mapped folios cannot become non-partially mapped */ |
| 3665 | VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio); |
| 3666 | } |
Matthew Wilcox (Oracle) | 8991de9 | 2023-01-11 14:29:11 +0000 | [diff] [blame] | 3667 | if (list_empty(&folio->_deferred_list)) { |
Matthew Wilcox (Oracle) | 8991de9 | 2023-01-11 14:29:11 +0000 | [diff] [blame] | 3668 | list_add_tail(&folio->_deferred_list, &ds_queue->split_queue); |
Yang Shi | 364c1ee | 2019-09-23 15:38:06 -0700 | [diff] [blame] | 3669 | ds_queue->split_queue_len++; |
Yang Shi | 87eaceb | 2019-09-23 15:38:15 -0700 | [diff] [blame] | 3670 | #ifdef CONFIG_MEMCG |
| 3671 | if (memcg) |
Matthew Wilcox (Oracle) | 8991de9 | 2023-01-11 14:29:11 +0000 | [diff] [blame] | 3672 | set_shrinker_bit(memcg, folio_nid(folio), |
Qi Zheng | 54d9172 | 2023-09-11 17:44:16 +0800 | [diff] [blame] | 3673 | deferred_split_shrinker->id); |
Yang Shi | 87eaceb | 2019-09-23 15:38:15 -0700 | [diff] [blame] | 3674 | #endif |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3675 | } |
Yang Shi | 364c1ee | 2019-09-23 15:38:06 -0700 | [diff] [blame] | 3676 | spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3677 | } |
| 3678 | |
| 3679 | static unsigned long deferred_split_count(struct shrinker *shrink, |
| 3680 | struct shrink_control *sc) |
| 3681 | { |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 3682 | struct pglist_data *pgdata = NODE_DATA(sc->nid); |
Yang Shi | 364c1ee | 2019-09-23 15:38:06 -0700 | [diff] [blame] | 3683 | struct deferred_split *ds_queue = &pgdata->deferred_split_queue; |
Yang Shi | 87eaceb | 2019-09-23 15:38:15 -0700 | [diff] [blame] | 3684 | |
| 3685 | #ifdef CONFIG_MEMCG |
| 3686 | if (sc->memcg) |
| 3687 | ds_queue = &sc->memcg->deferred_split_queue; |
| 3688 | #endif |
Yang Shi | 364c1ee | 2019-09-23 15:38:06 -0700 | [diff] [blame] | 3689 | return READ_ONCE(ds_queue->split_queue_len); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3690 | } |
| 3691 | |
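/*
 * A worked example of the thresholds below, assuming HPAGE_PMD_NR == 512
 * (4K base pages): with khugepaged_max_ptes_none at its maximum of 511 the
 * scan is skipped entirely; with a hypothetical value of 256, the folio is
 * declared underused once 257 zero-filled subpages have been seen, and
 * declared used once 256 non-zero subpages have been seen.
 */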
Usama Arif | dafff3f | 2024-08-30 11:03:39 +0100 | [diff] [blame] | 3692 | static bool thp_underused(struct folio *folio) |
| 3693 | { |
| 3694 | int num_zero_pages = 0, num_filled_pages = 0; |
| 3695 | void *kaddr; |
| 3696 | int i; |
| 3697 | |
| 3698 | if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1) |
| 3699 | return false; |
| 3700 | |
| 3701 | for (i = 0; i < folio_nr_pages(folio); i++) { |
| 3702 | kaddr = kmap_local_folio(folio, i * PAGE_SIZE); |
| 3703 | if (!memchr_inv(kaddr, 0, PAGE_SIZE)) { |
| 3704 | num_zero_pages++; |
| 3705 | if (num_zero_pages > khugepaged_max_ptes_none) { |
| 3706 | kunmap_local(kaddr); |
| 3707 | return true; |
| 3708 | } |
| 3709 | } else { |
| 3710 | /* |
| 3711 | * Another path for early exit once the number |
 | 3712 | * of non-zero-filled pages exceeds the threshold.
| 3713 | */ |
| 3714 | num_filled_pages++; |
| 3715 | if (num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none) { |
| 3716 | kunmap_local(kaddr); |
| 3717 | return false; |
| 3718 | } |
| 3719 | } |
| 3720 | kunmap_local(kaddr); |
| 3721 | } |
| 3722 | return false; |
| 3723 | } |
| 3724 | |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3725 | static unsigned long deferred_split_scan(struct shrinker *shrink, |
| 3726 | struct shrink_control *sc) |
| 3727 | { |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 3728 | struct pglist_data *pgdata = NODE_DATA(sc->nid); |
Yang Shi | 364c1ee | 2019-09-23 15:38:06 -0700 | [diff] [blame] | 3729 | struct deferred_split *ds_queue = &pgdata->deferred_split_queue; |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3730 | unsigned long flags; |
Matthew Wilcox (Oracle) | 4375a55 | 2023-01-11 14:29:10 +0000 | [diff] [blame] | 3731 | LIST_HEAD(list); |
| 3732 | struct folio *folio, *next; |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3733 | int split = 0; |
| 3734 | |
Yang Shi | 87eaceb | 2019-09-23 15:38:15 -0700 | [diff] [blame] | 3735 | #ifdef CONFIG_MEMCG |
| 3736 | if (sc->memcg) |
| 3737 | ds_queue = &sc->memcg->deferred_split_queue; |
| 3738 | #endif |
| 3739 | |
Yang Shi | 364c1ee | 2019-09-23 15:38:06 -0700 | [diff] [blame] | 3740 | spin_lock_irqsave(&ds_queue->split_queue_lock, flags); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3741 | /* Take pin on all head pages to avoid freeing them under us */ |
Matthew Wilcox (Oracle) | 4375a55 | 2023-01-11 14:29:10 +0000 | [diff] [blame] | 3742 | list_for_each_entry_safe(folio, next, &ds_queue->split_queue, |
| 3743 | _deferred_list) { |
| 3744 | if (folio_try_get(folio)) { |
| 3745 | list_move(&folio->_deferred_list, &list); |
Kirill A. Shutemov | e3ae195 | 2016-02-02 16:57:15 -0800 | [diff] [blame] | 3746 | } else { |
Matthew Wilcox (Oracle) | 4375a55 | 2023-01-11 14:29:10 +0000 | [diff] [blame] | 3747 | /* We lost race with folio_put() */ |
Usama Arif | 8422acd | 2024-08-30 11:03:38 +0100 | [diff] [blame] | 3748 | if (folio_test_partially_mapped(folio)) { |
| 3749 | __folio_clear_partially_mapped(folio); |
| 3750 | mod_mthp_stat(folio_order(folio), |
| 3751 | MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); |
| 3752 | } |
Matthew Wilcox (Oracle) | 4375a55 | 2023-01-11 14:29:10 +0000 | [diff] [blame] | 3753 | list_del_init(&folio->_deferred_list); |
Yang Shi | 364c1ee | 2019-09-23 15:38:06 -0700 | [diff] [blame] | 3754 | ds_queue->split_queue_len--; |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3755 | } |
Kirill A. Shutemov | e3ae195 | 2016-02-02 16:57:15 -0800 | [diff] [blame] | 3756 | if (!--sc->nr_to_scan) |
| 3757 | break; |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3758 | } |
Yang Shi | 364c1ee | 2019-09-23 15:38:06 -0700 | [diff] [blame] | 3759 | spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3760 | |
Matthew Wilcox (Oracle) | 4375a55 | 2023-01-11 14:29:10 +0000 | [diff] [blame] | 3761 | list_for_each_entry_safe(folio, next, &list, _deferred_list) { |
Usama Arif | dafff3f | 2024-08-30 11:03:39 +0100 | [diff] [blame] | 3762 | bool did_split = false; |
| 3763 | bool underused = false; |
| 3764 | |
| 3765 | if (!folio_test_partially_mapped(folio)) { |
| 3766 | underused = thp_underused(folio); |
| 3767 | if (!underused) |
| 3768 | goto next; |
| 3769 | } |
Matthew Wilcox (Oracle) | 4375a55 | 2023-01-11 14:29:10 +0000 | [diff] [blame] | 3770 | if (!folio_trylock(folio)) |
Kirill A. Shutemov | fa41b90 | 2018-03-22 16:17:31 -0700 | [diff] [blame] | 3771 | goto next; |
Usama Arif | dafff3f | 2024-08-30 11:03:39 +0100 | [diff] [blame] | 3772 | if (!split_folio(folio)) { |
| 3773 | did_split = true; |
| 3774 | if (underused) |
| 3775 | count_vm_event(THP_UNDERUSED_SPLIT_PAGE); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3776 | split++; |
Usama Arif | dafff3f | 2024-08-30 11:03:39 +0100 | [diff] [blame] | 3777 | } |
Matthew Wilcox (Oracle) | 4375a55 | 2023-01-11 14:29:10 +0000 | [diff] [blame] | 3778 | folio_unlock(folio); |
Kirill A. Shutemov | fa41b90 | 2018-03-22 16:17:31 -0700 | [diff] [blame] | 3779 | next: |
Usama Arif | dafff3f | 2024-08-30 11:03:39 +0100 | [diff] [blame] | 3780 | /* |
 | 3781 | * split_folio() removes the folio from the list on success.
 | 3782 | * Only add the folio back to the queue if it is partially mapped.
 | 3783 | * If thp_underused() returned false, or if split_folio() failed on a
 | 3784 | * folio that was underused, consider the folio used and do not add
 | 3785 | * it back to the split queue.
| 3786 | */ |
| 3787 | if (!did_split && !folio_test_partially_mapped(folio)) { |
| 3788 | list_del_init(&folio->_deferred_list); |
| 3789 | ds_queue->split_queue_len--; |
| 3790 | } |
Matthew Wilcox (Oracle) | 4375a55 | 2023-01-11 14:29:10 +0000 | [diff] [blame] | 3791 | folio_put(folio); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3792 | } |
| 3793 | |
Yang Shi | 364c1ee | 2019-09-23 15:38:06 -0700 | [diff] [blame] | 3794 | spin_lock_irqsave(&ds_queue->split_queue_lock, flags); |
| 3795 | list_splice_tail(&list, &ds_queue->split_queue); |
| 3796 | spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3797 | |
Kirill A. Shutemov | cb8d68e | 2016-02-02 16:57:12 -0800 | [diff] [blame] | 3798 | /* |
| 3799 | * Stop shrinker if we didn't split any page, but the queue is empty. |
| 3800 | * This can happen if pages were freed under us. |
| 3801 | */ |
Yang Shi | 364c1ee | 2019-09-23 15:38:06 -0700 | [diff] [blame] | 3802 | if (!split && list_empty(&ds_queue->split_queue)) |
Kirill A. Shutemov | cb8d68e | 2016-02-02 16:57:12 -0800 | [diff] [blame] | 3803 | return SHRINK_STOP; |
| 3804 | return split; |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3805 | } |
| 3806 | |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 3807 | #ifdef CONFIG_DEBUG_FS |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3808 | static void split_huge_pages_all(void) |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 3809 | { |
| 3810 | struct zone *zone; |
| 3811 | struct page *page; |
Kefeng Wang | 630e7c5 | 2022-12-29 20:25:03 +0800 | [diff] [blame] | 3812 | struct folio *folio; |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 3813 | unsigned long pfn, max_zone_pfn; |
| 3814 | unsigned long total = 0, split = 0; |
| 3815 | |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3816 | pr_debug("Split all THPs\n"); |
Miaohe Lin | a17206d | 2022-07-04 21:21:57 +0800 | [diff] [blame] | 3817 | for_each_zone(zone) { |
| 3818 | if (!managed_zone(zone)) |
| 3819 | continue; |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 3820 | max_zone_pfn = zone_end_pfn(zone); |
| 3821 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { |
Miaohe Lin | a17206d | 2022-07-04 21:21:57 +0800 | [diff] [blame] | 3822 | int nr_pages; |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 3823 | |
Naoya Horiguchi | 2b7aa91 | 2022-09-08 13:11:50 +0900 | [diff] [blame] | 3824 | page = pfn_to_online_page(pfn); |
Kefeng Wang | 630e7c5 | 2022-12-29 20:25:03 +0800 | [diff] [blame] | 3825 | if (!page || PageTail(page)) |
| 3826 | continue; |
| 3827 | folio = page_folio(page); |
| 3828 | if (!folio_try_get(folio)) |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 3829 | continue; |
| 3830 | |
Kefeng Wang | 630e7c5 | 2022-12-29 20:25:03 +0800 | [diff] [blame] | 3831 | if (unlikely(page_folio(page) != folio)) |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 3832 | goto next; |
| 3833 | |
Kefeng Wang | 630e7c5 | 2022-12-29 20:25:03 +0800 | [diff] [blame] | 3834 | if (zone != folio_zone(folio)) |
| 3835 | goto next; |
| 3836 | |
| 3837 | if (!folio_test_large(folio) |
| 3838 | || folio_test_hugetlb(folio) |
| 3839 | || !folio_test_lru(folio)) |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 3840 | goto next; |
| 3841 | |
| 3842 | total++; |
Kefeng Wang | 630e7c5 | 2022-12-29 20:25:03 +0800 | [diff] [blame] | 3843 | folio_lock(folio); |
| 3844 | nr_pages = folio_nr_pages(folio); |
| 3845 | if (!split_folio(folio)) |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 3846 | split++; |
Miaohe Lin | a17206d | 2022-07-04 21:21:57 +0800 | [diff] [blame] | 3847 | pfn += nr_pages - 1; |
Kefeng Wang | 630e7c5 | 2022-12-29 20:25:03 +0800 | [diff] [blame] | 3848 | folio_unlock(folio); |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 3849 | next: |
Kefeng Wang | 630e7c5 | 2022-12-29 20:25:03 +0800 | [diff] [blame] | 3850 | folio_put(folio); |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3851 | cond_resched(); |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 3852 | } |
| 3853 | } |
| 3854 | |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3855 | pr_debug("%lu of %lu THP split\n", split, total); |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 3856 | } |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3857 | |
| 3858 | static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma) |
| 3859 | { |
| 3860 | return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) || |
| 3861 | is_vm_hugetlb_page(vma); |
| 3862 | } |
| 3863 | |
| 3864 | static int split_huge_pages_pid(int pid, unsigned long vaddr_start, |
Zi Yan | fc4d182 | 2024-02-26 15:55:34 -0500 | [diff] [blame] | 3865 | unsigned long vaddr_end, unsigned int new_order) |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3866 | { |
| 3867 | int ret = 0; |
| 3868 | struct task_struct *task; |
| 3869 | struct mm_struct *mm; |
| 3870 | unsigned long total = 0, split = 0; |
| 3871 | unsigned long addr; |
| 3872 | |
| 3873 | vaddr_start &= PAGE_MASK; |
| 3874 | vaddr_end &= PAGE_MASK; |
| 3875 | |
Nanyong Sun | e4bfc67 | 2024-09-05 23:30:28 +0800 | [diff] [blame] | 3876 | task = find_get_task_by_vpid(pid); |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3877 | if (!task) { |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3878 | ret = -ESRCH; |
| 3879 | goto out; |
| 3880 | } |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3881 | |
| 3882 | /* Find the mm_struct */ |
| 3883 | mm = get_task_mm(task); |
| 3884 | put_task_struct(task); |
| 3885 | |
| 3886 | if (!mm) { |
| 3887 | ret = -EINVAL; |
| 3888 | goto out; |
| 3889 | } |
| 3890 | |
| 3891 | pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n", |
| 3892 | pid, vaddr_start, vaddr_end); |
| 3893 | |
| 3894 | mmap_read_lock(mm); |
| 3895 | /* |
| 3896 | * always increase addr by PAGE_SIZE, since we could have a PTE page |
| 3897 | * table filled with PTE-mapped THPs, each of which is distinct. |
| 3898 | */ |
| 3899 | for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) { |
Miaohe Lin | 74ba2b3 | 2022-07-04 21:21:52 +0800 | [diff] [blame] | 3900 | struct vm_area_struct *vma = vma_lookup(mm, addr); |
David Hildenbrand | 8710f6e | 2024-08-02 17:55:20 +0200 | [diff] [blame] | 3901 | struct folio_walk fw; |
Matthew Wilcox (Oracle) | a644b0a | 2023-08-16 16:12:01 +0100 | [diff] [blame] | 3902 | struct folio *folio; |
Luis Chamberlain | e220917 | 2024-08-22 15:50:12 +0200 | [diff] [blame] | 3903 | struct address_space *mapping; |
| 3904 | unsigned int target_order = new_order; |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3905 | |
Miaohe Lin | 74ba2b3 | 2022-07-04 21:21:52 +0800 | [diff] [blame] | 3906 | if (!vma) |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3907 | break; |
| 3908 | |
| 3909 | /* skip special VMA and hugetlb VMA */ |
| 3910 | if (vma_not_suitable_for_thp_split(vma)) { |
| 3911 | addr = vma->vm_end; |
| 3912 | continue; |
| 3913 | } |
| 3914 | |
David Hildenbrand | 8710f6e | 2024-08-02 17:55:20 +0200 | [diff] [blame] | 3915 | folio = folio_walk_start(&fw, vma, addr, 0); |
| 3916 | if (!folio) |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3917 | continue; |
| 3918 | |
Matthew Wilcox (Oracle) | a644b0a | 2023-08-16 16:12:01 +0100 | [diff] [blame] | 3919 | if (!is_transparent_hugepage(folio)) |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3920 | goto next; |
| 3921 | |
Luis Chamberlain | e220917 | 2024-08-22 15:50:12 +0200 | [diff] [blame] | 3922 | if (!folio_test_anon(folio)) { |
| 3923 | mapping = folio->mapping; |
| 3924 | target_order = max(new_order, |
| 3925 | mapping_min_folio_order(mapping)); |
| 3926 | } |
| 3927 | |
| 3928 | if (target_order >= folio_order(folio)) |
Zi Yan | 2394aef | 2024-03-07 13:18:54 -0500 | [diff] [blame] | 3929 | goto next; |
| 3930 | |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3931 | total++; |
Zi Yan | fc4d182 | 2024-02-26 15:55:34 -0500 | [diff] [blame] | 3932 | /* |
 | 3933 | * For folios with private data, split_huge_page_to_list_to_order()
 | 3934 | * will try to drop it before the split and then check whether the
 | 3935 | * folio can be split. So skip the check here.
| 3936 | */ |
| 3937 | if (!folio_test_private(folio) && |
David Hildenbrand | 8710f6e | 2024-08-02 17:55:20 +0200 | [diff] [blame] | 3938 | !can_split_folio(folio, 0, NULL)) |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3939 | goto next; |
| 3940 | |
Matthew Wilcox (Oracle) | a644b0a | 2023-08-16 16:12:01 +0100 | [diff] [blame] | 3941 | if (!folio_trylock(folio)) |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3942 | goto next; |
David Hildenbrand | 8710f6e | 2024-08-02 17:55:20 +0200 | [diff] [blame] | 3943 | folio_get(folio); |
| 3944 | folio_walk_end(&fw, vma); |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3945 | |
Luis Chamberlain | e220917 | 2024-08-22 15:50:12 +0200 | [diff] [blame] | 3946 | if (!folio_test_anon(folio) && folio->mapping != mapping) |
| 3947 | goto unlock; |
| 3948 | |
| 3949 | if (!split_folio_to_order(folio, target_order)) |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3950 | split++; |
| 3951 | |
Luis Chamberlain | e220917 | 2024-08-22 15:50:12 +0200 | [diff] [blame] | 3952 | unlock: |
| 3953 | |
Matthew Wilcox (Oracle) | a644b0a | 2023-08-16 16:12:01 +0100 | [diff] [blame] | 3954 | folio_unlock(folio); |
Matthew Wilcox (Oracle) | a644b0a | 2023-08-16 16:12:01 +0100 | [diff] [blame] | 3955 | folio_put(folio); |
David Hildenbrand | 8710f6e | 2024-08-02 17:55:20 +0200 | [diff] [blame] | 3956 | |
| 3957 | cond_resched(); |
| 3958 | continue; |
| 3959 | next: |
| 3960 | folio_walk_end(&fw, vma); |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 3961 | cond_resched(); |
| 3962 | } |
| 3963 | mmap_read_unlock(mm); |
| 3964 | mmput(mm); |
| 3965 | |
| 3966 | pr_debug("%lu of %lu THP split\n", split, total); |
| 3967 | |
| 3968 | out: |
| 3969 | return ret; |
| 3970 | } |
| 3971 | |
Zi Yan | fbe3750 | 2021-05-04 18:34:26 -0700 | [diff] [blame] | 3972 | static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start, |
Zi Yan | fc4d182 | 2024-02-26 15:55:34 -0500 | [diff] [blame] | 3973 | pgoff_t off_end, unsigned int new_order) |
Zi Yan | fbe3750 | 2021-05-04 18:34:26 -0700 | [diff] [blame] | 3974 | { |
| 3975 | struct filename *file; |
| 3976 | struct file *candidate; |
| 3977 | struct address_space *mapping; |
| 3978 | int ret = -EINVAL; |
| 3979 | pgoff_t index; |
| 3980 | int nr_pages = 1; |
| 3981 | unsigned long total = 0, split = 0; |
Luis Chamberlain | e220917 | 2024-08-22 15:50:12 +0200 | [diff] [blame] | 3982 | unsigned int min_order; |
| 3983 | unsigned int target_order; |
Zi Yan | fbe3750 | 2021-05-04 18:34:26 -0700 | [diff] [blame] | 3984 | |
| 3985 | file = getname_kernel(file_path); |
| 3986 | if (IS_ERR(file)) |
| 3987 | return ret; |
| 3988 | |
| 3989 | candidate = file_open_name(file, O_RDONLY, 0); |
| 3990 | if (IS_ERR(candidate)) |
| 3991 | goto out; |
| 3992 | |
| 3993 | pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n", |
| 3994 | file_path, off_start, off_end); |
| 3995 | |
| 3996 | mapping = candidate->f_mapping; |
Luis Chamberlain | e220917 | 2024-08-22 15:50:12 +0200 | [diff] [blame] | 3997 | min_order = mapping_min_folio_order(mapping); |
| 3998 | target_order = max(new_order, min_order); |
Zi Yan | fbe3750 | 2021-05-04 18:34:26 -0700 | [diff] [blame] | 3999 | |
| 4000 | for (index = off_start; index < off_end; index += nr_pages) { |
Christoph Hellwig | 1fb130b | 2023-03-07 15:34:04 +0100 | [diff] [blame] | 4001 | struct folio *folio = filemap_get_folio(mapping, index); |
Zi Yan | fbe3750 | 2021-05-04 18:34:26 -0700 | [diff] [blame] | 4002 | |
| 4003 | nr_pages = 1; |
Christoph Hellwig | 66dabbb | 2023-03-07 15:34:10 +0100 | [diff] [blame] | 4004 | if (IS_ERR(folio)) |
Zi Yan | fbe3750 | 2021-05-04 18:34:26 -0700 | [diff] [blame] | 4005 | continue; |
| 4006 | |
Matthew Wilcox (Oracle) | 9ee2c08 | 2022-10-19 19:33:29 +0100 | [diff] [blame] | 4007 | if (!folio_test_large(folio)) |
Zi Yan | fbe3750 | 2021-05-04 18:34:26 -0700 | [diff] [blame] | 4008 | goto next; |
| 4009 | |
| 4010 | total++; |
Matthew Wilcox (Oracle) | 9ee2c08 | 2022-10-19 19:33:29 +0100 | [diff] [blame] | 4011 | nr_pages = folio_nr_pages(folio); |
Zi Yan | fbe3750 | 2021-05-04 18:34:26 -0700 | [diff] [blame] | 4012 | |
Luis Chamberlain | e220917 | 2024-08-22 15:50:12 +0200 | [diff] [blame] | 4013 | if (target_order >= folio_order(folio)) |
Zi Yan | 2394aef | 2024-03-07 13:18:54 -0500 | [diff] [blame] | 4014 | goto next; |
| 4015 | |
Matthew Wilcox (Oracle) | 9ee2c08 | 2022-10-19 19:33:29 +0100 | [diff] [blame] | 4016 | if (!folio_trylock(folio)) |
Zi Yan | fbe3750 | 2021-05-04 18:34:26 -0700 | [diff] [blame] | 4017 | goto next; |
| 4018 | |
Luis Chamberlain | e220917 | 2024-08-22 15:50:12 +0200 | [diff] [blame] | 4019 | if (folio->mapping != mapping) |
| 4020 | goto unlock; |
| 4021 | |
| 4022 | if (!split_folio_to_order(folio, target_order)) |
Zi Yan | fbe3750 | 2021-05-04 18:34:26 -0700 | [diff] [blame] | 4023 | split++; |
| 4024 | |
Luis Chamberlain | e220917 | 2024-08-22 15:50:12 +0200 | [diff] [blame] | 4025 | unlock: |
Matthew Wilcox (Oracle) | 9ee2c08 | 2022-10-19 19:33:29 +0100 | [diff] [blame] | 4026 | folio_unlock(folio); |
Zi Yan | fbe3750 | 2021-05-04 18:34:26 -0700 | [diff] [blame] | 4027 | next: |
Matthew Wilcox (Oracle) | 9ee2c08 | 2022-10-19 19:33:29 +0100 | [diff] [blame] | 4028 | folio_put(folio); |
Zi Yan | fbe3750 | 2021-05-04 18:34:26 -0700 | [diff] [blame] | 4029 | cond_resched(); |
| 4030 | } |
| 4031 | |
| 4032 | filp_close(candidate, NULL); |
| 4033 | ret = 0; |
| 4034 | |
| 4035 | pr_debug("%lu of %lu file-backed THP split\n", split, total); |
| 4036 | out: |
| 4037 | putname(file); |
| 4038 | return ret; |
| 4039 | } |
| 4040 | |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 4041 | #define MAX_INPUT_BUF_SZ 255 |
| 4042 | |
| 4043 | static ssize_t split_huge_pages_write(struct file *file, const char __user *buf, |
| 4044 | size_t count, loff_t *ppops) |
| 4045 | { |
| 4046 | static DEFINE_MUTEX(split_debug_mutex); |
| 4047 | ssize_t ret; |
Zi Yan | fc4d182 | 2024-02-26 15:55:34 -0500 | [diff] [blame] | 4048 | /* |
| 4049 | * hold pid, start_vaddr, end_vaddr, new_order or |
| 4050 | * file_path, off_start, off_end, new_order |
| 4051 | */ |
Zi Yan | fbe3750 | 2021-05-04 18:34:26 -0700 | [diff] [blame] | 4052 | char input_buf[MAX_INPUT_BUF_SZ]; |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 4053 | int pid; |
| 4054 | unsigned long vaddr_start, vaddr_end; |
Zi Yan | fc4d182 | 2024-02-26 15:55:34 -0500 | [diff] [blame] | 4055 | unsigned int new_order = 0; |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 4056 | |
| 4057 | ret = mutex_lock_interruptible(&split_debug_mutex); |
| 4058 | if (ret) |
| 4059 | return ret; |
| 4060 | |
| 4061 | ret = -EFAULT; |
| 4062 | |
| 4063 | memset(input_buf, 0, MAX_INPUT_BUF_SZ); |
| 4064 | if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ))) |
| 4065 | goto out; |
| 4066 | |
| 4067 | input_buf[MAX_INPUT_BUF_SZ - 1] = '\0'; |
Zi Yan | fbe3750 | 2021-05-04 18:34:26 -0700 | [diff] [blame] | 4068 | |
| 4069 | if (input_buf[0] == '/') { |
| 4070 | char *tok; |
| 4071 | char *buf = input_buf; |
| 4072 | char file_path[MAX_INPUT_BUF_SZ]; |
| 4073 | pgoff_t off_start = 0, off_end = 0; |
| 4074 | size_t input_len = strlen(input_buf); |
| 4075 | |
| 4076 | tok = strsep(&buf, ","); |
| 4077 | if (tok) { |
Matthew Wilcox (Oracle) | 1212e00 | 2021-06-30 18:52:11 -0700 | [diff] [blame] | 4078 | strcpy(file_path, tok); |
Zi Yan | fbe3750 | 2021-05-04 18:34:26 -0700 | [diff] [blame] | 4079 | } else { |
| 4080 | ret = -EINVAL; |
| 4081 | goto out; |
| 4082 | } |
| 4083 | |
Zi Yan | fc4d182 | 2024-02-26 15:55:34 -0500 | [diff] [blame] | 4084 | ret = sscanf(buf, "0x%lx,0x%lx,%d", &off_start, &off_end, &new_order); |
| 4085 | if (ret != 2 && ret != 3) { |
Zi Yan | fbe3750 | 2021-05-04 18:34:26 -0700 | [diff] [blame] | 4086 | ret = -EINVAL; |
| 4087 | goto out; |
| 4088 | } |
Zi Yan | fc4d182 | 2024-02-26 15:55:34 -0500 | [diff] [blame] | 4089 | ret = split_huge_pages_in_file(file_path, off_start, off_end, new_order); |
Zi Yan | fbe3750 | 2021-05-04 18:34:26 -0700 | [diff] [blame] | 4090 | if (!ret) |
| 4091 | ret = input_len; |
| 4092 | |
| 4093 | goto out; |
| 4094 | } |
| 4095 | |
Zi Yan | fc4d182 | 2024-02-26 15:55:34 -0500 | [diff] [blame] | 4096 | ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d", &pid, &vaddr_start, &vaddr_end, &new_order); |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 4097 | if (ret == 1 && pid == 1) { |
| 4098 | split_huge_pages_all(); |
| 4099 | ret = strlen(input_buf); |
| 4100 | goto out; |
Zi Yan | fc4d182 | 2024-02-26 15:55:34 -0500 | [diff] [blame] | 4101 | } else if (ret != 3 && ret != 4) { |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 4102 | ret = -EINVAL; |
| 4103 | goto out; |
| 4104 | } |
| 4105 | |
Zi Yan | fc4d182 | 2024-02-26 15:55:34 -0500 | [diff] [blame] | 4106 | ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order); |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 4107 | if (!ret) |
| 4108 | ret = strlen(input_buf); |
| 4109 | out: |
| 4110 | mutex_unlock(&split_debug_mutex); |
| 4111 | return ret; |
| 4112 | |
| 4113 | } |
| 4114 | |
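/*
 * Example usage of the debugfs interface below, assuming debugfs is mounted
 * at /sys/kernel/debug:
 *
 *   echo 1 > /sys/kernel/debug/split_huge_pages
 *	split every THP in the system (split_huge_pages_all())
 *
 *   echo "<pid>,0x<vaddr_start>,0x<vaddr_end>[,<new_order>]" > split_huge_pages
 *	split THPs mapped into that virtual address range of <pid>
 *
 *   echo "/path/to/file,0x<off_start>,0x<off_end>[,<new_order>]" > split_huge_pages
 *	split file-backed THPs within that page-offset range
 *
 * <new_order> is optional and defaults to 0; for file-backed folios it is
 * raised to at least the mapping's minimum folio order.
 */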
| 4115 | static const struct file_operations split_huge_pages_fops = { |
| 4116 | .owner = THIS_MODULE, |
| 4117 | .write = split_huge_pages_write, |
Zi Yan | fa6c023 | 2021-05-04 18:34:23 -0700 | [diff] [blame] | 4118 | }; |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 4119 | |
| 4120 | static int __init split_huge_pages_debugfs(void) |
| 4121 | { |
Greg Kroah-Hartman | d9f7979 | 2019-03-05 15:46:09 -0800 | [diff] [blame] | 4122 | debugfs_create_file("split_huge_pages", 0200, NULL, NULL, |
| 4123 | &split_huge_pages_fops); |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 4124 | return 0; |
| 4125 | } |
| 4126 | late_initcall(split_huge_pages_debugfs); |
| 4127 | #endif |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 4128 | |
| 4129 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION |
David Hildenbrand | 7f5abe6 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 4130 | int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 4131 | struct page *page) |
| 4132 | { |
David Hildenbrand | a8e61d5 | 2023-12-20 23:44:49 +0100 | [diff] [blame] | 4133 | struct folio *folio = page_folio(page); |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 4134 | struct vm_area_struct *vma = pvmw->vma; |
| 4135 | struct mm_struct *mm = vma->vm_mm; |
| 4136 | unsigned long address = pvmw->address; |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 4137 | bool anon_exclusive; |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 4138 | pmd_t pmdval; |
| 4139 | swp_entry_t entry; |
Naoya Horiguchi | ab6e3d0 | 2017-09-08 16:11:04 -0700 | [diff] [blame] | 4140 | pmd_t pmdswp; |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 4141 | |
| 4142 | if (!(pvmw->pmd && !pvmw->pte)) |
David Hildenbrand | 7f5abe6 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 4143 | return 0; |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 4144 | |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 4145 | flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); |
Huang Ying | 8a8683a | 2020-03-05 22:28:29 -0800 | [diff] [blame] | 4146 | pmdval = pmdp_invalidate(vma, address, pvmw->pmd); |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 4147 | |
David Hildenbrand | e3b4b13 | 2023-12-20 23:45:02 +0100 | [diff] [blame] | 4148 | /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */ |
David Hildenbrand | a8e61d5 | 2023-12-20 23:44:49 +0100 | [diff] [blame] | 4149 | anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page); |
David Hildenbrand | e3b4b13 | 2023-12-20 23:45:02 +0100 | [diff] [blame] | 4150 | if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) { |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 4151 | set_pmd_at(mm, address, pvmw->pmd, pmdval); |
David Hildenbrand | 7f5abe6 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 4152 | return -EBUSY; |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 4153 | } |
| 4154 | |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 4155 | if (pmd_dirty(pmdval)) |
David Hildenbrand | db44c65 | 2024-01-22 18:54:07 +0100 | [diff] [blame] | 4156 | folio_mark_dirty(folio); |
Alistair Popple | 4dd845b | 2021-06-30 18:54:09 -0700 | [diff] [blame] | 4157 | if (pmd_write(pmdval)) |
| 4158 | entry = make_writable_migration_entry(page_to_pfn(page)); |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 4159 | else if (anon_exclusive) |
| 4160 | entry = make_readable_exclusive_migration_entry(page_to_pfn(page)); |
Alistair Popple | 4dd845b | 2021-06-30 18:54:09 -0700 | [diff] [blame] | 4161 | else |
| 4162 | entry = make_readable_migration_entry(page_to_pfn(page)); |
Peter Xu | 2e34687 | 2022-08-11 12:13:29 -0400 | [diff] [blame] | 4163 | if (pmd_young(pmdval)) |
| 4164 | entry = make_migration_entry_young(entry); |
| 4165 | if (pmd_dirty(pmdval)) |
| 4166 | entry = make_migration_entry_dirty(entry); |
Naoya Horiguchi | ab6e3d0 | 2017-09-08 16:11:04 -0700 | [diff] [blame] | 4167 | pmdswp = swp_entry_to_pmd(entry); |
| 4168 | if (pmd_soft_dirty(pmdval)) |
| 4169 | pmdswp = pmd_swp_mksoft_dirty(pmdswp); |
David Hildenbrand | 24bf08c | 2023-04-05 18:02:35 +0200 | [diff] [blame] | 4170 | if (pmd_uffd_wp(pmdval)) |
| 4171 | pmdswp = pmd_swp_mkuffd_wp(pmdswp); |
Naoya Horiguchi | ab6e3d0 | 2017-09-08 16:11:04 -0700 | [diff] [blame] | 4172 | set_pmd_at(mm, address, pvmw->pmd, pmdswp); |
David Hildenbrand | a8e61d5 | 2023-12-20 23:44:49 +0100 | [diff] [blame] | 4173 | folio_remove_rmap_pmd(folio, page, vma); |
| 4174 | folio_put(folio); |
Anshuman Khandual | 283fd6f | 2022-03-24 18:09:58 -0700 | [diff] [blame] | 4175 | trace_set_migration_pmd(address, pmd_val(pmdswp)); |
David Hildenbrand | 7f5abe6 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 4176 | |
| 4177 | return 0; |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 4178 | } |
| 4179 | |
| 4180 | void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) |
| 4181 | { |
David Hildenbrand | 14d85a6 | 2023-12-20 23:44:33 +0100 | [diff] [blame] | 4182 | struct folio *folio = page_folio(new); |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 4183 | struct vm_area_struct *vma = pvmw->vma; |
| 4184 | struct mm_struct *mm = vma->vm_mm; |
| 4185 | unsigned long address = pvmw->address; |
Miaohe Lin | 4fba8f2 | 2022-07-04 21:21:51 +0800 | [diff] [blame] | 4186 | unsigned long haddr = address & HPAGE_PMD_MASK; |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 4187 | pmd_t pmde; |
| 4188 | swp_entry_t entry; |
| 4189 | |
| 4190 | if (!(pvmw->pmd && !pvmw->pte)) |
| 4191 | return; |
| 4192 | |
| 4193 | entry = pmd_to_swp_entry(*pvmw->pmd); |
David Hildenbrand | 14d85a6 | 2023-12-20 23:44:33 +0100 | [diff] [blame] | 4194 | folio_get(folio); |
Peter Xu | 2e34687 | 2022-08-11 12:13:29 -0400 | [diff] [blame] | 4195 | pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)); |
Naoya Horiguchi | ab6e3d0 | 2017-09-08 16:11:04 -0700 | [diff] [blame] | 4196 | if (pmd_swp_soft_dirty(*pvmw->pmd)) |
| 4197 | pmde = pmd_mksoft_dirty(pmde); |
David Hildenbrand | 3c811f7 | 2023-04-11 16:25:10 +0200 | [diff] [blame] | 4198 | if (is_writable_migration_entry(entry)) |
Rick Edgecombe | 161e393 | 2023-06-12 17:10:29 -0700 | [diff] [blame] | 4199 | pmde = pmd_mkwrite(pmde, vma); |
Peter Xu | 8f34f1e | 2021-06-30 18:49:02 -0700 | [diff] [blame] | 4200 | if (pmd_swp_uffd_wp(*pvmw->pmd)) |
Peter Xu | f1eb1ba | 2022-12-14 15:15:33 -0500 | [diff] [blame] | 4201 | pmde = pmd_mkuffd_wp(pmde); |
Peter Xu | 2e34687 | 2022-08-11 12:13:29 -0400 | [diff] [blame] | 4202 | if (!is_migration_entry_young(entry)) |
| 4203 | pmde = pmd_mkold(pmde); |
 | 4204 | /* NOTE: this may also set the soft-dirty bit on some archs */
David Hildenbrand | 14d85a6 | 2023-12-20 23:44:33 +0100 | [diff] [blame] | 4205 | if (folio_test_dirty(folio) && is_migration_entry_dirty(entry)) |
Peter Xu | 2e34687 | 2022-08-11 12:13:29 -0400 | [diff] [blame] | 4206 | pmde = pmd_mkdirty(pmde); |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 4207 | |
David Hildenbrand | 14d85a6 | 2023-12-20 23:44:33 +0100 | [diff] [blame] | 4208 | if (folio_test_anon(folio)) { |
David Hildenbrand | 395db7b | 2023-12-20 23:44:40 +0100 | [diff] [blame] | 4209 | rmap_t rmap_flags = RMAP_NONE; |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 4210 | |
| 4211 | if (!is_readable_migration_entry(entry)) |
| 4212 | rmap_flags |= RMAP_EXCLUSIVE; |
| 4213 | |
David Hildenbrand | 395db7b | 2023-12-20 23:44:40 +0100 | [diff] [blame] | 4214 | folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags); |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 4215 | } else { |
David Hildenbrand | 14d85a6 | 2023-12-20 23:44:33 +0100 | [diff] [blame] | 4216 | folio_add_file_rmap_pmd(folio, new, vma); |
David Hildenbrand | 6c28760 | 2022-05-09 18:20:44 -0700 | [diff] [blame] | 4217 | } |
David Hildenbrand | 14d85a6 | 2023-12-20 23:44:33 +0100 | [diff] [blame] | 4218 | VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new)); |
Miaohe Lin | 4fba8f2 | 2022-07-04 21:21:51 +0800 | [diff] [blame] | 4219 | set_pmd_at(mm, haddr, pvmw->pmd, pmde); |
Muchun Song | 5cbcf22 | 2022-03-22 14:41:53 -0700 | [diff] [blame] | 4220 | |
| 4221 | /* No need to invalidate - it was non-present before */ |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 4222 | update_mmu_cache_pmd(vma, address, pvmw->pmd); |
Anshuman Khandual | 283fd6f | 2022-03-24 18:09:58 -0700 | [diff] [blame] | 4223 | trace_remove_migration_pmd(address, pmd_val(pmde)); |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 4224 | } |
| 4225 | #endif |