// SPDX-License-Identifier: GPL-2.0
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */

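/* Search below the sparc64 VA hole first; if that fails and the task
 * can use the upper half of the address space, retry above the hole.
 */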
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info = {};

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info = {};

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (!test_bit(MMF_TOPDOWN, &mm->flags))
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

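/* sun4u needs no TTE rewriting here; the size bits are expected to be
 * part of the base PTE protections already, so the entry is returned
 * unchanged.
 */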
static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}

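/* sun4v encodes the mapping size in the TTE's _PAGE_SZALL_4V field:
 * clear the old size bits, set the ones matching the requested shift,
 * and mark the entry as a PMD- or PUD-level huge mapping as needed.
 */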
static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	unsigned long hugepage_size = _PAGE_SZ4MB_4V;

	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

	switch (shift) {
	case HPAGE_16GB_SHIFT:
		hugepage_size = _PAGE_SZ16GB_4V;
		pte_val(entry) |= _PAGE_PUD_HUGE;
		break;
	case HPAGE_2GB_SHIFT:
		hugepage_size = _PAGE_SZ2GB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_256MB_SHIFT:
		hugepage_size = _PAGE_SZ256MB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		hugepage_size = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) = pte_val(entry) | hugepage_size;
	return entry;
}

static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	if (tlb_type == hypervisor)
		return sun4v_hugepage_shift_to_tte(entry, shift);
	else
		return sun4u_hugepage_shift_to_tte(entry, shift);
}

pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	pte_t pte;

	entry = pte_mkhuge(entry);
	pte = hugepage_shift_to_tte(entry, shift);

#ifdef CONFIG_SPARC64
	/* If this vma has ADI enabled on it, turn on TTE.mcd */
	if (flags & VM_SPARC_ADI)
		return pte_mkmcd(pte);
	else
		return pte_mknotmcd(pte);
#else
	return pte;
#endif
}

static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ16GB_4V:
		shift = HPAGE_16GB_SHIFT;
		break;
	case _PAGE_SZ2GB_4V:
		shift = HPAGE_2GB_SHIFT;
		break;
	case _PAGE_SZ256MB_4V:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4V:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4V:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned long tte_to_shift(pte_t entry)
{
	if (tlb_type == hypervisor)
		return sun4v_huge_tte_to_shift(entry);

	return sun4u_huge_tte_to_shift(entry);
}

static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned long shift = tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		WARN_ONCE(1, "tte_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}

static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

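	/* An HPAGE_SIZE region is backed by two REAL_HPAGE_SIZE TTEs,
	 * so report the software huge page size, not the TTE size.
	 */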
	if (size == REAL_HPAGE_SIZE)
		size = HPAGE_SIZE;
	return size;
}

unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&pud); }
unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }

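/* For PUD- and PMD-sized huge pages the pud/pmd entry itself holds the
 * huge TTE, so the walkers below return a pointer to that entry; only
 * sub-PMD huge pages are backed by a real PTE page.
 */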
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	if (sz >= PUD_SIZE)
		return (pte_t *)pud;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	if (sz >= PMD_SIZE)
		return (pte_t *)pmd;
	return pte_alloc_huge(mm, pmd, addr);
}

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (is_hugetlb_pud(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (is_hugetlb_pmd(*pmd))
		return (pte_t *)pmd;
	return pte_offset_huge(pmd, addr);
}

void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned int nptes, orig_shift, shift;
	unsigned long i, size;
	pte_t orig;

	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

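	/* Replicate the TTE into all nptes page table slots, stepping
	 * the physical address by one mapping unit per slot.
	 */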
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}

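/* The sz argument is unused here: the mapping size is recovered from
 * the TTE itself in __set_huge_pte_at().
 */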
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry, unsigned long sz)
{
	__set_huge_pte_at(mm, addr, ptep, entry);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;
	orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);

	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    orig_shift);

	return entry;
}

static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

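	/* Free the PMD page itself only if the whole PUD span lies
	 * inside the floor..ceiling window.
	 */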
	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (is_hugetlb_pud(*pud))
			pud_clear(pud);
		else
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
	} while (pud++, addr = next, addr != end);

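	/* Likewise, free the PUD page only if the whole PGDIR span is
	 * within floor..ceiling.
	 */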
	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

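/* Clamp the range to PMD granularity (the smallest unit of page table
 * freed here, since one PTE page maps PMD_SIZE) and trim either edge
 * that floor/ceiling say must be preserved.
 */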
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);
}