// SPDX-License-Identifier: GPL-2.0
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info = {};

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

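/*
 * Top-down search below mm->mmap_base for 32-bit tasks; align_mask
 * keeps the returned address aligned to this hstate's huge page size.
 */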
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info = {};

	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

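/*
 * Arch hook for hugetlb mmap(): validate length and alignment, honor
 * MAP_FIXED and a usable hint address, then pick the bottom-up or
 * top-down search based on the mm's layout.
 */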
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (!test_bit(MMF_TOPDOWN, &mm->flags))
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}

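/*
 * Encode the huge page size into a sun4v TTE: select the _PAGE_SZ*_4V
 * size field for the given shift and flag PMD/PUD-level leaves with
 * _PAGE_PMD_HUGE/_PAGE_PUD_HUGE.
 */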
static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	unsigned long hugepage_size = _PAGE_SZ4MB_4V;

	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

	switch (shift) {
	case HPAGE_16GB_SHIFT:
		hugepage_size = _PAGE_SZ16GB_4V;
		pte_val(entry) |= _PAGE_PUD_HUGE;
		break;
	case HPAGE_2GB_SHIFT:
		hugepage_size = _PAGE_SZ2GB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_256MB_SHIFT:
		hugepage_size = _PAGE_SZ256MB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		hugepage_size = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) = pte_val(entry) | hugepage_size;
	return entry;
}

static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	if (tlb_type == hypervisor)
		return sun4v_hugepage_shift_to_tte(entry, shift);
	else
		return sun4u_hugepage_shift_to_tte(entry, shift);
}

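/*
 * Build the final huge TTE for a mapping: encode the size for the
 * running CPU type and set or clear TTE.mcd based on VM_SPARC_ADI.
 */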
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	pte_t pte;

	entry = pte_mkhuge(entry);
	pte = hugepage_shift_to_tte(entry, shift);

#ifdef CONFIG_SPARC64
	/* If this vma has ADI enabled on it, turn on TTE.mcd
	 */
	if (flags & VM_SPARC_ADI)
		return pte_mkmcd(pte);
	else
		return pte_mknotmcd(pte);
#else
	return pte;
#endif
}

static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ16GB_4V:
		shift = HPAGE_16GB_SHIFT;
		break;
	case _PAGE_SZ2GB_4V:
		shift = HPAGE_2GB_SHIFT;
		break;
	case _PAGE_SZ256MB_4V:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4V:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4V:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned long tte_to_shift(pte_t entry)
{
	if (tlb_type == hypervisor)
		return sun4v_huge_tte_to_shift(entry);

	return sun4u_huge_tte_to_shift(entry);
}

static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned long shift = tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		WARN_ONCE(1, "huge_tte_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}

static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

	if (size == REAL_HPAGE_SIZE)
		size = HPAGE_SIZE;
	return size;
}

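/* Report the true mapping size of a leaf entry from its TTE size bits. */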
unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&pud); }
unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }

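/*
 * Depending on sz, the huge "pte" lives at PUD, PMD or PTE level;
 * allocate the intermediate levels as needed and return a pointer to
 * the entry that will hold the huge TTE.
 */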
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	if (sz >= PUD_SIZE)
		return (pte_t *)pud;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	if (sz >= PMD_SIZE)
		return (pte_t *)pmd;
	return pte_alloc_huge(mm, pmd, addr);
}

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (is_hugetlb_pud(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (is_hugetlb_pmd(*pmd))
		return (pte_t *)pmd;
	return pte_offset_huge(pmd, addr);
}

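/*
 * A huge mapping is backed by nptes consecutive entries at its level;
 * fill each one with the TTE advanced by its offset, then queue TLB
 * maintenance for the old translation (an HPAGE_SIZE mapping needs
 * both of its REAL_HPAGE_SIZE halves flushed).
 */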
void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t entry)
{
	unsigned int nptes, orig_shift, shift;
	unsigned long i, size;
	pte_t orig;

	size = huge_tte_to_size(entry);

	shift = PAGE_SHIFT;
	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry, unsigned long sz)
{
	__set_huge_pte_at(mm, addr, ptep, entry);
}

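/*
 * Tear down a huge mapping: clear every backing entry and queue the
 * matching TLB flushes, mirroring __set_huge_pte_at(); the original
 * TTE is returned to the caller.
 */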
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);

	shift = PAGE_SHIFT;
	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;
	orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);

	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    orig_shift);

	return entry;
}

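/*
 * Page table teardown for hugetlb VMAs: like free_pgd_range(), but a
 * huge PMD/PUD leaf is simply cleared rather than walked as a
 * lower-level table.
 */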
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (is_hugetlb_pud(*pud))
			pud_clear(pud);
		else
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);
}
Nitin Gupta7bc37772016-07-29 00:54:21 -0700524}