// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
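/*
 * As a rough sketch of the "base offset calculation" above: with
 * CONFIG_SPARSEMEM_VMEMMAP the generic memory model boils the
 * conversions down to pointer arithmetic against the vmemmap base
 * (see include/asm-generic/memory_model.h), along the lines of:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 *
 * where vmemmap is the architecture-defined virtual base of the map.
 */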
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* need to make sure the size is the same for all calls during the early stage */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}
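
/*
 * Illustrative example of the accounting above (numbers are
 * hypothetical): an altmap with base_pfn = 0x10000, reserve = 16,
 * free = 1024 and, so far, alloc = 64 and align = 8 hands out its
 * next block starting at pfn 0x10000 + 16 + 64 + 8 = 0x10058, and
 * still has 1024 - (64 + 8) = 952 pfns available.
 */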

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
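	/*
	 * Align the start pfn to the largest power-of-two factor of
	 * nr_pfns: e.g. nr_pfns = 12 (0b1100) gives nr_align = 4, so a
	 * 12-pfn block starts on a 4-pfn boundary.
	 */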
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn_once("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap,
				       struct page *reuse)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p;

		if (!reuse) {
			p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
			if (!p)
				return NULL;
		} else {
			/*
			 * When a PTE/PMD entry is freed from the init_mm
			 * there's a free_pages() call on the page allocated
			 * above. Thus this get_page() is paired with the
			 * put_page_testzero() on the freeing path.
			 * This can only be called by certain ZONE_DEVICE
			 * paths, and through vmemmap_populate_compound_pages()
			 * when slab is available.
			 */
			get_page(reuse);
			p = page_to_virt(reuse);
		}
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
						  struct vmem_altmap *altmap,
						  struct page *reuse)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = vmemmap_pgd_populate(addr, node);
	if (!pgd)
		return NULL;
	p4d = vmemmap_p4d_populate(pgd, addr, node);
	if (!p4d)
		return NULL;
	pud = vmemmap_pud_populate(p4d, addr, node);
	if (!pud)
		return NULL;
	pmd = vmemmap_pmd_populate(pud, addr, node);
	if (!pmd)
		return NULL;
	pte = vmemmap_pte_populate(pmd, addr, node, altmap, reuse);
	if (!pte)
		return NULL;
	vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

	return pte;
}

static int __meminit vmemmap_populate_range(unsigned long start,
					    unsigned long end, int node,
					    struct vmem_altmap *altmap,
					    struct page *reuse)
{
	unsigned long addr = start;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pte = vmemmap_populate_address(addr, node, altmap, reuse);
		if (!pte)
			return -ENOMEM;
	}

	return 0;
}

int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	return vmemmap_populate_range(start, end, node, altmap, NULL);
}

/*
 * For compound pages bigger than section size (e.g. x86 1G compound
 * pages with 2M subsection size) fill the rest of sections as tail
 * pages.
 *
 * Note that memremap_pages() resets @nr_range value and will increment
 * it after each range is successfully onlined. Thus the value of
 * @nr_range at section memmap populate time corresponds to the
 * in-progress range being onlined here.
 */
static bool __meminit reuse_compound_section(unsigned long start_pfn,
					     struct dev_pagemap *pgmap)
{
	unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);
	unsigned long offset = start_pfn -
		PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);

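	/*
	 * Reuse is only worthwhile when compound pages span more than
	 * one subsection and this range does not begin at a
	 * compound-page boundary, i.e. it holds tail pages only. E.g.
	 * (hypothetical numbers) with 1G compound pages (nr_pages =
	 * 262144) and 512-page subsections, every range past the first
	 * within a compound page qualifies.
	 */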
	return !IS_ALIGNED(offset, nr_pages) && nr_pages > PAGES_PER_SUBSECTION;
}

static pte_t * __meminit compound_section_tail_page(unsigned long addr)
{
	pte_t *pte;

	addr -= PAGE_SIZE;

	/*
	 * Assuming sections are populated sequentially, the previous section's
	 * page data can be reused.
	 */
	pte = pte_offset_kernel(pmd_off_k(addr), addr);
	if (!pte)
		return NULL;

	return pte;
}

static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
						     unsigned long start,
						     unsigned long end, int node,
						     struct dev_pagemap *pgmap)
{
	unsigned long size, addr;
	pte_t *pte;
	int rc;

	if (reuse_compound_section(start_pfn, pgmap)) {
		pte = compound_section_tail_page(start);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the page that was populated in the prior iteration
		 * with just tail struct pages.
		 */
		return vmemmap_populate_range(start, end, node, NULL,
					      pte_page(*pte));
	}

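	/*
	 * Each iteration below covers the memmap of one compound page
	 * (pgmap_vmemmap_nr(pgmap) struct pages), capped at the range
	 * being populated: one page for the head plus first tails, one
	 * page of tail struct pages, and the remainder mapped by
	 * reusing that second page.
	 */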
	size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));
	for (addr = start; addr < end; addr += size) {
		unsigned long next, last = addr + size;

		/* Populate the head page vmemmap page */
		pte = vmemmap_populate_address(addr, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/* Populate the tail pages vmemmap page */
		next = addr + PAGE_SIZE;
		pte = vmemmap_populate_address(next, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the previous page for the rest of tail pages
		 * See layout diagram in Documentation/mm/vmemmap_dedup.rst
		 */
		next += PAGE_SIZE;
		rc = vmemmap_populate_range(next, last, node, NULL,
					    pte_page(*pte));
		if (rc)
			return -ENOMEM;
	}

	return 0;
}

struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);
	int r;

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

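	/*
	 * Tail-page deduplication assumes each compound page's memmap
	 * chunk divides evenly into vmemmap pages, which holds only
	 * when sizeof(struct page) is a power of 2; it is also skipped
	 * when an altmap supplies the backing pages.
	 */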
	if (is_power_of_2(sizeof(struct page)) &&
	    pgmap && pgmap_vmemmap_nr(pgmap) > 1 && !altmap)
		r = vmemmap_populate_compound_pages(pfn, start, end, nid, pgmap);
	else
		r = vmemmap_populate(start, end, nid, altmap);

	if (r < 0)
		return NULL;

	return pfn_to_page(pfn);
}