// SPDX-License-Identifier: GPL-2.0
/*
 * Provide common bits of early_ioremap() support for architectures needing
 * temporary mappings during boot before ioremap() is available.
 *
 * This is mostly a direct copy of the x86 early_ioremap implementation.
 *
 * (C) Copyright 1995 1996, 2014 Linus Torvalds
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/fixmap.h>
#include <asm/early_ioremap.h>
#include "internal.h"
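/*
 * A usage sketch (illustrative only: fw_table_phys, struct fw_table and
 * parse_fw_table() are hypothetical names, not kernel API). Early boot
 * code temporarily maps a physical range, reads it, and unmaps it again,
 * all before ioremap() works:
 *
 *	struct fw_table *tbl;
 *
 *	tbl = early_memremap(fw_table_phys, sizeof(*tbl));
 *	if (tbl) {
 *		parse_fw_table(tbl);
 *		early_memunmap(tbl, sizeof(*tbl));
 *	}
 */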

#ifdef CONFIG_MMU
static int early_ioremap_debug __initdata;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
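/*
 * Enabled by booting with "early_ioremap_debug" on the kernel command
 * line; each mapping and unmapping below is then reported via WARN().
 */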

static int after_paging_init __initdata;

pgprot_t __init __weak early_memremap_pgprot_adjust(resource_size_t phys_addr,
						    unsigned long size,
						    pgprot_t prot)
{
	return prot;
}
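/*
 * A sketch of why the hook above is weak: an architecture with memory
 * encryption (x86 with SME, for example) overrides it to decide whether
 * a range must be mapped encrypted or decrypted, roughly like this
 * (range_requires_decryption() is a placeholder, not a real helper):
 *
 *	pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
 *						     unsigned long size,
 *						     pgprot_t prot)
 *	{
 *		if (range_requires_decryption(phys_addr, size))
 *			prot = pgprot_decrypted(prot);
 *		return prot;
 *	}
 */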

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

/*
 * Generally, ioremap() is available after paging_init() has been called.
 * Architectures wanting to allow early_ioremap after paging_init() can
 * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
 */
#ifndef __late_set_fixmap
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
					    phys_addr_t phys, pgprot_t prot)
{
	BUG();
}
#endif

#ifndef __late_clear_fixmap
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
	BUG();
}
#endif
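/*
 * A minimal sketch of what an architecture might provide in its fixmap
 * header, assuming it has a working __set_fixmap() once paging is up
 * (the exact helper names vary by architecture):
 *
 *	#define __late_set_fixmap(idx, phys, prot) \
 *		__set_fixmap(idx, phys, prot)
 *	#define __late_clear_fixmap(idx) \
 *		__set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR)
 */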

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_setup(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		WARN_ON_ONCE(prev_map[i]);
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
	}
}
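/*
 * Worked example of the slot layout above, using NR_FIX_BTMAPS == 64 and
 * FIX_BTMAPS_SLOTS == 8 as on x86 (other architectures pick their own
 * values): slot 0 starts at fixmap index FIX_BTMAP_BEGIN, slot 1 at
 * FIX_BTMAP_BEGIN - 64, and so on, each slot spanning 64 page-sized
 * fixmap entries counted downwards.
 */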

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (WARN(count, KERN_WARNING
		 "Debug warning: early ioremap leak of %d areas detected.\n"
		 "Please boot with early_ioremap_debug and report the dmesg output.\n",
		 count))
		return 1;
	return 0;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	WARN_ON(system_state >= SYSTEM_RUNNING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}
	if (WARN(slot < 0, "%s(%pa, %08lx) no free slot found\n",
		 __func__, &phys_addr, size))
		return NULL;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (WARN_ON(!size || last_addr < phys_addr))
		return NULL;

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = offset_in_page(phys_addr);
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (WARN_ON(nrpages > NR_FIX_BTMAPS))
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_set_fixmap(idx, phys_addr, prot);
		else
			__early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	WARN(early_ioremap_debug, "%s(%pa, %08lx) [%d] => %08lx + %08lx\n",
	     __func__, &phys_addr, size, slot, offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}
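/*
 * Worked example of the alignment arithmetic above, assuming 4K pages
 * and an illustrative request __early_ioremap(0x12345678, 0x100, ...):
 * offset becomes 0x678, phys_addr is rounded down to 0x12345000, size
 * is rounded up to 0x1000 (one page), and the returned pointer is
 * slot_virt[slot] + 0x678 so the caller sees the original byte.
 */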

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}
	if (WARN(slot < 0, "%s(%p, %08lx) slot not found\n",
		 __func__, addr, size))
		return;

	if (WARN(prev_size[slot] != size,
		 "%s(%p, %08lx) [%d] size does not match mapped size %08lx\n",
		 __func__, addr, size, slot, prev_size[slot]))
		return;

	WARN(early_ioremap_debug, "%s(%p, %08lx) [%d]\n",
	     __func__, addr, size, slot);

	virt_addr = (unsigned long)addr;
	if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
		return;

	offset = offset_in_page(virt_addr);
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_clear_fixmap(idx);
		else
			__early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_NORMAL);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}

#ifdef FIXMAP_PAGE_RO
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_RO);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
#endif

#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
void __init *
early_memremap_prot(resource_size_t phys_addr, unsigned long size,
		    unsigned long prot_val)
{
	return (__force void *)__early_ioremap(phys_addr, size,
					       __pgprot(prot_val));
}
#endif

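/*
 * The largest span a single slot can map: NR_FIX_BTMAPS page-sized
 * fixmap entries' worth of bytes (e.g. 64 * 4K = 256K with typical
 * x86 values), which bounds each chunk copied below.
 */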
#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)

void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
{
	unsigned long slop, clen;
	char *p;

	while (size) {
		slop = offset_in_page(src);
		clen = size;
		if (clen > MAX_MAP_CHUNK - slop)
			clen = MAX_MAP_CHUNK - slop;
		p = early_memremap(src & PAGE_MASK, clen + slop);
		memcpy(dest, p + slop, clen);
		early_memunmap(p, clen + slop);
		dest += clen;
		src += clen;
		size -= clen;
	}
}
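/*
 * Worked example of the chunking above, assuming 4K pages and
 * MAX_MAP_CHUNK == 256K: copying 300K starting 0x800 bytes into a page
 * maps the first chunk at the page boundary, copies 256K - 0x800 bytes
 * past the 0x800-byte slop, then loops with slop == 0 for the
 * remaining 46K.
 */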

#else /* CONFIG_MMU */

void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void __iomem *)phys_addr;
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}

#endif /* CONFIG_MMU */

void __init early_memunmap(void *addr, unsigned long size)
{
	early_iounmap((__force void __iomem *)addr, size);
}