// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif
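
/*
 * mm_cachebits is set to _PAGE_CACHE040 in paging_init() below when running
 * on a '040 or '060, at the same point where every protection_map[] entry
 * gets _PAGE_CACHE040 OR'ed in; presumably the regular PAGE_* definitions in
 * the pgtable headers pick it up as well.
 */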

/* Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */
static inline void nocache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mknocache(*ptep);
	}
}

static inline void cache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mkcache(*ptep);
	}
}

/*
 * Motorola 680x0 user's manual recommends using uncached memory for address
 * translation tables.
 *
 * Seeing how the MMU can be external on (some of) these chips, that seems like
 * a very important recommendation to follow. Provide some helpers to combat
 * 'variation' amongst the users of this.
 */

void mmu_page_ctor(void *page)
{
	__flush_page_to_ram(page);
	flush_tlb_kernel_page(page);
	nocache_page(page);
}

void mmu_page_dtor(void *page)
{
	cache_page(page);
}
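
/*
 * Typical life cycle, as exercised by get_pointer_table() and
 * free_pointer_table() below: a page is obtained with get_zeroed_page(),
 * run through mmu_page_ctor() so the '040/'060 access it uncached while it
 * holds translation tables, and passed to mmu_page_dtor() to restore the
 * normal cache mode just before free_page().
 */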

/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;

static struct list_head ptable_list[2] = {
	LIST_HEAD_INIT(ptable_list[0]),
	LIST_HEAD_INIT(ptable_list[1]),
};

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)

static const int ptable_shift[2] = {
	7+2, /* PGD, PMD */
	6+2, /* PTE */
};

#define ptable_size(type) (1U << ptable_shift[type])
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
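
/*
 * Worked example, assuming the usual 4 KiB pages: a pointer (PGD/PMD) table
 * is 1 << (7+2) = 512 bytes, so 8 fit in a page and ptable_mask() is 0xff;
 * a PTE table is 1 << (6+2) = 256 bytes, so 16 fit and the mask is 0xffff.
 * PD_MARKBITS() stores one bit per slot in the backing page's struct page;
 * a set bit means the slot is free.
 */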

void __init init_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = ptable_mask(type);
		list_add(dp, &ptable_list[type]);
	}

	PD_MARKBITS(dp) &= ~mask;
	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

	/* unreserve the page so it's possible to free that page */
	__ClearPageReserved(PD_PAGE(dp));
	init_page_count(PD_PAGE(dp));
}

void *get_pointer_table(int type)
{
	ptable_desc *dp = ptable_list[type].next;
	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
	unsigned int tmp, off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables (or 16 PTE tables).  The page
	 * is remapped in virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		page = (void *)get_zeroed_page(GFP_KERNEL);
		if (!page)
			return NULL;

		if (type == TABLE_PTE) {
			/*
			 * m68k doesn't need SPLIT_PTE_PTLOCKS because it
			 * does not support SMP.
			 */
			pgtable_pte_page_ctor(virt_to_page(page));
		}

		mmu_page_ctor(page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = ptable_mask(type) - 1;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

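	/*
	 * Scan for the lowest set (free) bit in the mark bits.  For
	 * example, with mask == 0b0110 the loop stops at tmp == 0b0010,
	 * so off == ptable_size(type) and the second slot in the page is
	 * handed out; its bit is cleared just below.
	 */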
	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list[type]);
	}
	return page_address(PD_PAGE(dp)) + off;
}

int free_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (PD_MARKBITS(dp) & mask)
		panic("table already free!");

	PD_MARKBITS(dp) |= mask;

	if (PD_MARKBITS(dp) == ptable_mask(type)) {
		/* all tables in page are free, free page */
		list_del(dp);
		mmu_page_dtor((void *)page);
		if (type == TABLE_PTE)
			pgtable_pte_page_dtor(virt_to_page(page));
		free_page(page);
		return 1;
	} else if (ptable_list[type].next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_move(dp, &ptable_list[type]);
	}
	return 0;
}
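
/*
 * The routines above are (presumably) the backend for the pgd/pmd/pte
 * allocation hooks in <asm/motorola_pgalloc.h>.  A hypothetical caller would
 * look roughly like:
 *
 *	pte_t *pte = get_pointer_table(TABLE_PTE);
 *	...
 *	free_pointer_table(pte, TABLE_PTE);
 *
 * where the return value of free_pointer_table() reports whether the whole
 * backing page was released.
 */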

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

static pte_t *last_pte_table __initdata = NULL;

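/*
 * Hand out PTE tables for the initial kernel mappings.  A fresh page is
 * taken from memblock whenever last_pte_table has advanced onto a page
 * boundary (i.e. the previous page is used up); successive calls then carve
 * that page into PTRS_PER_PTE-entry tables.
 */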
static pte_t * __init kernel_page_table(void)
{
	pte_t *pte_table = last_pte_table;

	if (PAGE_ALIGNED(last_pte_table)) {
		pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!pte_table) {
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		}

		clear_page(pte_table);
		mmu_page_ctor(pte_table);

		last_pte_table = pte_table;
	}

	last_pte_table += PTRS_PER_PTE;

	return pte_table;
}

static pmd_t *last_pmd_table __initdata = NULL;

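/*
 * Same scheme for pointer (pmd) tables: first reuse the space left over in
 * the last pointer-table page set up by head.S, then fall back to fresh
 * memblock pages once that is exhausted.
 */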
static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pmd_table) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);

			if (!pud_present(*pud))
				continue;
			pmd = pgd_page_vaddr(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pmd_table = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pmd_table);
#endif
	}

	last_pmd_table += PTRS_PER_PMD;
	if (PAGE_ALIGNED(last_pmd_table)) {
		last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!last_pmd_table)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		clear_page(last_pmd_table);
		mmu_page_ctor(last_pmd_table);
	}

	return last_pmd_table;
}

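/*
 * Map one chunk of physical memory (m68k_memory[node]) into the kernel
 * virtual address space.  On 020/030 this uses early-termination descriptors
 * where alignment and size allow, so a single root- or pointer-table entry
 * covers the whole region; on 040/060 full page tables are always built.
 */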
static void __init map_node(int node)
{
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (PGDIR_SIZE-1)) &&
			    size >= PGDIR_SIZE) {
#ifdef DEBUG
				printk("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= PGDIR_SIZE;
				virtaddr += PGDIR_SIZE;
				physaddr += PGDIR_SIZE;
				continue;
			}
		}
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (!pud_present(*pud_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk("[new pointer %p]", pmd_dir);
#endif
			pud_set(pud_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pud_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk("[early term]");
#endif
				pmd_val(*pmd_dir) = physaddr;
				physaddr += PMD_SIZE;
			} else {
				int i;
#ifdef DEBUG
				printk("[zero map]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);

				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PMD_SIZE;
			virtaddr += PMD_SIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}

	}
#ifdef DEBUG
	printk("\n");
#endif
}

/*
 * Alternate definitions that are compile time constants, for
 * initializing protection_map.  The cachebits are fixed later.
 */
#define PAGE_NONE_C	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED_C	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_COPY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
#define PAGE_READONLY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)

static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]					= PAGE_NONE_C,
	[VM_READ]					= PAGE_READONLY_C,
	[VM_WRITE]					= PAGE_COPY_C,
	[VM_WRITE | VM_READ]				= PAGE_COPY_C,
	[VM_EXEC]					= PAGE_READONLY_C,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_C,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY_C,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_C,
	[VM_SHARED]					= PAGE_NONE_C,
	[VM_SHARED | VM_READ]				= PAGE_READONLY_C,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED_C,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED_C,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_C,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_C,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_C,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_C
};
DECLARE_VM_GET_PAGE_PROT
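
/*
 * DECLARE_VM_GET_PAGE_PROT expands to the generic vm_get_page_prot()
 * implementation, which (as defined in <linux/mm.h>) simply indexes
 * protection_map[] with the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of
 * vm_flags.
 */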

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr;
	int i;

#ifdef DEBUG
	printk("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0. */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size - 1;
	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
			  MEMBLOCK_NONE);
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
			continue;
		}
		memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i,
				  MEMBLOCK_NONE);
		addr = m68k_memory[i].addr + m68k_memory[i].size - 1;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr) + 1;

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_pfn = max_low_pfn = (max_addr >> PAGE_SHIFT) + 1;

	/* Reserve kernel text/data/bss and the memory allocated in head.S */
	memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space.  Make sure memblock will not try to allocate
	 * pages beyond the memory we already mapped in head.S
	 */
	memblock_set_bottom_up(true);

	for (i = 0; i < m68k_num_memory; i++) {
		m68k_setup_node(i);
		map_node(i);
	}

	flush_tlb_all();

	early_memtest(min_addr, max_addr);

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fc(USER_DATA);

#ifdef DEBUG
	printk("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++)
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);

	max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
	free_area_init(max_zone_pfn);
}