/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h>			/* For sub-arch specific PPC_PIN_SIZE */

#endif /* __ASSEMBLY__ */

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
/*
 * The normal case is that PTEs are 32 bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
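
/*
 * Worked example (a sketch, assuming the "normal case" above: 4KB pages
 * with 32-bit PTEs, where PTE_SHIFT is 10):
 *
 *   PTE_INDEX_SIZE = 10           -> PTRS_PER_PTE = 1024, 4KB PTE pages
 *   PGDIR_SHIFT    = 12 + 10 = 22 -> each pgd entry maps PGDIR_SIZE = 4MB
 *   PGD_INDEX_SIZE = 32 - 22 = 10 -> PTRS_PER_PGD = 1024, a 4KB pgdir
 *
 * 1024 pgdir entries of 4MB each cover the full 32-bit address space.
 */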

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08llx.\n", __FILE__, __LINE__, (unsigned long long)pgd_val(e))

/*
 * With HIGHMEM this is the bottom of the PKMAP area; on other
 * configurations it is an arbitrary value (for now).  From here we can
 * start laying out the kernel virtual space that goes below PKMAP and
 * FIXMAP.
 */

#define FIXADDR_SIZE	0
#ifdef CONFIG_KASAN
#include <asm/kasan.h>
#define FIXADDR_TOP	(KASAN_SHADOW_START - PAGE_SIZE)
#else
#define FIXADDR_TOP	((unsigned long)(-PAGE_SIZE))
#endif

/*
 * ioremap_bot starts at that address.  Early ioremaps move down from there,
 * until mem_init(), at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP	PKMAP_BASE
#else
#define IOREMAP_TOP	FIXADDR_START
#endif

/* PPC32 shares vmalloc area with ioremap */
#define IOREMAP_START	VMALLOC_START
#define IOREMAP_END	VMALLOC_END
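
/*
 * The resulting top-of-address-space layout, as a sketch (not to scale;
 * exact values depend on the configuration):
 *
 *   FIXADDR_TOP    0xfffff000 without KASAN (-PAGE_SIZE, for 4KB pages)
 *   fixmap, then the PKMAP area (with CONFIG_HIGHMEM)
 *   IOREMAP_TOP    early ioremaps grow down from here...
 *   ioremap_bot    ...to here; becomes VMALLOC_END after mem_init()
 *   vmalloc/ioremap space, growing up from VMALLOC_START
 *   VMALLOC_START
 *   high_memory    top of the linear (lowmem) mapping
 */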

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be an up-to-16MB "hole"
 * after the physical memory until the kernel virtual memory starts.
 * That means that any out-of-bounds memory accesses will hopefully be
 * caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from IOREMAP_TOP being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END	ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END	ioremap_bot
#endif
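
/*
 * Worked example (a sketch, assuming no PPC_PIN_SIZE and high_memory at
 * 0x30000000, i.e. 768MB of lowmem):
 *
 *   VMALLOC_START = (0x30000000 + 0x1000000) & ~0x00ffffff = 0x31000000
 *
 * i.e. high_memory plus VMALLOC_OFFSET, rounded down to a VMALLOC_OFFSET
 * boundary: here exactly a 16MB hole, since high_memory is 16MB-aligned.
 */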

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-e500.h>
#elif defined(CONFIG_PPC_85xx)
#include <asm/nohash/32/pte-85xx.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/*
 * Location of the PFN in the PTE.  Most 32-bit platforms use the same
 * as PAGE_SHIFT here (i.e. the PFN is naturally aligned).  Platforms
 * that don't simply pre-define the value, so we don't override it here.
 */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT	(PAGE_SHIFT)
#endif

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
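
/*
 * Worked values (a sketch, assuming PTE_RPN_SHIFT == PAGE_SHIFT == 12):
 *
 *   32-bit PTEs: PTE_RPN_MASK = ~0xfffUL  = 0xfffff000
 *                -> 20 PFN bits, covering 4GB of physical space
 *   64-bit PTEs: PTE_RPN_MASK = ~0xfffULL = 0xfffffffffffff000
 *                -> room for e.g. the 36-bit physical addresses of e500v2
 */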

#ifndef __ASSEMBLY__

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_pfn(pmd)		(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((const void *)((unsigned long)pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_pfn(pmd)		(__pa(pmd_val(pmd)) >> PAGE_SHIFT)
#endif

#define pmd_page(pmd)		pfn_to_page(pmd_pfn(pmd))
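
/*
 * Usage illustration (a sketch, not a caller from this file): on Book E
 * the pte page pointer comes straight from the pmd,
 *
 *	const pte_t *pte = pmd_page_vaddr(*pmdp);
 *
 * and pmd_pfn() must first translate that virtual address with __pa(),
 * whereas on other platforms pmd_val() already is the physical address
 * and can be shifted down directly, as above.
 */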

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs (32bit PTEs):
 *
 *                       1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *   <------------------ offset -------------------> < type -> E 0 0
 *
 * E is the exclusive marker that is not stored in swap entries.
 *
 * For 64bit PTEs, the offset is extended by 32bit.
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) & 0x1f) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

/* We borrow LSB 2 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	0x000004
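
/*
 * Worked example (a sketch): swap type 3 at page offset 0x100 gives
 *
 *   __swp_entry(3, 0x100).val = (3 & 0x1f) | (0x100 << 5) = 0x2003
 *   __swp_entry_to_pte(...)   = 0x2003 << 3               = 0x10018
 *
 * which leaves the three low PTE bits clear: the two must-be-zero bits
 * from the diagram above plus bit 0x4, kept free for the exclusive
 * marker _PAGE_SWP_EXCLUSIVE.
 */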

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */