/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011
 *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
 *   They are semantically the same although used in different contexts:
 *   VALID marks that a TLB entry exists, which can only happen if PRESENT
 *  -Utilise some unused free bits to confine PTE flags to 12 bits
 *   This is a must for 4k pg-sz
 *
 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
 *  -TLB Locking never really existed, except for initial specs
 *  -SILENT_xxx not needed for our port
 *  -Per my request, MMU V3 changes the layout of some of the bits
 *   to avoid a few shifts in TLB Miss handlers.
 *
 * vineetg: April 2010
 *  -PGD entry no longer contains any flags. If empty it is 0, otherwise has
 *   Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set() become simpler
 *
 * vineetg: April 2010
 *  -Switched from 8:11:13 split for page table lookup to 11:8:13
 *  -this speeds up page table allocation itself as we now have to memset 1K
 *   instead of 8K per page table.
 *  -TODO: Right now page table alloc is 8K and the rest 7K is unused;
 *   need to optimise it
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H

#include <asm/page.h>
#include <asm/mmu.h>
#include <asm-generic/pgtable-nopmd.h>
#include <linux/const.h>

/**************************************************************************
 * Page Table Flags
 *
 * ARC700 MMU only deals with software managed TLB entries.
 * Page Tables are purely for Linux VM's consumption and the bits below are
 * suited to that (uniqueness). Hence some are not implemented in the TLB and
 * some have different value in TLB.
 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live
 *      in separate PD0 and PD1, which combined form a translation entry)
 *      while from the PTE perspective, they are 8 and 9 respectively
 * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
 *      (saves some bit shift ops in TLB Miss hdlrs)
 */

#if (CONFIG_ARC_MMU_VER <= 2)

#define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
#define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
#define _PAGE_DIRTY         (1<<6)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<7)
#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */

#else	/* MMU v3 onwards */

#define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
#define _PAGE_DIRTY         (1<<5)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<6)

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_WTHRU         (1<<7)	/* Page cache mode write-thru (H) */
#endif

#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_HW_SZ         (1<<10)	/* Page Size indicator (H): 0 normal, 1 super */
#endif

#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
					   usable for shared TLB entries (H) */

#define _PAGE_UNUSED_BIT    (1<<12)
#endif

/* vmalloc permissions */
#define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
			_PAGE_GLOBAL | _PAGE_PRESENT)

#ifndef CONFIG_ARC_CACHE_PAGES
#undef _PAGE_CACHEABLE
#define _PAGE_CACHEABLE 0
#endif

#ifndef _PAGE_HW_SZ
#define _PAGE_HW_SZ	0
#endif

/* Defaults for every user page */
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)

/* More abbreviated helpers */
#define PAGE_U_NONE     __pgprot(___DEF)
#define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R      __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R      __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R    __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
						       _PAGE_EXECUTE)

#define PAGE_SHARED	PAGE_U_W_R

/* While the kernel runs out of untranslated space, vmalloc/modules use a
 * chunk of user vaddr space - visible in all addr spaces, but kernel mode only
 * Thus Global, all-kernel-access, no-user-access, cached
 */
#define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE)

/* ioremap */
#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)

/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)

#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
#else
#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
#endif

/**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have 1:1 mapping
 *  e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 *       which directly corresponds to PAGE_U_X_R
 *
 * Other rules which cause the divergence from 1:1 mapping
 *
 *  1. Although ARC700 can do exclusive execute/write protection (meaning R
 *     can be tracked independently of X/W, unlike some other CPUs), still to
 *     keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *     This is to enable the COW mechanism
 *
 * (a worked example follows the tables below)
 */
	/* xwr */
#define __P000  PAGE_U_NONE
#define __P001  PAGE_U_R
#define __P010  PAGE_U_R	/* Pvt-W => !W */
#define __P011  PAGE_U_R	/* Pvt-W => !W */
#define __P100  PAGE_U_X_R	/* X => R */
#define __P101  PAGE_U_X_R
#define __P110  PAGE_U_X_R	/* Pvt-W => !W and X => R */
#define __P111  PAGE_U_X_R	/* Pvt-W => !W */

#define __S000  PAGE_U_NONE
#define __S001  PAGE_U_R
#define __S010  PAGE_U_W_R	/* W => R */
#define __S011  PAGE_U_W_R
#define __S100  PAGE_U_X_R	/* X => R */
#define __S101  PAGE_U_X_R
#define __S110  PAGE_U_X_W_R	/* X => R */
#define __S111  PAGE_U_X_W_R
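
/*
 * Worked example (an illustrative sketch, not code in this header):
 * a private read+exec mapping, e.g.
 *	mmap(NULL, len, PROT_READ|PROT_EXEC, MAP_PRIVATE, fd, 0)
 * has VM_READ|VM_EXEC and !VM_SHARED, so generic VM picks __P101 from the
 * protection map, which per the table above is PAGE_U_X_R. The same
 * protections with MAP_SHARED pick __S101 - identical here, since the
 * Pvt-W special-casing only affects writable private mappings:
 * PROT_READ|PROT_WRITE private gives __P011 = PAGE_U_R (write faults
 * drive COW), whereas shared gives __S011 = PAGE_U_W_R (writers hit the
 * page directly).
 */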

/****************************************************************
 * 2 tier (PGD:PTE) software page walker
 *
 * [31]             32 bit virtual address                  [0]
 * -------------------------------------------------------
 * |               | <------------ PGDIR_SHIFT ----------> |
 * |               |                                       |
 * | BITS_FOR_PGD  |  BITS_FOR_PTE  | <-- PAGE_SHIFT -->   |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  ---> index into Page Table
 *       ----> index into Page Directory
 *
 * In a single page size configuration, only PAGE_SHIFT is fixed
 * So both PGD and PTE sizing can be tweaked
 *  e.g. 8K page (PAGE_SHIFT 13) can have
 *  - PGDIR_SHIFT 21 -> 11:8:13 address split
 *  - PGDIR_SHIFT 24 -> 8:11:13 address split
 *
 * If Super Page is configured, PGDIR_SHIFT becomes fixed too,
 * so the sizing flexibility is gone.
 */

#if defined(CONFIG_ARC_HUGEPAGE_16M)
#define PGDIR_SHIFT	24
#elif defined(CONFIG_ARC_HUGEPAGE_2M)
#define PGDIR_SHIFT	21
#else
/*
 * Only Normal page support so "hackable" (see comment above)
 * Default value provides 11:8:13 (8K), 11:9:12 (4K)
 */
#define PGDIR_SHIFT	21
#endif

#define BITS_FOR_PTE	(PGDIR_SHIFT - PAGE_SHIFT)
#define BITS_FOR_PGD	(32 - PGDIR_SHIFT)

#define PGDIR_SIZE	_BITUL(PGDIR_SHIFT)	/* vaddr span, not PGD sz */
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define	PTRS_PER_PTE	_BITUL(BITS_FOR_PTE)
#define	PTRS_PER_PGD	_BITUL(BITS_FOR_PGD)
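
/*
 * Worked example for the default 11:8:13 split (8K page, PGDIR_SHIFT 21,
 * PAGE_SHIFT 13), matching the history note at the top of this file:
 *	BITS_FOR_PGD = 11  ->  PTRS_PER_PGD = 2048 (8K of 4-byte entries)
 *	BITS_FOR_PTE = 8   ->  PTRS_PER_PTE = 256  (only 1K to memset)
 * Decomposing a hypothetical vaddr 0x50001234:
 *	pgd index = 0x50001234 >> 21           = 0x280
 *	pte index = (0x50001234 >> 13) & 0xff  = 0x0
 *	page off  = 0x50001234 & ~PAGE_MASK    = 0x1234
 */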

/*
 * Number of entries a userland program can use.
 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
 */
#define	USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/*
 * No special requirements for the lowest virtual address we permit any user
 * space mapping to be mapped at.
 */
#define FIRST_USER_ADDRESS	0UL


/****************************************************************
 * Bucket load of VM Helpers
 */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)

/* find the page descriptor of the Page Tbl ref by PMD entry */
#define pmd_page(pmd)		virt_to_page(pmd_val(pmd) & PAGE_MASK)

/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)

/* In a 2 level sys, setup the PGD entry with PTE value */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long)ptep;
}

#define pte_none(x)		(!pte_val(x))
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) set_pte_at(mm, addr, ptep, __pte(0))

#define pmd_none(x)		(!pmd_val(x))
#define pmd_bad(x)		((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)		(pmd_val(x))
#define pmd_clear(xp)		do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
#define pfn_pte(pfn, prot)	(__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40 */
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/*
 * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
 * and returns ptr to PTE entry corresponding to @addr
 */
#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
					 __pte_index(addr))

/* No mapping of Page Tables in high mem etc, so following same as above */
#define pte_offset_kernel(dir, addr)	pte_offset(dir, addr)
#define pte_offset_map(dir, addr)	pte_offset(dir, addr)
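
/*
 * A minimal sketch (assumed caller context, not code in this header) of a
 * full 2-level software walk using the helpers above. Since the PMD is
 * folded into the PGD here (see pgtable-nopmd.h), the PGD entry doubles as
 * the PMD entry, so the cast below stands in for the pud/pmd no-op hops:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = (pmd_t *)pgd;
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
 *		pte_t *pte = pte_offset(pmd, addr);
 *		if (pte_present(*pte))
 *			paddr = (pte_pfn(*pte) << PAGE_SHIFT) |
 *				(addr & ~PAGE_MASK);
 *	}
 */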

/* Zoo of pte_xxx function */
#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)	(pte_val(pte) & _PAGE_SPECIAL)

#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(mknotpresent,	&= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect,	&= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,	|= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_DIRTY));
PTE_BIT_FUNC(mkdirty,	|= (_PAGE_DIRTY));
PTE_BIT_FUNC(mkold,	&= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,	|= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect,	&= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec,	|= (_PAGE_EXECUTE));
PTE_BIT_FUNC(mkspecial,	|= (_PAGE_SPECIAL));
PTE_BIT_FUNC(mkhuge,	|= (_PAGE_HW_SZ));
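
/*
 * For reference, each PTE_BIT_FUNC() invocation above expands to a tiny
 * inline; e.g. PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY)) generates:
 *
 *	static inline pte_t pte_mkdirty(pte_t pte)
 *	{
 *		pte_val(pte) |= (_PAGE_DIRTY);
 *		return pte;
 *	}
 */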

#define __HAVE_ARCH_PTE_SPECIAL

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

/*
 * All kernel related VM pages are in init's mm.
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr)	(((mm)->pgd) + pgd_index(addr))

/*
 * Macro to quickly access the PGD entry, utilising the fact that some
 * arch may cache the pointer to Page Directory of "current" task
 * in a MMU register
 *
 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
 * becomes reading a register
 *
 * ********CAUTION*******:
 * Kernel code might be dealing with some mm_struct of NON "current"
 * Thus use this macro only when you are certain that "current" is current
 * e.g. when dealing with signal frame setup code etc
 */
#ifndef CONFIG_SMP
#define pgd_offset_fast(mm, addr)	\
({					\
	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
	pgd_base + pgd_index(addr);	\
})
#else
#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
#endif

extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep);

/* Encode swap {type,off} tuple into PTE
 * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
 * PAGE_PRESENT is zero in a PTE holding swap "identifier"
 */
#define __swp_entry(type, off)	((swp_entry_t) { \
					((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing swap "identifier" into constituents.
 * Note: decode must invert the encode above, hence offset shifts right */
#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)
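
/*
 * Illustrative round trip of the encoding above (numbers hypothetical):
 * __swp_entry(3, 0x42) -> val = 0x3 | (0x42 << 13) = 0x84003;
 * __swp_type() recovers 0x84003 & 0x1f = 3, and __swp_offset() recovers
 * 0x84003 >> 13 = 0x42. Bits 12-5 (which include _PAGE_PRESENT) stay zero,
 * so the VM never mistakes a swap entry for a mapped page.
 */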

/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr)	(1)

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <asm/hugepage.h>
#endif

#include <asm-generic/pgtable.h>

/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* __ASSEMBLY__ */

#endif