/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011
 *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
 *   They are semantically the same although used in different contexts:
 *   VALID marks that a TLB entry exists, which can only happen if PRESENT
 *  -Utilise some unused free bits to confine PTE flags to 12 bits
 *   This is a must for 4k pg-sz
 *
 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
 *  -TLB Locking never really existed, except for initial specs
 *  -SILENT_xxx not needed for our port
 *  -Per my request, MMU V3 changes the layout of some of the bits
 *   to avoid a few shifts in TLB Miss handlers.
 *
 * vineetg: April 2010
 *  -PGD entry no longer contains any flags. If empty it is 0, otherwise it
 *   has the Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set() become
 *   simpler
 *
 * vineetg: April 2010
 *  -Switched from an 8:11:13 split for page table lookup to 11:8:13
 *  -this speeds up page table allocation itself as we now have to memset 1K
 *   instead of 8K per page table.
 *  -TODO: Right now page table alloc is 8K and the rest 7K is unused;
 *   need to optimise it
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H

#include <asm/page.h>
#include <asm/mmu.h>
#include <asm-generic/pgtable-nopmd.h>

/**************************************************************************
 * Page Table Flags
 *
 * ARC700 MMU only deals with software managed TLB entries.
 * Page Tables are purely for Linux VM's consumption and the bits below are
 * suited to that (uniqueness). Hence some are not implemented in the TLB and
 * some have different values in the TLB.
 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live
 *      in separate PD0 and PD1, which combined form a translation entry)
 *      while from the PTE perspective, they are 8 and 9 respectively.
 * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
 *      (saves some bit shift ops in TLB Miss handlers)
 */

#if (CONFIG_ARC_MMU_VER <= 2)

#define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
#define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
#define _PAGE_MODIFIED      (1<<6)	/* Page modified (dirty) (S) */
#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */

#else	/* MMU v3 onwards */

#define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
#define _PAGE_MODIFIED      (1<<5)	/* Page modified (dirty) (S) */

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_WTHRU         (1<<7)	/* Page cache mode write-thru (H) */
#endif

#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_SZ            (1<<10)	/* Page Size indicator (H) */
#endif

#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
					   usable for shared TLB entries (H) */
#endif

/* vmalloc permissions */
#define _K_PAGE_PERMS	(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
			 _PAGE_GLOBAL | _PAGE_PRESENT)

#ifdef CONFIG_ARC_CACHE_PAGES
#define _PAGE_DEF_CACHEABLE	_PAGE_CACHEABLE
#else
#define _PAGE_DEF_CACHEABLE	(0)
#endif

/* Helper for every "user" page
 * -kernel can R/W/X
 * -cached by default, unless configured otherwise
 * -present in memory
 */
#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)

/* More abbreviated helpers */
#define PAGE_U_NONE	__pgprot(___DEF)
#define PAGE_U_R	__pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R	__pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R	__pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R	__pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
						       _PAGE_EXECUTE)

#define PAGE_SHARED	PAGE_U_W_R
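
/*
 * Worked example (illustrative only), assuming the MMU v3 bit positions
 * above and CONFIG_ARC_CACHE_PAGES=y (so _PAGE_DEF_CACHEABLE is
 * _PAGE_CACHEABLE):
 *
 *	PAGE_U_X_R = _PAGE_PRESENT | _PAGE_CACHEABLE
 *		     | _PAGE_READ  | _PAGE_EXECUTE
 *		   = (1<<9) | (1<<0) | (1<<3) | (1<<1)
 *		   = 0x20B
 *
 * i.e. a user read+exec page: valid in the TLB, cached, not writable.
 */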

/* Since the kernel runs out of untranslated space, vmalloc/modules use a
 * chunk of user vaddr space - visible in all addr spaces, but kernel mode only
 * Thus Global, all-kernel-access, no-user-access, cached
 */
#define PAGE_KERNEL		__pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)

/* ioremap */
#define PAGE_KERNEL_NO_CACHE	__pgprot(_K_PAGE_PERMS)

/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT)
#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
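
/*
 * A sketch of how the TLB refill path might use these masks (illustrative
 * pseudo-C; the real code lives in the TLB Miss handlers): the PTE is
 * split across the two halves of a hardware translation entry, PD0
 * (virtual side) and PD1 (physical side):
 *
 *	pd0 = (vaddr & PAGE_MASK) | (pte & PTE_BITS_IN_PD0);
 *	pd1 = pte & (PTE_BITS_RWX | PTE_BITS_NON_RWX_IN_PD1);
 */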

/**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have 1:1 mapping
 * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 * which directly corresponds to PAGE_U_X_R
 *
 * Other rules which cause the divergence from 1:1 mapping
 *
 * 1. Although ARC700 can do exclusive execute/write protection (meaning R
 *    can be tracked independently of X/W, unlike some other CPUs), still to
 *    keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 * 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *    This is to enable the COW mechanism (see the worked example after the
 *    tables below)
 */
	/* xwr */
#define __P000	PAGE_U_NONE
#define __P001	PAGE_U_R
#define __P010	PAGE_U_R	/* Pvt-W => !W */
#define __P011	PAGE_U_R	/* Pvt-W => !W */
#define __P100	PAGE_U_X_R	/* X => R */
#define __P101	PAGE_U_X_R
#define __P110	PAGE_U_X_R	/* Pvt-W => !W and X => R */
#define __P111	PAGE_U_X_R	/* Pvt-W => !W */

#define __S000	PAGE_U_NONE
#define __S001	PAGE_U_R
#define __S010	PAGE_U_W_R	/* W => R */
#define __S011	PAGE_U_W_R
#define __S100	PAGE_U_X_R	/* X => R */
#define __S101	PAGE_U_X_R
#define __S110	PAGE_U_X_W_R	/* X => R */
#define __S111	PAGE_U_X_W_R
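
/*
 * Worked example of the COW rule above (illustrative): a private
 * PROT_READ|PROT_WRITE mapping is "wr" without VM_SHARED, i.e. __P011,
 * which maps to PAGE_U_R - note no _PAGE_WRITE. The first store thus
 * faults; the generic fault path copies the page for this process and
 * only then sets _PAGE_WRITE (via pte_mkwrite() below) on the private
 * copy. A shared mapping with the same prot is __S011 = PAGE_U_W_R and
 * writes go straight through.
 */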

/****************************************************************
 * Page Table Lookup split
 *
 * We implement 2 tier paging and since this is all software, we are free
 * to customize the span of a PGD / PTE entry to suit us
 *
 *			32 bit virtual address
 * -------------------------------------------------------
 * | BITS_FOR_PGD    |  BITS_FOR_PTE    |  BITS_IN_PAGE  |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  |
 *       |                  ---> index into Page Table
 *       |
 *       ----> index into Page Directory
 */

#define BITS_IN_PAGE	PAGE_SHIFT

/* Optimal Sizing of Pg Tbl - based on MMU page size */
#if defined(CONFIG_ARC_PAGE_SIZE_8K)
#define BITS_FOR_PTE	8
#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
#define BITS_FOR_PTE	8
#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
#define BITS_FOR_PTE	9
#endif

#define BITS_FOR_PGD	(32 - BITS_FOR_PTE - BITS_IN_PAGE)

#define PGDIR_SHIFT	(BITS_FOR_PTE + BITS_IN_PAGE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* vaddr span, not PGD sz */
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
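
/*
 * Worked example (illustrative), assuming CONFIG_ARC_PAGE_SIZE_8K:
 *
 *	BITS_IN_PAGE = PAGE_SHIFT = 13
 *	BITS_FOR_PTE = 8             -> PTRS_PER_PTE = 256 (1K per pg tbl)
 *	BITS_FOR_PGD = 32-8-13 = 11  -> PTRS_PER_PGD = 2048
 *	PGDIR_SHIFT  = 8+13 = 21     -> each PGD entry spans 2MB of vaddr
 *
 * This is the 11:8:13 split mentioned in the changelog above: a page
 * table is only 256 entries * 4 bytes = 1K to memset, vs. 8K with the
 * older 8:11:13 split.
 */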

#ifdef __ASSEMBLY__
#define	PTRS_PER_PTE	(1 << BITS_FOR_PTE)
#define	PTRS_PER_PGD	(1 << BITS_FOR_PGD)
#else
#define	PTRS_PER_PTE	(1UL << BITS_FOR_PTE)
#define	PTRS_PER_PGD	(1UL << BITS_FOR_PGD)
#endif
/*
 * Number of PGD entries a userland program uses.
 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
 */
#define	USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/*
 * No special requirement for the lowest virtual address we permit any user
 * space mapping to be mapped at.
 */
#define FIRST_USER_ADDRESS	0UL


/****************************************************************
 * Bucket load of VM Helpers
 */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)

/* find the page descriptor of the Page Tbl ref by PMD entry */
#define pmd_page(pmd)		virt_to_page(pmd_val(pmd) & PAGE_MASK)

/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)

/* In a 2 level sys, setup the PGD entry with PTE value */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long)ptep;
}
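
/*
 * Usage sketch (illustrative, hypothetical caller): the page table
 * allocator wires a freshly zeroed table into its PGD slot simply as
 *
 *	pmd_set(pmdp, ptep);
 *
 * Per the April 2010 note at the top of this file, the entry is a bare
 * pointer: no flags, so pmd_present()/pmd_clear() below reduce to plain
 * non-zero/zero checks.
 */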

#define pte_none(x)			(!pte_val(x))
#define pte_present(x)			(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep)	set_pte_at(mm, addr, ptep, __pte(0))

#define pmd_none(x)			(!pmd_val(x))
#define	pmd_bad(x)			((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)			(pmd_val(x))
#define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(x) (mem_map + \
		(unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
				PAGE_SHIFT)))

#define mk_pte(page, pgprot)						\
({									\
	pte_t pte;							\
	pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot);	\
	pte;								\
})

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/*
 * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
 * and returns ptr to PTE entry corresponding to @addr
 */
#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
					 __pte_index(addr))

/* No mapping of Page Tables in high mem etc, so following same as above */
#define pte_offset_kernel(dir, addr)	pte_offset(dir, addr)
#define pte_offset_map(dir, addr)	pte_offset(dir, addr)
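
/*
 * Putting the pieces together - a sketch of a full 2-level lookup for
 * @addr in @mm (illustrative only; "lookup" is a hypothetical helper,
 * and pgd_offset() is defined further below):
 *
 *	pte_t *lookup(struct mm_struct *mm, unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset(mm, addr);
 *
 *		if (pmd_none(*(pmd_t *)pgd))	// empty PGD slot: no pg tbl
 *			return NULL;
 *		return pte_offset((pmd_t *)pgd, addr);
 *	}
 */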

/* Zoo of pte_xxx functions */
#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_MODIFIED)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)	(0)

#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect,	&= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,	|= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_MODIFIED));
PTE_BIT_FUNC(mkdirty,	|= (_PAGE_MODIFIED));
PTE_BIT_FUNC(mkold,	&= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,	|= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect,	&= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec,	|= (_PAGE_EXECUTE));
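
/*
 * For clarity, one expansion spelled out: PTE_BIT_FUNC(mkdirty, ...)
 * above generates exactly
 *
 *	static inline pte_t pte_mkdirty(pte_t pte)
 *	{
 *		pte_val(pte) |= (_PAGE_MODIFIED);
 *		return pte;
 *	}
 *
 * i.e. these helpers all take a pte by value and return a modified copy.
 */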

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
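
/*
 * Worked example (illustrative): mprotect() dropping write permission
 * funnels into pte_modify(pte, PAGE_U_R). _PAGE_CHG_MASK keeps the PFN
 * (the PAGE_MASK part) plus _PAGE_ACCESSED/_PAGE_MODIFIED, so young/dirty
 * tracking survives, while _PAGE_WRITE and the other old protection bits
 * are replaced by the new pgprot.
 */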

/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

/*
 * All kernel related VM pages are in init's mm.
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr)	(((mm)->pgd) + pgd_index(addr))

/*
 * Macro to quickly access the PGD entry, utilising the fact that some
 * arch may cache the pointer to the Page Directory of the "current" task
 * in an MMU register
 *
 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
 * becomes reading a register
 *
 * ********CAUTION*******:
 * Kernel code might be dealing with the mm_struct of a NON "current" task.
 * Thus use this macro only when you are certain that "current" is current,
 * e.g. when dealing with signal frame setup code etc.
 */
#ifndef CONFIG_SMP
#define pgd_offset_fast(mm, addr)	\
({					\
	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
	pgd_base + pgd_index(addr);	\
})
#else
#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
#endif
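
/*
 * Usage sketch (illustrative): in a path known to run in the context of
 * the current/faulting task, e.g.
 *
 *	pgd_t *pgd = pgd_offset_fast(current->mm, addr);
 *
 * On !SMP this reads the cached PGD base out of the SCRATCH_DATA0 aux reg
 * instead of chasing current->mm->pgd; on SMP the scratch-register scheme
 * isn't used, hence the fallback to plain pgd_offset().
 */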

extern void paging_init(void);
extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep);

/* Encode swap {type,off} tuple into PTE
 * We reserve the 13 LSBs for the 5-bit @type, keeping bits 12-5 zero,
 * ensuring that _PAGE_PRESENT is zero in a PTE holding a swap "identifier"
 */
#define __swp_entry(type, off)	((swp_entry_t) { \
					((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing a swap "identifier" into its constituents */
#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)

/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
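
/*
 * Worked example (illustrative): __swp_entry(2, 0x100) yields
 *
 *	val = (2 & 0x1f) | (0x100 << 13) = 0x200002
 *
 * __swp_type() recovers 2, __swp_offset() recovers 0x100, and bits 12-5
 * stay zero, so _PAGE_PRESENT (bit 9 on MMU v3, bit 10 on v2) is clear -
 * the MM layer cannot mistake this for a resident page.
 */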

#define kern_addr_valid(addr)	(1)

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#include <asm-generic/pgtable.h>

/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()   do { } while (0)

#endif /* __ASSEMBLY__ */

#endif