/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-prototypes.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/mm_types.h>

#include <asm/udbg.h>
#include <asm/code-patching.h>

enum slb_index {
	LINEAR_INDEX	= 0, /* Kernel linear map (0xc000000000000000) */
	KSTACK_INDEX	= 1, /* Kernel stack map */
};

static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);

#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)

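/*
 * Build the ESID doubleword of an SLB entry: the effective address
 * truncated to its segment boundary, the valid bit, and the SLB slot
 * index (kept in the low bits, so slbmte targets that slot).
 */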
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 enum slb_index index)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}

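/*
 * Build the VSID doubleword of an SLB entry: the VSID shifted into
 * position for the segment size, the protection/LLP flags, and the
 * segment size encoding.
 */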
static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
					   unsigned long flags)
{
	return (vsid << slb_vsid_shift(ssize)) | flags |
		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
}

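/*
 * Sanity check (under CONFIG_DEBUG_VM) that an SLB entry for @ea is or
 * is not present, by probing the SLB with slbfee. Must be called with
 * interrupts hard-disabled so the SLB cannot change underneath us.
 */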
static void assert_slb_presence(bool present, unsigned long ea)
{
#ifdef CONFIG_DEBUG_VM
	unsigned long tmp;

	WARN_ON_ONCE(mfmsr() & MSR_EE);

	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		return;

	asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");

	WARN_ON(present == (tmp == 0));
#endif
}

static inline void slb_shadow_update(unsigned long ea, int ssize,
				     unsigned long flags,
				     enum slb_index index)
{
	struct slb_shadow *p = get_slb_shadow();

	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it. No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	WRITE_ONCE(p->save_area[index].esid, 0);
	WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
	WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
}

static inline void slb_shadow_clear(enum slb_index index)
{
	WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index));
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
					unsigned long flags,
					enum slb_index index)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, ssize, flags, index);

	assert_slb_presence(false, ea);
	asm volatile("slbmte %0,%1" :
		     : "r" (mk_vsid_data(ea, ssize, flags)),
		       "r" (mk_esid_data(ea, ssize, index))
		     : "memory" );
}

/*
 * Insert bolted entries into the SLB (which may not be empty, so don't
 * clear slb_cache_ptr).
 */
void __slb_restore_bolted_realmode(void)
{
	struct slb_shadow *p = get_slb_shadow();
	enum slb_index index;

	/* No isync needed because we are in real mode. */
	for (index = 0; index < SLB_NUM_BOLTED; index++) {
		asm volatile("slbmte %0,%1" :
		     : "r" (be64_to_cpu(p->save_area[index].vsid)),
		       "r" (be64_to_cpu(p->save_area[index].esid)));
	}

	assert_slb_presence(true, local_paca->kstack);
}

/*
 * Insert the bolted entries into an empty SLB.
 */
void slb_restore_bolted_realmode(void)
{
	__slb_restore_bolted_realmode();
	get_paca()->slb_cache_ptr = 0;

	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
}

/*
 * This flushes all SLB entries including 0, so it must be called in
 * real mode.
 */
void slb_flush_all_realmode(void)
{
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}

/*
 * This flushes non-bolted entries, and can be run in virtual mode. Must
 * be called with interrupts disabled.
 */
void slb_flush_and_restore_bolted(void)
{
	struct slb_shadow *p = get_slb_shadow();

	BUILD_BUG_ON(SLB_NUM_BOLTED != 2);

	WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	asm volatile("isync\n"
		     "slbia\n"
		     "slbmte %0, %1\n"
		     "isync\n"
		     :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
			"r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
		     : "memory");
	assert_slb_presence(true, get_paca()->kstack);

	get_paca()->slb_cache_ptr = 0;

	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
}

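/*
 * Save a copy of the current SLB entries (and slb_cache_ptr) into the
 * buffer passed in, so they can later be reported by
 * slb_dump_contents(), e.g. from error paths that want to capture SLB
 * state at the time of a failure.
 */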
void slb_save_contents(struct slb_entry *slb_ptr)
{
	int i;
	unsigned long e, v;

	/* Save slb_cache_ptr value. */
	get_paca()->slb_save_cache_ptr = get_paca()->slb_cache_ptr;

	if (!slb_ptr)
		return;

	for (i = 0; i < mmu_slb_size; i++) {
		asm volatile("slbmfee %0,%1" : "=r" (e) : "r" (i));
		asm volatile("slbmfev %0,%1" : "=r" (v) : "r" (i));
		slb_ptr->esid = e;
		slb_ptr->vsid = v;
		slb_ptr++;
	}
}

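/*
 * Dump the SLB entries previously saved by slb_save_contents(), along
 * with the SLB cache from the PACA, decoding the segment size, ESID,
 * VSID and LLP bits of each valid entry.
 */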
void slb_dump_contents(struct slb_entry *slb_ptr)
{
	int i, n;
	unsigned long e, v;
	unsigned long llp;

	if (!slb_ptr)
		return;

	pr_err("SLB contents of cpu 0x%x\n", smp_processor_id());
	pr_err("Last SLB entry inserted at slot %d\n", get_paca()->stab_rr);

	for (i = 0; i < mmu_slb_size; i++) {
		e = slb_ptr->esid;
		v = slb_ptr->vsid;
		slb_ptr++;

		if (!e && !v)
			continue;

		pr_err("%02d %016lx %016lx\n", i, e, v);

		if (!(e & SLB_ESID_V)) {
			pr_err("\n");
			continue;
		}
		llp = v & SLB_VSID_LLP;
		if (v & SLB_VSID_B_1T) {
			pr_err(" 1T ESID=%9lx VSID=%13lx LLP:%3lx\n",
			       GET_ESID_1T(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp);
		} else {
			pr_err(" 256M ESID=%9lx VSID=%13lx LLP:%3lx\n",
			       GET_ESID(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp);
		}
	}
	pr_err("----------------------------------\n");

	/* Dump slb cache entries as well. */
	pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr);
	pr_err("Valid SLB cache entries:\n");
	n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES);
	for (i = 0; i < n; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
	pr_err("Rest of SLB cache entries:\n");
	for (i = n; i < SLB_CACHE_ENTRIES; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
}

void slb_vmalloc_update(void)
{
	/*
	 * vmalloc is not bolted, so we just have to flush the non-bolted
	 * entries.
	 */
	slb_flush_and_restore_bolted();
}

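/*
 * Check whether an ESID is already present in the thread's SLB preload
 * cache.
 */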
static bool preload_hit(struct thread_info *ti, unsigned long esid)
{
	unsigned char i;

	for (i = 0; i < ti->slb_preload_nr; i++) {
		unsigned char idx;

		idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
		if (esid == ti->slb_preload_esid[idx])
			return true;
	}
	return false;
}

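/*
 * Add an EA's ESID to the preload cache. Returns true if it was newly
 * added (so the caller should load an SLB entry for it), false if it
 * was already present. When the cache is full, the oldest entry is
 * dropped to make room.
 */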
static bool preload_add(struct thread_info *ti, unsigned long ea)
{
	unsigned char idx;
	unsigned long esid;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
		/* EAs are stored >> 28 so 256MB segments don't need clearing */
		if (ea & ESID_MASK_1T)
			ea &= ESID_MASK_1T;
	}

	esid = ea >> SID_SHIFT;

	if (preload_hit(ti, esid))
		return false;

	idx = (ti->slb_preload_tail + ti->slb_preload_nr) % SLB_PRELOAD_NR;
	ti->slb_preload_esid[idx] = esid;
	if (ti->slb_preload_nr == SLB_PRELOAD_NR)
		ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
	else
		ti->slb_preload_nr++;

	return true;
}

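/* Age out (drop) the oldest entry in the preload cache. */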
static void preload_age(struct thread_info *ti)
{
	if (!ti->slb_preload_nr)
		return;
	ti->slb_preload_nr--;
	ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
}

void slb_setup_new_exec(void)
{
	struct thread_info *ti = current_thread_info();
	struct mm_struct *mm = current->mm;
	unsigned long exec = 0x10000000;

	WARN_ON(irqs_disabled());

	/*
	 * The preload cache can only be used to determine whether an SLB
	 * entry exists if it does not start to overflow.
	 */
	if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR)
		return;

	hard_irq_disable();

	/*
	 * We have no good place to clear the slb preload cache on exec,
	 * flush_thread is about the earliest arch hook but that happens
	 * after we switch to the mm and have already preloaded the SLBEs.
	 *
	 * For the most part that's probably okay to use entries from the
	 * previous exec, they will age out if unused. It may turn out to
	 * be an advantage to clear the cache before switching to it,
	 * however.
	 */

	/*
	 * Preload some userspace segments into the SLB.
	 * Almost all 32-bit and 64-bit PowerPC executables are linked at
	 * 0x10000000, so it makes sense to preload this segment.
	 */
	if (!is_kernel_addr(exec)) {
		if (preload_add(ti, exec))
			slb_allocate_user(mm, exec);
	}

	/* Libraries and mmaps. */
	if (!is_kernel_addr(mm->mmap_base)) {
		if (preload_add(ti, mm->mmap_base))
			slb_allocate_user(mm, mm->mmap_base);
	}

	/* see switch_slb */
	asm volatile("isync" : : : "memory");

	local_irq_enable();
}

void preload_new_slb_context(unsigned long start, unsigned long sp)
{
	struct thread_info *ti = current_thread_info();
	struct mm_struct *mm = current->mm;
	unsigned long heap = mm->start_brk;

	WARN_ON(irqs_disabled());

	/* see above */
	if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR)
		return;

	hard_irq_disable();

	/* Userspace entry address. */
	if (!is_kernel_addr(start)) {
		if (preload_add(ti, start))
			slb_allocate_user(mm, start);
	}

	/* Top of stack, grows down. */
	if (!is_kernel_addr(sp)) {
		if (preload_add(ti, sp))
			slb_allocate_user(mm, sp);
	}

	/* Bottom of heap, grows up. */
	if (heap && !is_kernel_addr(heap)) {
		if (preload_add(ti, heap))
			slb_allocate_user(mm, heap);
	}

	/* see switch_slb */
	asm volatile("isync" : : : "memory");

	local_irq_enable();
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	struct thread_info *ti = task_thread_info(tsk);
	unsigned char i;

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause an SLB miss
	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
	 */
	hard_irq_disable();
	asm volatile("isync" : : : "memory");
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		/*
		 * SLBIA IH=3 invalidates all Class=1 SLBEs and their
		 * associated lookaside structures, which matches what
		 * switch_slb wants. So ARCH_300 does not use the slb
		 * cache.
		 */
		asm volatile(PPC_SLBIA(3));
	} else {
		unsigned long offset = get_paca()->slb_cache_ptr;

		if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
		    offset <= SLB_CACHE_ENTRIES) {
			unsigned long slbie_data = 0;

			for (i = 0; i < offset; i++) {
				unsigned long ea;

				ea = (unsigned long)
					get_paca()->slb_cache[i] << SID_SHIFT;
				/*
				 * Could assert_slb_presence(true) here, but
				 * hypervisor or machine check could have come
				 * in and removed the entry at this point.
				 */

				slbie_data = ea;
				slbie_data |= user_segment_size(slbie_data)
						<< SLBIE_SSIZE_SHIFT;
				slbie_data |= SLBIE_C; /* user slbs have C=1 */
				asm volatile("slbie %0" : : "r" (slbie_data));
			}

			/* Workaround POWER5 < DD2.1 issue */
			if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1)
				asm volatile("slbie %0" : : "r" (slbie_data));

		} else {
			struct slb_shadow *p = get_slb_shadow();
			unsigned long ksp_esid_data =
				be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
			unsigned long ksp_vsid_data =
				be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);

			asm volatile(PPC_SLBIA(1) "\n"
				     "slbmte %0,%1\n"
				     "isync"
				     :: "r"(ksp_vsid_data),
					"r"(ksp_esid_data));

			get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
		}

		get_paca()->slb_cache_ptr = 0;
	}
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;

	copy_mm_to_paca(mm);

	/*
	 * We gradually age out SLBs after a number of context switches to
	 * reduce reload overhead of unused entries (like we do with FP/VEC
	 * reload). Each time we wrap 256 switches, take an entry out of the
	 * SLB preload cache.
	 */
	tsk->thread.load_slb++;
	if (!tsk->thread.load_slb) {
		unsigned long pc = KSTK_EIP(tsk);

		preload_age(ti);
		preload_add(ti, pc);
	}

	for (i = 0; i < ti->slb_preload_nr; i++) {
		unsigned char idx;
		unsigned long ea;

		idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
		ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT;

		slb_allocate_user(mm, ea);
	}

	/*
	 * Synchronize slbmte preloads with possible subsequent user memory
	 * address accesses by the kernel (user mode won't happen until
	 * rfid, which is safe).
	 */
	asm volatile("isync" : : : "memory");
}

void slb_set_size(u16 size)
{
	mmu_slb_size = size;
}

void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags;
	static int slb_encoding_inited;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	unsigned long vmemmap_llp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		pr_devel("SLB: linear LLP = %04lx\n", linear_llp);
		pr_devel("SLB: io LLP = %04lx\n", io_llp);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED - 1;
	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;

	lflags = SLB_VSID_KERNEL | linear_llp;

	/* Invalidate the entire SLB (even entry 0) & all the ERATs */
	asm volatile("isync":::"memory");
	asm volatile("slbmte %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);

	/*
	 * For the boot cpu, we're running on the stack in init_thread_union,
	 * which is in the first segment of the linear mapping, and also
	 * get_paca()->kstack hasn't been initialized yet.
	 * For secondary cpus, we need to bolt the kernel stack entry now.
	 */
	slb_shadow_clear(KSTACK_INDEX);
	if (raw_smp_processor_id() != boot_cpuid &&
	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
		create_shadowed_slbe(get_paca()->kstack,
				     mmu_kernel_ssize, lflags, KSTACK_INDEX);

	asm volatile("isync":::"memory");
}

static void slb_cache_update(unsigned long esid_data)
{
	int slb_cache_index;

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return; /* ISAv3.0B and later does not use slb_cache */

	/*
	 * Now update slb cache entries
	 */
	slb_cache_index = local_paca->slb_cache_ptr;
	if (slb_cache_index < SLB_CACHE_ENTRIES) {
		/*
		 * We have space in slb cache for optimized switch_slb().
		 * Top 36 bits from esid_data as per ISA
		 */
		local_paca->slb_cache[slb_cache_index++] = esid_data >> 28;
		local_paca->slb_cache_ptr++;
	} else {
		/*
		 * Our cache is full and the current cache content strictly
		 * doesn't indicate the active SLB contents. Bump the ptr
		 * so that switch_slb() will ignore the cache.
		 */
		local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	}
}

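/*
 * Pick an SLB slot for a new entry: take a free slot from the used
 * bitmap while one exists, otherwise fall back to round-robin
 * replacement starting at SLB_NUM_BOLTED.
 */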
static enum slb_index alloc_slb_index(bool kernel)
{
	enum slb_index index;

	/*
	 * The allocation bitmaps can become out of synch with the SLB
	 * when the _switch code does slbie when bolting a new stack
	 * segment and it must not be anywhere else in the SLB. This leaves
	 * a kernel allocated entry that is unused in the SLB. With very
	 * large systems or small segment sizes, the bitmaps could slowly
	 * fill with these entries. They will eventually be cleared out
	 * by the round robin allocator in that case, so it's probably not
	 * worth accounting for.
	 */

	/*
	 * SLBs beyond 32 entries are allocated with stab_rr only.
	 * POWER7/8/9 have 32 SLB entries, but this could be expanded if a
	 * future CPU has more.
	 */
	if (local_paca->slb_used_bitmap != U32_MAX) {
		index = ffz(local_paca->slb_used_bitmap);
		local_paca->slb_used_bitmap |= 1U << index;
		if (kernel)
			local_paca->slb_kern_bitmap |= 1U << index;
	} else {
		/* round-robin replacement of slb starting at SLB_NUM_BOLTED. */
		index = local_paca->stab_rr;
		if (index < (mmu_slb_size - 1))
			index++;
		else
			index = SLB_NUM_BOLTED;
		local_paca->stab_rr = index;
		if (index < 32) {
			if (kernel)
				local_paca->slb_kern_bitmap |= 1U << index;
			else
				local_paca->slb_kern_bitmap &= ~(1U << index);
		}
	}
	BUG_ON(index < SLB_NUM_BOLTED);

	return index;
}

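/*
 * Build and install an SLB entry for @ea in the slot returned by
 * alloc_slb_index(), recording user entries in the SLB cache.
 * Returns -EFAULT if no VSID can be found for the address.
 */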
static long slb_insert_entry(unsigned long ea, unsigned long context,
			     unsigned long flags, int ssize, bool kernel)
{
	unsigned long vsid;
	unsigned long vsid_data, esid_data;
	enum slb_index index;

	vsid = get_vsid(context, ea, ssize);
	if (!vsid)
		return -EFAULT;

	/*
	 * There must not be a kernel SLB fault in alloc_slb_index or before
	 * slbmte here or the allocation bitmaps could get out of whack with
	 * the SLB.
	 *
	 * User SLB faults or preloads take this path which might get inlined
	 * into the caller, so add compiler barriers here to ensure unsafe
	 * memory accesses do not come between.
	 */
	barrier();

	index = alloc_slb_index(kernel);

	vsid_data = __mk_vsid_data(vsid, ssize, flags);
	esid_data = mk_esid_data(ea, ssize, index);

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 * User preloads should add isync afterwards in case the kernel
	 * accesses user memory before it returns to userspace with rfid.
	 */
	assert_slb_presence(false, ea);
	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));

	barrier();

	if (!kernel)
		slb_cache_update(esid_data);

	return 0;
}

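/*
 * Handle an SLB miss on a kernel address: select the protection flags
 * and segment size for the region (linear map, vmemmap, vmalloc or
 * ioremap) and insert the entry.
 */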
static long slb_allocate_kernel(unsigned long ea, unsigned long id)
{
	unsigned long context;
	unsigned long flags;
	int ssize;

	if (id == KERNEL_REGION_ID) {

		/* We only support up to MAX_PHYSMEM_BITS */
		if ((ea & ~REGION_MASK) > (1UL << MAX_PHYSMEM_BITS))
			return -EFAULT;

		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp;

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	} else if (id == VMEMMAP_REGION_ID) {

		if ((ea & ~REGION_MASK) >= (1ULL << MAX_EA_BITS_PER_CONTEXT))
			return -EFAULT;

		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	} else if (id == VMALLOC_REGION_ID) {

		if ((ea & ~REGION_MASK) >= (1ULL << MAX_EA_BITS_PER_CONTEXT))
			return -EFAULT;

		if (ea < H_VMALLOC_END)
			flags = local_paca->vmalloc_sllp;
		else
			flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
	} else {
		return -EFAULT;
	}

	ssize = MMU_SEGSIZE_1T;
	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
		ssize = MMU_SEGSIZE_256M;

	context = get_kernel_context(ea);
	return slb_insert_entry(ea, context, flags, ssize, true);
}

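/*
 * Handle an SLB miss on a user address: look up the context and slice
 * page size for the address, then insert the entry.
 */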
static long slb_allocate_user(struct mm_struct *mm, unsigned long ea)
{
	unsigned long context;
	unsigned long flags;
	int bpsize;
	int ssize;

	/*
	 * Consider this a bad access if we take an SLB miss
	 * on an address above the addr limit.
	 */
	if (ea >= mm->context.slb_addr_limit)
		return -EFAULT;

	context = get_user_context(&mm->context, ea);
	if (!context)
		return -EFAULT;

	if (unlikely(ea >= H_PGTABLE_RANGE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	ssize = user_segment_size(ea);

	bpsize = get_slice_psize(mm, ea);
	flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;

	return slb_insert_entry(ea, context, flags, ssize, false);
}

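/*
 * Resolve an SLB miss for @ea. Returns 0 on success, or a negative
 * error which do_bad_slb_fault() turns into a signal or exception.
 */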
long do_slb_fault(struct pt_regs *regs, unsigned long ea)
{
	unsigned long id = REGION_ID(ea);

	/* IRQs are not reconciled here, so can't check irqs_disabled */
	VM_WARN_ON(mfmsr() & MSR_EE);

	if (unlikely(!(regs->msr & MSR_RI)))
		return -EINVAL;

	/*
	 * SLB kernel faults must be very careful not to touch anything
	 * that is not bolted. E.g., PACA and global variables are okay,
	 * mm->context stuff is not.
	 *
	 * SLB user faults can access all of kernel memory, but must be
	 * careful not to touch things like IRQ state because it is not
	 * "reconciled" here. The difficulty is that we must use
	 * fast_exception_return to return from kernel SLB faults without
	 * looking at possible non-bolted memory. We could test user vs
	 * kernel faults in the interrupt handler asm and do a full fault,
	 * reconcile, ret_from_except for user faults which would make them
	 * first class kernel code. But for performance it's probably nicer
	 * if they go via fast_exception_return too.
	 */
	if (id >= KERNEL_REGION_ID) {
		long err;
#ifdef CONFIG_DEBUG_VM
		/* Catch recursive kernel SLB faults. */
		BUG_ON(local_paca->in_kernel_slb_handler);
		local_paca->in_kernel_slb_handler = 1;
#endif
		err = slb_allocate_kernel(ea, id);
#ifdef CONFIG_DEBUG_VM
		local_paca->in_kernel_slb_handler = 0;
#endif
		return err;
	} else {
		struct mm_struct *mm = current->mm;
		long err;

		if (unlikely(!mm))
			return -EFAULT;

		err = slb_allocate_user(mm, ea);
		if (!err)
			preload_add(current_thread_info(), ea);

		return err;
	}
}

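/*
 * Report a failed SLB fault: -EFAULT becomes SIGSEGV (or a kernel bad
 * page fault), while -EINVAL (MSR_RI was clear) is unrecoverable.
 */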
void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err)
{
	if (err == -EFAULT) {
		if (user_mode(regs))
			_exception(SIGSEGV, regs, SEGV_BNDERR, ea);
		else
			bad_page_fault(regs, ea, SIGSEGV);
	} else if (err == -EINVAL) {
		unrecoverable_exception(regs);
	} else {
		BUG();
	}
}