// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/i386/mm/fault.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Modified by Cort Dougan and Paul Mackerras.
 *
 * Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>
#include <linux/kfence.h>
#include <linux/pkeys.h>

#include <asm/firmware.h>
#include <asm/interrupt.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
#include <asm/kup.h>
#include <asm/inst.h>


/*
 * do_page_fault error handling helpers
 */

static int
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code)
{
	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception(SIGSEGV, regs, si_code, address);

	return 0;
}

static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address)
{
	return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
}

static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check whether it's kernel or user first.
	 */
	mmap_read_unlock(mm);

	return __bad_area_nosemaphore(regs, address, si_code);
}

static noinline int bad_area(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_MAPERR);
}

static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
				    struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	int pkey;

	/*
	 * We don't try to fetch the pkey from the page table because reading
	 * the page table without locking doesn't guarantee a stable pte value.
	 * Hence the pkey value that we return to userspace can be different
	 * from the pkey that actually caused the access error.
	 *
	 * It does *not* guarantee that the VMA we find here
	 * was the one that we faulted on.
	 *
	 * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4);
	 * 2. T1 : set AMR to deny access to pkey=4, touches page
	 * 3. T1 : faults...
	 * 4. T2 : mprotect_key(foo, PAGE_SIZE, pkey=5);
	 * 5. T1 : enters fault handler, takes mmap_lock, etc...
	 * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
	 *	   faulted on a pte with its pkey=4.
	 */
	pkey = vma_pkey(vma);

	mmap_read_unlock(mm);

	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception_pkey(regs, address, pkey);

	return 0;
}

static noinline int bad_access(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_ACCERR);
}

static int do_sigbus(struct pt_regs *regs, unsigned long address,
		     vm_fault_t fault)
{
	if (!user_mode(regs))
		return SIGBUS;

	current->thread.trap_nr = BUS_ADRERR;
#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		unsigned int lsb = 0; /* shutup gcc */

		pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			current->comm, current->pid, address);

		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
		if (fault & VM_FAULT_HWPOISON)
			lsb = PAGE_SHIFT;

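		/*
		 * lsb is delivered to userspace as si_addr_lsb: the least
		 * significant bit of the reported address, i.e. the size of
		 * the poisoned region (huge page or base page).
		 */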
		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
		return 0;
	}

#endif
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
	return 0;
}

static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
			  vm_fault_t fault)
{
	/*
	 * Kernel page fault interrupted by SIGKILL. We have no reason to
	 * continue processing.
	 */
	if (fatal_signal_pending(current) && !user_mode(regs))
		return SIGKILL;

	/* Out of memory */
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, or some other thing happened to us that
		 * made us unable to handle the page fault gracefully.
		 */
		if (!user_mode(regs))
			return SIGSEGV;
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			return do_sigbus(regs, addr, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			return bad_area_nosemaphore(regs, addr);
		else
			BUG();
	}
	return 0;
}

/* Is this a bad kernel fault? */
static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
			     unsigned long address, bool is_write)
{
	int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;

	if (is_exec) {
		pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
				    address >= TASK_SIZE ? "exec-protected" : "user",
				    address,
				    from_kuid(&init_user_ns, current_uid()));

		// Kernel exec fault is always bad
		return true;
	}

	// Kernel fault on kernel address is bad
	if (address >= TASK_SIZE)
		return true;

	// Read/write fault blocked by KUAP is bad, it can never succeed.
	if (bad_kuap_fault(regs, address, is_write)) {
		pr_crit_ratelimited("Kernel attempted to %s user page (%lx) - exploit attempt? (uid: %d)\n",
				    is_write ? "write" : "read", address,
				    from_kuid(&init_user_ns, current_uid()));

		// Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad
		if (!search_exception_tables(regs->nip))
			return true;

		// Read/write fault in a valid region (the exception table search passed
		// above), but blocked by KUAP is bad, it can never succeed.
		return WARN(true, "Bug: %s fault blocked by KUAP!", is_write ? "Write" : "Read");
	}

	// What's left? Kernel fault on user and allowed by KUAP in the faulting context.
	return false;
}

static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey,
			      struct vm_area_struct *vma)
{
	/*
	 * Make sure to check the VMA so that we do not perform
	 * faults just to hit a pkey fault as soon as we fill in a
	 * page. Only called for current mm, hence foreign == 0
	 */
	if (!arch_vma_access_permitted(vma, is_write, is_exec, 0))
		return true;

	return false;
}

static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma)
{
	/*
	 * Allow execution from readable areas if the MMU does not
	 * provide separate controls over reading and executing.
	 *
	 * Note: That code used to not be enabled for 4xx/BookE.
	 * It is now as I/D cache coherency for these is done at
	 * set_pte_at() time and I see no reason why the test
	 * below wouldn't be valid on those processors. This -may-
	 * break programs compiled with a really old ABI though.
	 */
	if (is_exec) {
		return !(vma->vm_flags & VM_EXEC) &&
			(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
			 !(vma->vm_flags & (VM_READ | VM_WRITE)));
	}

	if (is_write) {
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return true;
		return false;
	}

	/*
	 * VM_READ, VM_WRITE and VM_EXEC all imply read permissions, as
	 * defined in protection_map[]. Read faults can only be caused by
	 * a PROT_NONE mapping, or with a PROT_EXEC-only mapping on Radix.
	 */
	if (unlikely(!vma_is_accessible(vma)))
		return true;

	if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)))
		return true;

	/*
	 * We should ideally do the vma pkey access check here. But in the
	 * fault path, handle_mm_fault() also does the same check. To avoid
	 * these multiple checks, we skip it here and handle access error due
	 * to pkeys later.
	 */
	return false;
}

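/*
 * With Cooperative Memory Overcommit on pseries, the OS reports page-ins to
 * the hypervisor through the (big-endian) page_ins counter in the lppaca;
 * 1 << PAGE_FACTOR is the number of 4k hardware pages per kernel page.
 */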
#ifdef CONFIG_PPC_SMLPAR
static inline void cmo_account_page_fault(void)
{
	if (firmware_has_feature(FW_FEATURE_CMO)) {
		u32 page_ins;

		preempt_disable();
		page_ins = be32_to_cpu(get_lppaca()->page_ins);
		page_ins += 1 << PAGE_FACTOR;
		get_lppaca()->page_ins = cpu_to_be32(page_ins);
		preempt_enable();
	}
}
#else
static inline void cmo_account_page_fault(void) { }
#endif /* CONFIG_PPC_SMLPAR */

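/*
 * Log suspicious-looking user accesses to kernel addresses, and warn on
 * PROTFAULTs that the MMU mode in use should never generate.
 */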
static void sanity_check_fault(bool is_write, bool is_user,
			       unsigned long error_code, unsigned long address)
{
	/*
	 * Userspace trying to access kernel address, we get PROTFAULT for that.
	 */
	if (is_user && address >= TASK_SIZE) {
		if ((long)address == -1)
			return;

		pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n",
				   current->comm, current->pid, address,
				   from_kuid(&init_user_ns, current_uid()));
		return;
	}

	if (!IS_ENABLED(CONFIG_PPC_BOOK3S))
		return;

	/*
	 * For hash translation mode, we should never get a
	 * PROTFAULT. Any update to pte to reduce access will result in us
	 * removing the hash page table entry, thus resulting in a DSISR_NOHPTE
	 * fault instead of DSISR_PROTFAULT.
	 *
	 * A pte update to relax the access will not result in a hash page table
	 * entry invalidate and hence can result in DSISR_PROTFAULT.
	 * ptep_set_access_flags() doesn't do a hpte flush. This is why we have
	 * the special !is_write in the below conditional.
	 *
	 * For platforms that don't support coherent icache but do support a
	 * per-page noexec bit, we set things up so that the D/I cache sync
	 * happens via a fault. But that is handled by the low level hash
	 * fault code (hash_page_do_lazy_icache()) and we should not reach
	 * here in that case.
	 *
	 * For a wrong access that results in PROTFAULT, the vma->vm_flags
	 * check should handle it, and we fall through to the bad_area
	 * handling correctly.
	 *
	 * For embedded CPUs with per-page exec support but without coherent
	 * icache we do get PROTFAULT, and that D/I cache sync is handled in
	 * set_pte_at() while taking the noexec/prot fault. Hence this WARN_ON
	 * is conditional on the server MMU.
	 *
	 * For radix, we can get a prot fault in the autonuma case, because
	 * the radix page table will have pages marked no-access for user.
	 */
	if (radix_enabled() || is_write)
		return;

	WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
}

/*
 * Define the correct "is_write" bit in error_code based
 * on the processor family
 */
#if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
#define page_fault_is_write(__err)	((__err) & ESR_DST)
#else
#define page_fault_is_write(__err)	((__err) & DSISR_ISSTORE)
#endif

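/*
 * page_fault_is_bad() picks out error_code bits that flag an access the
 * fault handler can never service (see e.g. DSISR_BAD_FAULT_64S), as
 * opposed to an ordinary translation or protection miss.
 */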
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
#define page_fault_is_bad(__err)	(0)
#elif defined(CONFIG_PPC_8xx)
#define page_fault_is_bad(__err)	((__err) & DSISR_NOEXEC_OR_G)
#elif defined(CONFIG_PPC64)
static int page_fault_is_bad(unsigned long err)
{
	unsigned long flag = DSISR_BAD_FAULT_64S;

	/*
	 * PAPR+ v2.11 § 14.15.3.4.1 (unreleased)
	 * If byte 0, bit 3 of pi-attribute-specifier-type in
	 * ibm,pi-features property is defined, ignore the DSI error
	 * which is caused by the paste instruction on the
	 * suspended NX window.
	 */
	if (mmu_has_feature(MMU_FTR_NX_DSI))
		flag &= ~DSISR_BAD_COPYPASTE;

	return err & flag;
}
#else
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_32S)
#endif

/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault.
 * For 400-family processors the error_code parameter is ESR for a data fault,
 * 0 for an instruction fault.
 * For 64-bit processors, the error_code parameter is DSISR for a data access
 * fault, SRR1 & 0x08000000 for an instruction access fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
			    unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;
	int is_user = user_mode(regs);
	int is_write = page_fault_is_write(error_code);
	vm_fault_t fault, major = 0;
	bool kprobe_fault = kprobe_page_fault(regs, 11);

	if (unlikely(debugger_fault_handler(regs) || kprobe_fault))
		return 0;

	if (unlikely(page_fault_is_bad(error_code))) {
		if (is_user) {
			_exception(SIGBUS, regs, BUS_OBJERR, address);
			return 0;
		}
		return SIGBUS;
	}

	/* Additional sanity check(s) */
	sanity_check_fault(is_write, is_user, error_code, address);

	/*
	 * The kernel should never take an execute fault nor should it
	 * take a page fault to a kernel address or a page fault to a user
	 * address outside of dedicated places
	 */
	if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) {
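		/*
		 * Give KFENCE a chance to claim the fault first: accesses to
		 * its guard pages are reported there and, where possible,
		 * execution continues.
		 */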
		if (kfence_handle_page_fault(address, is_write, regs))
			return 0;

		return SIGSEGV;
	}

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in a region with pagefaults disabled then we must not take the fault
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		if (is_user)
			printk_ratelimited(KERN_ERR "Page fault in user mode"
					   " with faulthandler_disabled()=%d"
					   " mm=%p\n",
					   faulthandler_disabled(), mm);
		return bad_area_nosemaphore(regs, address);
	}

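	/* Re-enable interrupts if they were on in the interrupted context. */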
	interrupt_cond_local_irq_enable(regs);

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * We want to do this outside mmap_lock, because reading code around nip
	 * can result in a fault, which would cause a deadlock if taken with
	 * mmap_lock held.
	 */
	if (is_user)
		flags |= FAULT_FLAG_USER;
	if (is_write)
		flags |= FAULT_FLAG_WRITE;
	if (is_exec)
		flags |= FAULT_FLAG_INSTRUCTION;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in the
	 * kernel and should generate an OOPS. Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_lock
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exception table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space; if we cannot, we then validate the
	 * source. If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (unlikely(!mmap_read_trylock(mm))) {
		if (!is_user && !search_exception_tables(regs->nip))
			return bad_area_nosemaphore(regs, address);

retry:
		mmap_read_lock(mm);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma))
		return bad_area(regs, address);

	if (unlikely(vma->vm_start > address)) {
		if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
			return bad_area(regs, address);

		if (unlikely(expand_stack(vma, address)))
			return bad_area(regs, address);
	}

	if (unlikely(access_pkey_error(is_write, is_exec,
				       (error_code & DSISR_KEYFAULT), vma)))
		return bad_access_pkey(regs, address, vma);

	if (unlikely(access_error(is_write, is_exec, vma)))
		return bad_access(regs, address);

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	major |= fault & VM_FAULT_MAJOR;

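	/*
	 * If a signal interrupted the fault, the mm core has already dropped
	 * mmap_lock, so bail out without unlocking.
	 */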
	if (fault_signal_pending(fault, regs))
		return user_mode(regs) ? 0 : SIGBUS;

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		goto out;

	/*
	 * Handle the retry right now; the mmap_lock has already been
	 * released in that case.
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	mmap_read_unlock(current->mm);

	if (unlikely(fault & VM_FAULT_ERROR))
		return mm_fault_error(regs, address, fault);

out:
	/*
	 * Major/minor page fault accounting.
	 */
	if (major)
		cmo_account_page_fault();

	return 0;
}
NOKPROBE_SYMBOL(___do_page_fault);

static __always_inline void __do_page_fault(struct pt_regs *regs)
{
	long err;

	err = ___do_page_fault(regs, regs->dar, regs->dsisr);
	if (unlikely(err))
		bad_page_fault(regs, err);
}

DEFINE_INTERRUPT_HANDLER(do_page_fault)
{
	__do_page_fault(regs);
}

#ifdef CONFIG_PPC_BOOK3S_64
/* Same as do_page_fault but interrupt entry has already run in do_hash_fault */
void hash__do_page_fault(struct pt_regs *regs)
{
	__do_page_fault(regs);
}
NOKPROBE_SYMBOL(hash__do_page_fault);
#endif

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
static void __bad_page_fault(struct pt_regs *regs, int sig)
{
	int is_write = page_fault_is_write(regs->dsisr);
	const char *msg;

	/* kernel has accessed a bad area */

	if (regs->dar < PAGE_SIZE)
		msg = "Kernel NULL pointer dereference";
	else
		msg = "Unable to handle kernel data access";

	switch (TRAP(regs)) {
	case INTERRUPT_DATA_STORAGE:
	case INTERRUPT_H_DATA_STORAGE:
		pr_alert("BUG: %s on %s at 0x%08lx\n", msg,
			 is_write ? "write" : "read", regs->dar);
		break;
	case INTERRUPT_DATA_SEGMENT:
		pr_alert("BUG: %s at 0x%08lx\n", msg, regs->dar);
		break;
	case INTERRUPT_INST_STORAGE:
	case INTERRUPT_INST_SEGMENT:
		pr_alert("BUG: Unable to handle kernel instruction fetch%s",
			 regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n");
		break;
	case INTERRUPT_ALIGNMENT:
		pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n",
			 regs->dar);
		break;
	default:
		pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n",
			 regs->dar);
		break;
	}
	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
		regs->nip);

	if (task_stack_end_corrupted(current))
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	die("Kernel access of bad area", regs, sig);
}

void bad_page_fault(struct pt_regs *regs, int sig)
{
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this fault? */
	entry = search_exception_tables(instruction_pointer(regs));
	if (entry)
		instruction_pointer_set(regs, extable_fixup(entry));
	else
		__bad_page_fault(regs, sig);
}

#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_INTERRUPT_HANDLER(do_bad_page_fault_segv)
{
	bad_page_fault(regs, SIGSEGV);
}

/*
 * In radix, segment interrupts indicate the EA is not addressable by the
 * page table geometry, so they are always sent here.
 *
 * In hash, this is called if do_slb_fault returns error. Typically it is
 * because the EA was outside the region allowed by software.
 */
DEFINE_INTERRUPT_HANDLER(do_bad_segment_interrupt)
{
	int err = regs->result;

	if (err == -EFAULT) {
		if (user_mode(regs))
			_exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
		else
			bad_page_fault(regs, SIGSEGV);
	} else if (err == -EINVAL) {
		unrecoverable_exception(regs);
	} else {
		BUG();
	}
}
#endif