blob: 51df82b610843d5c490741fa33d59afb5a01c08b [file] [log] [blame]
Paul Mackerras9994a332005-10-10 22:36:14 +10001/*
Paul Mackerras9994a332005-10-10 22:36:14 +10002 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
20
Paul Mackerras9994a332005-10-10 22:36:14 +100021#include <linux/errno.h>
Michael Ellermanc3525940c2015-07-23 20:21:01 +100022#include <linux/err.h>
Michael Ellerman85baa092016-03-24 22:04:05 +110023#include <linux/magic.h>
Paul Mackerras9994a332005-10-10 22:36:14 +100024#include <asm/unistd.h>
25#include <asm/processor.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/thread_info.h>
29#include <asm/ppc_asm.h>
30#include <asm/asm-offsets.h>
31#include <asm/cputable.h>
Stephen Rothwell3f639ee2006-09-25 18:19:00 +100032#include <asm/firmware.h>
David Woodhouse007d88d2007-01-01 18:45:34 +000033#include <asm/bug.h>
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +100034#include <asm/ptrace.h>
Benjamin Herrenschmidt945feb12008-04-17 14:35:01 +100035#include <asm/irqflags.h>
Abhishek Sagar395a59d2008-06-21 23:47:27 +053036#include <asm/ftrace.h>
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +110037#include <asm/hw_irq.h>
Li Zhong5d1c5742013-05-13 16:16:43 +000038#include <asm/context_tracking.h>
Sam bobroffb4b56f92015-06-12 11:06:32 +100039#include <asm/tm.h>
Chris Smart8a649042016-04-26 10:28:50 +100040#include <asm/ppc-opcode.h>
Paul Mackerras9994a332005-10-10 22:36:14 +100041
42/*
43 * System calls.
44 */
45 .section ".toc","aw"
Anton Blanchardc857c432014-02-04 16:05:53 +110046SYS_CALL_TABLE:
47 .tc sys_call_table[TC],sys_call_table
Paul Mackerras9994a332005-10-10 22:36:14 +100048
49/* This value is used to mark exception frames on the stack. */
50exception_marker:
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +100051 .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
Paul Mackerras9994a332005-10-10 22:36:14 +100052
53 .section ".text"
54 .align 7
55
/*
 * system_call_common: common 64-bit system call entry path.
 *
 * Register state on entry (as evidenced by the stores below):
 *   r0      = syscall number
 *   r3-r8   = syscall arguments
 *   r11     = saved NIP, r12 = saved MSR (stashed into _NIP/_MSR)
 *   r13     = PACA pointer (PACAKSAVE/PACATOC loads)
 *   r9, r10 = scratch
 * NOTE(review): the exact r9-r12 contract is set up by the 0xc00
 * exception vector, which is outside this file — confirm there.
 *
 * Builds an INT_FRAME on the kernel stack, marks it with trap 0xc01
 * and the "regshere" exception marker, hard-enables EE (and RI on
 * Book3S), then dispatches indirectly through SYS_CALL_TABLE.
 */
Paul Mackerras9994a332005-10-10 22:36:14 +100056 .globl system_call_common
57system_call_common:
/* If a HW transaction is active, abort it rather than run the syscall. */
Sam bobroffb4b56f92015-06-12 11:06:32 +100058#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
59BEGIN_FTR_SECTION
60 extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
61 bne tabort_syscall
62END_FTR_SECTION_IFSET(CPU_FTR_TM)
63#endif
/*
 * MSR_PR in the saved MSR distinguishes user vs kernel entry: from
 * user, switch to the kernel stack saved in PACAKSAVE; from kernel,
 * just push a frame on the current stack.
 */
Paul Mackerras9994a332005-10-10 22:36:14 +100064 andi. r10,r12,MSR_PR
65 mr r10,r1
66 addi r1,r1,-INT_FRAME_SIZE
67 beq- 1f
68 ld r1,PACAKSAVE(r13)
691: std r10,0(r1)
70 std r11,_NIP(r1)
71 std r12,_MSR(r1)
72 std r0,GPR0(r1)
73 std r10,GPR1(r1)
Haren Myneni5d75b262012-12-06 21:46:37 +000074 beq 2f /* if from kernel mode */
Christophe Leroyc223c902016-05-17 08:33:46 +020075 ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
Haren Myneni5d75b262012-12-06 21:46:37 +0000762: std r2,GPR2(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +100077 std r3,GPR3(r1)
Anton Blanchardfd6c40f2012-04-05 03:44:48 +000078 mfcr r2
Paul Mackerras9994a332005-10-10 22:36:14 +100079 std r4,GPR4(r1)
80 std r5,GPR5(r1)
81 std r6,GPR6(r1)
82 std r7,GPR7(r1)
83 std r8,GPR8(r1)
/* GPR9-GPR12 are clobbered by the exception prolog; record them as 0. */
84 li r11,0
85 std r11,GPR9(r1)
86 std r11,GPR10(r1)
87 std r11,GPR11(r1)
88 std r11,GPR12(r1)
Anton Blanchard823df432012-04-04 18:24:29 +000089 std r11,_XER(r1)
Anton Blanchard82087412012-04-04 18:26:39 +000090 std r11,_CTR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +100091 std r9,GPR13(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +100092 mflr r10
Anton Blanchardfd6c40f2012-04-05 03:44:48 +000093 /*
94 * This clears CR0.SO (bit 28), which is the error indication on
95 * return from this system call.
96 */
97 rldimi r2,r11,28,(63-28)
/* 0xc01 = syscall trap number, low bit set => "full frame not saved". */
Paul Mackerras9994a332005-10-10 22:36:14 +100098 li r11,0xc01
Paul Mackerras9994a332005-10-10 22:36:14 +100099 std r10,_LINK(r1)
100 std r11,_TRAP(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000101 std r3,ORIG_GPR3(r1)
Anton Blanchardfd6c40f2012-04-05 03:44:48 +0000102 std r2,_CCR(r1)
/* Load the kernel TOC and drop the exception-frame marker below it. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000103 ld r2,PACATOC(r13)
104 addi r9,r1,STACK_FRAME_OVERHEAD
105 ld r11,exception_marker@toc(r2)
106 std r11,-16(r9) /* "regshere" marker */
/* On shared-processor LPARs, fold any pending dispatch-trace-log
 * entries into stolen-time accounting before charging this task. */
Frederic Weisbeckerabf917c2012-07-25 07:56:04 +0200107#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
Paul Mackerrascf9efce2010-08-26 19:56:43 +0000108BEGIN_FW_FTR_SECTION
109 beq 33f
110 /* if from user, see if there are any DTL entries to process */
111 ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */
112 ld r11,PACA_DTL_RIDX(r13) /* get log read index */
Anton Blanchard7ffcf8e2013-08-07 02:01:46 +1000113 addi r10,r10,LPPACA_DTLIDX
114 LDX_BE r10,0,r10 /* get log write index */
Paul Mackerrascf9efce2010-08-26 19:56:43 +0000115 cmpd cr1,r11,r10
116 beq+ cr1,33f
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100117 bl accumulate_stolen_time
Paul Mackerrascf9efce2010-08-26 19:56:43 +0000118 REST_GPR(0,r1)
119 REST_4GPRS(3,r1)
120 REST_2GPRS(7,r1)
121 addi r9,r1,STACK_FRAME_OVERHEAD
12233:
123END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
Frederic Weisbeckerabf917c2012-07-25 07:56:04 +0200124#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
Paul Mackerrascf9efce2010-08-26 19:56:43 +0000125
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100126 /*
127 * A syscall should always be called with interrupts enabled
128 * so we just unconditionally hard-enable here. When some kind
129 * of irq tracing is used, we additionally check that condition
130 * is correct
131 */
132#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
133 lbz r10,PACASOFTIRQEN(r13)
134 xori r10,r10,1
1351: tdnei r10,0
136 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
137#endif
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000138
/* Hard-enable external interrupts (Book3E: wrteei; Book3S: MSR EE|RI). */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000139#ifdef CONFIG_PPC_BOOK3E
140 wrteei 1
141#else
Nicholas Piggin49d09bf2016-09-15 19:04:46 +1000142 li r11,MSR_RI
Paul Mackerras9994a332005-10-10 22:36:14 +1000143 ori r11,r11,MSR_EE
144 mtmsrd r11,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000145#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000146
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100147 /* We do need to set SOFTE in the stack frame or the return
148 * from interrupt will be painful
149 */
150 li r10,1
151 std r10,SOFTE(r1)
152
/* Divert to the ptrace/seccomp slow path if any tracer is attached. */
Stuart Yoder9778b692012-07-05 04:41:35 +0000153 CURRENT_THREAD_INFO(r11, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000154 ld r10,TI_FLAGS(r11)
Michael Ellerman10ea8342015-01-15 12:01:42 +1100155 andi. r11,r10,_TIF_SYSCALL_DOTRACE
Michael Ellermand3837412015-07-23 20:21:02 +1000156 bne syscall_dotrace /* does not return */
Paul Mackerras9994a332005-10-10 22:36:14 +1000157 cmpldi 0,r0,NR_syscalls
158 bge- syscall_enosys
159
160system_call: /* label this so stack traces look sane */
161/*
162 * Need to vector to 32 Bit or default sys_call_table here,
163 * based on caller's run-mode / personality.
164 */
Anton Blanchardc857c432014-02-04 16:05:53 +1100165 ld r11,SYS_CALL_TABLE@toc(2)
Paul Mackerras9994a332005-10-10 22:36:14 +1000166 andi. r10,r10,_TIF_32BIT
167 beq 15f
/* Compat task: use the second (32-bit) entry of each 16-byte pair and
 * truncate the six argument registers to 32 bits. */
168 addi r11,r11,8 /* use 32-bit syscall entries */
169 clrldi r3,r3,32
170 clrldi r4,r4,32
171 clrldi r5,r5,32
172 clrldi r6,r6,32
173 clrldi r7,r7,32
174 clrldi r8,r8,32
17515:
/* Each table slot is 16 bytes (r0 << 4); indirect call via CTR. */
176 slwi r0,r0,4
Anton Blanchardcc7efbf2014-02-04 16:07:47 +1100177 ldx r12,r11,r0 /* Fetch system call handler [ptr] */
178 mtctr r12
Paul Mackerras9994a332005-10-10 22:36:14 +1000179 bctrl /* Call handler */
180
/*
 * .Lsyscall_exit: fast return-to-caller path after the handler returns.
 * r3 = handler return value.  Disables EE (and RI on Book3S, batched
 * into one mtmsrd for speed), checks for exit work, optionally restores
 * FP/VEC math state, converts errors (r3 >= -MAX_ERRNO) to negated
 * errno with CR0.SO set, then restores user context and RFIs.
 */
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100181.Lsyscall_exit:
Paul Mackerras9994a332005-10-10 22:36:14 +1000182 std r3,RESULT(r1)
Stuart Yoder9778b692012-07-05 04:41:35 +0000183 CURRENT_THREAD_INFO(r12, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000184
Paul Mackerras9994a332005-10-10 22:36:14 +1000185 ld r8,_MSR(r1)
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000186#ifdef CONFIG_PPC_BOOK3S
187 /* No MSR:RI on BookE */
Paul Mackerras9994a332005-10-10 22:36:14 +1000188 andi. r10,r8,MSR_RI
189 beq- unrecov_restore
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000190#endif
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100191 /*
192 * Disable interrupts so current_thread_info()->flags can't change,
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000193 * and so that we don't get interrupted after loading SRR0/1.
194 */
195#ifdef CONFIG_PPC_BOOK3E
196 wrteei 0
197#else
Anton Blanchardac1dc362012-05-29 12:22:00 +0000198 /*
199 * For performance reasons we clear RI the same time that we
200 * clear EE. We only need to clear RI just before we restore r13
201 * below, but batching it with EE saves us one expensive mtmsrd call.
202 * We have to be careful to restore RI if we branch anywhere from
203 * here (eg syscall_exit_work).
204 */
Nicholas Piggin49d09bf2016-09-15 19:04:46 +1000205 li r11,0
Anton Blanchardac1dc362012-05-29 12:22:00 +0000206 mtmsrd r11,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000207#endif /* CONFIG_PPC_BOOK3E */
208
/* Any trace/singlestep/signal/per-syscall work pending? -> slow path. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000209 ld r9,TI_FLAGS(r12)
Michael Ellermanc3525940c2015-07-23 20:21:01 +1000210 li r11,-MAX_ERRNO
Michael Ellerman10ea8342015-01-15 12:01:42 +1100211 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
Paul Mackerras9994a332005-10-10 22:36:14 +1000212 bne- syscall_exit_work
Cyril Bur70fe3d92016-02-29 17:53:47 +1100213
/* If FP (and, with ALTIVEC, VEC) state is already live in the saved
 * MSR, skip restore_math; note restore_math needs RI back on first. */
214 andi. r0,r8,MSR_FP
215 beq 2f
216#ifdef CONFIG_ALTIVEC
217 andis. r0,r8,MSR_VEC@h
218 bne 3f
219#endif
2202: addi r3,r1,STACK_FRAME_OVERHEAD
Cyril Bur6e669f02016-03-16 13:29:30 +1100221#ifdef CONFIG_PPC_BOOK3S
Nicholas Piggin49d09bf2016-09-15 19:04:46 +1000222 li r10,MSR_RI
Cyril Bur6e669f02016-03-16 13:29:30 +1100223 mtmsrd r10,1 /* Restore RI */
224#endif
Cyril Bur70fe3d92016-02-29 17:53:47 +1100225 bl restore_math
Cyril Bur6e669f02016-03-16 13:29:30 +1100226#ifdef CONFIG_PPC_BOOK3S
Nicholas Piggin49d09bf2016-09-15 19:04:46 +1000227 li r11,0
Cyril Bur6e669f02016-03-16 13:29:30 +1100228 mtmsrd r11,1
229#endif
/* restore_math may have changed the saved MSR; r3/r11 were clobbered. */
Cyril Bur70fe3d92016-02-29 17:53:47 +1100230 ld r8,_MSR(r1)
231 ld r3,RESULT(r1)
232 li r11,-MAX_ERRNO
233
2343: cmpld r3,r11
David Woodhouse401d1f02005-11-15 18:52:18 +0000235 ld r5,_CCR(r1)
236 bge- syscall_error
Anton Blanchardd14299d2012-04-04 18:23:27 +0000237.Lsyscall_error_cont:
Paul Mackerras9994a332005-10-10 22:36:14 +1000238 ld r7,_NIP(r1)
BEGIN_FTR_SECTION
Paul Mackerras9994a332005-10-10 22:36:14 +1000240 stdcx. r0,0,r1 /* to clear the reservation */
Anton Blanchardf89451f2010-08-11 01:40:27 +0000241END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
/* r13 (the PACA) is only reloaded when returning to user mode. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000242 andi. r6,r8,MSR_PR
243 ld r4,_LINK(r1)
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000244
Paul Mackerrasc6622f62006-02-24 10:06:59 +1100245 beq- 1f
Christophe Leroyc223c902016-05-17 08:33:46 +0200246 ACCOUNT_CPU_USER_EXIT(r13, r11, r12)
Michael Ellermand030a4b2015-11-25 14:25:17 +1100247
248BEGIN_FTR_SECTION
249 HMT_MEDIUM_LOW
250END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
251
Paul Mackerrasc6622f62006-02-24 10:06:59 +1100252 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
/* Restore TOC, stack, LR, CR, and SRR0/1, then return via RFI. */
Paul Mackerras9994a332005-10-10 22:36:14 +10002531: ld r2,GPR2(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000254 ld r1,GPR1(r1)
255 mtlr r4
256 mtcr r5
257 mtspr SPRN_SRR0,r7
258 mtspr SPRN_SRR1,r8
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000259 RFI
Paul Mackerras9994a332005-10-10 22:36:14 +1000260 b . /* prevent speculative execution */
261
/*
 * syscall_error: the handler returned an error code (r3 >= -MAX_ERRNO).
 * Set the SO bit in the saved CR (userspace's error indicator), negate
 * r3 into a positive errno, and rejoin the common exit path.
 */
David Woodhouse401d1f02005-11-15 18:52:18 +0000262syscall_error:
Paul Mackerras9994a332005-10-10 22:36:14 +1000263 oris r5,r5,0x1000 /* Set SO bit in CR */
David Woodhouse401d1f02005-11-15 18:52:18 +0000264 neg r3,r3
Paul Mackerras9994a332005-10-10 22:36:14 +1000265 std r5,_CCR(r1)
Anton Blanchardd14299d2012-04-04 18:23:27 +0000266 b .Lsyscall_error_cont
David Woodhouse401d1f02005-11-15 18:52:18 +0000267
Paul Mackerras9994a332005-10-10 22:36:14 +1000268/* Traced system call support */
/*
 * syscall_dotrace: slow entry path taken when _TIF_SYSCALL_DOTRACE is
 * set.  Saves the non-volatile GPRs, lets do_syscall_trace_enter()
 * inspect/modify the call, then re-dispatches through system_call with
 * the (possibly rewritten) syscall number and arguments.
 */
269syscall_dotrace:
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100270 bl save_nvgprs
Paul Mackerras9994a332005-10-10 22:36:14 +1000271 addi r3,r1,STACK_FRAME_OVERHEAD
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100272 bl do_syscall_trace_enter
Michael Ellermand3837412015-07-23 20:21:02 +1000273
Roland McGrath4f72c422008-07-27 16:51:03 +1000274 /*
Michael Ellermand3837412015-07-23 20:21:02 +1000275 * We use the return value of do_syscall_trace_enter() as the syscall
276 * number. If the syscall was rejected for any reason do_syscall_trace_enter()
277 * returns an invalid syscall number and the test below against
278 * NR_syscalls will fail.
Roland McGrath4f72c422008-07-27 16:51:03 +1000279 */
280 mr r0,r3
Michael Ellermand3837412015-07-23 20:21:02 +1000281
282 /* Restore argument registers just clobbered and/or possibly changed. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000283 ld r3,GPR3(r1)
284 ld r4,GPR4(r1)
285 ld r5,GPR5(r1)
286 ld r6,GPR6(r1)
287 ld r7,GPR7(r1)
288 ld r8,GPR8(r1)
Michael Ellermand3837412015-07-23 20:21:02 +1000289
290 /* Repopulate r9 and r10 for the system_call path */
Paul Mackerras9994a332005-10-10 22:36:14 +1000291 addi r9,r1,STACK_FRAME_OVERHEAD
Stuart Yoder9778b692012-07-05 04:41:35 +0000292 CURRENT_THREAD_INFO(r10, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000293 ld r10,TI_FLAGS(r10)
Michael Ellermand3837412015-07-23 20:21:02 +1000294
295 cmpldi r0,NR_syscalls
296 blt+ system_call
297
298 /* Return code is already in r3 thanks to do_syscall_trace_enter() */
299 b .Lsyscall_exit
300
Paul Mackerras9994a332005-10-10 22:36:14 +1000301
/* syscall_enosys: syscall number out of range -> return -ENOSYS. */
David Woodhouse401d1f02005-11-15 18:52:18 +0000302syscall_enosys:
303 li r3,-ENOSYS
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100304 b .Lsyscall_exit
David Woodhouse401d1f02005-11-15 18:52:18 +0000305
/*
 * syscall_exit_work: slow exit path when TIF work is pending.
 * Entered from .Lsyscall_exit with r9 = TI_FLAGS, r11 = -MAX_ERRNO,
 * r12 = thread_info, and (on Book3S) RI cleared — so RI is restored
 * first.  Handles TIF_RESTOREALL/TIF_NOERROR result fixups, atomically
 * clears the per-syscall flags, then either returns through
 * ret_from_except_lite or calls do_syscall_trace_leave for tracers.
 */
306syscall_exit_work:
Anton Blanchardac1dc362012-05-29 12:22:00 +0000307#ifdef CONFIG_PPC_BOOK3S
Nicholas Piggin49d09bf2016-09-15 19:04:46 +1000308 li r10,MSR_RI
Anton Blanchardac1dc362012-05-29 12:22:00 +0000309 mtmsrd r10,1 /* Restore RI */
310#endif
David Woodhouse401d1f02005-11-15 18:52:18 +0000311 /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
312 If TIF_NOERROR is set, just save r3 as it is. */
313
314 andi. r0,r9,_TIF_RESTOREALL
Paul Mackerras1bd79332006-03-08 13:24:22 +1100315 beq+ 0f
316 REST_NVGPRS(r1)
317 b 2f
Michael Ellermanc3525940c2015-07-23 20:21:01 +10003180: cmpld r3,r11 /* r11 is -MAX_ERRNO */
David Woodhouse401d1f02005-11-15 18:52:18 +0000319 blt+ 1f
320 andi. r0,r9,_TIF_NOERROR
321 bne- 1f
322 ld r5,_CCR(r1)
323 neg r3,r3
324 oris r5,r5,0x1000 /* Set SO bit in CR */
325 std r5,_CCR(r1)
3261: std r3,GPR3(r1)
3272: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
328 beq 4f
329
Paul Mackerras1bd79332006-03-08 13:24:22 +1100330 /* Clear per-syscall TIF flags if any are set. */
David Woodhouse401d1f02005-11-15 18:52:18 +0000331
/* ldarx/stdcx. loop: other CPUs/IRQs may update TI_FLAGS concurrently. */
332 li r11,_TIF_PERSYSCALL_MASK
333 addi r12,r12,TI_FLAGS
3343: ldarx r10,0,r12
335 andc r10,r10,r11
336 stdcx. r10,0,r12
337 bne- 3b
338 subi r12,r12,TI_FLAGS
Paul Mackerras1bd79332006-03-08 13:24:22 +1100339
3404: /* Anything else left to do? */
BEGIN_FTR_SECTION
342 lis r3,INIT_PPR@highest /* Set thread.ppr = 3 */
343 ld r10,PACACURRENT(r13)
344 sldi r3,r3,32 /* bits 11-13 are used for ppr */
345 std r3,TASKTHREADPPR(r10)
346END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
347
Michael Ellerman10ea8342015-01-15 12:01:42 +1100348 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100349 beq ret_from_except_lite
David Woodhouse401d1f02005-11-15 18:52:18 +0000350
351 /* Re-enable interrupts */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000352#ifdef CONFIG_PPC_BOOK3E
353 wrteei 1
354#else
Nicholas Piggin49d09bf2016-09-15 19:04:46 +1000355 li r10,MSR_RI
David Woodhouse401d1f02005-11-15 18:52:18 +0000356 ori r10,r10,MSR_EE
357 mtmsrd r10,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000358#endif /* CONFIG_PPC_BOOK3E */
David Woodhouse401d1f02005-11-15 18:52:18 +0000359
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100360 bl save_nvgprs
Paul Mackerras9994a332005-10-10 22:36:14 +1000361 addi r3,r1,STACK_FRAME_OVERHEAD
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100362 bl do_syscall_trace_leave
363 b ret_from_except
Paul Mackerras9994a332005-10-10 22:36:14 +1000364
/*
 * tabort_syscall: a syscall was issued while a hardware transaction was
 * active.  Enable MSR_TM in the kernel, abort (doom) the transaction
 * with cause TM_CAUSE_SYSCALL|PERSISTENT, and return straight to
 * userspace — execution resumes at the tbegin of the aborted
 * transaction with the checkpointed register state, so the clobbered
 * working registers are never observed.  r11/r12 still hold the
 * original SRR0/SRR1 from syscall entry.
 */
Sam bobroffb4b56f92015-06-12 11:06:32 +1000365#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
366tabort_syscall:
367 /* Firstly we need to enable TM in the kernel */
368 mfmsr r10
Nicholas Piggincc7786d2016-07-25 14:26:51 +1000369 li r9, 1
370 rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
Sam bobroffb4b56f92015-06-12 11:06:32 +1000371 mtmsrd r10, 0
372
373 /* tabort, this dooms the transaction, nothing else */
Nicholas Piggincc7786d2016-07-25 14:26:51 +1000374 li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
375 TABORT(R9)
Sam bobroffb4b56f92015-06-12 11:06:32 +1000376
377 /*
378 * Return directly to userspace. We have corrupted user register state,
379 * but userspace will never see that register state. Execution will
380 * resume after the tbegin of the aborted transaction with the
381 * checkpointed register state.
382 */
/* Clear RI before loading SRR0/1 — an interrupt here would be fatal. */
Nicholas Piggincc7786d2016-07-25 14:26:51 +1000383 li r9, MSR_RI
384 andc r10, r10, r9
Sam bobroffb4b56f92015-06-12 11:06:32 +1000385 mtmsrd r10, 1
386 mtspr SPRN_SRR0, r11
387 mtspr SPRN_SRR1, r12
388
389 rfid
390 b . /* prevent speculative execution */
391#endif
392
Paul Mackerras9994a332005-10-10 22:36:14 +1000393/* Save non-volatile GPRs, if not already saved. */
/*
 * The low bit of _TRAP flags a frame whose non-volatile GPRs (r14-r31)
 * have NOT been saved.  If it is clear we return immediately; otherwise
 * save them and clear the bit so a second call is a no-op.
 * Clobbers r0, r11; preserves the caller's argument registers.
 */
394_GLOBAL(save_nvgprs)
395 ld r11,_TRAP(r1)
396 andi. r0,r11,1
397 beqlr-
398 SAVE_NVGPRS(r1)
399 clrrdi r0,r11,1
400 std r0,_TRAP(r1)
401 blr
402
David Woodhouse401d1f02005-11-15 18:52:18 +0000403
Paul Mackerras9994a332005-10-10 22:36:14 +1000404/*
405 * The sigsuspend and rt_sigsuspend system calls can call do_signal
406 * and thus put the process into the stopped state where we might
407 * want to examine its user state with ptrace. Therefore we need
408 * to save all the nonvolatile registers (r14 - r31) before calling
409 * the C code. Similarly, fork, vfork and clone need the full
410 * register state on the stack so that it can be copied to the child.
411 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000412
/* fork needs the full register state saved so it can be copied to the
 * child; save NVGPRs, call the C handler, and exit via the common path. */
413_GLOBAL(ppc_fork)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100414 bl save_nvgprs
415 bl sys_fork
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100416 b .Lsyscall_exit
Paul Mackerras9994a332005-10-10 22:36:14 +1000417
/* vfork wrapper: as ppc_fork, NVGPRs must be on the stack for the child. */
418_GLOBAL(ppc_vfork)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100419 bl save_nvgprs
420 bl sys_vfork
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100421 b .Lsyscall_exit
Paul Mackerras9994a332005-10-10 22:36:14 +1000422
/* clone wrapper: as ppc_fork, NVGPRs must be on the stack for the child. */
423_GLOBAL(ppc_clone)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100424 bl save_nvgprs
425 bl sys_clone
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100426 b .Lsyscall_exit
Paul Mackerras9994a332005-10-10 22:36:14 +1000427
/* 32-bit (compat) swapcontext: full register state must be saved so it
 * can be captured into / restored from the ucontext. */
Paul Mackerras1bd79332006-03-08 13:24:22 +1100428_GLOBAL(ppc32_swapcontext)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100429 bl save_nvgprs
430 bl compat_sys_swapcontext
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100431 b .Lsyscall_exit
Paul Mackerras1bd79332006-03-08 13:24:22 +1100432
/* 64-bit swapcontext: same NVGPR-save requirement as the compat variant. */
433_GLOBAL(ppc64_swapcontext)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100434 bl save_nvgprs
435 bl sys_swapcontext
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100436 b .Lsyscall_exit
Paul Mackerras1bd79332006-03-08 13:24:22 +1100437
/* switch_endian rewrites the saved register image (including NVGPRs),
 * so the full state must be on the stack before calling the C handler. */
Michael Ellerman529d2352015-03-28 21:35:16 +1100438_GLOBAL(ppc_switch_endian)
439 bl save_nvgprs
440 bl sys_switch_endian
441 b .Lsyscall_exit
442
/*
 * ret_from_fork: first code run by a newly forked user task (entered
 * from _switch).  Finish the scheduler handoff, restore the NVGPRs
 * copied from the parent, and return 0 (the child's fork() result)
 * through the common syscall exit.
 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000443_GLOBAL(ret_from_fork)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100444 bl schedule_tail
Paul Mackerras9994a332005-10-10 22:36:14 +1000445 REST_NVGPRS(r1)
446 li r3,0
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100447 b .Lsyscall_exit
Paul Mackerras9994a332005-10-10 22:36:14 +1000448
/*
 * ret_from_kernel_thread: first code run by a new kernel thread.
 * r14 = thread function, r15 = its argument (placed there by
 * copy_thread, outside this file — NOTE(review): confirm there).
 * Under the ELFv2 ABI r12 must hold the entry address for the callee's
 * global-entry TOC setup.  If the function returns, exit with status 0.
 */
Al Viro58254e12012-09-12 18:32:42 -0400449_GLOBAL(ret_from_kernel_thread)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100450 bl schedule_tail
Al Viro58254e12012-09-12 18:32:42 -0400451 REST_NVGPRS(r1)
Al Viro58254e12012-09-12 18:32:42 -0400452 mtlr r14
453 mr r3,r15
Michael Ellermanf55d9662016-06-06 22:26:10 +0530454#ifdef PPC64_ELF_ABI_v2
Anton Blanchard7cedd602014-02-04 16:08:51 +1100455 mr r12,r14
456#endif
Al Viro58254e12012-09-12 18:32:42 -0400457 blrl
458 li r3,0
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100459 b .Lsyscall_exit
Al Virobe6abfa72012-08-31 15:48:05 -0400460
Paul Mackerras9994a332005-10-10 22:36:14 +1000461/*
462 * This routine switches between two different tasks. The process
463 * state of one is saved on its kernel stack. Then the state
464 * of the other is restored from its kernel stack. The memory
465 * management hardware is updated to the second process's state.
466 * Finally, we can return to the second process, via ret_from_except.
467 * On entry, r3 points to the THREAD for the current task, r4
468 * points to the THREAD for the new task.
469 *
470 * Note: there are two ways to get to the "going out" portion
471 * of this code; either by coming in via the entry (_switch)
472 * or via "fork" which must set up an environment equivalent
473 * to the "_switch" path. If you change this you'll have to change
474 * the fork code also.
475 *
476 * The code which creates the new task context is in 'copy_thread'
Jon Mason2ef94812006-01-23 10:58:20 -0600477 * in arch/powerpc/kernel/process.c
Paul Mackerras9994a332005-10-10 22:36:14 +1000478 */
/*
 * _switch(r3 = old THREAD, r4 = new THREAD): the context switch.
 * Saves the outgoing task's NVGPRs/CR/LR in a SWITCH_FRAME on its
 * stack and stores that SP in old->KSP; installs the new task as
 * 'current' in the PACA, bolts the new kernel stack's SLB entry on
 * hash-MMU Book3S (skipped on radix), switches to the new stack, and
 * restores the incoming task's saved state.  Returns (in the new
 * task's context) with r3 = old task_struct, as used by the scheduler.
 */
479 .align 7
480_GLOBAL(_switch)
481 mflr r0
482 std r0,16(r1)
483 stdu r1,-SWITCH_FRAME_SIZE(r1)
484 /* r3-r13 are caller saved -- Cort */
485 SAVE_8GPRS(14, r1)
486 SAVE_10GPRS(22, r1)
Anton Blanchard68bfa962015-10-29 11:43:56 +1100487 std r0,_NIP(r1) /* Return to switch caller */
Paul Mackerras9994a332005-10-10 22:36:14 +1000488 mfcr r23
489 std r23,_CCR(r1)
490 std r1,KSP(r3) /* Set old stack pointer */
491
492#ifdef CONFIG_SMP
493 /* We need a sync somewhere here to make sure that if the
494 * previous task gets rescheduled on another CPU, it sees all
495 * stores it has performed on this one.
496 */
497 sync
498#endif /* CONFIG_SMP */
499
Anton Blanchardf89451f2010-08-11 01:40:27 +0000500 /*
501 * If we optimise away the clear of the reservation in system
502 * calls because we know the CPU tracks the address of the
503 * reservation, then we need to clear it here to cover the
504 * case that the kernel context switch path has no larx
505 * instructions.
506 */
507BEGIN_FTR_SECTION
508 ldarx r6,0,r1
509END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
510
Chris Smart8a649042016-04-26 10:28:50 +1000511BEGIN_FTR_SECTION
512/*
513 * A cp_abort (copy paste abort) here ensures that when context switching, a
514 * copy from one process can't leak into the paste of another.
515 */
516 PPC_CP_ABORT
517END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
518
Michael Neulinga5153482013-05-29 19:34:27 +0000519#ifdef CONFIG_PPC_BOOK3S
520/* Cancel all explict user streams as they will have no use after context
521 * switch and will stop the HW from creating streams itself
522 */
523 DCBT_STOP_ALL_STREAM_IDS(r6)
524#endif
525
Paul Mackerras9994a332005-10-10 22:36:14 +1000526 addi r6,r4,-THREAD /* Convert THREAD to 'current' */
527 std r6,PACACURRENT(r13) /* Set new 'current' */
528
529 ld r8,KSP(r4) /* new stack pointer */
/* Hash-MMU only: radix skips the SLB games entirely (branch to 2f). */
Aneesh Kumar K.Vcaca2852016-04-29 23:26:07 +1000530#ifdef CONFIG_PPC_STD_MMU_64
531BEGIN_MMU_FTR_SECTION
532 b 2f
Aneesh Kumar K.V5a25b6f2016-07-27 13:19:01 +1000533END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
Paul Mackerras9994a332005-10-10 22:36:14 +1000534BEGIN_FTR_SECTION
535 clrrdi r6,r8,28 /* get its ESID */
536 clrrdi r9,r1,28 /* get current sp ESID */
Michael Ellerman13b3d132014-07-10 12:29:20 +1000537FTR_SECTION_ELSE
Paul Mackerras1189be62007-10-11 20:37:10 +1000538 clrrdi r6,r8,40 /* get its 1T ESID */
539 clrrdi r9,r1,40 /* get current sp 1T ESID */
Michael Ellerman13b3d132014-07-10 12:29:20 +1000540ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
/* Skip the slbie/slbmte if the new stack is in the bolted kernel
 * region (0xc...) or shares a segment with the current stack. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000541 clrldi. r0,r6,2 /* is new ESID c00000000? */
542 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
543 cror eq,4*cr1+eq,eq
544 beq 2f /* if yes, don't slbie it */
545
546 /* Bolt in the new stack SLB entry */
547 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
548 oris r0,r6,(SLB_ESID_V)@h
549 ori r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
551 li r9,MMU_SEGSIZE_1T /* insert B field */
552 oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
553 rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0
Matt Evans44ae3ab2011-04-06 19:48:50 +0000554END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
Michael Neuling2f6093c2006-08-07 16:19:19 +1000555
Michael Neuling00efee72007-08-24 16:58:37 +1000556 /* Update the last bolted SLB. No write barriers are needed
557 * here, provided we only update the current CPU's SLB shadow
558 * buffer.
559 */
Michael Neuling2f6093c2006-08-07 16:19:19 +1000560 ld r9,PACA_SLBSHADOWPTR(r13)
Michael Neuling11a27ad2006-08-09 17:00:30 +1000561 li r12,0
Anton Blanchard7ffcf8e2013-08-07 02:01:46 +1000562 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
563 li r12,SLBSHADOW_STACKVSID
564 STDX_BE r7,r12,r9 /* Save VSID */
565 li r12,SLBSHADOW_STACKESID
566 STDX_BE r0,r12,r9 /* Save ESID */
Michael Neuling2f6093c2006-08-07 16:19:19 +1000567
Matt Evans44ae3ab2011-04-06 19:48:50 +0000568 /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
Olof Johanssonf66bce52007-10-16 00:58:59 +1000569 * we have 1TB segments, the only CPUs known to have the errata
570 * only support less than 1TB of system memory and we'll never
571 * actually hit this code path.
572 */
573
Paul Mackerras9994a332005-10-10 22:36:14 +1000574 slbie r6
575 slbie r6 /* Workaround POWER5 < DD2.1 issue */
576 slbmte r7,r0
577 isync
Paul Mackerras9994a332005-10-10 22:36:14 +10005782:
Aneesh Kumar K.Vcaca2852016-04-29 23:26:07 +1000579#endif /* CONFIG_PPC_STD_MMU_64 */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000580
/* Record the new task's syscall-entry stack in PACAKSAVE, then flip r1. */
Stuart Yoder9778b692012-07-05 04:41:35 +0000581 CURRENT_THREAD_INFO(r7, r8) /* base of new stack */
Paul Mackerras9994a332005-10-10 22:36:14 +1000582 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
583 because we don't need to leave the 288-byte ABI gap at the
584 top of the kernel stack. */
585 addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
586
587 mr r1,r8 /* start using new stack pointer */
588 std r7,PACAKSAVE(r13)
589
Anton Blanchard71433282012-09-03 16:51:10 +0000590 ld r6,_CCR(r1)
591 mtcrf 0xFF,r6
592
Paul Mackerras9994a332005-10-10 22:36:14 +1000593 /* r3-r13 are destroyed -- Cort */
594 REST_8GPRS(14, r1)
595 REST_10GPRS(22, r1)
596
597 /* convert old thread to its task_struct for return value */
598 addi r3,r3,-THREAD
599 ld r7,_NIP(r1) /* Return to _switch caller in new task */
600 mtlr r7
601 addi r1,r1,SWITCH_FRAME_SIZE
602 blr
603
604 .align 7
605_GLOBAL(ret_from_except)
606 ld r11,_TRAP(r1)
607 andi. r0,r11,1
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100608 bne ret_from_except_lite
Paul Mackerras9994a332005-10-10 22:36:14 +1000609 REST_NVGPRS(r1)
610
611_GLOBAL(ret_from_except_lite)
612 /*
613 * Disable interrupts so that current_thread_info()->flags
614 * can't change between when we test it and when we return
615 * from the interrupt.
616 */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000617#ifdef CONFIG_PPC_BOOK3E
618 wrteei 0
619#else
Nicholas Piggin49d09bf2016-09-15 19:04:46 +1000620 li r10,MSR_RI
Benjamin Herrenschmidtd9ada912012-03-02 11:33:52 +1100621 mtmsrd r10,1 /* Update machine state */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000622#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000623
Stuart Yoder9778b692012-07-05 04:41:35 +0000624 CURRENT_THREAD_INFO(r9, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000625 ld r3,_MSR(r1)
Bharat Bhushan13d543c2013-05-22 09:50:59 +0530626#ifdef CONFIG_PPC_BOOK3E
627 ld r10,PACACURRENT(r13)
628#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000629 ld r4,TI_FLAGS(r9)
Paul Mackerras9994a332005-10-10 22:36:14 +1000630 andi. r3,r3,MSR_PR
Tiejun Chenc58ce2b2012-06-06 20:56:43 +0000631 beq resume_kernel
Bharat Bhushan13d543c2013-05-22 09:50:59 +0530632#ifdef CONFIG_PPC_BOOK3E
633 lwz r3,(THREAD+THREAD_DBCR0)(r10)
634#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000635
636 /* Check current_thread_info()->flags */
Tiejun Chenc58ce2b2012-06-06 20:56:43 +0000637 andi. r0,r4,_TIF_USER_WORK_MASK
Bharat Bhushan13d543c2013-05-22 09:50:59 +0530638 bne 1f
Cyril Bur70fe3d92016-02-29 17:53:47 +1100639#ifdef CONFIG_PPC_BOOK3E
Bharat Bhushan13d543c2013-05-22 09:50:59 +0530640 /*
641 * Check to see if the dbcr0 register is set up to debug.
642 * Use the internal debug mode bit to do this.
643 */
644 andis. r0,r3,DBCR0_IDM@h
Tiejun Chenc58ce2b2012-06-06 20:56:43 +0000645 beq restore
Bharat Bhushan13d543c2013-05-22 09:50:59 +0530646 mfmsr r0
647 rlwinm r0,r0,0,~MSR_DE /* Clear MSR.DE */
648 mtmsr r0
649 mtspr SPRN_DBCR0,r3
650 li r10, -1
651 mtspr SPRN_DBSR,r10
652 b restore
653#else
Cyril Bur70fe3d92016-02-29 17:53:47 +1100654 addi r3,r1,STACK_FRAME_OVERHEAD
655 bl restore_math
656 b restore
Bharat Bhushan13d543c2013-05-22 09:50:59 +0530657#endif
6581: andi. r0,r4,_TIF_NEED_RESCHED
659 beq 2f
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100660 bl restore_interrupts
Li Zhong5d1c5742013-05-13 16:16:43 +0000661 SCHEDULE_USER
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100662 b ret_from_except_lite
Paul Mackerrasd31626f2014-01-13 15:56:29 +11006632:
664#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
665 andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
666 bne 3f /* only restore TM if nothing else to do */
667 addi r3,r1,STACK_FRAME_OVERHEAD
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100668 bl restore_tm_state
Paul Mackerrasd31626f2014-01-13 15:56:29 +1100669 b restore
6703:
671#endif
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100672 bl save_nvgprs
Anton Blanchard808be312014-10-31 16:50:57 +1100673 /*
674 * Use a non volatile GPR to save and restore our thread_info flags
675 * across the call to restore_interrupts.
676 */
677 mr r30,r4
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100678 bl restore_interrupts
Anton Blanchard808be312014-10-31 16:50:57 +1100679 mr r4,r30
Tiejun Chenc58ce2b2012-06-06 20:56:43 +0000680 addi r3,r1,STACK_FRAME_OVERHEAD
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100681 bl do_notify_resume
682 b ret_from_except
Tiejun Chenc58ce2b2012-06-06 20:56:43 +0000683
/*
 * resume_kernel: taken on interrupt exit when we are returning to kernel
 * context (MSR_PR was clear).  Two jobs:
 *   1. If a kprobe on a stwu/stdu was emulated, replay the stack store by
 *      copying the exception frame to a trampoline frame below the new SP
 *      and performing the store for real.
 *   2. Under CONFIG_PREEMPT, preempt the kernel if _TIF_NEED_RESCHED is set,
 *      preempt_count() is zero and interrupts were soft-enabled.
 * On entry (from code above): r9 = current_thread_info(),
 * r4 = thread_info flags (loaded from TI_FLAGS by the caller path).
 */
684resume_kernel:
Tiejun Chena9c4e542012-09-16 23:54:30 +0000685	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
Kevin Hao0edfdd12013-09-26 16:41:34 +0800686	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
Tiejun Chena9c4e542012-09-16 23:54:30 +0000687	beq+	1f
 688
 689	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
 690
 691	lwz	r3,GPR1(r1)
 692	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
 693	mr	r4,r1			/* src:  current exception frame */
 694	mr	r1,r3			/* Reroute the trampoline frame to r1 */
 695
 696	/* Copy from the original to the trampoline. */
 697	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
 698	li	r6,0			/* start offset: 0 */
 699	mtctr	r5
/* Doubleword copy loop: CTR counts INT_FRAME_SIZE/8 iterations. */
7002:	ldx	r0,r6,r4
 701	stdx	r0,r6,r3
 702	addi	r6,r6,8
 703	bdnz	2b
 704
 705	/* Do real store operation to complete stwu */
 706	lwz	r5,GPR1(r1)
 707	std	r8,0(r5)
 708
 709	/* Clear _TIF_EMULATE_STACK_STORE flag */
 710	lis	r11,_TIF_EMULATE_STACK_STORE@h
 711	addi	r5,r9,TI_FLAGS
/* Atomic read-modify-write of thread_info->flags (other CPUs/IRQs may race). */
Kevin Haod8b92292013-04-09 22:31:24 +00007120:	ldarx	r4,0,r5
Tiejun Chena9c4e542012-09-16 23:54:30 +0000713	andc	r4,r4,r11
 714	stdcx.	r4,0,r5
 715	bne-	0b
7161:
 717
Tiejun Chenc58ce2b2012-06-06 20:56:43 +0000718#ifdef CONFIG_PREEMPT
 719	/* Check if we need to preempt */
 720	andi.	r0,r4,_TIF_NEED_RESCHED
 721	beq+	restore
 722	/* Check that preempt_count() == 0 and interrupts are enabled */
 723	lwz	r8,TI_PREEMPT(r9)
 724	cmpwi	cr1,r8,0
 725	ld	r0,SOFTE(r1)
 726	cmpdi	r0,0
/* Only preempt when BOTH cr1.eq (preempt_count==0) and cr0.eq (SOFTE!=0... inverted) hold. */
 727	crandc	eq,cr1*4+eq,eq
 728	bne	restore
 729
 730	/*
 731	 * Here we are preempting the current task. We want to make
Tiejun Chende021bb2013-07-16 11:09:30 +0800732	 * sure we are soft-disabled first and reconcile irq state.
Tiejun Chenc58ce2b2012-06-06 20:56:43 +0000733	 */
Tiejun Chende021bb2013-07-16 11:09:30 +0800734	RECONCILE_IRQ_STATE(r3,r4)
Anton Blanchardb1576fe2014-02-04 16:04:35 +11007351:	bl	preempt_schedule_irq
Tiejun Chenc58ce2b2012-06-06 20:56:43 +0000736
 737	/* Re-test flags and eventually loop */
Stuart Yoder9778b692012-07-05 04:41:35 +0000738	CURRENT_THREAD_INFO(r9, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000739	ld	r4,TI_FLAGS(r9)
Tiejun Chenc58ce2b2012-06-06 20:56:43 +0000740	andi.	r0,r4,_TIF_NEED_RESCHED
 741	bne	1b
Tiejun Chen572177d2013-01-06 00:49:34 +0000742
 743	/*
 744	 * arch_local_irq_restore() from preempt_schedule_irq above may
 745	 * enable hard interrupt but we really should disable interrupts
 746	 * when we return from the interrupt, and so that we don't get
 747	 * interrupted after loading SRR0/1.
 748	 */
 749#ifdef CONFIG_PPC_BOOK3E
 750	wrteei	0
 751#else
/* Book3S: drop MSR_EE by writing an MSR image with only RI set (mtmsrd L=1). */
Nicholas Piggin49d09bf2016-09-15 19:04:46 +1000752	li	r10,MSR_RI
Tiejun Chen572177d2013-01-06 00:49:34 +0000753	mtmsrd	r10,1		/* Update machine state */
 754#endif /* CONFIG_PPC_BOOK3E */
Tiejun Chenc58ce2b2012-06-06 20:56:43 +0000755#endif /* CONFIG_PREEMPT */
Paul Mackerras9994a332005-10-10 22:36:14 +1000756
/*
 * restore / fast_exc_return_irq / fast_exception_return:
 * Main kernel exit path for Book3S 64 (Book3E branches out to its own
 * exception_return_book3e).  Responsibilities:
 *   - reconcile the soft-IRQ enable state in the PACA with the SOFTE value
 *     saved in the exception frame, replaying pending interrupts if needed;
 *   - clear the CPU's load reservation so no stale stdcx. can succeed;
 *   - clear MSR_RI before touching SRR0/SRR1 and restoring the user r13;
 *   - restore all register state from the exception frame and rfid out.
 */
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100757	.globl	fast_exc_return_irq
758fast_exc_return_irq:
Paul Mackerras9994a332005-10-10 22:36:14 +1000759restore:
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100760	/*
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000761	 * This is the main kernel exit path. First we check if we
 762	 * are about to re-enable interrupts
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100763	 */
/* r5 = soft-enable state saved in the frame; r6 = current PACA soft-enable. */
Michael Ellerman01f3880d2008-07-16 14:21:34 +1000764	ld	r5,SOFTE(r1)
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100765	lbz	r6,PACASOFTIRQEN(r13)
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000766	cmpwi	cr0,r5,0
 767	beq	restore_irq_off
Paul Mackerras9994a332005-10-10 22:36:14 +1000768
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000769	/* We are enabling, were we already enabled ? Yes, just return */
 770	cmpwi	cr0,r6,1
 771	beq	cr0,do_restore
Paul Mackerrasb0a779d2006-10-18 10:11:22 +1000772
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000773	/*
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100774	 * We are about to soft-enable interrupts (we are hard disabled
 775	 * at this point). We check if there's anything that needs to
 776	 * be replayed first.
 777	 */
 778	lbz	r0,PACAIRQHAPPENED(r13)
 779	cmpwi	cr0,r0,0
 780	bne-	restore_check_irq_replay
 781
 782	/*
 783	 * Get here when nothing happened while soft-disabled, just
 784	 * soft-enable and move-on. We will hard-enable as a side
 785	 * effect of rfi
 786	 */
787restore_no_replay:
 788	TRACE_ENABLE_INTS
 789	li	r0,1
 790	stb	r0,PACASOFTIRQEN(r13);
 791
 792	/*
 793	 * Final return path. BookE is handled in a different file
 794	 */
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000795do_restore:
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000796#ifdef CONFIG_PPC_BOOK3E
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100797	b	exception_return_book3e
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000798#else
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100799	/*
 800	 * Clear the reservation. If we know the CPU tracks the address of
 801	 * the reservation then we can potentially save some cycles and use
 802	 * a larx. On POWER6 and POWER7 this is significantly faster.
 803	 */
804BEGIN_FTR_SECTION
 805	stdcx.	r0,0,r1		/* to clear the reservation */
806FTR_SECTION_ELSE
 807	ldarx	r4,0,r1
808ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 809
 810	/*
 811	 * Some code path such as load_up_fpu or altivec return directly
 812	 * here. They run entirely hard disabled and do not alter the
 813	 * interrupt state. They also don't use lwarx/stwcx. and thus
 814	 * are known not to leave dangling reservations.
 815	 */
 816	.globl	fast_exception_return
817fast_exception_return:
 818	ld	r3,_MSR(r1)
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100819	ld	r4,_CTR(r1)
 820	ld	r0,_LINK(r1)
 821	mtctr	r4
 822	mtlr	r0
 823	ld	r4,_XER(r1)
 824	mtspr	SPRN_XER,r4
 825
/* Restore r5..r12 in one go; r3/r4 are still needed as scratch below. */
 826	REST_8GPRS(5, r1)
 827
 828	andi.	r0,r3,MSR_RI
 829	beq-	unrecov_restore
 830
Benjamin Herrenschmidt0c4888e2013-11-05 16:33:22 +1100831	/* Load PPR from thread struct before we clear MSR:RI */
832BEGIN_FTR_SECTION
 833	ld	r2,PACACURRENT(r13)
 834	ld	r2,TASKTHREADPPR(r2)
835END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 836
Anton Blanchardf89451f2010-08-11 01:40:27 +0000837	/*
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100838	 * Clear RI before restoring r13. If we are returning to
 839	 * userspace and we take an exception after restoring r13,
 840	 * we end up corrupting the userspace r13 value.
 841	 */
Nicholas Piggin49d09bf2016-09-15 19:04:46 +1000842	li	r4,0
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100843	mtmsrd	r4,1
Paul Mackerras9994a332005-10-10 22:36:14 +1000844
Michael Neulingafc07702013-02-13 16:21:34 +0000845#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 846	/* TM debug */
 847	std	r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
 848#endif
Paul Mackerras9994a332005-10-10 22:36:14 +1000849	/*
 850	 * r13 is our per cpu area, only restore it if we are returning to
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100851	 * userspace the value stored in the stack frame may belong to
 852	 * another CPU.
Paul Mackerras9994a332005-10-10 22:36:14 +1000853	 */
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100854	andi.	r0,r3,MSR_PR
Paul Mackerras9994a332005-10-10 22:36:14 +1000855	beq	1f
Benjamin Herrenschmidt0c4888e2013-11-05 16:33:22 +1100856BEGIN_FTR_SECTION
 857	mtspr	SPRN_PPR,r2	/* Restore PPR */
858END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
Christophe Leroyc223c902016-05-17 08:33:46 +0200859	ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
Paul Mackerras9994a332005-10-10 22:36:14 +1000860	REST_GPR(13, r1)
8611:
/* From here to rfid: SRR0/SRR1 are live; any exception would clobber them (RI=0). */
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100862	mtspr	SPRN_SRR1,r3
Paul Mackerras9994a332005-10-10 22:36:14 +1000863
 864	ld	r2,_CCR(r1)
 865	mtcrf	0xFF,r2
 866	ld	r2,_NIP(r1)
 867	mtspr	SPRN_SRR0,r2
 868
 869	ld	r0,GPR0(r1)
 870	ld	r2,GPR2(r1)
 871	ld	r3,GPR3(r1)
 872	ld	r4,GPR4(r1)
 873	ld	r1,GPR1(r1)
 874
 875	rfid
 876	b	.	/* prevent speculative execution */
 877
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000878#endif /* CONFIG_PPC_BOOK3E */
879
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100880	/*
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000881	 * We are returning to a context with interrupts soft disabled.
 882	 *
 883	 * However, we may also about to hard enable, so we need to
 884	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
 885	 * or that bit can get out of sync and bad things will happen
 886	 */
/*
 * restore_irq_off: exit-path branch taken when SOFTE in the frame is 0.
 * If the MSR we are restoring has EE set, the hard-disable marker in
 * paca->irq_happened must be dropped so the bookkeeping stays consistent,
 * then PACASOFTIRQEN is forced to 0 before jumping back to do_restore.
 */
887restore_irq_off:
 888	ld	r3,_MSR(r1)
 889	lbz	r7,PACAIRQHAPPENED(r13)
 890	andi.	r0,r3,MSR_EE
 891	beq	1f
 892	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
 893	stb	r7,PACAIRQHAPPENED(r13)
8941:	li	r0,0
 895	stb	r0,PACASOFTIRQEN(r13);
 896	TRACE_DISABLE_INTS
 897	b	do_restore
898
899 /*
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100900 * Something did happen, check if a re-emit is needed
901 * (this also clears paca->irq_happened)
902 */
903restore_check_irq_replay:
904 /* XXX: We could implement a fast path here where we check
905 * for irq_happened being just 0x01, in which case we can
906 * clear it and return. That means that we would potentially
907 * miss a decrementer having wrapped all the way around.
908 *
909 * Still, this might be useful for things like hash_page
910 */
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100911 bl __check_irq_replay
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100912 cmpwi cr0,r3,0
913 beq restore_no_replay
914
915 /*
916 * We need to re-emit an interrupt. We do so by re-using our
917 * existing exception frame. We first change the trap value,
918 * but we need to ensure we preserve the low nibble of it
919 */
920 ld r4,_TRAP(r1)
921 clrldi r4,r4,60
922 or r4,r4,r3
923 std r4,_TRAP(r1)
924
925 /*
926 * Then find the right handler and call it. Interrupts are
927 * still soft-disabled and we keep them that way.
928 */
929 cmpwi cr0,r3,0x500
930 bne 1f
931 addi r3,r1,STACK_FRAME_OVERHEAD;
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100932 bl do_IRQ
933 b ret_from_except
Mahesh Salgaonkar0869b6f2014-07-29 18:40:01 +05309341: cmpwi cr0,r3,0xe60
935 bne 1f
936 addi r3,r1,STACK_FRAME_OVERHEAD;
937 bl handle_hmi_exception
938 b ret_from_except
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +11009391: cmpwi cr0,r3,0x900
940 bne 1f
941 addi r3,r1,STACK_FRAME_OVERHEAD;
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100942 bl timer_interrupt
943 b ret_from_except
Ian Munsiefe9e1d52012-11-14 18:49:48 +0000944#ifdef CONFIG_PPC_DOORBELL
9451:
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100946#ifdef CONFIG_PPC_BOOK3E
Ian Munsiefe9e1d52012-11-14 18:49:48 +0000947 cmpwi cr0,r3,0x280
948#else
949 BEGIN_FTR_SECTION
950 cmpwi cr0,r3,0xe80
951 FTR_SECTION_ELSE
952 cmpwi cr0,r3,0xa00
953 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
954#endif /* CONFIG_PPC_BOOK3E */
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100955 bne 1f
956 addi r3,r1,STACK_FRAME_OVERHEAD;
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100957 bl doorbell_exception
958 b ret_from_except
Ian Munsiefe9e1d52012-11-14 18:49:48 +0000959#endif /* CONFIG_PPC_DOORBELL */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11009601: b ret_from_except /* What else to do here ? */
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100961
/*
 * unrecov_restore: reached when MSR_RI is clear in the frame being
 * restored, i.e. SRR0/SRR1 were not recoverable.  Report and loop forever;
 * there is no safe way to continue.
 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000962unrecov_restore:
 963	addi	r3,r1,STACK_FRAME_OVERHEAD
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100964	bl	unrecoverable_exception
Paul Mackerras9994a332005-10-10 22:36:14 +1000965	b	unrecov_restore
966
967#ifdef CONFIG_PPC_RTAS
968/*
969 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
970 * called with the MMU off.
971 *
972 * In addition, we need to be in 32b mode, at least for now.
973 *
974 * Note: r3 is an input parameter to rtas, so don't trash it...
975 */
976_GLOBAL(enter_rtas)
977 mflr r0
978 std r0,16(r1)
979 stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
980
981 /* Because RTAS is running in 32b mode, it clobbers the high order half
982 * of all registers that it saves. We therefore save those registers
983 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
984 */
985 SAVE_GPR(2, r1) /* Save the TOC */
986 SAVE_GPR(13, r1) /* Save paca */
987 SAVE_8GPRS(14, r1) /* Save the non-volatiles */
988 SAVE_10GPRS(22, r1) /* ditto */
989
990 mfcr r4
991 std r4,_CCR(r1)
992 mfctr r5
993 std r5,_CTR(r1)
994 mfspr r6,SPRN_XER
995 std r6,_XER(r1)
996 mfdar r7
997 std r7,_DAR(r1)
998 mfdsisr r8
999 std r8,_DSISR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +10001000
Mike Kravetz9fe901d2006-03-27 15:20:00 -08001001 /* Temporary workaround to clear CR until RTAS can be modified to
1002 * ignore all bits.
1003 */
1004 li r0,0
1005 mtcr r0
1006
David Woodhouse007d88d2007-01-01 18:45:34 +00001007#ifdef CONFIG_BUG
Paul Mackerras9994a332005-10-10 22:36:14 +10001008 /* There is no way it is acceptable to get here with interrupts enabled,
1009 * check it with the asm equivalent of WARN_ON
1010 */
Paul Mackerrasd04c56f2006-10-04 16:47:49 +10001011 lbz r0,PACASOFTIRQEN(r13)
Paul Mackerras9994a332005-10-10 22:36:14 +100010121: tdnei r0,0
David Woodhouse007d88d2007-01-01 18:45:34 +00001013 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
1014#endif
1015
Paul Mackerrasd04c56f2006-10-04 16:47:49 +10001016 /* Hard-disable interrupts */
1017 mfmsr r6
1018 rldicl r7,r6,48,1
1019 rotldi r7,r7,16
1020 mtmsrd r7,1
1021
Paul Mackerras9994a332005-10-10 22:36:14 +10001022 /* Unfortunately, the stack pointer and the MSR are also clobbered,
1023 * so they are saved in the PACA which allows us to restore
1024 * our original state after RTAS returns.
1025 */
1026 std r1,PACAR1(r13)
1027 std r6,PACASAVEDMSR(r13)
1028
1029 /* Setup our real return addr */
Anton Blanchardad0289e2014-02-04 16:04:52 +11001030 LOAD_REG_ADDR(r4,rtas_return_loc)
David Gibsone58c3492006-01-13 14:56:25 +11001031 clrldi r4,r4,2 /* convert to realmode address */
Paul Mackerras9994a332005-10-10 22:36:14 +10001032 mtlr r4
1033
1034 li r0,0
1035 ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
1036 andc r0,r6,r0
1037
1038 li r9,1
1039 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +10001040 ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
Paul Mackerras9994a332005-10-10 22:36:14 +10001041 andc r6,r0,r9
Paul Mackerras9994a332005-10-10 22:36:14 +10001042 sync /* disable interrupts so SRR0/1 */
1043 mtmsrd r0 /* don't get trashed */
1044
David Gibsone58c3492006-01-13 14:56:25 +11001045 LOAD_REG_ADDR(r4, rtas)
Paul Mackerras9994a332005-10-10 22:36:14 +10001046 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
1047 ld r4,RTASBASE(r4) /* get the rtas->base value */
1048
1049 mtspr SPRN_SRR0,r5
1050 mtspr SPRN_SRR1,r6
1051 rfid
1052 b . /* prevent speculative execution */
1053
Anton Blanchardad0289e2014-02-04 16:04:52 +11001054rtas_return_loc:
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +10001055 FIXUP_ENDIAN
1056
Paul Mackerras9994a332005-10-10 22:36:14 +10001057 /* relocation is off at this point */
Benjamin Herrenschmidt2dd60d72011-01-20 17:50:21 +11001058 GET_PACA(r4)
David Gibsone58c3492006-01-13 14:56:25 +11001059 clrldi r4,r4,2 /* convert to realmode address */
Paul Mackerras9994a332005-10-10 22:36:14 +10001060
Paul Mackerrase31aa452008-08-30 11:41:12 +10001061 bcl 20,31,$+4
10620: mflr r3
Anton Blanchardad0289e2014-02-04 16:04:52 +11001063 ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */
Paul Mackerrase31aa452008-08-30 11:41:12 +10001064
Paul Mackerras9994a332005-10-10 22:36:14 +10001065 mfmsr r6
1066 li r0,MSR_RI
1067 andc r6,r6,r0
1068 sync
1069 mtmsrd r6
1070
1071 ld r1,PACAR1(r4) /* Restore our SP */
Paul Mackerras9994a332005-10-10 22:36:14 +10001072 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
1073
1074 mtspr SPRN_SRR0,r3
1075 mtspr SPRN_SRR1,r4
1076 rfid
1077 b . /* prevent speculative execution */
1078
Paul Mackerrase31aa452008-08-30 11:41:12 +10001079 .align 3
Anton Blanchardad0289e2014-02-04 16:04:52 +110010801: .llong rtas_restore_regs
Paul Mackerrase31aa452008-08-30 11:41:12 +10001081
Anton Blanchardad0289e2014-02-04 16:04:52 +11001082rtas_restore_regs:
Paul Mackerras9994a332005-10-10 22:36:14 +10001083 /* relocation is on at this point */
1084 REST_GPR(2, r1) /* Restore the TOC */
1085 REST_GPR(13, r1) /* Restore paca */
1086 REST_8GPRS(14, r1) /* Restore the non-volatiles */
1087 REST_10GPRS(22, r1) /* ditto */
1088
Benjamin Herrenschmidt2dd60d72011-01-20 17:50:21 +11001089 GET_PACA(r13)
Paul Mackerras9994a332005-10-10 22:36:14 +10001090
1091 ld r4,_CCR(r1)
1092 mtcr r4
1093 ld r5,_CTR(r1)
1094 mtctr r5
1095 ld r6,_XER(r1)
1096 mtspr SPRN_XER,r6
1097 ld r7,_DAR(r1)
1098 mtdar r7
1099 ld r8,_DSISR(r1)
1100 mtdsisr r8
Paul Mackerras9994a332005-10-10 22:36:14 +10001101
1102 addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
1103 ld r0,16(r1) /* get return address */
1104
1105 mtlr r0
1106 blr /* return to caller */
1107
1108#endif /* CONFIG_PPC_RTAS */
1109
/*
 * enter_prom: call into Open Firmware (the PROM).  Like enter_rtas, OF runs
 * in 32-bit big-endian mode and clobbers the high halves of saved
 * registers, so the TOC, paca and all non-volatiles are saved first.
 * r4 holds the PROM entry address on entry; return comes back to label 1.
 */
Paul Mackerras9994a332005-10-10 22:36:14 +10001110_GLOBAL(enter_prom)
 1111	mflr	r0
 1112	std	r0,16(r1)
        stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */
 1114
 1115	/* Because PROM is running in 32b mode, it clobbers the high order half
 1116	 * of all registers that it saves.  We therefore save those registers
 1117	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
 1118   	 */
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +00001119	SAVE_GPR(2, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +10001120	SAVE_GPR(13, r1)
 1121	SAVE_8GPRS(14, r1)
 1122	SAVE_10GPRS(22, r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +00001123	mfcr	r10
Paul Mackerras9994a332005-10-10 22:36:14 +10001124	mfmsr	r11
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +00001125	std	r10,_CCR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +10001126	std	r11,_MSR(r1)
 1127
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +10001128	/* Put PROM address in SRR0 */
 1129	mtsrr0	r4
Paul Mackerras9994a332005-10-10 22:36:14 +10001130
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +10001131	/* Setup our trampoline return addr in LR */
 1132	bcl	20,31,$+4
11330:	mflr	r4
 1134	addi	r4,r4,(1f - 0b)
 1135       	mtlr	r4
 1136
 1137	/* Prepare a 32-bit mode big endian MSR
Paul Mackerras9994a332005-10-10 22:36:14 +10001138	 */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +00001139#ifdef CONFIG_PPC_BOOK3E
/* Book3E: just clear the top bit (64-bit mode) of the current MSR. */
 1140	rlwinm	r11,r11,0,1,31
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +10001141	mtsrr1	r11
 1142	rfi
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +00001143#else /* CONFIG_PPC_BOOK3E */
/* Book3S: clear SF (64-bit), ISF and LE so OF runs 32-bit big-endian. */
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +10001144	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
 1145	andc	r11,r11,r12
 1146	mtsrr1	r11
 1147	rfid
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +00001148#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +10001149
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +100011501:	/* Return from OF */
 1151	FIXUP_ENDIAN
Paul Mackerras9994a332005-10-10 22:36:14 +10001152
 1153	/* Just make sure that r1 top 32 bits didn't get
 1154	 * corrupt by OF
 1155	 */
 1156	rldicl	r1,r1,0,32
 1157
 1158	/* Restore the MSR (back to 64 bits) */
 1159	ld	r0,_MSR(r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +00001160	MTMSRD(r0)
Paul Mackerras9994a332005-10-10 22:36:14 +10001161        isync
 1162
 1163	/* Restore other registers */
 1164	REST_GPR(2, r1)
 1165	REST_GPR(13, r1)
 1166	REST_8GPRS(14, r1)
 1167	REST_10GPRS(22, r1)
 1168	ld	r4,_CCR(r1)
 1169	mtcr	r4
Paul Mackerras9994a332005-10-10 22:36:14 +10001170	
 1171	addi	r1,r1,PROM_FRAME_SIZE
 1172	ld	r0,16(r1)
 1173	mtlr    r0
 1174	blr
Steven Rostedt4e491d12008-05-14 23:49:44 -04001175
Steven Rostedt606576c2008-10-06 19:06:12 -04001176#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt4e491d12008-05-14 23:49:44 -04001177#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * With dynamic ftrace the _mcount call sites are patched at runtime, so
 * the default _mcount is just a fast return: LR still holds the call-site
 * return address in r0's shadow; restore caller's LR from r0 and branch
 * back via CTR without touching any other state.
 */
1178_GLOBAL(mcount)
1179_GLOBAL(_mcount)
Torsten Duwe15308662016-03-03 15:26:59 +11001180	mflr	r12
 1181	mtctr	r12
 1182	mtlr	r0
 1183	bctr
Steven Rostedt4e491d12008-05-14 23:49:44 -04001184
Torsten Duwe15308662016-03-03 15:26:59 +11001185#ifndef CC_USING_MPROFILE_KERNEL
/*
 * ftrace_caller (classic -pg variant): minimal trampoline patched in at
 * each call site.  Builds a small frame, loads r3 = call-site address
 * (LR - MCOUNT_INSN_SIZE) and r4 = parent return address from the caller's
 * frame, then calls the patched-in ftrace_call site.
 */
Anton Blanchard5e666842014-04-04 09:06:33 +11001186_GLOBAL_TOC(ftrace_caller)
Steven Rostedt4e491d12008-05-14 23:49:44 -04001187	/* Taken from output of objdump from lib64/glibc */
 1188	mflr	r3
 1189	ld	r11, 0(r1)
 1190	stdu	r1, -112(r1)
 1191	std	r3, 128(r1)
 1192	ld	r4, 16(r11)
Abhishek Sagar395a59d2008-06-21 23:47:27 +05301193	subi	r3, r3, MCOUNT_INSN_SIZE
Steven Rostedt4e491d12008-05-14 23:49:44 -04001194.globl ftrace_call
1195ftrace_call:
/* This bl is live-patched by ftrace to call the active trace function. */
 1196	bl	ftrace_stub
 1197	nop
Steven Rostedt46542882009-02-10 22:19:54 -08001198#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 1199.globl ftrace_graph_call
1200ftrace_graph_call:
 1201	b	ftrace_graph_stub
 1202_GLOBAL(ftrace_graph_stub)
 1203#endif
Steven Rostedt4e491d12008-05-14 23:49:44 -04001204	ld	r0, 128(r1)
 1205	mtlr	r0
 1206	addi	r1, r1, 112
 1208#else /* CC_USING_MPROFILE_KERNEL */
 1209/*
 1210 *
 1211 * ftrace_caller() is the function that replaces _mcount() when ftrace is
 1212 * active.
 1213 *
 1214 * We arrive here after a function A calls function B, and we are the trace
 1215 * function for B. When we enter r1 points to A's stack frame, B has not yet
 1216 * had a chance to allocate one yet.
 1217 *
 1218 * Additionally r2 may point either to the TOC for A, or B, depending on
 1219 * whether B did a TOC setup sequence before calling us.
 1220 *
 1221 * On entry the LR points back to the _mcount() call site, and r0 holds the
 1222 * saved LR as it was on entry to B, ie. the original return address at the
 1223 * call site in A.
 1224 *
 1225 * Our job is to save the register state into a struct pt_regs (on the stack)
 1226 * and then arrange for the ftrace function to be called.
 1227 */
1228_GLOBAL(ftrace_caller)
 1229	/* Save the original return address in A's stack frame */
 1230	std	r0,LRSAVE(r1)
 1231
 1232	/* Create our stack frame + pt_regs */
 1233	stdu	r1,-SWITCH_FRAME_SIZE(r1)
 1234
 1235	/* Save all gprs to pt_regs */
 1236	SAVE_8GPRS(0,r1)
 1237	SAVE_8GPRS(8,r1)
 1238	SAVE_8GPRS(16,r1)
 1239	SAVE_8GPRS(24,r1)
 1240
 1241	/* Load special regs for save below */
 1242	mfmsr   r8
 1243	mfctr   r9
 1244	mfxer   r10
 1245	mfcr	r11
 1246
 1247	/* Get the _mcount() call site out of LR */
 1248	mflr	r7
 1249	/* Save it as pt_regs->nip & pt_regs->link */
 1250	std     r7, _NIP(r1)
 1251	std     r7, _LINK(r1)
 1252
 1253	/* Save callee's TOC in the ABI compliant location */
 1254	std	r2, 24(r1)
 1255	ld	r2,PACATOC(r13)	/* get kernel TOC in r2 */
 1256
/* r5 = function_trace_op, third argument to the trace function. */
 1257	addis	r3,r2,function_trace_op@toc@ha
 1258	addi	r3,r3,function_trace_op@toc@l
 1259	ld	r5,0(r3)
 1260
Michael Ellerman85baa092016-03-24 22:04:05 +11001261#ifdef CONFIG_LIVEPATCH
 1262	mr	r14,r7		/* remember old NIP */
 1263#endif
Torsten Duwe15308662016-03-03 15:26:59 +11001264	/* Calculate ip from nip-4 into r3 for call below */
 1265	subi    r3, r7, MCOUNT_INSN_SIZE
 1266
 1267	/* Put the original return address in r4 as parent_ip */
 1268	mr	r4, r0
 1269
 1270	/* Save special regs */
 1271	std     r8, _MSR(r1)
 1272	std     r9, _CTR(r1)
 1273	std     r10, _XER(r1)
 1274	std     r11, _CCR(r1)
 1275
 1276	/* Load &pt_regs in r6 for call below */
 1277	addi    r6, r1 ,STACK_FRAME_OVERHEAD
 1278
 1279	/* ftrace_call(r3, r4, r5, r6) */
 1280.globl ftrace_call
1281ftrace_call:
/* Live-patched by ftrace to call the active trace function. */
 1282	bl	ftrace_stub
 1283	nop
 1284
 1285	/* Load ctr with the possibly modified NIP */
 1286	ld	r3, _NIP(r1)
 1287	mtctr	r3
Michael Ellerman85baa092016-03-24 22:04:05 +11001288#ifdef CONFIG_LIVEPATCH
 1289	cmpd	r14,r3		/* has NIP been altered? */
 1290#endif
Torsten Duwe15308662016-03-03 15:26:59 +11001291
 1292	/* Restore gprs */
 1293	REST_8GPRS(0,r1)
 1294	REST_8GPRS(8,r1)
 1295	REST_8GPRS(16,r1)
 1296	REST_8GPRS(24,r1)
 1297
 1298	/* Restore callee's TOC */
 1299	ld	r2, 24(r1)
 1300
 1301	/* Pop our stack frame */
 1302	addi r1, r1, SWITCH_FRAME_SIZE
 1303
 1304	/* Restore original LR for return to B */
 1305	ld	r0, LRSAVE(r1)
 1306	mtlr	r0
 1307
Michael Ellerman85baa092016-03-24 22:04:05 +11001308#ifdef CONFIG_LIVEPATCH
 1309        /* Based on the cmpd above, if the NIP was altered handle livepatch */
 1310	bne-	livepatch_handler
 1311#endif
 1312
Torsten Duwe15308662016-03-03 15:26:59 +11001313#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 1314	stdu	r1, -112(r1)
 1315.globl ftrace_graph_call
1316ftrace_graph_call:
 1317	b	ftrace_graph_stub
 1318_GLOBAL(ftrace_graph_stub)
 1319	addi	r1, r1, 112
 1320#endif
 1321
 1322	ld	r0,LRSAVE(r1)	/* restore callee's lr at _mcount site */
 1323	mtlr	r0
 1324	bctr			/* jump after _mcount site */
 1325#endif /* CC_USING_MPROFILE_KERNEL */
1326
/* Default (no-op) trace target: patched-in calls land here and return. */
Steven Rostedt4e491d12008-05-14 23:49:44 -04001327_GLOBAL(ftrace_stub)
 1328	blr
Michael Ellerman85baa092016-03-24 22:04:05 +11001329
 1330#ifdef CONFIG_LIVEPATCH
 1331	/*
 1332	 * This function runs in the mcount context, between two functions. As
 1333	 * such it can only clobber registers which are volatile and used in
 1334	 * function linkage.
 1335	 *
 1336	 * We get here when a function A, calls another function B, but B has
 1337	 * been live patched with a new function C.
 1338	 *
 1339	 * On entry:
 1340	 *  - we have no stack frame and can not allocate one
 1341	 *  - LR points back to the original caller (in A)
 1342	 *  - CTR holds the new NIP in C
 1343	 *  - r0 & r12 are free
 1344	 *
 1345	 * r0 can't be used as the base register for a DS-form load or store, so
 1346	 * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
 1347	 */
1348livepatch_handler:
 1349	CURRENT_THREAD_INFO(r12, r1)
 1350
 1351	/* Save stack pointer into r0 */
 1352	mr	r0, r1
 1353
/* Push a 3-slot frame (TOC, LR, magic) onto the per-thread livepatch stack. */
 1354	/* Allocate 3 x 8 bytes */
 1355	ld	r1, TI_livepatch_sp(r12)
 1356	addi	r1, r1, 24
 1357	std	r1, TI_livepatch_sp(r12)
 1358
 1359	/* Save toc & real LR on livepatch stack */
 1360	std	r2,  -24(r1)
 1361	mflr	r12
 1362	std	r12, -16(r1)
 1363
 1364	/* Store stack end marker */
 1365	lis     r12, STACK_END_MAGIC@h
 1366	ori     r12, r12, STACK_END_MAGIC@l
 1367	std	r12, -8(r1)
 1368
 1369	/* Restore real stack pointer */
 1370	mr	r1, r0
 1371
 1372	/* Put ctr in r12 for global entry and branch there */
 1373	mfctr	r12
 1374	bctrl
 1375
 1376	/*
 1377	 * Now we are returning from the patched function to the original
 1378	 * caller A. We are free to use r0 and r12, and we can use r2 until we
 1379	 * restore it.
 1380	 */
 1381
 1382	CURRENT_THREAD_INFO(r12, r1)
 1383
 1384	/* Save stack pointer into r0 */
 1385	mr	r0, r1
 1386
 1387	ld	r1, TI_livepatch_sp(r12)
 1388
 1389	/* Check stack marker hasn't been trashed */
 1390	lis     r2,  STACK_END_MAGIC@h
 1391	ori     r2,  r2, STACK_END_MAGIC@l
 1392	ld	r12, -8(r1)
/* Trap (asm WARN/BUG) if the magic doesn't match — livepatch stack overflow. */
13931:	tdne	r12, r2
 1394	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
 1395
 1396	/* Restore LR & toc from livepatch stack */
 1397	ld	r12, -16(r1)
 1398	mtlr	r12
 1399	ld	r2, -24(r1)
 1400
 1401	/* Pop livepatch stack frame */
 1402	CURRENT_THREAD_INFO(r12, r0)
 1403	subi	r1, r1, 24
 1404	std	r1, TI_livepatch_sp(r12)
 1405
 1406	/* Restore real stack pointer */
 1407	mr	r1, r0
 1408
 1409	/* Return to original caller of live patched function */
 1410	blr
 1411#endif
1412
1413
Steven Rostedt4e491d12008-05-14 23:49:44 -04001414#else
/*
 * _mcount (non-dynamic ftrace): every traced function calls here.
 * Loads the current ftrace_trace_function pointer (double dereference:
 * variable -> function descriptor -> entry) and calls it with
 * r3 = call-site address, r4 = parent return address.
 */
Anton Blanchard5e666842014-04-04 09:06:33 +11001415_GLOBAL_TOC(_mcount)
Steven Rostedt4e491d12008-05-14 23:49:44 -04001416	/* Taken from output of objdump from lib64/glibc */
 1417	mflr	r3
 1418	ld	r11, 0(r1)
 1419	stdu	r1, -112(r1)
 1420	std	r3, 128(r1)
 1421	ld	r4, 16(r11)
 1422
Abhishek Sagar395a59d2008-06-21 23:47:27 +05301423	subi	r3, r3, MCOUNT_INSN_SIZE
Steven Rostedt4e491d12008-05-14 23:49:44 -04001424	LOAD_REG_ADDR(r5,ftrace_trace_function)
 1425	ld	r5,0(r5)
 1426	ld	r5,0(r5)
 1427	mtctr	r5
 1428	bctrl
Steven Rostedt4e491d12008-05-14 23:49:44 -04001429	nop
Steven Rostedt6794c782009-02-09 21:10:27 -08001430
 1431
 1432#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 1433	b	ftrace_graph_caller
 1434#endif
Steven Rostedt4e491d12008-05-14 23:49:44 -04001435	ld	r0, 128(r1)
 1436	mtlr	r0
 1437	addi	r1, r1, 112
1438_GLOBAL(ftrace_stub)
 1439	blr
1440
Steven Rostedt6794c782009-02-09 21:10:27 -08001441#endif /* CONFIG_DYNAMIC_FTRACE */
1442
1443#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Torsten Duwe15308662016-03-03 15:26:59 +11001444#ifndef CC_USING_MPROFILE_KERNEL
/*
 * ftrace_graph_caller (classic -pg variant): hand the parent return
 * address stored in the caller's frame to prepare_ftrace_return(), which
 * returns the address to divert the return to; write that back into the
 * caller's frame so the function "returns" into the graph tracer.
 */
Steven Rostedt46542882009-02-10 22:19:54 -08001445_GLOBAL(ftrace_graph_caller)
Steven Rostedt6794c782009-02-09 21:10:27 -08001446	/* load r4 with local address */
 1447	ld	r4, 128(r1)
 1448	subi	r4, r4, MCOUNT_INSN_SIZE
 1449
Anton Blanchardb3c18722014-09-17 17:07:04 +10001450	/* Grab the LR out of the caller stack frame */
Steven Rostedt6794c782009-02-09 21:10:27 -08001451	ld	r11, 112(r1)
Anton Blanchardb3c18722014-09-17 17:07:04 +10001452	ld	r3, 16(r11)
Steven Rostedt6794c782009-02-09 21:10:27 -08001453
Anton Blanchardb1576fe2014-02-04 16:04:35 +11001454	bl	prepare_ftrace_return
Steven Rostedt6794c782009-02-09 21:10:27 -08001455	nop
 1456
Anton Blanchardb3c18722014-09-17 17:07:04 +10001457	/*
 1458	 * prepare_ftrace_return gives us the address we divert to.
 1459	 * Change the LR in the callers stack frame to this.
 1460	 */
 1461	ld	r11, 112(r1)
 1462	std	r3, 16(r11)
 1463
Steven Rostedt6794c782009-02-09 21:10:27 -08001464	ld	r0, 128(r1)
 1465	mtlr	r0
 1466	addi	r1, r1, 112
 1467	blr
1468
Torsten Duwe15308662016-03-03 15:26:59 +11001469#else /* CC_USING_MPROFILE_KERNEL */
/*
 * ftrace_graph_caller (-mprofile-kernel variant): called via the graph
 * stub in ftrace_caller above.  Parameter registers r3-r10 are still live
 * at the _mcount site, so they are spilled and reloaded around the call to
 * prepare_ftrace_return; the diverted return address comes back in r3 and
 * is installed into LR.
 */
1470_GLOBAL(ftrace_graph_caller)
 1471	/* with -mprofile-kernel, parameter regs are still alive at _mcount */
 1472	std	r10, 104(r1)
 1473	std	r9, 96(r1)
 1474	std	r8, 88(r1)
 1475	std	r7, 80(r1)
 1476	std	r6, 72(r1)
 1477	std	r5, 64(r1)
 1478	std	r4, 56(r1)
 1479	std	r3, 48(r1)
 1480
 1481	/* Save callee's TOC in the ABI compliant location */
 1482	std	r2, 24(r1)
 1483	ld	r2, PACATOC(r13)	/* get kernel TOC in r2 */
 1484
 1485	mfctr	r4		/* ftrace_caller has moved local addr here */
 1486	std	r4, 40(r1)
 1487	mflr	r3		/* ftrace_caller has restored LR from stack */
 1488	subi	r4, r4, MCOUNT_INSN_SIZE
 1489
 1490	bl	prepare_ftrace_return
 1491	nop
 1492
 1493	/*
 1494	 * prepare_ftrace_return gives us the address we divert to.
 1495	 * Change the LR to this.
 1496	 */
 1497	mtlr	r3
 1498
 1499	ld	r0, 40(r1)
 1500	mtctr	r0
 1501	ld	r10, 104(r1)
 1502	ld	r9, 96(r1)
 1503	ld	r8, 88(r1)
 1504	ld	r7, 80(r1)
 1505	ld	r6, 72(r1)
 1506	ld	r5, 64(r1)
 1507	ld	r4, 56(r1)
 1508	ld	r3, 48(r1)
 1509
 1510	/* Restore callee's TOC */
 1511	ld	r2, 24(r1)
 1512
 1513	addi	r1, r1, 112
 1514	mflr	r0
 1515	std	r0, LRSAVE(r1)
 1516	bctr
1517#endif /* CC_USING_MPROFILE_KERNEL */
1518
/*
 * return_to_handler: the address prepare_ftrace_return diverts function
 * returns to.  Saves the (possibly pair of) return value registers r3/r4
 * and the TOC, switches to the kernel TOC (we may return into a module),
 * asks ftrace_return_to_handler() for the real return address, restores
 * the return values and jumps there.
 */
Steven Rostedt6794c782009-02-09 21:10:27 -08001519_GLOBAL(return_to_handler)
 1520	/* need to save return values */
 1521	std	r4,  -32(r1)
 1522	std	r3,  -24(r1)
 1523	/* save TOC */
 1524	std	r2,  -16(r1)
 1525	std	r31, -8(r1)
 1526	mr	r31, r1
 1527	stdu	r1, -112(r1)
 1528
Steven Rostedtbb725342009-02-11 12:45:49 -08001529	/*
Anton Blanchard7d56c652014-09-17 17:07:03 +10001530	 * We might be called from a module.
Steven Rostedtbb725342009-02-11 12:45:49 -08001531	 * Switch to our TOC to run inside the core kernel.
 1532	 */
Steven Rostedtbe10ab12009-09-15 08:30:14 -07001533	ld	r2, PACATOC(r13)
Steven Rostedt6794c782009-02-09 21:10:27 -08001534
Anton Blanchardb1576fe2014-02-04 16:04:35 +11001535	bl	ftrace_return_to_handler
Steven Rostedt6794c782009-02-09 21:10:27 -08001536	nop
 1537
 1538	/* return value has real return address */
 1539	mtlr	r3
 1540
 1541	ld	r1, 0(r1)
 1542	ld	r4,  -32(r1)
 1543	ld	r3,  -24(r1)
 1544	ld	r2,  -16(r1)
 1545	ld	r31, -8(r1)
 1546
 1547	/* Jump back to real return address */
 1548	blr
1549#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1550#endif /* CONFIG_FUNCTION_TRACER */