/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>

35#undef SHOW_SYSCALLS
36#undef SHOW_SYSCALLS_TASK
37
38/*
39 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
40 */
41#if MSR_KERNEL >= 0x10000
42#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
43#else
44#define LOAD_MSR_KERNEL(r, x) li r,(x)
45#endif
46
47#ifdef CONFIG_BOOKE
Paul Mackerras9994a332005-10-10 22:36:14 +100048 .globl mcheck_transfer_to_handler
49mcheck_transfer_to_handler:
Kumar Galafca622c2008-04-30 05:23:21 -050050 mfspr r0,SPRN_DSRR0
51 stw r0,_DSRR0(r11)
52 mfspr r0,SPRN_DSRR1
53 stw r0,_DSRR1(r11)
54 /* fall through */
Paul Mackerras9994a332005-10-10 22:36:14 +100055
56 .globl debug_transfer_to_handler
57debug_transfer_to_handler:
Kumar Galafca622c2008-04-30 05:23:21 -050058 mfspr r0,SPRN_CSRR0
59 stw r0,_CSRR0(r11)
60 mfspr r0,SPRN_CSRR1
61 stw r0,_CSRR1(r11)
62 /* fall through */
Paul Mackerras9994a332005-10-10 22:36:14 +100063
64 .globl crit_transfer_to_handler
65crit_transfer_to_handler:
Kumar Galafca622c2008-04-30 05:23:21 -050066#ifdef CONFIG_FSL_BOOKE
67 mfspr r0,SPRN_MAS0
68 stw r0,MAS0(r11)
69 mfspr r0,SPRN_MAS1
70 stw r0,MAS1(r11)
71 mfspr r0,SPRN_MAS2
72 stw r0,MAS2(r11)
73 mfspr r0,SPRN_MAS3
74 stw r0,MAS3(r11)
75 mfspr r0,SPRN_MAS6
76 stw r0,MAS6(r11)
77#ifdef CONFIG_PHYS_64BIT
78 mfspr r0,SPRN_MAS7
79 stw r0,MAS7(r11)
80#endif /* CONFIG_PHYS_64BIT */
81#endif /* CONFIG_FSL_BOOKE */
82#ifdef CONFIG_44x
83 mfspr r0,SPRN_MMUCR
84 stw r0,MMUCR(r11)
85#endif
86 mfspr r0,SPRN_SRR0
87 stw r0,_SRR0(r11)
88 mfspr r0,SPRN_SRR1
89 stw r0,_SRR1(r11)
90
91 mfspr r8,SPRN_SPRG3
92 lwz r0,KSP_LIMIT(r8)
93 stw r0,SAVED_KSP_LIMIT(r11)
94 rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
95 stw r0,KSP_LIMIT(r8)
Paul Mackerras9994a332005-10-10 22:36:14 +100096 /* fall through */
97#endif
98
99#ifdef CONFIG_40x
100 .globl crit_transfer_to_handler
101crit_transfer_to_handler:
102 lwz r0,crit_r10@l(0)
103 stw r0,GPR10(r11)
104 lwz r0,crit_r11@l(0)
105 stw r0,GPR11(r11)
Kumar Galafca622c2008-04-30 05:23:21 -0500106 mfspr r0,SPRN_SRR0
107 stw r0,crit_srr0@l(0)
108 mfspr r0,SPRN_SRR1
109 stw r0,crit_srr1@l(0)
110
111 mfspr r8,SPRN_SPRG3
112 lwz r0,KSP_LIMIT(r8)
113 stw r0,saved_ksp_limit@l(0)
114 rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
115 stw r0,KSP_LIMIT(r8)
Paul Mackerras9994a332005-10-10 22:36:14 +1000116 /* fall through */
117#endif
118
119/*
120 * This code finishes saving the registers to the exception frame
121 * and jumps to the appropriate handler for the exception, turning
122 * on address translation.
123 * Note that we rely on the caller having set cr0.eq iff the exception
124 * occurred in kernel mode (i.e. MSR:PR = 0).
125 */
126 .globl transfer_to_handler_full
127transfer_to_handler_full:
128 SAVE_NVGPRS(r11)
129 /* fall through */
130
131 .globl transfer_to_handler
132transfer_to_handler:
133 stw r2,GPR2(r11)
134 stw r12,_NIP(r11)
135 stw r9,_MSR(r11)
136 andi. r2,r9,MSR_PR
137 mfctr r12
138 mfspr r2,SPRN_XER
139 stw r12,_CTR(r11)
140 stw r2,_XER(r11)
141 mfspr r12,SPRN_SPRG3
142 addi r2,r12,-THREAD
143 tovirt(r2,r2) /* set r2 to current */
144 beq 2f /* if from user, fix up THREAD.regs */
145 addi r11,r1,STACK_FRAME_OVERHEAD
146 stw r11,PT_REGS(r12)
147#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
148 /* Check to see if the dbcr0 register is set up to debug. Use the
Kumar Gala4eaddb42008-04-09 16:15:40 -0500149 internal debug mode bit to do this. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000150 lwz r12,THREAD_DBCR0(r12)
Luis Machadod6a61bf2008-07-24 02:10:41 +1000151 andis. r12,r12,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
Paul Mackerras9994a332005-10-10 22:36:14 +1000152 beq+ 3f
153 /* From user and task is ptraced - load up global dbcr0 */
154 li r12,-1 /* clear all pending debug events */
155 mtspr SPRN_DBSR,r12
156 lis r11,global_dbcr0@ha
157 tophys(r11,r11)
158 addi r11,r11,global_dbcr0@l
Kumar Gala4eaddb42008-04-09 16:15:40 -0500159#ifdef CONFIG_SMP
160 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
161 lwz r9,TI_CPU(r9)
162 slwi r9,r9,3
163 add r11,r11,r9
164#endif
Paul Mackerras9994a332005-10-10 22:36:14 +1000165 lwz r12,0(r11)
166 mtspr SPRN_DBCR0,r12
167 lwz r12,4(r11)
168 addi r12,r12,-1
169 stw r12,4(r11)
170#endif
171 b 3f
Paul Mackerrasf39224a2006-04-18 21:49:11 +1000172
Paul Mackerras9994a332005-10-10 22:36:14 +10001732: /* if from kernel, check interrupted DOZE/NAP mode and
174 * check for stack overflow
175 */
Kumar Gala85218822008-04-28 16:21:22 +1000176 lwz r9,KSP_LIMIT(r12)
177 cmplw r1,r9 /* if r1 <= ksp_limit */
Paul Mackerrasf39224a2006-04-18 21:49:11 +1000178 ble- stack_ovf /* then the kernel stack overflowed */
1795:
Kumar Galafc4033b2008-06-18 16:26:52 -0500180#if defined(CONFIG_6xx) || defined(CONFIG_E500)
Kumar Gala85218822008-04-28 16:21:22 +1000181 rlwinm r9,r1,0,0,31-THREAD_SHIFT
Paul Mackerrasf39224a2006-04-18 21:49:11 +1000182 tophys(r9,r9) /* check local flags */
183 lwz r12,TI_LOCAL_FLAGS(r9)
184 mtcrf 0x01,r12
185 bt- 31-TLF_NAPPING,4f
Paul Mackerrasa5606432008-05-14 14:30:48 +1000186 bt- 31-TLF_SLEEPING,7f
Kumar Galafc4033b2008-06-18 16:26:52 -0500187#endif /* CONFIG_6xx || CONFIG_E500 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000188 .globl transfer_to_handler_cont
189transfer_to_handler_cont:
Paul Mackerras9994a332005-10-10 22:36:14 +10001903:
191 mflr r9
192 lwz r11,0(r9) /* virtual address of handler */
193 lwz r9,4(r9) /* where to go when done */
Paul Mackerras9994a332005-10-10 22:36:14 +1000194 mtspr SPRN_SRR0,r11
195 mtspr SPRN_SRR1,r10
196 mtlr r9
197 SYNC
198 RFI /* jump to handler, enable MMU */
199
Kumar Galafc4033b2008-06-18 16:26:52 -0500200#if defined (CONFIG_6xx) || defined(CONFIG_E500)
Paul Mackerrasf39224a2006-04-18 21:49:11 +10002014: rlwinm r12,r12,0,~_TLF_NAPPING
202 stw r12,TI_LOCAL_FLAGS(r9)
Kumar Galafc4033b2008-06-18 16:26:52 -0500203 b power_save_ppc32_restore
Paul Mackerrasa5606432008-05-14 14:30:48 +1000204
2057: rlwinm r12,r12,0,~_TLF_SLEEPING
206 stw r12,TI_LOCAL_FLAGS(r9)
207 lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
208 rlwinm r9,r9,0,~MSR_EE
209 lwz r12,_LINK(r11) /* and return to address in LR */
210 b fast_exception_return
Paul Mackerrasa0652fc2006-03-27 15:03:03 +1100211#endif
212
Paul Mackerras9994a332005-10-10 22:36:14 +1000213/*
214 * On kernel stack overflow, load up an initial stack pointer
215 * and call StackOverflow(regs), which should not return.
216 */
217stack_ovf:
218 /* sometimes we use a statically-allocated stack, which is OK. */
Paul Mackerrasf39224a2006-04-18 21:49:11 +1000219 lis r12,_end@h
220 ori r12,r12,_end@l
221 cmplw r1,r12
222 ble 5b /* r1 <= &_end is OK */
Paul Mackerras9994a332005-10-10 22:36:14 +1000223 SAVE_NVGPRS(r11)
224 addi r3,r1,STACK_FRAME_OVERHEAD
225 lis r1,init_thread_union@ha
226 addi r1,r1,init_thread_union@l
227 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
228 lis r9,StackOverflow@ha
229 addi r9,r9,StackOverflow@l
230 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
231 FIX_SRR1(r10,r12)
232 mtspr SPRN_SRR0,r9
233 mtspr SPRN_SRR1,r10
234 SYNC
235 RFI
236
237/*
238 * Handle a system call.
239 */
240 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
241 .stabs "entry_32.S",N_SO,0,0,0f
2420:
243
244_GLOBAL(DoSyscall)
Paul Mackerras9994a332005-10-10 22:36:14 +1000245 stw r3,ORIG_GPR3(r1)
246 li r12,0
247 stw r12,RESULT(r1)
248 lwz r11,_CCR(r1) /* Clear SO bit in CR */
249 rlwinm r11,r11,0,4,2
250 stw r11,_CCR(r1)
251#ifdef SHOW_SYSCALLS
252 bl do_show_syscall
253#endif /* SHOW_SYSCALLS */
David Gibson6cb7bfe2005-10-21 15:45:50 +1000254 rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
Paul Mackerras9994a332005-10-10 22:36:14 +1000255 lwz r11,TI_FLAGS(r10)
256 andi. r11,r11,_TIF_SYSCALL_T_OR_A
257 bne- syscall_dotrace
258syscall_dotrace_cont:
259 cmplwi 0,r0,NR_syscalls
260 lis r10,sys_call_table@h
261 ori r10,r10,sys_call_table@l
262 slwi r0,r0,2
263 bge- 66f
264 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
265 mtlr r10
266 addi r9,r1,STACK_FRAME_OVERHEAD
267 PPC440EP_ERR42
268 blrl /* Call handler */
269 .globl ret_from_syscall
270ret_from_syscall:
271#ifdef SHOW_SYSCALLS
272 bl do_show_syscall_exit
273#endif
274 mr r6,r3
David Gibson6cb7bfe2005-10-21 15:45:50 +1000275 rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
Paul Mackerras9994a332005-10-10 22:36:14 +1000276 /* disable interrupts so current_thread_info()->flags can't change */
David Woodhouse401d1f02005-11-15 18:52:18 +0000277 LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
Paul Mackerras9994a332005-10-10 22:36:14 +1000278 SYNC
279 MTMSRD(r10)
280 lwz r9,TI_FLAGS(r12)
David Woodhouse401d1f02005-11-15 18:52:18 +0000281 li r8,-_LAST_ERRNO
Paul Mackerras1bd79332006-03-08 13:24:22 +1100282 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
Paul Mackerras9994a332005-10-10 22:36:14 +1000283 bne- syscall_exit_work
David Woodhouse401d1f02005-11-15 18:52:18 +0000284 cmplw 0,r3,r8
285 blt+ syscall_exit_cont
286 lwz r11,_CCR(r1) /* Load CR */
287 neg r3,r3
288 oris r11,r11,0x1000 /* Set SO bit in CR */
289 stw r11,_CCR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000290syscall_exit_cont:
291#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
Kumar Gala4eaddb42008-04-09 16:15:40 -0500292 /* If the process has its own DBCR0 value, load it up. The internal
293 debug mode bit tells us that dbcr0 should be loaded. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000294 lwz r0,THREAD+THREAD_DBCR0(r2)
Luis Machadod6a61bf2008-07-24 02:10:41 +1000295 andis. r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
Paul Mackerras9994a332005-10-10 22:36:14 +1000296 bnel- load_dbcr0
297#endif
Benjamin Herrenschmidtb98ac05d2007-10-31 16:42:19 +1100298#ifdef CONFIG_44x
299 lis r4,icache_44x_need_flush@ha
300 lwz r5,icache_44x_need_flush@l(r4)
301 cmplwi cr0,r5,0
302 bne- 2f
3031:
304#endif /* CONFIG_44x */
Becky Bruceb64f87c2007-11-10 09:17:49 +1100305BEGIN_FTR_SECTION
306 lwarx r7,0,r1
307END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
Paul Mackerras9994a332005-10-10 22:36:14 +1000308 stwcx. r0,0,r1 /* to clear the reservation */
309 lwz r4,_LINK(r1)
310 lwz r5,_CCR(r1)
311 mtlr r4
312 mtcr r5
313 lwz r7,_NIP(r1)
314 lwz r8,_MSR(r1)
315 FIX_SRR1(r8, r0)
316 lwz r2,GPR2(r1)
317 lwz r1,GPR1(r1)
318 mtspr SPRN_SRR0,r7
319 mtspr SPRN_SRR1,r8
320 SYNC
321 RFI
Benjamin Herrenschmidtb98ac05d2007-10-31 16:42:19 +1100322#ifdef CONFIG_44x
3232: li r7,0
324 iccci r0,r0
325 stw r7,icache_44x_need_flush@l(r4)
326 b 1b
327#endif /* CONFIG_44x */
Paul Mackerras9994a332005-10-10 22:36:14 +1000328
32966: li r3,-ENOSYS
330 b ret_from_syscall
331
332 .globl ret_from_fork
333ret_from_fork:
334 REST_NVGPRS(r1)
335 bl schedule_tail
336 li r3,0
337 b ret_from_syscall
338
339/* Traced system call support */
340syscall_dotrace:
341 SAVE_NVGPRS(r1)
342 li r0,0xc00
Paul Mackerrasd73e0c92005-10-28 22:45:25 +1000343 stw r0,_TRAP(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000344 addi r3,r1,STACK_FRAME_OVERHEAD
345 bl do_syscall_trace_enter
346 lwz r0,GPR0(r1) /* Restore original registers */
347 lwz r3,GPR3(r1)
348 lwz r4,GPR4(r1)
349 lwz r5,GPR5(r1)
350 lwz r6,GPR6(r1)
351 lwz r7,GPR7(r1)
352 lwz r8,GPR8(r1)
353 REST_NVGPRS(r1)
354 b syscall_dotrace_cont
355
356syscall_exit_work:
David Woodhouse401d1f02005-11-15 18:52:18 +0000357 andi. r0,r9,_TIF_RESTOREALL
Paul Mackerras1bd79332006-03-08 13:24:22 +1100358 beq+ 0f
359 REST_NVGPRS(r1)
360 b 2f
3610: cmplw 0,r3,r8
David Woodhouse401d1f02005-11-15 18:52:18 +0000362 blt+ 1f
363 andi. r0,r9,_TIF_NOERROR
364 bne- 1f
365 lwz r11,_CCR(r1) /* Load CR */
366 neg r3,r3
367 oris r11,r11,0x1000 /* Set SO bit in CR */
368 stw r11,_CCR(r1)
369
3701: stw r6,RESULT(r1) /* Save result */
Paul Mackerras9994a332005-10-10 22:36:14 +1000371 stw r3,GPR3(r1) /* Update return value */
David Woodhouse401d1f02005-11-15 18:52:18 +00003722: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
373 beq 4f
374
Paul Mackerras1bd79332006-03-08 13:24:22 +1100375 /* Clear per-syscall TIF flags if any are set. */
David Woodhouse401d1f02005-11-15 18:52:18 +0000376
377 li r11,_TIF_PERSYSCALL_MASK
378 addi r12,r12,TI_FLAGS
3793: lwarx r8,0,r12
380 andc r8,r8,r11
381#ifdef CONFIG_IBM405_ERR77
382 dcbt 0,r12
383#endif
384 stwcx. r8,0,r12
385 bne- 3b
386 subi r12,r12,TI_FLAGS
387
3884: /* Anything which requires enabling interrupts? */
Paul Mackerras1bd79332006-03-08 13:24:22 +1100389 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
390 beq ret_from_except
391
392 /* Re-enable interrupts */
393 ori r10,r10,MSR_EE
394 SYNC
395 MTMSRD(r10)
David Woodhouse401d1f02005-11-15 18:52:18 +0000396
397 /* Save NVGPRS if they're not saved already */
Paul Mackerrasd73e0c92005-10-28 22:45:25 +1000398 lwz r4,_TRAP(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000399 andi. r4,r4,1
David Woodhouse401d1f02005-11-15 18:52:18 +0000400 beq 5f
Paul Mackerras9994a332005-10-10 22:36:14 +1000401 SAVE_NVGPRS(r1)
402 li r4,0xc00
Paul Mackerrasd73e0c92005-10-28 22:45:25 +1000403 stw r4,_TRAP(r1)
Paul Mackerras1bd79332006-03-08 13:24:22 +11004045:
Paul Mackerras9994a332005-10-10 22:36:14 +1000405 addi r3,r1,STACK_FRAME_OVERHEAD
406 bl do_syscall_trace_leave
Paul Mackerras1bd79332006-03-08 13:24:22 +1100407 b ret_from_except_full
David Woodhouse401d1f02005-11-15 18:52:18 +0000408
Paul Mackerras9994a332005-10-10 22:36:14 +1000409#ifdef SHOW_SYSCALLS
410do_show_syscall:
411#ifdef SHOW_SYSCALLS_TASK
412 lis r11,show_syscalls_task@ha
413 lwz r11,show_syscalls_task@l(r11)
414 cmp 0,r2,r11
415 bnelr
416#endif
417 stw r31,GPR31(r1)
418 mflr r31
419 lis r3,7f@ha
420 addi r3,r3,7f@l
421 lwz r4,GPR0(r1)
422 lwz r5,GPR3(r1)
423 lwz r6,GPR4(r1)
424 lwz r7,GPR5(r1)
425 lwz r8,GPR6(r1)
426 lwz r9,GPR7(r1)
427 bl printk
428 lis r3,77f@ha
429 addi r3,r3,77f@l
430 lwz r4,GPR8(r1)
431 mr r5,r2
432 bl printk
433 lwz r0,GPR0(r1)
434 lwz r3,GPR3(r1)
435 lwz r4,GPR4(r1)
436 lwz r5,GPR5(r1)
437 lwz r6,GPR6(r1)
438 lwz r7,GPR7(r1)
439 lwz r8,GPR8(r1)
440 mtlr r31
441 lwz r31,GPR31(r1)
442 blr
443
444do_show_syscall_exit:
445#ifdef SHOW_SYSCALLS_TASK
446 lis r11,show_syscalls_task@ha
447 lwz r11,show_syscalls_task@l(r11)
448 cmp 0,r2,r11
449 bnelr
450#endif
451 stw r31,GPR31(r1)
452 mflr r31
453 stw r3,RESULT(r1) /* Save result */
454 mr r4,r3
455 lis r3,79f@ha
456 addi r3,r3,79f@l
457 bl printk
458 lwz r3,RESULT(r1)
459 mtlr r31
460 lwz r31,GPR31(r1)
461 blr
462
4637: .string "syscall %d(%x, %x, %x, %x, %x, "
46477: .string "%x), current=%p\n"
46579: .string " -> %x\n"
466 .align 2,0
467
468#ifdef SHOW_SYSCALLS_TASK
469 .data
470 .globl show_syscalls_task
471show_syscalls_task:
472 .long -1
473 .text
474#endif
475#endif /* SHOW_SYSCALLS */
476
477/*
David Woodhouse401d1f02005-11-15 18:52:18 +0000478 * The fork/clone functions need to copy the full register set into
479 * the child process. Therefore we need to save all the nonvolatile
480 * registers (r13 - r31) before calling the C code.
Paul Mackerras9994a332005-10-10 22:36:14 +1000481 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000482 .globl ppc_fork
483ppc_fork:
484 SAVE_NVGPRS(r1)
Paul Mackerrasd73e0c92005-10-28 22:45:25 +1000485 lwz r0,_TRAP(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000486 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
Paul Mackerrasd73e0c92005-10-28 22:45:25 +1000487 stw r0,_TRAP(r1) /* register set saved */
Paul Mackerras9994a332005-10-10 22:36:14 +1000488 b sys_fork
489
490 .globl ppc_vfork
491ppc_vfork:
492 SAVE_NVGPRS(r1)
Paul Mackerrasd73e0c92005-10-28 22:45:25 +1000493 lwz r0,_TRAP(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000494 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
Paul Mackerrasd73e0c92005-10-28 22:45:25 +1000495 stw r0,_TRAP(r1) /* register set saved */
Paul Mackerras9994a332005-10-10 22:36:14 +1000496 b sys_vfork
497
498 .globl ppc_clone
499ppc_clone:
500 SAVE_NVGPRS(r1)
Paul Mackerrasd73e0c92005-10-28 22:45:25 +1000501 lwz r0,_TRAP(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000502 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
Paul Mackerrasd73e0c92005-10-28 22:45:25 +1000503 stw r0,_TRAP(r1) /* register set saved */
Paul Mackerras9994a332005-10-10 22:36:14 +1000504 b sys_clone
505
Paul Mackerras1bd79332006-03-08 13:24:22 +1100506 .globl ppc_swapcontext
507ppc_swapcontext:
508 SAVE_NVGPRS(r1)
509 lwz r0,_TRAP(r1)
510 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
511 stw r0,_TRAP(r1) /* register set saved */
512 b sys_swapcontext
513
Paul Mackerras9994a332005-10-10 22:36:14 +1000514/*
515 * Top-level page fault handling.
516 * This is in assembler because if do_page_fault tells us that
517 * it is a bad kernel page fault, we want to save the non-volatile
518 * registers before calling bad_page_fault.
519 */
520 .globl handle_page_fault
521handle_page_fault:
522 stw r4,_DAR(r1)
523 addi r3,r1,STACK_FRAME_OVERHEAD
524 bl do_page_fault
525 cmpwi r3,0
526 beq+ ret_from_except
527 SAVE_NVGPRS(r1)
Paul Mackerrasd73e0c92005-10-28 22:45:25 +1000528 lwz r0,_TRAP(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000529 clrrwi r0,r0,1
Paul Mackerrasd73e0c92005-10-28 22:45:25 +1000530 stw r0,_TRAP(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000531 mr r5,r3
532 addi r3,r1,STACK_FRAME_OVERHEAD
533 lwz r4,_DAR(r1)
534 bl bad_page_fault
535 b ret_from_except_full
536
537/*
538 * This routine switches between two different tasks. The process
539 * state of one is saved on its kernel stack. Then the state
540 * of the other is restored from its kernel stack. The memory
541 * management hardware is updated to the second process's state.
542 * Finally, we can return to the second process.
543 * On entry, r3 points to the THREAD for the current task, r4
544 * points to the THREAD for the new task.
545 *
546 * This routine is always called with interrupts disabled.
547 *
548 * Note: there are two ways to get to the "going out" portion
549 * of this code; either by coming in via the entry (_switch)
550 * or via "fork" which must set up an environment equivalent
551 * to the "_switch" path. If you change this , you'll have to
552 * change the fork code also.
553 *
554 * The code which creates the new task context is in 'copy_thread'
555 * in arch/ppc/kernel/process.c
556 */
557_GLOBAL(_switch)
558 stwu r1,-INT_FRAME_SIZE(r1)
559 mflr r0
560 stw r0,INT_FRAME_SIZE+4(r1)
561 /* r3-r12 are caller saved -- Cort */
562 SAVE_NVGPRS(r1)
563 stw r0,_NIP(r1) /* Return to switch caller */
564 mfmsr r11
565 li r0,MSR_FP /* Disable floating-point */
566#ifdef CONFIG_ALTIVEC
567BEGIN_FTR_SECTION
568 oris r0,r0,MSR_VEC@h /* Disable altivec */
569 mfspr r12,SPRN_VRSAVE /* save vrsave register value */
570 stw r12,THREAD+THREAD_VRSAVE(r2)
571END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
572#endif /* CONFIG_ALTIVEC */
573#ifdef CONFIG_SPE
Kumar Gala5e14d212007-09-13 01:44:20 -0500574BEGIN_FTR_SECTION
Paul Mackerras9994a332005-10-10 22:36:14 +1000575 oris r0,r0,MSR_SPE@h /* Disable SPE */
576 mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
577 stw r12,THREAD+THREAD_SPEFSCR(r2)
Kumar Gala5e14d212007-09-13 01:44:20 -0500578END_FTR_SECTION_IFSET(CPU_FTR_SPE)
Paul Mackerras9994a332005-10-10 22:36:14 +1000579#endif /* CONFIG_SPE */
580 and. r0,r0,r11 /* FP or altivec or SPE enabled? */
581 beq+ 1f
582 andc r11,r11,r0
583 MTMSRD(r11)
584 isync
5851: stw r11,_MSR(r1)
586 mfcr r10
587 stw r10,_CCR(r1)
588 stw r1,KSP(r3) /* Set old stack pointer */
589
590#ifdef CONFIG_SMP
591 /* We need a sync somewhere here to make sure that if the
592 * previous task gets rescheduled on another CPU, it sees all
593 * stores it has performed on this one.
594 */
595 sync
596#endif /* CONFIG_SMP */
597
598 tophys(r0,r4)
599 CLR_TOP32(r0)
600 mtspr SPRN_SPRG3,r0 /* Update current THREAD phys addr */
601 lwz r1,KSP(r4) /* Load new stack pointer */
602
603 /* save the old current 'last' for return value */
604 mr r3,r2
605 addi r2,r4,-THREAD /* Update current */
606
607#ifdef CONFIG_ALTIVEC
608BEGIN_FTR_SECTION
609 lwz r0,THREAD+THREAD_VRSAVE(r2)
610 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
611END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
612#endif /* CONFIG_ALTIVEC */
613#ifdef CONFIG_SPE
Kumar Gala5e14d212007-09-13 01:44:20 -0500614BEGIN_FTR_SECTION
Paul Mackerras9994a332005-10-10 22:36:14 +1000615 lwz r0,THREAD+THREAD_SPEFSCR(r2)
616 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
Kumar Gala5e14d212007-09-13 01:44:20 -0500617END_FTR_SECTION_IFSET(CPU_FTR_SPE)
Paul Mackerras9994a332005-10-10 22:36:14 +1000618#endif /* CONFIG_SPE */
619
620 lwz r0,_CCR(r1)
621 mtcrf 0xFF,r0
622 /* r3-r12 are destroyed -- Cort */
623 REST_NVGPRS(r1)
624
625 lwz r4,_NIP(r1) /* Return to _switch caller in new task */
626 mtlr r4
627 addi r1,r1,INT_FRAME_SIZE
628 blr
629
630 .globl fast_exception_return
631fast_exception_return:
632#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
633 andi. r10,r9,MSR_RI /* check for recoverable interrupt */
634 beq 1f /* if not, we've got problems */
635#endif
636
6372: REST_4GPRS(3, r11)
638 lwz r10,_CCR(r11)
639 REST_GPR(1, r11)
640 mtcr r10
641 lwz r10,_LINK(r11)
642 mtlr r10
643 REST_GPR(10, r11)
644 mtspr SPRN_SRR1,r9
645 mtspr SPRN_SRR0,r12
646 REST_GPR(9, r11)
647 REST_GPR(12, r11)
648 lwz r11,GPR11(r11)
649 SYNC
650 RFI
651
652#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
653/* check if the exception happened in a restartable section */
6541: lis r3,exc_exit_restart_end@ha
655 addi r3,r3,exc_exit_restart_end@l
656 cmplw r12,r3
657 bge 3f
658 lis r4,exc_exit_restart@ha
659 addi r4,r4,exc_exit_restart@l
660 cmplw r12,r4
661 blt 3f
662 lis r3,fee_restarts@ha
663 tophys(r3,r3)
664 lwz r5,fee_restarts@l(r3)
665 addi r5,r5,1
666 stw r5,fee_restarts@l(r3)
667 mr r12,r4 /* restart at exc_exit_restart */
668 b 2b
669
Kumar Gala991eb432007-05-14 17:11:58 -0500670 .section .bss
671 .align 2
672fee_restarts:
673 .space 4
674 .previous
Paul Mackerras9994a332005-10-10 22:36:14 +1000675
676/* aargh, a nonrecoverable interrupt, panic */
677/* aargh, we don't know which trap this is */
678/* but the 601 doesn't implement the RI bit, so assume it's OK */
6793:
680BEGIN_FTR_SECTION
681 b 2b
682END_FTR_SECTION_IFSET(CPU_FTR_601)
683 li r10,-1
Paul Mackerrasd73e0c92005-10-28 22:45:25 +1000684 stw r10,_TRAP(r11)
Paul Mackerras9994a332005-10-10 22:36:14 +1000685 addi r3,r1,STACK_FRAME_OVERHEAD
686 lis r10,MSR_KERNEL@h
687 ori r10,r10,MSR_KERNEL@l
688 bl transfer_to_handler_full
689 .long nonrecoverable_exception
690 .long ret_from_except
691#endif
692
Paul Mackerras9994a332005-10-10 22:36:14 +1000693 .globl ret_from_except_full
694ret_from_except_full:
695 REST_NVGPRS(r1)
696 /* fall through */
697
698 .globl ret_from_except
699ret_from_except:
700 /* Hard-disable interrupts so that current_thread_info()->flags
701 * can't change between when we test it and when we return
702 * from the interrupt. */
703 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
704 SYNC /* Some chip revs have problems here... */
705 MTMSRD(r10) /* disable interrupts */
706
707 lwz r3,_MSR(r1) /* Returning to user mode? */
708 andi. r0,r3,MSR_PR
709 beq resume_kernel
710
711user_exc_return: /* r10 contains MSR_KERNEL here */
712 /* Check current_thread_info()->flags */
David Gibson6cb7bfe2005-10-21 15:45:50 +1000713 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
Paul Mackerras9994a332005-10-10 22:36:14 +1000714 lwz r9,TI_FLAGS(r9)
Roland McGrath7a101742008-04-28 17:30:37 +1000715 andi. r0,r9,_TIF_USER_WORK_MASK
Paul Mackerras9994a332005-10-10 22:36:14 +1000716 bne do_work
717
718restore_user:
719#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
Kumar Gala4eaddb42008-04-09 16:15:40 -0500720 /* Check whether this process has its own DBCR0 value. The internal
721 debug mode bit tells us that dbcr0 should be loaded. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000722 lwz r0,THREAD+THREAD_DBCR0(r2)
Luis Machadod6a61bf2008-07-24 02:10:41 +1000723 andis. r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
Paul Mackerras9994a332005-10-10 22:36:14 +1000724 bnel- load_dbcr0
725#endif
726
727#ifdef CONFIG_PREEMPT
728 b restore
729
730/* N.B. the only way to get here is from the beq following ret_from_except. */
731resume_kernel:
732 /* check current_thread_info->preempt_count */
David Gibson6cb7bfe2005-10-21 15:45:50 +1000733 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
Paul Mackerras9994a332005-10-10 22:36:14 +1000734 lwz r0,TI_PREEMPT(r9)
735 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
736 bne restore
737 lwz r0,TI_FLAGS(r9)
738 andi. r0,r0,_TIF_NEED_RESCHED
739 beq+ restore
740 andi. r0,r3,MSR_EE /* interrupts off? */
741 beq restore /* don't schedule if so */
7421: bl preempt_schedule_irq
David Gibson6cb7bfe2005-10-21 15:45:50 +1000743 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
Paul Mackerras9994a332005-10-10 22:36:14 +1000744 lwz r3,TI_FLAGS(r9)
745 andi. r0,r3,_TIF_NEED_RESCHED
746 bne- 1b
747#else
748resume_kernel:
749#endif /* CONFIG_PREEMPT */
750
751 /* interrupts are hard-disabled at this point */
752restore:
Benjamin Herrenschmidtb98ac05d2007-10-31 16:42:19 +1100753#ifdef CONFIG_44x
754 lis r4,icache_44x_need_flush@ha
755 lwz r5,icache_44x_need_flush@l(r4)
756 cmplwi cr0,r5,0
757 beq+ 1f
758 li r6,0
759 iccci r0,r0
760 stw r6,icache_44x_need_flush@l(r4)
7611:
762#endif /* CONFIG_44x */
Paul Mackerras9994a332005-10-10 22:36:14 +1000763 lwz r0,GPR0(r1)
764 lwz r2,GPR2(r1)
765 REST_4GPRS(3, r1)
766 REST_2GPRS(7, r1)
767
768 lwz r10,_XER(r1)
769 lwz r11,_CTR(r1)
770 mtspr SPRN_XER,r10
771 mtctr r11
772
773 PPC405_ERR77(0,r1)
Becky Bruceb64f87c2007-11-10 09:17:49 +1100774BEGIN_FTR_SECTION
775 lwarx r11,0,r1
776END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
Paul Mackerras9994a332005-10-10 22:36:14 +1000777 stwcx. r0,0,r1 /* to clear the reservation */
778
779#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
780 lwz r9,_MSR(r1)
781 andi. r10,r9,MSR_RI /* check if this exception occurred */
782 beql nonrecoverable /* at a bad place (MSR:RI = 0) */
783
784 lwz r10,_CCR(r1)
785 lwz r11,_LINK(r1)
786 mtcrf 0xFF,r10
787 mtlr r11
788
789 /*
790 * Once we put values in SRR0 and SRR1, we are in a state
791 * where exceptions are not recoverable, since taking an
792 * exception will trash SRR0 and SRR1. Therefore we clear the
793 * MSR:RI bit to indicate this. If we do take an exception,
794 * we can't return to the point of the exception but we
795 * can restart the exception exit path at the label
796 * exc_exit_restart below. -- paulus
797 */
798 LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
799 SYNC
800 MTMSRD(r10) /* clear the RI bit */
801 .globl exc_exit_restart
802exc_exit_restart:
803 lwz r9,_MSR(r1)
804 lwz r12,_NIP(r1)
805 FIX_SRR1(r9,r10)
806 mtspr SPRN_SRR0,r12
807 mtspr SPRN_SRR1,r9
808 REST_4GPRS(9, r1)
809 lwz r1,GPR1(r1)
810 .globl exc_exit_restart_end
811exc_exit_restart_end:
812 SYNC
813 RFI
814
815#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
816 /*
817 * This is a bit different on 4xx/Book-E because it doesn't have
818 * the RI bit in the MSR.
819 * The TLB miss handler checks if we have interrupted
820 * the exception exit path and restarts it if so
821 * (well maybe one day it will... :).
822 */
823 lwz r11,_LINK(r1)
824 mtlr r11
825 lwz r10,_CCR(r1)
826 mtcrf 0xff,r10
827 REST_2GPRS(9, r1)
828 .globl exc_exit_restart
829exc_exit_restart:
830 lwz r11,_NIP(r1)
831 lwz r12,_MSR(r1)
832exc_exit_start:
833 mtspr SPRN_SRR0,r11
834 mtspr SPRN_SRR1,r12
835 REST_2GPRS(11, r1)
836 lwz r1,GPR1(r1)
837 .globl exc_exit_restart_end
838exc_exit_restart_end:
839 PPC405_ERR77_SYNC
840 rfi
841 b . /* prevent prefetch past rfi */
842
843/*
844 * Returning from a critical interrupt in user mode doesn't need
845 * to be any different from a normal exception. For a critical
846 * interrupt in the kernel, we just return (without checking for
847 * preemption) since the interrupt may have happened at some crucial
848 * place (e.g. inside the TLB miss handler), and because we will be
849 * running with r1 pointing into critical_stack, not the current
850 * process's kernel stack (and therefore current_thread_info() will
851 * give the wrong answer).
852 * We have to restore various SPRs that may have been in use at the
853 * time of the critical interrupt.
854 *
855 */
856#ifdef CONFIG_40x
857#define PPC_40x_TURN_OFF_MSR_DR \
858 /* avoid any possible TLB misses here by turning off MSR.DR, we \
859 * assume the instructions here are mapped by a pinned TLB entry */ \
860 li r10,MSR_IR; \
861 mtmsr r10; \
862 isync; \
863 tophys(r1, r1);
864#else
865#define PPC_40x_TURN_OFF_MSR_DR
866#endif
867
868#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
869 REST_NVGPRS(r1); \
870 lwz r3,_MSR(r1); \
871 andi. r3,r3,MSR_PR; \
872 LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
873 bne user_exc_return; \
874 lwz r0,GPR0(r1); \
875 lwz r2,GPR2(r1); \
876 REST_4GPRS(3, r1); \
877 REST_2GPRS(7, r1); \
878 lwz r10,_XER(r1); \
879 lwz r11,_CTR(r1); \
880 mtspr SPRN_XER,r10; \
881 mtctr r11; \
882 PPC405_ERR77(0,r1); \
883 stwcx. r0,0,r1; /* to clear the reservation */ \
884 lwz r11,_LINK(r1); \
885 mtlr r11; \
886 lwz r10,_CCR(r1); \
887 mtcrf 0xff,r10; \
888 PPC_40x_TURN_OFF_MSR_DR; \
889 lwz r9,_DEAR(r1); \
890 lwz r10,_ESR(r1); \
891 mtspr SPRN_DEAR,r9; \
892 mtspr SPRN_ESR,r10; \
893 lwz r11,_NIP(r1); \
894 lwz r12,_MSR(r1); \
895 mtspr exc_lvl_srr0,r11; \
896 mtspr exc_lvl_srr1,r12; \
897 lwz r9,GPR9(r1); \
898 lwz r12,GPR12(r1); \
899 lwz r10,GPR10(r1); \
900 lwz r11,GPR11(r1); \
901 lwz r1,GPR1(r1); \
902 PPC405_ERR77_SYNC; \
903 exc_lvl_rfi; \
904 b .; /* prevent prefetch past exc_lvl_rfi */
905
Kumar Galafca622c2008-04-30 05:23:21 -0500906#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1) \
907 lwz r9,_##exc_lvl_srr0(r1); \
908 lwz r10,_##exc_lvl_srr1(r1); \
909 mtspr SPRN_##exc_lvl_srr0,r9; \
910 mtspr SPRN_##exc_lvl_srr1,r10;
911
912#if defined(CONFIG_FSL_BOOKE)
913#ifdef CONFIG_PHYS_64BIT
914#define RESTORE_MAS7 \
915 lwz r11,MAS7(r1); \
916 mtspr SPRN_MAS7,r11;
917#else
918#define RESTORE_MAS7
919#endif /* CONFIG_PHYS_64BIT */
920#define RESTORE_MMU_REGS \
921 lwz r9,MAS0(r1); \
922 lwz r10,MAS1(r1); \
923 lwz r11,MAS2(r1); \
924 mtspr SPRN_MAS0,r9; \
925 lwz r9,MAS3(r1); \
926 mtspr SPRN_MAS1,r10; \
927 lwz r10,MAS6(r1); \
928 mtspr SPRN_MAS2,r11; \
929 mtspr SPRN_MAS3,r9; \
930 mtspr SPRN_MAS6,r10; \
931 RESTORE_MAS7;
932#elif defined(CONFIG_44x)
933#define RESTORE_MMU_REGS \
934 lwz r9,MMUCR(r1); \
935 mtspr SPRN_MMUCR,r9;
936#else
937#define RESTORE_MMU_REGS
938#endif
939
#ifdef CONFIG_40x
/*
 * Return from a 40x critical exception.  Restores the kernel stack
 * limit and the main SRR0/SRR1 pair (saved in the static variables
 * saved_ksp_limit / crit_srr0 / crit_srr1 at critical-exception
 * entry), then returns via CSRR0/CSRR1 with rfci.
 */
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG3		/* r9 = current thread struct (see tovirt below) */
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);			/* SPRG3 holds a physical address here */
	stw	r10,KSP_LIMIT(r9)	/* restore the pre-exception stack limit */
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;		/* put back the interrupted SRR0/SRR1 */
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
#endif /* CONFIG_40x */
Paul Mackerras9994a332005-10-10 22:36:14 +1000956
#ifdef CONFIG_BOOKE
/*
 * Book-E exception-level returns.  Each variant restores the kernel
 * stack limit saved in the exception frame, reloads every lower
 * exception level's save/restore register pair that this level could
 * have clobbered, restores MMU assist state, and returns with the
 * matching rfi-class instruction (rfci/rfdi/rfmci).
 */
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG3		/* r9 = current thread struct */
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)	/* restore pre-exception stack limit */
	RESTORE_xSRR(SRR0,SRR1);	/* critical exc may have trashed SRR0/1 */
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG3
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)	/* r9 = task's thread_info */
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* r10 = thread_info of the stack we ran on */
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)	/* propagate preempt count back to the task */
	RESTORE_xSRR(SRR0,SRR1);	/* debug exc can trash both lower levels */
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG3
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);	/* machine check outranks all other levels */
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */
992
993/*
994 * Load the DBCR0 value for a task that is being ptraced,
995 * having first saved away the global DBCR0. Note that r0
996 * has the dbcr0 value to set upon entry to this.
997 */
998load_dbcr0:
999 mfmsr r10 /* first disable debug exceptions */
1000 rlwinm r10,r10,0,~MSR_DE
1001 mtmsr r10
1002 isync
1003 mfspr r10,SPRN_DBCR0
1004 lis r11,global_dbcr0@ha
1005 addi r11,r11,global_dbcr0@l
Kumar Gala4eaddb42008-04-09 16:15:40 -05001006#ifdef CONFIG_SMP
1007 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
1008 lwz r9,TI_CPU(r9)
1009 slwi r9,r9,3
1010 add r11,r11,r9
1011#endif
Paul Mackerras9994a332005-10-10 22:36:14 +10001012 stw r10,0(r11)
1013 mtspr SPRN_DBCR0,r0
1014 lwz r10,4(r11)
1015 addi r10,r10,1
1016 stw r10,4(r11)
1017 li r11,-1
1018 mtspr SPRN_DBSR,r11 /* clear all pending debug events */
1019 blr
1020
Kumar Gala991eb432007-05-14 17:11:58 -05001021 .section .bss
1022 .align 4
1023global_dbcr0:
Kumar Gala4eaddb42008-04-09 16:15:40 -05001024 .space 8*NR_CPUS
Kumar Gala991eb432007-05-14 17:11:58 -05001025 .previous
Paul Mackerras9994a332005-10-10 22:36:14 +10001026#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1027
/*
 * Pre-return-to-user work loop.  Entered with r9 = TI_FLAGS and
 * r10 = MSR_KERNEL, interrupts disabled.  Alternates between
 * schedule() and signal delivery until no work bits remain, then
 * falls back to restore_user.  Interrupts are hard-enabled around
 * the calls out to C and disabled again before each flag recheck.
 */
do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)	/* r9 = thread_info */
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched	/* resched won the race: go round again */
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1		/* trap LSB set => NVGPRs not yet saved */
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30	/* clear the LSB: NVGPRs are saved now */
	stw	r3,_TRAP(r1)
2:	li	r3,0		/* arg 1 = NULL (old sigset -- verify vs do_signal) */
	addi	r4,r1,STACK_FRAME_OVERHEAD	/* arg 2 = pt_regs */
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck		/* signal delivery may have set new flags */
1063
1064/*
1065 * We come here when we are at the end of handling an exception
1066 * that occurred at a place where taking an exception will lose
1067 * state information, such as the contents of SRR0 and SRR1.
1068 */
1069nonrecoverable:
1070 lis r10,exc_exit_restart_end@ha
1071 addi r10,r10,exc_exit_restart_end@l
1072 cmplw r12,r10
1073 bge 3f
1074 lis r11,exc_exit_restart@ha
1075 addi r11,r11,exc_exit_restart@l
1076 cmplw r12,r11
1077 blt 3f
1078 lis r10,ee_restarts@ha
1079 lwz r12,ee_restarts@l(r10)
1080 addi r12,r12,1
1081 stw r12,ee_restarts@l(r10)
1082 mr r12,r11 /* restart at exc_exit_restart */
1083 blr
10843: /* OK, we can't recover, kill this process */
1085 /* but the 601 doesn't implement the RI bit, so assume it's OK */
1086BEGIN_FTR_SECTION
1087 blr
1088END_FTR_SECTION_IFSET(CPU_FTR_601)
Paul Mackerrasd73e0c92005-10-28 22:45:25 +10001089 lwz r3,_TRAP(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +10001090 andi. r0,r3,1
1091 beq 4f
1092 SAVE_NVGPRS(r1)
1093 rlwinm r3,r3,0,0,30
Paul Mackerrasd73e0c92005-10-28 22:45:25 +10001094 stw r3,_TRAP(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +100010954: addi r3,r1,STACK_FRAME_OVERHEAD
1096 bl nonrecoverable_exception
1097 /* shouldn't return */
1098 b 4b
1099
Kumar Gala991eb432007-05-14 17:11:58 -05001100 .section .bss
1101 .align 2
1102ee_restarts:
1103 .space 4
1104 .previous
Paul Mackerras9994a332005-10-10 22:36:14 +10001105
1106/*
1107 * PROM code for specific machines follows. Put it
1108 * here so it's easy to add arch-specific sections later.
1109 * -- Cort
1110 */
Paul Mackerras033ef332005-10-26 17:05:24 +10001111#ifdef CONFIG_PPC_RTAS
Paul Mackerras9994a332005-10-10 22:36:14 +10001112/*
1113 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1114 * called with the MMU off.
1115 */
1116_GLOBAL(enter_rtas)
1117 stwu r1,-INT_FRAME_SIZE(r1)
1118 mflr r0
1119 stw r0,INT_FRAME_SIZE+4(r1)
David Gibsone58c3492006-01-13 14:56:25 +11001120 LOAD_REG_ADDR(r4, rtas)
Paul Mackerras9994a332005-10-10 22:36:14 +10001121 lis r6,1f@ha /* physical return address for rtas */
1122 addi r6,r6,1f@l
1123 tophys(r6,r6)
1124 tophys(r7,r1)
Paul Mackerras033ef332005-10-26 17:05:24 +10001125 lwz r8,RTASENTRY(r4)
1126 lwz r4,RTASBASE(r4)
Paul Mackerras9994a332005-10-10 22:36:14 +10001127 mfmsr r9
1128 stw r9,8(r1)
1129 LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1130 SYNC /* disable interrupts so SRR0/1 */
1131 MTMSRD(r0) /* don't get trashed */
1132 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1133 mtlr r6
Paul Mackerras9994a332005-10-10 22:36:14 +10001134 mtspr SPRN_SPRG2,r7
1135 mtspr SPRN_SRR0,r8
1136 mtspr SPRN_SRR1,r9
1137 RFI
11381: tophys(r9,r1)
1139 lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
1140 lwz r9,8(r9) /* original msr value */
1141 FIX_SRR1(r9,r0)
1142 addi r1,r1,INT_FRAME_SIZE
1143 li r0,0
1144 mtspr SPRN_SPRG2,r0
1145 mtspr SPRN_SRR0,r8
1146 mtspr SPRN_SRR1,r9
1147 RFI /* return to caller */
1148
1149 .globl machine_check_in_rtas
1150machine_check_in_rtas:
1151 twi 31,0,0
1152 /* XXX load up BATs and panic */
1153
Paul Mackerras033ef332005-10-26 17:05:24 +10001154#endif /* CONFIG_PPC_RTAS */
Steven Rostedt4e491d12008-05-14 23:49:44 -04001155
#ifdef CONFIG_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Dynamic-ftrace mcount stub.  Builds a 48-byte frame, saves the
 * volatile argument regs r3-r10, CR (at 8(r1)) and LR (at 44(r1)),
 * then calls through the patchable mcount_call site with
 * r3 = call-site address (LR minus MCOUNT_INSN_SIZE).  On return,
 * everything is restored: the saved LR goes to CTR for the return
 * branch, while the caller's own return address (its LR save word at
 * 52(r1), i.e. 4 bytes above our frame) is reloaded into LR.
 */
_GLOBAL(mcount)
_GLOBAL(_mcount)
	stwu	r1,-48(r1)	/* 48-byte frame */
	stw	r3, 12(r1)	/* save argument registers r3-r10 */
	stw	r4, 16(r1)
	stw	r5, 20(r1)
	stw	r6, 24(r1)
	mflr	r3		/* r3 = address right after the mcount call */
	stw	r7, 28(r1)
	mfcr	r5
	stw	r8, 32(r1)
	stw	r9, 36(r1)
	stw	r10,40(r1)
	stw	r3, 44(r1)	/* stash LR */
	stw	r5, 8(r1)	/* stash CR */
	subi	r3, r3, MCOUNT_INSN_SIZE	/* r3 = call-site address */
	.globl mcount_call
mcount_call:
	bl	ftrace_stub	/* patched at runtime to the tracer */
	nop
	lwz	r6, 8(r1)	/* restore CR */
	lwz	r0, 44(r1)	/* saved LR -> CTR for the return branch */
	lwz	r3, 12(r1)	/* restore argument registers */
	mtctr	r0
	lwz	r4, 16(r1)
	mtcr	r6
	lwz	r5, 20(r1)
	lwz	r6, 24(r1)
	lwz	r0, 52(r1)	/* caller's return address (LR save word) */
	lwz	r7, 28(r1)
	lwz	r8, 32(r1)
	mtlr	r0
	lwz	r9, 36(r1)
	lwz	r10,40(r1)
	addi	r1, r1, 48	/* pop the frame */
	bctr			/* back to the instrumented function */

/*
 * Dynamic-ftrace trampoline: call sites are patched to branch here.
 * Same save/restore layout as _mcount above, but additionally loads
 * r4 with the caller's return address (parent ip) from its LR save
 * word at 52(r1) before calling the patchable ftrace_call site.
 */
_GLOBAL(ftrace_caller)
	/* Based off of objdump output from glibc */
	stwu	r1,-48(r1)	/* 48-byte frame */
	stw	r3, 12(r1)	/* save argument registers r3-r10 */
	stw	r4, 16(r1)
	stw	r5, 20(r1)
	stw	r6, 24(r1)
	mflr	r3		/* r3 = address right after the call site */
	lwz	r4, 52(r1)	/* r4 = parent's return address */
	mfcr	r5
	stw	r7, 28(r1)
	stw	r8, 32(r1)
	stw	r9, 36(r1)
	stw	r10,40(r1)
	stw	r3, 44(r1)	/* stash LR */
	stw	r5, 8(r1)	/* stash CR */
	subi	r3, r3, MCOUNT_INSN_SIZE	/* r3 = call-site address */
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub	/* patched at runtime to the tracer */
	nop
	lwz	r6, 8(r1)	/* restore CR */
	lwz	r0, 44(r1)	/* saved LR -> CTR for the return branch */
	lwz	r3, 12(r1)	/* restore argument registers */
	mtctr	r0
	lwz	r4, 16(r1)
	mtcr	r6
	lwz	r5, 20(r1)
	lwz	r6, 24(r1)
	lwz	r0, 52(r1)	/* caller's return address (LR save word) */
	lwz	r7, 28(r1)
	lwz	r8, 32(r1)
	mtlr	r0
	lwz	r9, 36(r1)
	lwz	r10,40(r1)
	addi	r1, r1, 48	/* pop the frame */
	bctr			/* back to the instrumented function */
#else
/*
 * Static-ftrace mcount: no runtime patching.  Same frame layout as
 * the dynamic variant, but the tracer is fetched indirectly from
 * ftrace_trace_function and called via CTR.  r3 = call-site address,
 * r4 = parent's return address (from the caller's LR save word).
 */
_GLOBAL(mcount)
_GLOBAL(_mcount)
	stwu	r1,-48(r1)	/* 48-byte frame */
	stw	r3, 12(r1)	/* save argument registers r3-r10 */
	stw	r4, 16(r1)
	stw	r5, 20(r1)
	stw	r6, 24(r1)
	mflr	r3		/* r3 = address right after the mcount call */
	lwz	r4, 52(r1)	/* r4 = parent's return address */
	mfcr	r5
	stw	r7, 28(r1)
	stw	r8, 32(r1)
	stw	r9, 36(r1)
	stw	r10,40(r1)
	stw	r3, 44(r1)	/* stash LR */
	stw	r5, 8(r1)	/* stash CR */

	subi	r3, r3, MCOUNT_INSN_SIZE	/* r3 = call-site address */
	LOAD_REG_ADDR(r5, ftrace_trace_function)
	lwz	r5,0(r5)	/* r5 = current tracer function */

	mtctr	r5
	bctrl			/* tracer(ip, parent_ip) */

	nop

	lwz	r6, 8(r1)	/* restore CR */
	lwz	r0, 44(r1)	/* saved LR -> CTR for the return branch */
	lwz	r3, 12(r1)	/* restore argument registers */
	mtctr	r0
	lwz	r4, 16(r1)
	mtcr	r6
	lwz	r5, 20(r1)
	lwz	r6, 24(r1)
	lwz	r0, 52(r1)	/* caller's return address (LR save word) */
	lwz	r7, 28(r1)
	lwz	r8, 32(r1)
	mtlr	r0
	lwz	r9, 36(r1)
	lwz	r10,40(r1)
	addi	r1, r1, 48	/* pop the frame */
	bctr			/* back to the instrumented function */
#endif

/* Default no-op tracer; also the initial target of the patchable call sites. */
_GLOBAL(ftrace_stub)
	blr

#endif /* CONFIG_FTRACE */