/*
 * linux/arch/nios2/kernel/entry.S
 *
 * Copyright (C) 2013-2014 Altera Corporation
 * Copyright (C) 2009, Wind River Systems Inc
 *
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
 * Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
 *                     Kenneth Albanowski <kjahds@kjahds.com>,
 * Copyright (C) 2000  Lineo Inc. (www.lineo.com)
 * Copyright (C) 2004  Microtronix Datacom Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 * ColdFire support by Greg Ungerer (gerg@snapgear.com)
 * 5307 fixes by David W. Miller
 * linux 2.4 support David McCullough <davidm@snapgear.com>
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/asm-macros.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/entry.h>
#include <asm/unistd.h>
#include <asm/processor.h>

.macro GET_THREAD_INFO reg
.if THREAD_SIZE & 0xffff0000
	andhi	\reg, sp, %hi(~(THREAD_SIZE-1))
.else
	addi	\reg, r0, %lo(~(THREAD_SIZE-1))
	and	\reg, \reg, sp
.endif
.endm
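
/*
 * For readability: GET_THREAD_INFO relies on every kernel stack being
 * THREAD_SIZE-aligned, so masking the low bits of sp lands on the
 * struct thread_info at the base of the current stack. A rough C
 * equivalent of the same mask computation (a sketch, not the real
 * helper in asm/thread_info.h):
 *
 *	register unsigned long sp asm("sp");
 *	struct thread_info *ti = (void *)(sp & ~(THREAD_SIZE - 1));
 */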

.macro	kuser_cmpxchg_check
	/*
	 * Make sure our user space atomic helper is restarted if it was
	 * interrupted in a critical region.
	 * ea-4 = address of interrupted insn (ea must be preserved).
	 * sp = saved regs.
	 * cmpxchg_ldw = first critical insn, cmpxchg_stw = last critical insn.
	 * If ea <= cmpxchg_stw and ea > cmpxchg_ldw then saved EA is set to
	 * cmpxchg_ldw + 4.
	 */
	/* et = cmpxchg_stw + 4 */
	movui	et, (KUSER_BASE + 4 + (cmpxchg_stw - __kuser_helper_start))
	bgtu	ea, et, 1f

	subi	et, et, (cmpxchg_stw - cmpxchg_ldw)	/* et = cmpxchg_ldw + 4 */
	bltu	ea, et, 1f
	stw	et, PT_EA(sp)	/* fix up EA */
	mov	ea, et
1:
.endm
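
/*
 * The window test above, spelled out in C-like pseudocode purely for
 * readability (stw_end/ldw_end are descriptive names, not real symbols):
 *
 *	u32 stw_end = KUSER_BASE + 4 + (cmpxchg_stw - __kuser_helper_start);
 *	u32 ldw_end = stw_end - (cmpxchg_stw - cmpxchg_ldw);
 *
 *	if (ea <= stw_end && ea >= ldw_end) {
 *		regs->ea = ldw_end;	// i.e. cmpxchg_ldw + 4
 *		ea = ldw_end;
 *	}
 *
 * The helper itself is not atomic, so rewinding EA back into the window
 * makes its load/compare/store sequence restartable after an exception.
 */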

.section .rodata
.align 4
exception_table:
	.word	unhandled_exception		/* 0 - Reset */
	.word	unhandled_exception		/* 1 - Processor-only Reset */
	.word	external_interrupt		/* 2 - Interrupt */
	.word	handle_trap			/* 3 - Trap Instruction */

	.word	instruction_trap		/* 4 - Unimplemented instruction */
	.word	handle_illegal			/* 5 - Illegal instruction */
	.word	handle_unaligned		/* 6 - Misaligned data access */
	.word	handle_unaligned		/* 7 - Misaligned destination address */

	.word	handle_diverror			/* 8 - Division error */
	.word	protection_exception_ba	/* 9 - Supervisor-only instr. address */
	.word	protection_exception_instr	/* 10 - Supervisor only instruction */
	.word	protection_exception_ba	/* 11 - Supervisor only data address */

	.word	unhandled_exception		/* 12 - Double TLB miss (data) */
	.word	protection_exception_pte	/* 13 - TLB permission violation (x) */
	.word	protection_exception_pte	/* 14 - TLB permission violation (r) */
	.word	protection_exception_pte	/* 15 - TLB permission violation (w) */

	.word	unhandled_exception		/* 16 - MPU region violation */

trap_table:
	.word	handle_system_call	/* 0  */
	.word	handle_trap_1		/* 1  */
	.word	handle_trap_2		/* 2  */
	.word	handle_trap_3		/* 3  */
	.word	handle_trap_reserved	/* 4  */
	.word	handle_trap_reserved	/* 5  */
	.word	handle_trap_reserved	/* 6  */
	.word	handle_trap_reserved	/* 7  */
	.word	handle_trap_reserved	/* 8  */
	.word	handle_trap_reserved	/* 9  */
	.word	handle_trap_reserved	/* 10 */
	.word	handle_trap_reserved	/* 11 */
	.word	handle_trap_reserved	/* 12 */
	.word	handle_trap_reserved	/* 13 */
	.word	handle_trap_reserved	/* 14 */
	.word	handle_trap_reserved	/* 15 */
	.word	handle_trap_reserved	/* 16 */
	.word	handle_trap_reserved	/* 17 */
	.word	handle_trap_reserved	/* 18 */
	.word	handle_trap_reserved	/* 19 */
	.word	handle_trap_reserved	/* 20 */
	.word	handle_trap_reserved	/* 21 */
	.word	handle_trap_reserved	/* 22 */
	.word	handle_trap_reserved	/* 23 */
	.word	handle_trap_reserved	/* 24 */
	.word	handle_trap_reserved	/* 25 */
	.word	handle_trap_reserved	/* 26 */
	.word	handle_trap_reserved	/* 27 */
	.word	handle_trap_reserved	/* 28 */
	.word	handle_trap_reserved	/* 29 */
#ifdef CONFIG_KGDB
	.word	handle_kgdb_breakpoint	/* 30 KGDB breakpoint */
#else
	.word	instruction_trap	/* 30 */
#endif
	.word	handle_breakpoint	/* 31 */

.text
.set noat
.set nobreak

ENTRY(inthandler)
	SAVE_ALL

	kuser_cmpxchg_check

	/* Clear the EH bit before we take a new exception in the kernel,
	 * and after we have saved it to the exception frame. This is done
	 * whether it's a trap, a TLB miss or an interrupt. If we don't do
	 * this, estatus is not updated on the next exception.
	 */
	rdctl	r24, status
	movi	r9, %lo(~STATUS_EH)
	and	r24, r24, r9
	wrctl	status, r24

	/* Read cause and vector and branch to the associated handler */
	mov	r4, sp
	rdctl	r5, exception
	movia	r9, exception_table
	add	r24, r9, r5
	ldw	r24, 0(r24)
	jmp	r24
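	/*
	 * Note: no extra shift is needed above because the CAUSE field of
	 * the exception register occupies bits 6..2, so the value read by
	 * rdctl is already the cause number scaled by 4, i.e. the byte
	 * offset of the matching .word entry in exception_table.
	 */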


/***********************************************************************
 * Handle traps
 ***********************************************************************
 */
ENTRY(handle_trap)
	ldwio	r24, -4(ea)	/* instruction that caused the exception */
	srli	r24, r24, 4
	andi	r24, r24, 0x7c
	movia	r9, trap_table
	add	r24, r24, r9
	ldw	r24, 0(r24)
	jmp	r24
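	/*
	 * Decoding note: the trap number (imm5) sits in bits 10..6 of the
	 * trap instruction word. Shifting right by 4 and masking with 0x7c
	 * keeps those five bits at positions 6..2, i.e. it produces
	 * imm5 * 4, the byte offset into trap_table used above. "trap 0",
	 * for example, selects entry 0 and lands in handle_system_call.
	 */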


/***********************************************************************
 * Handle system calls
 ***********************************************************************
 */
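/*
 * Userspace view of this path, for illustration only (not part of the
 * kernel build): the call number goes in r2, up to six arguments in
 * r4..r9, then "trap". On return, r7 != 0 signals failure with a positive
 * error code in r2 (see translate_rc_and_ret below). A minimal
 * hand-rolled write(2) under that assumed convention; raw_write is a
 * made-up name:
 *
 *	static long raw_write(int fd, const void *buf, unsigned long len)
 *	{
 *		register long r2 asm("r2") = __NR_write;
 *		register long r4 asm("r4") = fd;
 *		register long r5 asm("r5") = (long)buf;
 *		register long r6 asm("r6") = len;
 *		register long r7 asm("r7");
 *
 *		asm volatile("trap"
 *			     : "+r"(r2), "=r"(r7)
 *			     : "r"(r4), "r"(r5), "r"(r6)
 *			     : "memory");
 *		return r7 ? -r2 : r2;
 *	}
 */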
ENTRY(handle_system_call)
	/* Enable interrupts */
	rdctl	r10, status
	ori	r10, r10, STATUS_PIE
	wrctl	status, r10

	/* Reload registers destroyed by common code. */
	ldw	r4, PT_R4(sp)
	ldw	r5, PT_R5(sp)

local_restart:
	/* Check that the requested system call is within limits */
	movui	r1, __NR_syscalls
	bgeu	r2, r1, ret_invsyscall
	slli	r1, r2, 2
	movhi	r11, %hiadj(sys_call_table)
	add	r1, r1, r11
	ldw	r1, %lo(sys_call_table)(r1)
	beq	r1, r0, ret_invsyscall

	/* Check if we are being traced */
	GET_THREAD_INFO r11
	ldw	r11, TI_FLAGS(r11)
	BTBNZ	r11, r11, TIF_SYSCALL_TRACE, traced_system_call

	/* Execute the system call */
	callr	r1
	/* If the syscall returns a negative result:
	 *   Set r7 to 1 to indicate error,
	 *   Negate r2 to get a positive error code.
	 * If the syscall returns zero or a positive value:
	 *   Set r7 to 0.
	 * The sigreturn system calls will skip the code below by adding to
	 * register ra, so that the r2/r7 values they have just restored are
	 * not overwritten here.
	 */
translate_rc_and_ret:
	movi	r1, 0
	bge	r2, zero, 3f
	sub	r2, zero, r2
	movi	r1, 1
3:
	stw	r2, PT_R2(sp)
	stw	r1, PT_R7(sp)
end_translate_rc_and_ret:
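
/*
 * Consumer's view (hedged sketch, descriptive names only): a libc-style
 * wrapper built on the r2/r7 convention stored above would typically do
 *
 *	// after the trap returns:
 *	if (r7) {		// error flag set by translate_rc_and_ret
 *		errno = r2;	// positive error code
 *		return -1;
 *	}
 *	return r2;		// plain result
 *
 * The kernel's contract is just the pair of stores into PT_R2 and PT_R7.
 */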

ret_from_exception:
	ldw	r1, PT_ESTATUS(sp)
	/* if so, skip resched, signals */
	TSTBNZ	r1, r1, ESTATUS_EU, Luser_return

restore_all:
	rdctl	r10, status			/* disable intrs */
	andi	r10, r10, %lo(~STATUS_PIE)
	wrctl	status, r10
	RESTORE_ALL
	eret

	/* If the syscall number was invalid return ENOSYS */
ret_invsyscall:
	movi	r2, -ENOSYS
	br	translate_rc_and_ret

	/* This implements the same as above, except it calls
	 * do_syscall_trace_enter and do_syscall_trace_exit before and after
	 * the syscall in order for utilities like strace and gdb to work.
	 */
traced_system_call:
	SAVE_SWITCH_STACK
	call	do_syscall_trace_enter
	RESTORE_SWITCH_STACK

	/* Create system call register arguments. The 5th and 6th
	   arguments on stack are already in place at the beginning
	   of pt_regs. */
	ldw	r2, PT_R2(sp)
	ldw	r4, PT_R4(sp)
	ldw	r5, PT_R5(sp)
	ldw	r6, PT_R6(sp)
	ldw	r7, PT_R7(sp)

	/* Fetch the syscall function; there is no need to re-check the
	 * bounds since that was already done at local_restart.
	 */
	slli	r1, r2, 2
	movhi	r11, %hiadj(sys_call_table)
	add	r1, r1, r11
	ldw	r1, %lo(sys_call_table)(r1)

	callr	r1

	/* If the syscall returns a negative result:
	 *   Set r7 to 1 to indicate error,
	 *   Negate r2 to get a positive error code.
	 * If the syscall returns zero or a positive value:
	 *   Set r7 to 0.
	 * The sigreturn system calls will skip the code below by adding to
	 * register ra, so that the r2/r7 values they have just restored are
	 * not overwritten here.
	 */
translate_rc_and_ret2:
	movi	r1, 0
	bge	r2, zero, 4f
	sub	r2, zero, r2
	movi	r1, 1
4:
	stw	r2, PT_R2(sp)
	stw	r1, PT_R7(sp)
end_translate_rc_and_ret2:
	SAVE_SWITCH_STACK
	call	do_syscall_trace_exit
	RESTORE_SWITCH_STACK
	br	ret_from_exception

Luser_return:
	GET_THREAD_INFO	r11			/* get thread_info pointer */
	ldw	r10, TI_FLAGS(r11)		/* get thread_info->flags */
	ANDI32	r11, r10, _TIF_WORK_MASK
	beq	r11, r0, restore_all		/* Nothing to do */
	BTBZ	r1, r10, TIF_NEED_RESCHED, Lsignal_return

	/* Reschedule work */
	call	schedule
	br	ret_from_exception

Lsignal_return:
	ANDI32	r1, r10, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
	beq	r1, r0, restore_all
	mov	r4, sp			/* pt_regs */
	SAVE_SWITCH_STACK
	call	do_notify_resume
	beq	r2, r0, no_work_pending
	RESTORE_SWITCH_STACK
	/* prepare restart syscall here without leaving kernel */
	ldw	r2, PT_R2(sp)	/* reload syscall number in r2 */
	ldw	r4, PT_R4(sp)	/* reload syscall arguments r4-r9 */
	ldw	r5, PT_R5(sp)
	ldw	r6, PT_R6(sp)
	ldw	r7, PT_R7(sp)
	ldw	r8, PT_R8(sp)
	ldw	r9, PT_R9(sp)
	br	local_restart	/* restart syscall */

no_work_pending:
	RESTORE_SWITCH_STACK
	br	ret_from_exception
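
	/*
	 * Note on the sequence above: a non-zero return from
	 * do_notify_resume() means the interrupted system call has to be
	 * restarted, so the original r2 and r4..r9 are reloaded from the
	 * saved pt_regs and control re-enters at local_restart without
	 * ever returning to user space.
	 */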

/***********************************************************************
 * Handle external interrupts.
 ***********************************************************************
 */
/*
 * This is the generic interrupt handler (for all hardware interrupt
 * sources). It figures out the vector number and calls the appropriate
 * interrupt service routine directly.
 */
external_interrupt:
	rdctl	r12, ipending
	rdctl	r9, ienable
	and	r12, r12, r9
	/* skip if no interrupt is pending */
	beq	r12, r0, ret_from_interrupt

	movi	r24, -1
	stw	r24, PT_ORIG_R2(sp)

	/*
	 * Process an external hardware interrupt.
	 */

	addi	ea, ea, -4	/* re-issue the interrupted instruction */
	stw	ea, PT_EA(sp)
2:	movi	r4, %lo(-1)	/* Start from bit position 0,
					highest priority */
				/* This is the IRQ # for handler call */
1:	andi	r10, r12, 1	/* Isolate bit we are interested in */
	srli	r12, r12, 1	/* shift count is costly without hardware
					multiplier */
	addi	r4, r4, 1
	beq	r10, r0, 1b
	mov	r5, sp		/* Setup pt_regs pointer for handler call */
	call	do_IRQ
	rdctl	r12, ipending	/* check again if irq still pending */
	rdctl	r9, ienable	/* Isolate possible interrupts */
	and	r12, r12, r9
	bne	r12, r0, 2b
	/* br	ret_from_interrupt */ /* fall through to ret_from_interrupt */
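	/*
	 * Equivalent decode loop in C, sketch only (RDCTL() stands in for
	 * the rdctl instruction, it is not a real macro):
	 *
	 *	while ((pending = RDCTL(ipending) & RDCTL(ienable)) != 0) {
	 *		int irq = 0;
	 *
	 *		while (!(pending & 1)) {	// find lowest set bit:
	 *			pending >>= 1;		// IRQ 0 has highest
	 *			irq++;			// priority
	 *		}
	 *		do_IRQ(irq, regs);
	 *	}
	 */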

ENTRY(ret_from_interrupt)
	ldw	r1, PT_ESTATUS(sp)	/* check if returning to kernel */
	TSTBNZ	r1, r1, ESTATUS_EU, Luser_return

#ifdef CONFIG_PREEMPT
	GET_THREAD_INFO	r1
	ldw	r4, TI_PREEMPT_COUNT(r1)
	bne	r4, r0, restore_all
	ldw	r4, TI_FLAGS(r1)		/* ? Need resched set */
	BTBZ	r10, r4, TIF_NEED_RESCHED, restore_all
	ldw	r4, PT_ESTATUS(sp)	/* ? Interrupts off */
	andi	r10, r4, ESTATUS_EPIE
	beq	r10, r0, restore_all
	call	preempt_schedule_irq
#endif
	br	restore_all

/***********************************************************************
 * A few syscall wrappers
 ***********************************************************************
 */
/*
 * int clone(unsigned long clone_flags, unsigned long newsp,
 *		int __user * parent_tidptr, int __user * child_tidptr,
 *		int tls_val)
 */
ENTRY(sys_clone)
	SAVE_SWITCH_STACK
	addi	sp, sp, -4
	stw	r7, 0(sp)	/* Pass 5th arg thru stack */
	mov	r7, r6		/* 4th arg is 3rd of clone() */
	mov	r6, zero	/* 3rd arg always 0 */
	call	do_fork
	addi	sp, sp, 4
	RESTORE_SWITCH_STACK
	ret
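
	/*
	 * Argument shuffle above, spelled out (hedged: the do_fork()
	 * prototype assumed here is roughly do_fork(flags, stack_start,
	 * stack_size, parent_tidptr, child_tidptr), which may differ
	 * between kernel versions):
	 *
	 *	user clone():  r4=flags r5=newsp r6=parent_tid r7=child_tid
	 *	do_fork():     r4=flags r5=newsp r6=0 (stack_size)
	 *	               r7=parent_tid, child_tid passed on the stack
	 *	               as the fifth C argument
	 */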

ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	mov	r4, sp
	call	do_rt_sigreturn
	RESTORE_SWITCH_STACK
	addi	ra, ra, (end_translate_rc_and_ret - translate_rc_and_ret)
	ret

/***********************************************************************
 * A few other wrappers and stubs
 ***********************************************************************
 */
protection_exception_pte:
	rdctl	r6, pteaddr
	slli	r6, r6, 10
	call	do_page_fault
	br	ret_from_exception

protection_exception_ba:
	rdctl	r6, badaddr
	call	do_page_fault
	br	ret_from_exception

protection_exception_instr:
	call	handle_supervisor_instr
	br	ret_from_exception

handle_breakpoint:
	call	breakpoint_c
	br	ret_from_exception

#ifdef CONFIG_NIOS2_ALIGNMENT_TRAP
handle_unaligned:
	SAVE_SWITCH_STACK
	call	handle_unaligned_c
	RESTORE_SWITCH_STACK
	br	ret_from_exception
#else
handle_unaligned:
	call	handle_unaligned_c
	br	ret_from_exception
#endif

handle_illegal:
	call	handle_illegal_c
	br	ret_from_exception

handle_diverror:
	call	handle_diverror_c
	br	ret_from_exception

#ifdef CONFIG_KGDB
handle_kgdb_breakpoint:
	call	kgdb_breakpoint_c
	br	ret_from_exception
#endif

handle_trap_1:
	call	handle_trap_1_c
	br	ret_from_exception

handle_trap_2:
	call	handle_trap_2_c
	br	ret_from_exception

handle_trap_3:
handle_trap_reserved:
	call	handle_trap_3_c
	br	ret_from_exception

/*
 * Beware - when entering resume, prev (the current task) is
 * in r4, next (the new task) is in r5, don't change these
 * registers.
 */
ENTRY(resume)

	rdctl	r7, status			/* save thread status reg */
	stw	r7, TASK_THREAD + THREAD_KPSR(r4)

	andi	r7, r7, %lo(~STATUS_PIE)	/* disable interrupts */
	wrctl	status, r7

	SAVE_SWITCH_STACK
	stw	sp, TASK_THREAD + THREAD_KSP(r4)	/* save kernel stack pointer */
	ldw	sp, TASK_THREAD + THREAD_KSP(r5)	/* restore new thread stack */
	movia	r24, _current_thread			/* save thread */
	GET_THREAD_INFO	r1
	stw	r1, 0(r24)
	RESTORE_SWITCH_STACK

	ldw	r7, TASK_THREAD + THREAD_KPSR(r5)	/* restore thread status reg */
	wrctl	status, r7
	ret
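
/*
 * Context-switch note (descriptive only): resume is reached from the
 * switch_to() macro with prev in r4 and next in r5, as the comment above
 * says. Callee-saved registers travel via SAVE_SWITCH_STACK /
 * RESTORE_SWITCH_STACK on the two kernel stacks, so the per-thread state
 * exchanged here is just the saved status register, the kernel stack
 * pointer and the _current_thread pointer.
 */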

ENTRY(ret_from_fork)
	call	schedule_tail
	br	ret_from_exception

ENTRY(ret_from_kernel_thread)
	call	schedule_tail
	mov	r4, r17	/* arg */
	callr	r16	/* function */
	br	ret_from_exception

/*
 * Kernel user helpers.
 *
 * Each segment is 64-byte aligned and will be mapped into user space.
 * New segments (if ever needed) must be added after the existing ones.
 * This mechanism should only be used for things that are really small
 * and justified; it must not be abused freely.
 *
 */
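
/*
 * Userspace access sketch (illustrative, addresses taken from the
 * "@ 0x..." annotations below): because the helper page sits at a fixed
 * address in every process, a program can probe the version word and
 * call the helpers through ordinary function pointers:
 *
 *	int version = *(volatile int *)0x1000;	// __kuser_helper_version
 *	int (*kcmpxchg)(int *, int, int) = (void *)0x1004;	// __kuser_cmpxchg
 *
 * Only the addresses come from this file; the C names are made up for
 * the example.
 */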

/* Filling pads with undefined instructions. */
.macro	kuser_pad sym size
	.if	((. - \sym) & 3)
	.rept	(4 - (. - \sym) & 3)
	.byte	0
	.endr
	.endif
	.rept	((\size - (. - \sym)) / 4)
	.word	0xdeadbeef
	.endr
.endm

	.align	6
	.globl	__kuser_helper_start
__kuser_helper_start:

__kuser_helper_version:			/* @ 0x1000 */
	.word	((__kuser_helper_end - __kuser_helper_start) >> 6)

__kuser_cmpxchg:			/* @ 0x1004 */
	/*
	 * r4 pointer to exchange variable
	 * r5 old value
	 * r6 new value
	 */
cmpxchg_ldw:
	ldw	r2, 0(r4)		/* load current value */
	sub	r2, r2, r5		/* compare with old value */
	bne	r2, zero, cmpxchg_ret

	/* We had a match, store the new value */
cmpxchg_stw:
	stw	r6, 0(r4)
cmpxchg_ret:
	ret

	kuser_pad __kuser_cmpxchg, 64
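
/*
 * Calling sketch for the helper above (illustrative): with arguments in
 * r4/r5/r6 it behaves like a C function
 *
 *	int kuser_cmpxchg(int *ptr, int oldval, int newval);
 *
 * returning 0 when *ptr matched oldval and was replaced by newval, and a
 * non-zero difference otherwise. For example:
 *
 *	int lock = 0;
 *	if (kuser_cmpxchg(&lock, 0, 1) == 0) {
 *		// acquired; attempts interrupted mid-sequence are rewound
 *		// by kuser_cmpxchg_check at the top of this file
 *	}
 *
 * The name kuser_cmpxchg is made up; the pointer would be taken from the
 * fixed user address of __kuser_cmpxchg (0x1004 per the annotation above).
 */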

	.globl	__kuser_sigtramp
__kuser_sigtramp:
	movi	r2, __NR_rt_sigreturn
	trap

	kuser_pad __kuser_sigtramp, 64

	.globl	__kuser_helper_end
__kuser_helper_end: