// SPDX-License-Identifier: GPL-2.0
/*
 * This file handles the architecture dependent parts of process handling.
 *
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *            Hartmut Penner <hp@de.ibm.com>,
 *            Denis Joseph Barrow,
 */

#include <linux/elf-randomize.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <linux/export.h>
#include <linux/init_task.h>
#include <asm/cpu_mf.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/vtimer.h>
#include <asm/exec.h>
#include <asm/irq.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include "entry.h"

asmlinkage void ret_from_fork(void) asm ("ret_from_fork");

extern void kernel_thread_starter(void);

void flush_thread(void)
{
}
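
/*
 * Called on exec: if the PID changed, refresh the program-parameter value
 * in the lowcore so that CPU-measurement samples are attributed to the new
 * program. Facility 40 is the load-program-parameter facility.
 */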
void arch_setup_new_exec(void)
{
	if (S390_lowcore.current_pid != current->pid) {
		S390_lowcore.current_pid = current->pid;
		if (test_facility(40))
			lpp(&S390_lowcore.lpp);
	}
}

void arch_release_task_struct(struct task_struct *tsk)
{
	runtime_instr_release(tsk);
	guarded_storage_release(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save the floating-point or vector register state of the current
	 * task and set the CIF_FPU flag to lazily restore the FPU register
	 * state when returning to user space.
	 */
	save_fpu_regs();

	memcpy(dst, src, arch_task_struct_size);
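	/*
	 * The memcpy above copied the parent's fpu.regs pointer as well;
	 * let it refer to the child's own register save area.
	 */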
	dst->thread.fpu.regs = dst->thread.fpu.fprs;
	return 0;
}
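
/*
 * Set up the kernel stack of a new task: a fabricated stack_frame plus a
 * pt_regs area near the top of the stack lets the first __switch_to() to
 * the new task "return" into ret_from_fork, which then resumes with the
 * register contents prepared in frame->childregs.
 */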
int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
		    unsigned long arg, struct task_struct *p, unsigned long tls)
{
	struct fake_frame
	{
		struct stack_frame sf;
		struct pt_regs childregs;
	} *frame;

	frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
	p->thread.ksp = (unsigned long) frame;
	/* Save access registers to new thread structure. */
	save_access_regs(&p->thread.acrs[0]);
	/* start new process with ar4 pointing to the correct address space */
	p->thread.mm_segment = get_fs();
	/* Don't copy debug registers */
	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
	p->thread.per_flags = 0;
	/* Initialize per thread user and system timer values */
	p->thread.user_timer = 0;
	p->thread.guest_timer = 0;
	p->thread.system_timer = 0;
	p->thread.hardirq_timer = 0;
	p->thread.softirq_timer = 0;
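
	/*
	 * sf.gprs[] corresponds to the callee-saved registers %r6-%r15
	 * restored by __switch_to(): index 8 is %r14 (return address),
	 * index 9 is %r15 (stack pointer).
	 */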
	frame->sf.back_chain = 0;
	/* new return point is ret_from_fork */
	frame->sf.gprs[8] = (unsigned long) ret_from_fork;
	/* fake return stack for resume(), don't go back to schedule */
	frame->sf.gprs[9] = (unsigned long) frame;

	/* Store access registers to kernel stack of new process. */
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(&frame->childregs, 0, sizeof(struct pt_regs));
		frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
				PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
		frame->childregs.psw.addr =
			(unsigned long) kernel_thread_starter;
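		/*
		 * kernel_thread_starter (entry.S) is expected to pick up
		 * the function from %r9 and its argument from %r10, and
		 * to branch to do_exit via %r11 once the function returns.
		 */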
		frame->childregs.gprs[9] = new_stackp; /* function */
		frame->childregs.gprs[10] = arg;
		frame->childregs.gprs[11] = (unsigned long) do_exit;
		frame->childregs.orig_gpr2 = -1;

		return 0;
	}
	frame->childregs = *current_pt_regs();
	frame->childregs.gprs[2] = 0;	/* child returns 0 on fork. */
	frame->childregs.flags = 0;
	if (new_stackp)
		frame->childregs.gprs[15] = new_stackp;

	/* Don't copy runtime instrumentation info */
	p->thread.ri_cb = NULL;
	frame->childregs.psw.mask &= ~PSW_MASK_RI;
	/* Don't copy guarded storage control block */
	p->thread.gs_cb = NULL;
	p->thread.gs_bc_cb = NULL;

	/* Set a new TLS ?  */
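	/*
	 * The TLS value lives in access registers: compat tasks use
	 * %a0 only, 64-bit tasks split it across %a0 and %a1.
	 */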
	if (clone_flags & CLONE_SETTLS) {
		if (is_compat_task()) {
			p->thread.acrs[0] = (unsigned int)tls;
		} else {
			p->thread.acrs[0] = (unsigned int)(tls >> 32);
			p->thread.acrs[1] = (unsigned int)tls;
		}
	}
	return 0;
}

asmlinkage void execve_tail(void)
{
	current->thread.fpu.fpc = 0;
	asm volatile("sfpc %0" : : "d" (0));
}

/*
 * fill in the FPU structure for a core dump.
 */
int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
{
	save_fpu_regs();
	fpregs->fpc = current->thread.fpu.fpc;
	fpregs->pad = 0;
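	/*
	 * With the vector facility the floating-point registers are
	 * embedded in the vector registers and have to be converted;
	 * otherwise they can be copied as they are.
	 */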
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *)&fpregs->fprs,
				 current->thread.fpu.vxrs);
	else
		memcpy(&fpregs->fprs, current->thread.fpu.fprs,
		       sizeof(fpregs->fprs));
	return 1;
}
EXPORT_SYMBOL(dump_fpu);
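
/*
 * Determine where a sleeping task is waiting: follow the back chain of
 * kernel stack frames and return the first return address that is not
 * part of the scheduler, giving up after 16 frames.
 */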
unsigned long get_wchan(struct task_struct *p)
{
	struct stack_frame *sf, *low, *high;
	unsigned long return_address;
	int count;

	if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
		return 0;
	low = task_stack_page(p);
	high = (struct stack_frame *) task_pt_regs(p);
	sf = (struct stack_frame *) p->thread.ksp;
	if (sf <= low || sf > high)
		return 0;
	for (count = 0; count < 16; count++) {
		sf = (struct stack_frame *) sf->back_chain;
		if (sf <= low || sf > high)
			return 0;
		return_address = sf->gprs[8];
		if (!in_sched_functions(return_address))
			return return_address;
	}
	return 0;
}
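
/*
 * Randomize the start of the user stack by up to one page and keep the
 * result aligned to 16 bytes, unless randomization is disabled for this
 * task or system wide.
 */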
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
}
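
/*
 * Randomize the heap start: move brk up by a page-aligned random offset
 * bounded by BRK_RND_MASK pages (see brk_rnd() above), but never below
 * the current brk.
 */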
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long ret;

	ret = PAGE_ALIGN(mm->brk + brk_rnd());
	return (ret > mm->brk) ? ret : mm->brk;
}
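
/*
 * Repair an unbalanced set_fs(): a kernel path left the address space
 * set to the kernel's, so switch back to USER_DS and warn once with the
 * interruption code and register contents.
 */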
void set_fs_fixup(void)
{
	struct pt_regs *regs = current_pt_regs();
	static bool warned;

	set_fs(USER_DS);
	if (warned)
		return;
	WARN(1, "Unbalanced set_fs - int code: 0x%x\n", regs->int_code);
	show_registers(regs);
	warned = true;
}