/*
 * Copyright (C) 1995  Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/mc146818rtc.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/desc.h>

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/vm86.h>
#include <asm/resctrl_sched.h>
#include <asm/proto.h>

#include "process.h"

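/*
 * Dump the general purpose and segment registers; with SHOW_REGS_ALL,
 * also dump the control registers and any debug registers that are in
 * a non-default state.
 */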
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned short gs;

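	/*
	 * For a user-mode frame, report the task's saved %gs; for a
	 * kernel-mode frame, read the live segment register instead.
	 */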
	if (user_mode(regs))
		gs = get_user_gs(regs);
	else
		savesegment(gs, gs);

	show_ip(regs, KERN_DEFAULT);

	printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, regs->sp);
	printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, regs->ss, regs->flags);

	if (mode != SHOW_REGS_ALL)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = __read_cr3();
	cr4 = __read_cr4();
	printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
		cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))
		return;

	printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
		d0, d1, d2, d3);
	printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n",
		d6, d7);
}

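/*
 * Free the architecture-specific thread state; on 32-bit the only such
 * state is any vm86 IRQs the dead task may still hold.
 */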
void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

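/*
 * Set up the stack state for a newly forked task: an inactive_task_frame
 * that makes the first switch to the child resume in ret_from_fork,
 * followed by the child's user-mode pt_regs.
 */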
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
	unsigned long arg, struct task_struct *p, unsigned long tls)
{
	struct pt_regs *childregs = task_pt_regs(p);
	struct fork_frame *fork_frame = container_of(childregs, struct fork_frame, regs);
	struct inactive_task_frame *frame = &fork_frame->frame;
	struct task_struct *tsk;
	int err;

	/*
	 * For a new task use the RESET flags value since there is no before.
	 * All the status flags are zero; DF and all the system flags must also
	 * be 0, specifically IF must be 0 because we context switch to the new
	 * task with interrupts disabled.
	 */
	frame->flags = X86_EFLAGS_FIXED;
	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.sp0 = (unsigned long) (childregs+1);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/*
		 * Kernel thread: ret_from_fork will call the function in
		 * frame->bx with frame->di as its argument instead of
		 * returning to user mode.
		 */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->di = arg;
		p->thread.io_bitmap_ptr = NULL;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();
	childregs->ax = 0;	/* fork() returns 0 in the child */
	if (sp)
		childregs->sp = sp;

	task_user_gs(p) = get_user_gs(current_pt_regs());

	p->thread.io_bitmap_ptr = NULL;
	tsk = current;
	err = -ENOMEM;

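	/*
	 * Duplicate the parent's I/O permission bitmap, if it has one, so
	 * that ioperm() grants survive across fork().
	 */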
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)tls, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

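/*
 * Reset the register state for a task that has just exec'ed a new program:
 * flat user segments, the new entry point and stack pointer, and interrupts
 * enabled.
 */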
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	set_user_gs(regs, 0);
	regs->fs = 0;
	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;
	regs->ip = new_ip;
	regs->sp = new_sp;
	regs->flags = X86_EFLAGS_IF;
	force_iret();
}
EXPORT_SYMBOL_GPL(start_thread);


/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process.  Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching.  The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular).  With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a bit of a red herring - this code is not noticeably
 * faster.  However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

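	/*
	 * If TIF_NEED_FPU_LOAD is set, the outgoing task's FPU state already
	 * lives in memory and the live registers do not need to be saved.
	 */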
	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
		switch_fpu_prepare(prev_fpu, cpu);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry.  No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel.  Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs.  This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	lazy_save_gs(prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed.  In normal use, the flags restore
	 * in the switch assembly will handle this.  But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/* Debug registers, I/O bitmap and speculation-control MSRs. */
	switch_to_extra(prev_p, next_p);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Reload esp0 and cpu_current_top_of_stack.  This changes
	 * current_thread_info().  Refresh the SYSENTER configuration in
	 * case prev or next is vm86.
	 */
	update_task_stack(next_p);
	refresh_sysenter_cs(next);
	this_cpu_write(cpu_current_top_of_stack,
		       (unsigned long)task_stack_page(next_p) +
		       THREAD_SIZE);

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		lazy_load_gs(next->gs);

	this_cpu_write(current_task, next_p);

	switch_fpu_finish(next_fpu);

	/* Load the Intel cache allocation PQR MSR. */
	resctrl_sched_in();

	return prev_p;
}

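/*
 * 32-bit arch_prctl(); the option handling is shared with 64-bit via
 * do_arch_prctl_common().
 */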
SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return do_arch_prctl_common(current, option, arg2);
}