/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/cpufeature.h>
#include <asm/trace/fpu.h>

/*
 * High level FPU state handling functions:
 */
extern void fpu__prepare_read(struct fpu *fpu);
extern void fpu__prepare_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int  fpu__copy(struct task_struct *dst, struct task_struct *src);
extern void fpu__clear_user_states(struct fpu *fpu);
extern void fpu__clear_all(struct fpu *fpu);
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);
extern int  dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);

/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
extern u64 fpu__get_supported_xfeatures_mask(void);

/*
 * Debugging facility:
 */
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif

/*
 * FPU related CPU feature flag helper routines:
 */
static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif

static inline void fpstate_init_xstate(struct xregs_state *xsave)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask_all;
}

static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}
extern void fpstate_sanitize_xstate(struct fpu *fpu);

#define user_insn(insn, output, input...)				\
({									\
	int err;							\
									\
	might_fault();							\
									\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

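/*
 * Illustrative sketch (not a new helper): user_insn() wraps a single
 * instruction that touches user memory in STAC/CLAC plus an
 * exception-table fixup, so a faulting access yields -1 instead of an
 * oops. A hypothetical caller would look like:
 *
 *	static inline int save_fp_to_user(struct fregs_state __user *buf)
 *	{
 *		return user_insn(fnsave %[fx]; fwait,
 *				 [fx] "=m" (*buf), "m" (*buf));
 *	}
 *
 * which is exactly the pattern copy_fregs_to_user() below follows.
 */
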
#define kernel_insn_err(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define kernel_insn(insn, output, input...)				\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)	\
		     : output : input)

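/*
 * A sketch of how the three wrappers divide the work (hypothetical
 * caller shown, not part of this header): user_insn() returns -1 on a
 * fault in user memory, kernel_insn_err() returns -1 on a fault in
 * kernel memory, and kernel_insn() cannot fail from the caller's point
 * of view: a faulting FPU-state restore is fixed up by
 * ex_handler_fprestore(), which reinitializes the registers. E.g.:
 *
 *	if (copy_kernel_to_fxregs_err(&fpu->state.fxsave))
 *		pr_warn("fxrstor failed\n");	// hypothetical handling
 */
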
static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
	return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}

static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
}

static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_kernel_to_fxregs_err(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
	kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_kernel_to_fregs_err(struct fregs_state *fx)
{
	return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
	if (IS_ENABLED(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
}

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"

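/*
 * Rationale sketch (assumed, not stated in this file): the instructions
 * are hand-assembled as raw opcode bytes so the file also builds with
 * old binutils that lack XSAVES/XRSTORS support. In each encoding the
 * ModRM byte hard-codes the memory operand as (%edi)/(%rdi), which is
 * why the XSTATE_* macros below pin the buffer with the "D" register
 * constraint, roughly:
 *
 *	asm volatile(XSAVE
 *		     : : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)
 *		     : "memory");
 */
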
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     ".pushsection .fixup,\"ax\"\n\t"			\
		     "3: movl $-2,%[err]\n\t"				\
		     "jmp 2b\n\t"					\
		     ".popsection\n\t"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

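/*
 * Usage sketch (illustrative): XSTATE_OP() emits one XSAVE-family
 * instruction with the 64-bit feature mask split across %eax (low) and
 * %edx (high), and converts a fault into err == -2 via the fixup table:
 *
 *	int err;
 *
 *	XSTATE_OP(XSAVE, xstate, -1, -1, err);
 *	if (err)
 *		pr_warn("xsave faulted\n");	// hypothetical handling
 */
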
/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
 * format and supervisor states in addition to the modified optimization in
 * XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports the modified optimization, which XSAVE does not.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction at which we might take an exception.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports the
 * compact XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternatives cannot be used yet.
 */
static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternatives cannot be used yet.
 */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	/*
	 * We should never fault when copying from a kernel buffer, and the FPU
	 * state we set at boot time should be valid.
	 */
	WARN_ON_FPU(err);
}

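/*
 * Usage sketch (hypothetical call site): the _booting variants test
 * boot_cpu_has() directly instead of relying on patched alternatives,
 * so they are safe before apply_alternatives() has run, e.g. while
 * setting up the init buffer during xstate initialization:
 *
 *	fpstate_init(&init_fpstate);
 *	copy_kernel_to_xregs_booting(&init_fpstate.xsave);
 */
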
/*
 * Save processor xstate to xsave area.
 */
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);

	XSTATE_XSAVE(xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 */
static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(xstate, lmask, hmask);
}

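/*
 * Note on the mask split (worked example): the requested-feature bitmap
 * is a 64-bit value that XRSTOR consumes in %edx:%eax, so a mask like
 * XFEATURE_MASK_FP | XFEATURE_MASK_SSE splits as:
 *
 *	u64 mask  = XFEATURE_MASK_FP | XFEATURE_MASK_SSE;	// 0x3
 *	u32 lmask = mask;					// 0x3
 *	u32 hmask = mask >> 32;					// 0x0
 *
 *	copy_kernel_to_xregs(&fpu->state.xsave, mask);
 */
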
/*
 * Save xstate to user space xsave area.
 *
 * We don't use modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted format of the xsave area, for backward
 * compatibility with old applications which don't understand it.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	stac();
	XSTATE_OP(XSAVE, buf, -1, -1, err);
	clac();

	return err;
}

/*
 * Restore xstate from user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}

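/*
 * Illustrative caller (a simplified sketch of a signal-restore style
 * path): restoring user state must tolerate a fault on the user buffer
 * and must be bracketed by stac()/clac(), both of which the helper
 * above already handles:
 *
 *	if (copy_user_to_xregs(buf_fx, xfeatures_mask_user()))
 *		return -EFAULT;		// bad sigframe
 */
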
/*
 * Restore xstate from kernel space xsave area, return an error code instead
 * of an exception.
 */
static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	return err;
}

/*
 * Save the FPU registers to fpu->state. Must be called with preemption
 * disabled. Returns 'true' if the FPU state is still intact and we can
 * keep registers active.
 *
 * The legacy FNSAVE instruction clears all FPU state
 * unconditionally, so registers are essentially destroyed.
 * Modern FPU state can be kept in registers, if there are
 * no pending FP exceptions.
 */
static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		copy_xregs_to_kernel(&fpu->state.xsave);

		/*
		 * AVX512 state is tracked here because its use is
		 * known to slow the max clock speed of the core.
		 */
		if (fpu->state.xsave.header.xfeatures & XFEATURE_MASK_AVX512)
			fpu->avx512_timestamp = jiffies;
		return 1;
	}

	if (likely(use_fxsr())) {
		copy_fxregs_to_kernel(fpu);
		return 1;
	}

	/*
	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
	 * so we have to mark them inactive:
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));

	return 0;
}

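/*
 * Usage sketch: the return value tells the caller whether the
 * in-register state survived the save, which decides whether
 * fpu->last_cpu stays valid:
 *
 *	if (!copy_fpregs_to_fpstate(old_fpu))
 *		old_fpu->last_cpu = -1;	// FNSAVE destroyed the registers
 *	else
 *		old_fpu->last_cpu = cpu;
 *
 * switch_fpu_prepare() below is the canonical caller of this pattern.
 */
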
static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
{
	if (use_xsave()) {
		copy_kernel_to_xregs(&fpstate->xsave, mask);
	} else {
		if (use_fxsr())
			copy_kernel_to_fxregs(&fpstate->fxsave);
		else
			copy_kernel_to_fregs(&fpstate->fsave);
	}
}

static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * "m" is a random variable that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpstate));
	}

	__copy_kernel_to_fpregs(fpstate, -1);
}

extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * The in-register FPU state for an FPU context on a CPU is assumed to be
 * valid if the fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx
 * matches the FPU.
 *
 * If the FPU register state is valid, the kernel can skip restoring the
 * FPU state from memory.
 *
 * Any code that clobbers the FPU registers or updates the in-memory
 * FPU state for a task MUST let the rest of the kernel know that the
 * FPU registers are no longer valid for this task.
 *
 * Either one of these invalidation functions is enough. Invalidate
 * a resource you control: CPU if using the CPU for something else
 * (with preemption disabled), FPU for the current task, or a task that
 * is prevented from running by the current task.
 */
static inline void __cpu_invalidate_fpregs_state(void)
{
	__this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
	fpu->last_cpu = -1;
}

static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
	return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}

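/*
 * Illustrative fast path (mirrors __fpregs_load_activate() below): the
 * validity check lets a caller skip the memory restore entirely when
 * both ownership conditions hold:
 *
 *	if (!fpregs_state_valid(fpu, cpu)) {
 *		copy_kernel_to_fpregs(&fpu->state);	// slow path only
 *		fpregs_activate(fpu);
 *		fpu->last_cpu = cpu;
 *	}
 */
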
/*
 * These generally need preemption protection to work,
 * so try to avoid using them on their own:
 */
static inline void fpregs_deactivate(struct fpu *fpu)
{
	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
	trace_x86_fpu_regs_deactivated(fpu);
}

static inline void fpregs_activate(struct fpu *fpu)
{
	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
	trace_x86_fpu_regs_activated(fpu);
}

/*
 * Internal helper, do not use directly. Use switch_fpu_return() instead.
 */
static inline void __fpregs_load_activate(void)
{
	struct fpu *fpu = &current->thread.fpu;
	int cpu = smp_processor_id();

	if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
		return;

	if (!fpregs_state_valid(fpu, cpu)) {
		copy_kernel_to_fpregs(&fpu->state);
		fpregs_activate(fpu);
		fpu->last_cpu = cpu;
	}
	clear_thread_flag(TIF_NEED_FPU_LOAD);
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state.
 *    This is done within the context of the old process.
 *
 *  - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state
 *    will get loaded on return to userspace, or when the kernel needs it.
 *
 * If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers
 * are saved in the current thread's FPU register state.
 *
 * If TIF_NEED_FPU_LOAD is set then the CPU's FPU registers may not
 * hold current()'s FPU registers. They must be loaded before
 * returning to userland or before their content is used otherwise.
 *
 * The FPU context is only stored/restored for a user task and
 * PF_KTHREAD is used to distinguish between kernel and user threads.
 */
static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
	if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
		if (!copy_fpregs_to_fpstate(old_fpu))
			old_fpu->last_cpu = -1;
		else
			old_fpu->last_cpu = cpu;

		/* But leave fpu_fpregs_owner_ctx! */
		trace_x86_fpu_regs_deactivated(old_fpu);
	}
}

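/*
 * Context-switch sketch (simplified view of the prepare/finish split
 * described above; the real call sites live in the __switch_to() paths):
 *
 *	switch_fpu_prepare(&prev->thread.fpu, cpu);	// save outgoing state
 *	...						// switch stacks, mm, ...
 *	switch_fpu_finish(&next->thread.fpu);		// defer the FPU load
 */
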
/*
 * Misc helper functions:
 */

/*
 * Load PKRU from the FPU context if available. Delay loading of the
 * complete FPU state until the return to userland.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu)
{
	u32 pkru_val = init_pkru_value;
	struct pkru_state *pk;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return;

	set_thread_flag(TIF_NEED_FPU_LOAD);

	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/*
	 * PKRU state is switched eagerly because it needs to be valid before we
	 * return to userland e.g. for a copy_to_user() operation.
	 */
	if (current->mm) {
		pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
		if (pk)
			pkru_val = pk->pkru;
	}
	__write_pkru(pkru_val);
}

/*
 * MXCSR and XCR definitions:
 */

static inline void ldmxcsr(u32 mxcsr)
{
	asm volatile("ldmxcsr %0" :: "m" (mxcsr));
}

extern unsigned int mxcsr_feature_mask;

#define XCR_XFEATURE_ENABLED_MASK	0x00000000

static inline u64 xgetbv(u32 index)
{
	u32 eax, edx;

	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
		     : "=a" (eax), "=d" (edx)
		     : "c" (index));
	return eax + ((u64)edx << 32);
}

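/*
 * Usage sketch (illustrative): reading XCR0 to see which xfeatures the
 * OS has enabled, e.g. whether AVX state is managed by XSAVE:
 *
 *	u64 xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
 *
 *	if (xcr0 & XFEATURE_MASK_YMM)
 *		pr_info("AVX state is XSAVE-managed\n");
 */
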
static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
		     : : "a" (eax), "d" (edx), "c" (index));
}

#endif /* _ASM_X86_FPU_INTERNAL_H */