/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written and signed off for the Linux
 * kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 * Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>

#undef pr_fmt
#define pr_fmt(fmt)	"Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif
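/*
 * (__GFP_NOTRACK went away from mainline together with kmemcheck, so
 * the stub above keeps the allocation flags used below building on
 * kernels that no longer have it.)
 */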

static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

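/*
 * PTI can be controlled on the kernel command line. For reference, the
 * forms parsed below are:
 *
 *	pti=off		force-disable PTI
 *	pti=on		force-enable PTI, even on unaffected CPUs
 *	pti=auto	enable PTI only if X86_BUG_CPU_MELTDOWN is set
 *	nopti		legacy alias for pti=off
 *
 * With no option given, the pti=auto behavior is used, and PTI is
 * force-disabled when running as a Xen PV guest.
 */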
void __init pti_check_boottime_disable(void)
{
	char arg[5];
	int ret;

	/* Assume mode is auto unless overridden. */
	pti_mode = PTI_AUTO;

	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
	if (ret > 0) {
		if (ret == 3 && !strncmp(arg, "off", 3)) {
			pti_mode = PTI_FORCE_OFF;
			pti_print_if_insecure("disabled on command line.");
			return;
		}
		if (ret == 2 && !strncmp(arg, "on", 2)) {
			pti_mode = PTI_FORCE_ON;
			pti_print_if_secure("force enabled on command line.");
			goto enable;
		}
		if (ret == 4 && !strncmp(arg, "auto", 4)) {
			pti_mode = PTI_AUTO;
			goto autosel;
		}
	}

	if (cmdline_find_option_bool(boot_command_line, "nopti")) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on command line.");
		return;
	}

autosel:
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;
enable:
	setup_force_cpu_cap(X86_FEATURE_PTI);
}

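/*
 * Called whenever a kernel PGD entry is written (via the
 * pti_set_user_pgd() wrapper, which is assumed to be hooked into the
 * pgd setters in asm/pgtable_64.h), so that the usermode copy of an
 * entry stays in sync with the kernelmode one.
 */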
pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set.  This could be an executable
	 *    EFI runtime mapping or something similar, and the kernel
	 *    may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (!new_p4d_page)
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_large(*pgd) != 0);

	return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
	pud_t *pud;

	BUILD_BUG_ON(p4d_large(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (!new_pud_page)
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_large(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (!new_pmd_page)
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}

#ifdef CONFIG_X86_VSYSCALL_EMULATION
/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.  Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables.  It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd = pti_user_pagetable_walk_pmd(address);
	pte_t *pte;

	/* We can't do anything sensible if we hit a large mapping. */
	if (pmd_large(*pmd)) {
		WARN_ON(1);
		return NULL;
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}

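/*
 * Emulation of the legacy vsyscall page has to keep working while
 * running on the user page tables, so clone its single 4k PTE into
 * them and mark its pagetable hierarchy as user-accessible.
 */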
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

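/*
 * Clone a PMD-aligned kernel address range into the user page tables,
 * clearing the flag bits in 'clear' from the user copies. Both callers
 * in this file pass _PAGE_RW, making the shared ranges read-only in
 * the user mapping.
 */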
static void
pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;
		pud = pud_offset(p4d, addr);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			continue;

		target_pmd = pti_user_pagetable_walk_pmd(addr);
		if (WARN_ON(!target_pmd))
			return;

		/*
		 * Only clone present PMDs.  This ensures only setting
		 * _PAGE_GLOBAL on present PMDs.  This should only be
		 * called on well-known addresses anyway, so a non-
		 * present PMD would be a surprise.
		 */
		if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
			return;

		/*
		 * Setting 'target_pmd' below creates a mapping in both
		 * the user and kernel page tables.  It is effectively
		 * global, so set it as global in both copies.  Note:
		 * the X86_FEATURE_PGE check is not _required_ because
		 * the CPU ignores _PAGE_GLOBAL when PGE is not
		 * supported.  The check keeps consistency with code
		 * that only sets this bit when it is supported.
		 */
		if (boot_cpu_has(X86_FEATURE_PGE))
			*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

		/*
		 * Copy the PMD.  That is, the kernelmode and usermode
		 * tables will share the last-level page tables of this
		 * address range.
		 */
		*target_pmd = pmd_clear_flags(*pmd, clear);
	}
}

/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA into the user space visible page table.
 */
static void __init pti_clone_user_shared(void)
{
	pti_clone_p4d(CPU_ENTRY_AREA_BASE);
}

/*
 * Clone the ESPFIX P4D into the user space visible page table.
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry and irqentry text and force them RO.
 */
static void __init pti_clone_entry_text(void)
{
	pti_clone_pmds((unsigned long) __entry_text_start,
		       (unsigned long) __irqentry_text_end,
		       _PAGE_RW);
}

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text and are not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto.  Do the most
	 * secure thing (not global) if pti=on is specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages.  Do the safe thing (disable
	 * global kernel image).  This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures.  Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
		return false;

	return true;
}

/*
 * For some configurations, map all of kernel text into the user page
 * tables.  This reduces TLB misses, especially on non-PCID systems.
 */
void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web.  But, do not
	 * clone the areas past rodata; they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = (unsigned long)__end_rodata_hpage_align;

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pmds(start, end, _PAGE_RW);
}

/*
 * This is the only user of set_memory_nonglobal(), and it is not
 * arch-generic like the other set_memory.h functions, so just
 * declare it here rather than in a header.
 */
extern int set_memory_nonglobal(unsigned long addr, int numpages);
void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel.  We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);

	if (pti_kernel_image_global_ok())
		return;

	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();
	/* Replace some of the global bits just for shared entry text: */
	pti_clone_entry_text();
	pti_setup_espfix64();
	pti_setup_vsyscall();
}