// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/compat.h>
#include <linux/binfmts.h>
#include <vdso/datapage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

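/*
 * Start and end of the binary vDSO image linked into the kernel.
 * The symbols are expected to come from the vDSO build itself
 * (typically an incbin wrapper around vdso64.so).
 */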
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

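/*
 * Fault handler for the "[vdso]" special mapping: translate the page
 * offset of the fault into the matching page of the 64-bit vDSO image
 * and hand a referenced page back to the core fault code.
 */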
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page **vdso_pagelist;
	unsigned long vdso_pages;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;

	if (vmf->pgoff >= vdso_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vdso_pagelist[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

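/*
 * mremap() callback for the special mapping: refuse size-changing
 * remaps and keep mm->context.vdso_base in sync with the new location,
 * mainly for the benefit of tools such as CRIU that relocate the vDSO
 * with mremap().
 */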
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	unsigned long vdso_pages;

	vdso_pages = vdso64_pages;

	if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != vma->vm_mm))
		return -EFAULT;

	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};

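/*
 * Handle the "vdso=" kernel command line parameter: any value that
 * kstrtobool() understands ("on"/"off", "1"/"0", "y"/"n", ...) toggles
 * vdso_enabled; an unparsable value leaves the default untouched.
 */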
static int __init vdso_setup(char *str)
{
	bool enabled;

	if (!kstrtobool(str, &enabled))
		vdso_enabled = enabled;
	return 1;
}
__setup("vdso=", vdso_setup);

/*
 * The vdso data page
 */
static union {
	struct vdso_data data;
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = (struct vdso_data *)&vdso_data_store.data;

/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER 2

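/*
 * Each CPU gets a small private vDSO address space: a 16K segment
 * table (2^SEGMENT_ORDER pages), a page table and one data page.
 * The resulting segment-type ASCE maps only that data page, read
 * only, so the vDSO code can pick up per-cpu values such as the CPU
 * number without a system call.
 */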
int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	struct vdso_per_cpu_data *vd;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;
	arch_set_page_dat(virt_to_page(segment_table), SEGMENT_ORDER);
	arch_set_page_dat(virt_to_page(page_table), 0);

	/* Initialize per-cpu vdso data page */
	vd = (struct vdso_per_cpu_data *) page_frame;
	vd->cpu_nr = lowcore->cpu_nr;
	vd->node_id = cpu_to_node(vd->cpu_nr);

	/* Set up page table for the vdso address space */
	memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
	memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);

	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

	lowcore->vdso_asce = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	/*
	 * free_page()/free_pages() ignore an address of 0, so failed
	 * allocations are skipped and successful ones are released.
	 */
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}
| 144 | |
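/*
 * Tear down a per-cpu vDSO address space by walking the single-entry
 * tables back from the ASCE stored in the lowcore.
 */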
void vdso_free_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;

	segment_table = lowcore->vdso_asce & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;

	if (is_compat_task())
		return 0;

	vdso_pages = vdso64_pages;
	/*
	 * The vDSO has a problem and was disabled; just don't "enable"
	 * it for the process.
	 */
	if (vdso_pages == 0)
		return 0;

	/*
	 * Pick a base address for the vDSO in process space. We try to
	 * put it at vdso_base, which is the "natural" base for it, but
	 * we might fail and end up putting it elsewhere.
	 */
	if (mmap_write_lock_killable(mm))
		return -EINTR;
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process
	 * isn't allowed to write to those pages.
	 * gdb can break that via the ptrace interface and thus trigger
	 * COW on those pages, but it is then your responsibility never
	 * to do that on the "data" page of the vDSO, or you'll stop
	 * getting kernel updates and your nice userland gettimeofday()
	 * will be totally dead. It's fine to use it for setting
	 * breakpoints in the vDSO code pages though.
	 */
	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		rc = PTR_ERR(vma);
		goto out_up;
	}

	current->mm->context.vdso_base = vdso_base;
	rc = 0;

out_up:
	mmap_write_unlock(mm);
	return rc;
}

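/*
 * Boot-time setup of the 64-bit vDSO: vdso64_pages counts the image
 * pages plus one extra slot for the shared vdso data page, and the
 * page list is NULL terminated. The get_page() references pin the
 * image and data pages for the lifetime of the kernel.
 */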
static int __init vdso_init(void)
{
	int i;

	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();

	get_page(virt_to_page(vdso_data));

	return 0;
}
early_initcall(vdso_init);