// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/compat.h>
#include <linux/binfmts.h>
#include <vdso/datapage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

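/*
 * The vDSO image is embedded into the kernel at build time (the built
 * vdso64.so is pulled in via an incbin wrapper, typically
 * vdso64_wrapper.S); vdso64_start and vdso64_end delimit that image.
 */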
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

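/*
 * Fault handler for the vDSO special mapping: translate the faulting
 * offset into the matching page of the vDSO image and hand it back
 * with an extra reference taken.
 */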
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page **vdso_pagelist;
	unsigned long vdso_pages;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;

	if (vmf->pgoff >= vdso_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vdso_pagelist[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

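/*
 * mremap() handler for the vDSO special mapping: the vDSO may be moved
 * but never resized; remember the new base address in the mm context.
 */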
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	unsigned long vdso_pages;

	vdso_pages = vdso64_pages;

	if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != vma->vm_mm))
		return -EFAULT;

	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};

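/*
 * Handle the "vdso=" kernel command line parameter; any boolean
 * spelling accepted by kstrtobool() ("1"/"0", "y"/"n", "on"/"off")
 * toggles vdso_enabled.
 */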
static int __init vdso_setup(char *str)
{
	bool enabled;

	if (!kstrtobool(str, &enabled))
		vdso_enabled = enabled;
	return 1;
}
__setup("vdso=", vdso_setup);

/*
 * The vdso data page
 */
static union {
	struct vdso_data data;
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = (struct vdso_data *)&vdso_data_store.data;
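
/*
 * The union pads struct vdso_data to exactly one page; vdso_init()
 * maps that page as the last page of the vDSO, read-only for user space.
 */
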
/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER	2

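/*
 * Build the address space used to map the per-cpu vdso data page:
 * a segment table (4 contiguous pages, hence SEGMENT_ORDER = 2, big
 * enough for _CRST_ENTRIES entries) points to a page table, which in
 * turn maps the single data page.
 */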
int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	struct vdso_per_cpu_data *vd;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;
	arch_set_page_dat(virt_to_page(segment_table), SEGMENT_ORDER);
	arch_set_page_dat(virt_to_page(page_table), 0);

	/* Initialize per-cpu vdso data page */
	vd = (struct vdso_per_cpu_data *) page_frame;
	vd->cpu_nr = lowcore->cpu_nr;
	vd->node_id = cpu_to_node(vd->cpu_nr);

	/* Set up page table for the vdso address space */
	memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
	memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);

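	/*
	 * Hook up the hierarchy: the first segment entry points to the
	 * page table, whose first entry maps the data page read-only
	 * (_PAGE_PROTECT).
	 */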
	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

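	/*
	 * The ASCE is the segment table origin combined with its
	 * designation: table length, user bits and segment-table type.
	 */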
	lowcore->vdso_asce = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}

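/*
 * Tear down a per-cpu vdso address space: walk from the ASCE through
 * the segment and page table entries to find the three allocations,
 * then free them.
 */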
void vdso_free_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;

	segment_table = lowcore->vdso_asce & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;

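	/* 31-bit compat processes do not get the 64-bit vDSO. */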
	if (is_compat_task())
		return 0;

	vdso_pages = vdso64_pages;
	/*
	 * The vDSO had a problem and was disabled; just don't map it
	 * for this process.
	 */
	if (vdso_pages == 0)
		return 0;

	/*
	 * Pick a base address for the vDSO in the process address space;
	 * we let get_unmapped_area() choose it, so there is no fixed
	 * "natural" base.
	 */
	if (mmap_write_lock_killable(mm))
		return -EINTR;
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process
	 * isn't allowed to write to those pages. gdb can break that via
	 * the ptrace interface and thus trigger COW on them, but you must
	 * never do that to the "data" page of the vDSO, or you stop
	 * getting kernel updates and your userland gettimeofday() goes
	 * dead. Setting breakpoints in the vDSO code pages is fine.
	 */
	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		rc = PTR_ERR(vma);
		goto out_up;
	}

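	/*
	 * Record the base address; ARCH_DLINFO exports it to the new
	 * process as AT_SYSINFO_EHDR.
	 */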
	current->mm->context.vdso_base = vdso_base;
	rc = 0;

out_up:
	mmap_write_unlock(mm);
	return rc;
}

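/*
 * Build the kernel-side page list for the vDSO: one entry per code
 * page, followed by the shared data page and a NULL terminator, and
 * set up the per-cpu vdso data for the boot cpu.
 */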
static int __init vdso_init(void)
{
	int i;

	/* Calculate the size of the 64 bit vDSO; the extra page is vdso_data */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();

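	/* Take a reference on the shared data page, like the code pages above. */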
	get_page(virt_to_page(vdso_data));

	return 0;
}
early_initcall(vdso_init);