// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

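/*
 * Fault handler for the "[vdso]" special mapping: select the 32-bit or
 * 64-bit page list based on the mm's compat flag and return the page
 * backing the faulting offset.
 */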
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page **vdso_pagelist;
	unsigned long vdso_pages;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
	if (vma->vm_mm->context.compat_mm) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif

	if (vmf->pgoff >= vdso_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vdso_pagelist[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

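/*
 * When userspace moves the vdso mapping with mremap(), keep
 * context.vdso_base in sync so the kernel can still find it,
 * after checking that the new vma still has the expected size.
 */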
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	unsigned long vdso_pages;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
	if (vma->vm_mm->context.compat_mm)
		vdso_pages = vdso32_pages;
#endif

	if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != vma->vm_mm))
		return -EFAULT;

	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};

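/* Parse the "vdso=" kernel command line option, e.g. vdso=0 or vdso=1. */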
static int __init vdso_setup(char *str)
{
	bool enabled;

	if (!kstrtobool(str, &enabled))
		vdso_enabled = enabled;
	return 1;
}
__setup("vdso=", vdso_setup);

/*
 * The vdso data page
 */
static union {
	struct vdso_data data;
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Set up the vdso data page.
 */
static void __init vdso_init_data(struct vdso_data *vd)
{
	vd->ectg_available = test_facility(31);
}

/*
 * Allocate/free per cpu vdso data.
 */
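/* A segment table has 2048 eight-byte entries, i.e. 16KB or four pages. */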
#define SEGMENT_ORDER	2

/*
 * The initial vdso_data structure for the boot CPU. Eventually
 * it is replaced with a properly allocated structure in vdso_init.
 * This is necessary because a valid S390_lowcore.vdso_per_cpu_data
 * pointer is required to be able to return from an interrupt or
 * program check. See the exit paths in entry.S.
 */
struct vdso_data boot_vdso_data __initdata;

void __init vdso_alloc_boot_cpu(struct lowcore *lowcore)
{
	lowcore->vdso_per_cpu_data = (unsigned long) &boot_vdso_data;
}

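/*
 * Build a minimal address space for one CPU: an order-2 segment table
 * with a single page table that maps one write-protected data page.
 * The resulting ASCE and the data page address are stored in the lowcore.
 */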
int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	struct vdso_per_cpu_data *vd;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;
	arch_set_page_dat(virt_to_page(segment_table), SEGMENT_ORDER);
	arch_set_page_dat(virt_to_page(page_table), 0);

	/* Initialize per-cpu vdso data page */
	vd = (struct vdso_per_cpu_data *) page_frame;
	vd->cpu_nr = lowcore->cpu_nr;
	vd->node_id = cpu_to_node(vd->cpu_nr);

	/* Set up page table for the vdso address space */
	memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
	memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);

	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

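	/*
	 * Form the ASCE from the segment-table origin plus table-length,
	 * user and segment-type bits; together with vdso_per_cpu_data it
	 * gives the exit paths in entry.S access to this per-CPU page
	 * (see the comment above boot_vdso_data).
	 */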
	lowcore->vdso_asce = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}

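/*
 * Undo vdso_alloc_per_cpu(): walk from the ASCE back to the page
 * table and the data page, then free all three allocations.
 */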
void vdso_free_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;

	segment_table = lowcore->vdso_asce & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
	mm->context.compat_mm = is_compat_task();
	if (mm->context.compat_mm)
		vdso_pages = vdso32_pages;
#endif
	/*
	 * The vDSO has a problem and was disabled; just don't "enable"
	 * it for the process.
	 */
	if (vdso_pages == 0)
		return 0;

	/*
	 * Pick a base address for the vDSO in process space. We try to put
	 * it at vdso_base, which is the "natural" base for it, but we might
	 * fail and end up putting it elsewhere.
	 */
	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process
	 * isn't allowed to write to those pages.
	 * gdb can break that via the ptrace interface and thus trigger COW
	 * on those pages, but it is then your responsibility never to do
	 * that on the "data" page of the vDSO, or you'll stop getting
	 * kernel updates and your nice userland gettimeofday will be
	 * totally dead. It's fine to use it for setting breakpoints in
	 * the vDSO code pages, though.
	 */
	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		rc = PTR_ERR(vma);
		goto out_up;
	}

	current->mm->context.vdso_base = vdso_base;
	rc = 0;

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}

static int __init vdso_init(void)
{
	int i;

	vdso_init_data(vdso_data);
#ifdef CONFIG_COMPAT_VDSO
	/* Calculate the size of the 32 bit vDSO */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kcalloc(vdso32_pages + 1, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
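	/*
	 * The "+ 1" above accounts for the shared vdso data page, which
	 * is installed as the last pagelist entry below.
	 */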

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
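	/*
	 * Replace the boot CPU's temporary boot_vdso_data with a
	 * properly allocated per-CPU structure; failure here is fatal.
	 */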
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();

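	/*
	 * Take a reference on the shared data page, matching the
	 * references taken on the vdso code pages above.
	 */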
	get_page(virt_to_page(vdso_data));

	return 0;
}
early_initcall(vdso_init);