/*
 * vdso setup for s390
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

#ifdef CONFIG_64BIT
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
#endif /* CONFIG_64BIT */

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

static int __init vdso_setup(char *s)
{
        unsigned long val;
        int rc;

        rc = 0;
        if (strncmp(s, "on", 3) == 0)
                vdso_enabled = 1;
        else if (strncmp(s, "off", 4) == 0)
                vdso_enabled = 0;
        else {
                rc = kstrtoul(s, 0, &val);
                vdso_enabled = rc ? 0 : !!val;
        }
        return !rc;
}
__setup("vdso=", vdso_setup);
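
/*
 * Usage sketch (kernel command line, derived from the parser above):
 * "vdso=off" or "vdso=0" disables the mapping, "vdso=on", "vdso=1" or
 * any other non-zero number enables it.
 */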

/*
 * The vdso data page
 */
static union {
        struct vdso_data data;
        u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Setup vdso data page.
 */
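/*
 * Note (an assumption, not spelled out in this file): facility bit 31
 * advertises the ECTG instruction, which the 64-bit vdso can use to
 * derive thread CPU time without entering the kernel.
 */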
static void vdso_init_data(struct vdso_data *vd)
{
        vd->ectg_available = test_facility(31);
}

#ifdef CONFIG_64BIT
/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER 2

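/*
 * Sketch of the structure built below, inferred from the code rather
 * than from an authoritative description: control register 5 points at
 * lowcore->paste, paste[4] designates the PSAL, and the PSAL/ASTE pair
 * lives in the otherwise unused upper half of the page table page. The
 * ASTE points at a four page segment table whose first entry maps a one
 * entry page table, which maps the read-only per-cpu data page frame:
 *
 *   cr5 -> paste[4] -> psal -> aste -> segment_table -> page_table
 *                                                       -> page_frame
 */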
int vdso_alloc_per_cpu(struct _lowcore *lowcore)
{
        unsigned long segment_table, page_table, page_frame;
        u32 *psal, *aste;
        int i;

        lowcore->vdso_per_cpu_data = __LC_PASTE;

        if (!vdso_enabled)
                return 0;

        segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
        page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
        page_frame = get_zeroed_page(GFP_KERNEL);
        if (!segment_table || !page_table || !page_frame)
                goto out;

        clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
                    PAGE_SIZE << SEGMENT_ORDER);
        clear_table((unsigned long *) page_table, _PAGE_INVALID,
                    256*sizeof(unsigned long));

        *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
        *(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

        /* The PSAL and ASTE live in the upper half of the page table page */
        psal = (u32 *) (page_table + 256*sizeof(unsigned long));
        aste = psal + 32;

        for (i = 4; i < 32; i += 4)
                psal[i] = 0x80000000;

        lowcore->paste[4] = (u32)(addr_t) psal;
        psal[0] = 0x02000000;
        psal[2] = (u32)(addr_t) aste;
        *(unsigned long *) (aste + 2) = segment_table +
                _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
        aste[4] = (u32)(addr_t) psal;
        lowcore->vdso_per_cpu_data = page_frame;

        return 0;

out:
        /* free_pages() tolerates a zero address, so partial failures are fine */
        free_page(page_frame);
        free_page(page_table);
        free_pages(segment_table, SEGMENT_ORDER);
        return -ENOMEM;
}

void vdso_free_per_cpu(struct _lowcore *lowcore)
{
        unsigned long segment_table, page_table, page_frame;
        u32 *psal, *aste;

        if (!vdso_enabled)
                return;

        psal = (u32 *)(addr_t) lowcore->paste[4];
        aste = (u32 *)(addr_t) psal[2];
        segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
        page_table = *(unsigned long *) segment_table;
        page_frame = *(unsigned long *) page_table;

        free_page(page_frame);
        free_page(page_table);
        free_pages(segment_table, SEGMENT_ORDER);
}

static void vdso_init_cr5(void)
{
        unsigned long cr5;

        if (!vdso_enabled)
                return;
        cr5 = offsetof(struct _lowcore, paste);
        __ctl_load(cr5, 5, 5);
}
#endif /* CONFIG_64BIT */

/*
 * This is called from binfmt_elf. We create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        struct page **vdso_pagelist;
        unsigned long vdso_pages;
        unsigned long vdso_base;
        int rc;

        if (!vdso_enabled)
                return 0;
        /*
         * Only map the vdso for dynamically linked elf binaries.
         */
        if (!uses_interp)
                return 0;

#ifdef CONFIG_64BIT
        vdso_pagelist = vdso64_pagelist;
        vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
        if (is_compat_task()) {
                vdso_pagelist = vdso32_pagelist;
                vdso_pages = vdso32_pages;
        }
#endif
#else
        vdso_pagelist = vdso32_pagelist;
        vdso_pages = vdso32_pages;
#endif

        /*
         * The vDSO had a problem and was disabled; just don't map it for
         * this process.
         */
        if (vdso_pages == 0)
                return 0;

        current->mm->context.vdso_base = 0;

        /*
         * Pick a base address for the vDSO in process space. We simply
         * let get_unmapped_area() choose a free spot, so the vDSO can
         * end up anywhere in the user address space.
         */
        down_write(&mm->mmap_sem);
        vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                rc = vdso_base;
                goto out_up;
        }

        /*
         * Put vDSO base into mm struct. We need to do this before calling
         * install_special_mapping or the perf counter mmap tracking code
         * will fail to recognise it as a vDSO (since arch_vma_name fails).
         */
        current->mm->context.vdso_base = vdso_base;

        /*
         * Our vma flags don't have VM_WRITE, so by default the process
         * isn't allowed to write to those pages.
         * gdb can break that via the ptrace interface and thus trigger COW
         * on those pages, but it is then your responsibility never to do
         * that on the "data" page of the vDSO, or you'll stop getting
         * kernel updates and your nice userland gettimeofday will be
         * totally dead. It's fine to use that for setting breakpoints in
         * the vDSO code pages though.
         */
        rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
                                     VM_READ|VM_EXEC|
                                     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                     vdso_pagelist);
        if (rc)
                current->mm->context.vdso_base = 0;
out_up:
        up_write(&mm->mmap_sem);
        return rc;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
                return "[vdso]";
        return NULL;
}

static int __init vdso_init(void)
{
        int i;

        if (!vdso_enabled)
                return 0;
        vdso_init_data(vdso_data);
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
        /* Calculate the size of the 32 bit vDSO */
        vdso32_pages = ((&vdso32_end - &vdso32_start
                         + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

        /* Make sure pages are in the correct state */
        vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
                                  GFP_KERNEL);
        BUG_ON(vdso32_pagelist == NULL);
        for (i = 0; i < vdso32_pages - 1; i++) {
                struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
                ClearPageReserved(pg);
                get_page(pg);
                vdso32_pagelist[i] = pg;
        }
        /* The last vDSO page is the shared vdso data page */
        vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
        vdso32_pagelist[vdso32_pages] = NULL;
#endif

#ifdef CONFIG_64BIT
        /* Calculate the size of the 64 bit vDSO */
        vdso64_pages = ((&vdso64_end - &vdso64_start
                         + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

        /* Make sure pages are in the correct state */
        vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
                                  GFP_KERNEL);
        BUG_ON(vdso64_pagelist == NULL);
        for (i = 0; i < vdso64_pages - 1; i++) {
                struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
                ClearPageReserved(pg);
                get_page(pg);
                vdso64_pagelist[i] = pg;
        }
        vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
        vdso64_pagelist[vdso64_pages] = NULL;
        if (vdso_alloc_per_cpu(&S390_lowcore))
                BUG();
        vdso_init_cr5();
#endif /* CONFIG_64BIT */

        get_page(virt_to_page(vdso_data));

        /* Make the initialized page lists visible to other CPUs */
        smp_wmb();

        return 0;
}
early_initcall(vdso_init);

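/*
 * s390 has no gate area; the vdso is an ordinary special mapping, so
 * the generic gate area hooks are stubbed out.
 */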
int in_gate_area_no_mm(unsigned long addr)
{
        return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        return 0;
}

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        return NULL;
}

// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/time_namespace.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>

extern char vdso64_start[], vdso64_end[];
extern char vdso32_start[], vdso32_end[];

static struct vm_special_mapping vvar_mapping;

static union {
        struct vdso_data data[CS_BASES];
        u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;

struct vdso_data *vdso_data = vdso_data_store.data;

enum vvar_pages {
        VVAR_DATA_PAGE_OFFSET,
        VVAR_TIMENS_PAGE_OFFSET,
        VVAR_NR_PAGES,
};

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
        return (struct vdso_data *)(vvar_page);
}

static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
        if (likely(vma->vm_mm == current->mm))
                return current->nsproxy->time_ns->vvar_page;
        /*
         * VM_PFNMAP | VM_IO protect .fault() handler from being called
         * through interfaces like /proc/$pid/mem or
         * process_vm_{readv,writev}() as long as there's no .access()
         * in special_mapping_vmops().
         * For more details see check_vma_flags() and __access_remote_vm().
         */
        WARN(1, "vvar_page accessed remotely");
        return NULL;
}

/*
 * The VVAR page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the VVAR
 * page tables are cleared and then they will be re-faulted with a
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
        struct mm_struct *mm = task->mm;
        struct vm_area_struct *vma;

        mmap_read_lock(mm);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                unsigned long size = vma->vm_end - vma->vm_start;

                if (!vma_is_special_mapping(vma, &vvar_mapping))
                        continue;
                zap_page_range(vma, vma->vm_start, size);
                break;
        }
        mmap_read_unlock(mm);
        return 0;
}
#else
static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
        return NULL;
}
#endif

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
                             struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *timens_page = find_timens_vvar_page(vma);
        unsigned long addr, pfn;
        vm_fault_t err;

        switch (vmf->pgoff) {
        case VVAR_DATA_PAGE_OFFSET:
                pfn = virt_to_pfn(vdso_data);
                if (timens_page) {
                        /*
                         * Fault in VVAR page too, since it will be accessed
                         * to get clock data anyway.
                         */
                        addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
                        err = vmf_insert_pfn(vma, addr, pfn);
                        if (unlikely(err & VM_FAULT_ERROR))
                                return err;
                        pfn = page_to_pfn(timens_page);
                }
                break;
#ifdef CONFIG_TIME_NS
        case VVAR_TIMENS_PAGE_OFFSET:
                /*
                 * If a task belongs to a time namespace then a namespace
                 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
                 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
                 * offset.
                 * See also the comment near timens_setup_vdso_data().
                 */
                if (!timens_page)
                        return VM_FAULT_SIGBUS;
                pfn = virt_to_pfn(vdso_data);
                break;
#endif /* CONFIG_TIME_NS */
        default:
                return VM_FAULT_SIGBUS;
        }
        return vmf_insert_pfn(vma, vmf->address, pfn);
}

/*
 * Userspace (e.g. CRIU) may move the vdso mapping; keep
 * mm->context.vdso_base in sync so the kernel still knows where the
 * vdso text lives.
 */
static int vdso_mremap(const struct vm_special_mapping *sm,
                       struct vm_area_struct *vma)
{
        current->mm->context.vdso_base = vma->vm_start;
        return 0;
}

static struct vm_special_mapping vvar_mapping = {
        .name = "[vvar]",
        .fault = vvar_fault,
};

static struct vm_special_mapping vdso64_mapping = {
        .name = "[vdso]",
        .mremap = vdso_mremap,
};

static struct vm_special_mapping vdso32_mapping = {
        .name = "[vdso]",
        .mremap = vdso_mremap,
};

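/*
 * Note (an assumption about how userspace consumes this, not stated in
 * this file): each CPU stores its number in the TOD programmable field,
 * so the vdso getcpu code can recover the CPU id in userspace, e.g. from
 * the programmable field delivered by STCKE, without a system call.
 */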
int vdso_getcpu_init(void)
{
        set_tod_programmable_field(smp_processor_id());
        return 0;
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */

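/*
 * Layout established below, derived from the code (offsets in pages):
 *
 *   vvar_start:      [ vvar data page | timens page ]
 *   vdso_text_start: [ vdso text ... ]
 *
 * with context.vdso_base pointing at the start of the vdso text.
 */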
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        unsigned long vdso_text_len, vdso_mapping_len;
        unsigned long vvar_start, vdso_text_start;
        struct vm_special_mapping *vdso_mapping;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc;

        BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
        if (mmap_write_lock_killable(mm))
                return -EINTR;

        if (is_compat_task()) {
                vdso_text_len = vdso32_end - vdso32_start;
                vdso_mapping = &vdso32_mapping;
        } else {
                vdso_text_len = vdso64_end - vdso64_start;
                vdso_mapping = &vdso64_mapping;
        }
        vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;
        vvar_start = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
        rc = vvar_start;
        if (IS_ERR_VALUE(vvar_start))
                goto out;
        vma = _install_special_mapping(mm, vvar_start, VVAR_NR_PAGES*PAGE_SIZE,
                                       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
                                       VM_PFNMAP,
                                       &vvar_mapping);
        rc = PTR_ERR(vma);
        if (IS_ERR(vma))
                goto out;
        vdso_text_start = vvar_start + VVAR_NR_PAGES * PAGE_SIZE;
        /* VM_MAYWRITE for COW so gdb can set breakpoints */
        vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       vdso_mapping);
        if (IS_ERR(vma)) {
                do_munmap(mm, vvar_start, PAGE_SIZE, NULL);
                rc = PTR_ERR(vma);
        } else {
                current->mm->context.vdso_base = vdso_text_start;
                rc = 0;
        }
out:
        mmap_write_unlock(mm);
        return rc;
}

static struct page ** __init vdso_setup_pages(void *start, void *end)
{
        int pages = (end - start) >> PAGE_SHIFT;
        struct page **pagelist;
        int i;

        pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
        if (!pagelist)
                panic("%s: Cannot allocate page list for VDSO", __func__);
        for (i = 0; i < pages; i++)
                pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
        return pagelist;
}

static int __init vdso_init(void)
{
        vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
        if (IS_ENABLED(CONFIG_COMPAT))
                vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
        return 0;
}
arch_initcall(vdso_init);
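
/*
 * A minimal userspace sketch (not part of the kernel file above; the
 * details are assumptions about the usual glibc/kernel contract): the
 * kernel advertises the vdso via the AT_SYSINFO_EHDR auxv entry, and
 * the special mappings installed above appear as "[vdso]" (and, in the
 * newer revision, "[vvar]") in /proc/self/maps.
 */
#include <elf.h>
#include <stdio.h>
#include <string.h>
#include <sys/auxv.h>

int main(void)
{
        /* Address of the vdso ELF header, 0 if no vdso was mapped */
        unsigned long ehdr = getauxval(AT_SYSINFO_EHDR);
        char line[256];
        FILE *maps;

        printf("vdso ELF header at %#lx\n", ehdr);

        /* Cross-check against the special mapping names */
        maps = fopen("/proc/self/maps", "r");
        if (!maps)
                return 1;
        while (fgets(line, sizeof(line), maps))
                if (strstr(line, "[vdso]") || strstr(line, "[vvar]"))
                        fputs(line, stdout);
        fclose(maps);
        return 0;
}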