// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/time_namespace.h>
#include <linux/random.h>
#include <vdso/datapage.h>
#include <asm/vdso/vsyscall.h>
#include <asm/alternative.h>
#include <asm/vdso.h>

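/*
 * Linker-provided start/end markers of the 64-bit and compat (31-bit)
 * vDSO images that are built into the kernel image (see the linker
 * scripts under arch/s390/kernel/vdso64 and arch/s390/kernel/vdso32).
 */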
extern char vdso64_start[], vdso64_end[];
extern char vdso32_start[], vdso32_end[];

static struct vm_special_mapping vvar_mapping;

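/*
 * Kernel-side copy of the vDSO data: a single page-aligned page
 * (union vdso_data_store from <vdso/datapage.h>) that the generic
 * timekeeping code updates and that vvar_fault() below maps read-only
 * into every process as the [vvar] data page.
 */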
static union vdso_data_store vdso_data_store __page_aligned_data;

struct vdso_data *vdso_data = vdso_data_store.data;

#ifdef CONFIG_TIME_NS
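/*
 * Called by the generic time namespace code to locate the vdso_data
 * array within a (time namespace) vvar page.
 */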
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The VVAR page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the VVAR
 * page tables are cleared and then they will be re-faulted with a
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (!vma_is_special_mapping(vma, &vvar_mapping))
			continue;
		zap_vma_pages(vma);
		break;
	}
	mmap_read_unlock(mm);
	return 0;
}
#endif

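/*
 * Fault handler for the [vvar] mapping: pages are inserted by PFN (the
 * VMA is VM_PFNMAP | VM_IO) on first access. vmf->pgoff selects which
 * page is mapped; for tasks inside a non-root time namespace the data
 * page and the timens page trade places, as described above
 * vdso_join_timens() and near timens_setup_vdso_data().
 */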
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long addr, pfn;
	vm_fault_t err;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		pfn = virt_to_pfn(vdso_data);
		if (timens_page) {
			/*
			 * Fault in VVAR page too, since it will be accessed
			 * to get clock data anyway.
			 */
			addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;
			pfn = page_to_pfn(timens_page);
		}
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = virt_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}
	return vmf_insert_pfn(vma, vmf->address, pfn);
}

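/*
 * Keep mm->context.vdso_base up to date when userspace moves the vdso
 * with mremap() (CRIU does this when restoring a process).
 */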
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

static struct vm_special_mapping vdso64_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

static struct vm_special_mapping vdso32_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

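/*
 * Store the CPU number in the TOD clock programmable field so that the
 * vdso implementation of getcpu() can recover it in userspace without
 * a system call. This initcall covers the boot CPU; the secondary CPUs
 * do the same during their bringup (see arch/s390/kernel/smp.c).
 */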
int vdso_getcpu_init(void)
{
	set_tod_programmable_field(smp_processor_id());
	return 0;
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */

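/*
 * Map the vvar pages followed directly by the vdso text into the
 * current process:
 *
 *   addr: [vvar data page][vvar timens page][vdso text ...]
 *
 * context.vdso_base points at the text, i.e. VVAR_NR_PAGES * PAGE_SIZE
 * beyond the start of the combined mapping.
 */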
static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
{
	unsigned long vvar_start, vdso_text_start, vdso_text_len;
	struct vm_special_mapping *vdso_mapping;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (is_compat_task()) {
		vdso_text_len = vdso32_end - vdso32_start;
		vdso_mapping = &vdso32_mapping;
	} else {
		vdso_text_len = vdso64_end - vdso64_start;
		vdso_mapping = &vdso64_mapping;
	}
	vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
	rc = vvar_start;
	if (IS_ERR_VALUE(vvar_start))
		goto out;
	vma = _install_special_mapping(mm, vvar_start, VVAR_NR_PAGES*PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);
	rc = PTR_ERR(vma);
	if (IS_ERR(vma))
		goto out;
	vdso_text_start = vvar_start + VVAR_NR_PAGES * PAGE_SIZE;
	/* VM_MAYWRITE for COW so gdb can set breakpoints */
	vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		do_munmap(mm, vvar_start, PAGE_SIZE, NULL);
		rc = PTR_ERR(vma);
	} else {
		current->mm->context.vdso_base = vdso_text_start;
		rc = 0;
	}
out:
	mmap_write_unlock(mm);
	return rc;
}

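/*
 * Pick a random, page-aligned load address for the vdso in the range
 * [start, VDSO_BASE - len], used when address space randomization is
 * enabled. If the range is empty, fall back to the (aligned) start.
 */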
static unsigned long vdso_addr(unsigned long start, unsigned long len)
{
	unsigned long addr, end, offset;

	/*
	 * Round up the start address. It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= VDSO_BASE)
		end = VDSO_BASE;
	end -= len;

	if (end > start) {
		offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}
	return addr;
}

unsigned long vdso_text_size(void)
{
	unsigned long size;

	if (is_compat_task())
		size = vdso32_end - vdso32_start;
	else
		size = vdso64_end - vdso64_start;
	return PAGE_ALIGN(size);
}

unsigned long vdso_size(void)
{
	return vdso_text_size() + VVAR_NR_PAGES * PAGE_SIZE;
}

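/*
 * Hook called by the ELF loader on exec() to map the vdso into the new
 * process. With PF_RANDOMIZE the mapping is placed at a randomized
 * address above the stack, otherwise at VDSO_BASE.
 */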
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long addr = VDSO_BASE;
	unsigned long size = vdso_size();

	if (current->flags & PF_RANDOMIZE)
		addr = vdso_addr(current->mm->start_stack + PAGE_SIZE, size);
	return map_vdso(addr, size);
}

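/*
 * Build the NULL-terminated page array that vm_special_mapping.pages
 * expects for the vdso text, one entry per page of the built-in image.
 */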
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int pages = (end - start) >> PAGE_SHIFT;
	struct page **pagelist;
	int i;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);
	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
	return pagelist;
}

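/*
 * The 64-bit vdso image is linked with an .altinstructions section just
 * like the kernel itself. Locate it in the image's ELF section table
 * and patch the alternatives once at boot, before the image is mapped
 * into any process.
 */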
static void vdso_apply_alternatives(void)
{
	const struct elf64_shdr *alt, *shdr;
	struct alt_instr *start, *end;
	const struct elf64_hdr *hdr;

	hdr = (struct elf64_hdr *)vdso64_start;
	shdr = (void *)hdr + hdr->e_shoff;
	alt = find_section(hdr, shdr, ".altinstructions");
	if (!alt)
		return;
	start = (void *)hdr + alt->sh_offset;
	end = (void *)hdr + alt->sh_offset + alt->sh_size;
	apply_alternatives(start, end);
}

static int __init vdso_init(void)
{
	vdso_apply_alternatives();
	vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
	if (IS_ENABLED(CONFIG_COMPAT))
		vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
	return 0;
}
arch_initcall(vdso_init);