// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/time_namespace.h>
#include <linux/random.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>

extern char vdso64_start[], vdso64_end[];
extern char vdso32_start[], vdso32_end[];

static struct vm_special_mapping vvar_mapping;

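/*
 * The union below forces the shared vdso_data array into a single,
 * page-aligned page, so that it can be inserted into user address
 * spaces as the VVAR data page.
 */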
static union {
	struct vdso_data data[CS_BASES];
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;

struct vdso_data *vdso_data = vdso_data_store.data;

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The VVAR page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the VVAR
 * page tables are cleared and then they will be re-faulted with a
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (!vma_is_special_mapping(vma, &vvar_mapping))
			continue;
		zap_vma_pages(vma);
		break;
	}
	mmap_read_unlock(mm);
	return 0;
}
#endif

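/*
 * Lazily populate the VVAR pages on first access. Depending on the faulting
 * page offset, and on whether the task is in a time namespace, either the
 * global vdso_data page or the per-namespace page returned by
 * find_timens_vvar_page() is inserted. The backing pages are kernel memory,
 * which is why the mapping is VM_PFNMAP and populated via vmf_insert_pfn().
 */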
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long addr, pfn;
	vm_fault_t err;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		pfn = virt_to_pfn(vdso_data);
		if (timens_page) {
			/*
			 * Fault in VVAR page too, since it will be accessed
			 * to get clock data anyway.
			 */
			addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;
			pfn = page_to_pfn(timens_page);
		}
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = virt_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}
	return vmf_insert_pfn(vma, vmf->address, pfn);
}

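/*
 * User space may move the vDSO with mremap() (checkpoint/restore does
 * this). Track the new location in mm->context.vdso_base so that the
 * kernel, e.g. the signal return path, can still find the vDSO.
 */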
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

static struct vm_special_mapping vdso64_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

static struct vm_special_mapping vdso32_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

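/*
 * Store the CPU number in the TOD clock's programmable field; the vDSO
 * getcpu() implementation can then read it back cheaply, without a
 * system call.
 */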
int vdso_getcpu_init(void)
{
	set_tod_programmable_field(smp_processor_id());
	return 0;
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */

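/*
 * Map the vDSO into the current process, with the VVAR data pages first
 * and the vDSO text directly behind them:
 *
 *   vvar_start:       VVAR_NR_PAGES * PAGE_SIZE of data pages
 *   vdso_text_start:  vdso_text_len of vDSO text
 */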
static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
{
	unsigned long vvar_start, vdso_text_start, vdso_text_len;
	struct vm_special_mapping *vdso_mapping;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (is_compat_task()) {
		vdso_text_len = vdso32_end - vdso32_start;
		vdso_mapping = &vdso32_mapping;
	} else {
		vdso_text_len = vdso64_end - vdso64_start;
		vdso_mapping = &vdso64_mapping;
	}
	vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
	rc = vvar_start;
	if (IS_ERR_VALUE(vvar_start))
		goto out;
	vma = _install_special_mapping(mm, vvar_start, VVAR_NR_PAGES*PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);
	rc = PTR_ERR(vma);
	if (IS_ERR(vma))
		goto out;
	vdso_text_start = vvar_start + VVAR_NR_PAGES * PAGE_SIZE;
	/* VM_MAYWRITE for COW so gdb can set breakpoints */
	vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		/* Unmap the whole VVAR range again, not just its first page. */
		do_munmap(mm, vvar_start, VVAR_NR_PAGES * PAGE_SIZE, NULL);
		rc = PTR_ERR(vma);
	} else {
		current->mm->context.vdso_base = vdso_text_start;
		rc = 0;
	}
out:
	mmap_write_unlock(mm);
	return rc;
}

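/*
 * Pick a randomized, page-aligned load address for the vDSO somewhere
 * between the given start address and VDSO_BASE. Falls back to start
 * itself if the window is too small.
 */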
static unsigned long vdso_addr(unsigned long start, unsigned long len)
{
	unsigned long addr, end, offset;

	/*
	 * Round up the start address. It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= VDSO_BASE)
		end = VDSO_BASE;
	end -= len;

	if (end > start) {
		offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}
	return addr;
}

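/* Combined size of the VVAR pages and the vDSO text, page aligned. */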
unsigned long vdso_size(void)
{
	unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;

	if (is_compat_task())
		size += vdso32_end - vdso32_start;
	else
		size += vdso64_end - vdso64_start;
	return PAGE_ALIGN(size);
}

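/*
 * Hook called by the ELF loader to map the vDSO. Without PF_RANDOMIZE
 * the vDSO is placed at VDSO_BASE, otherwise at a randomized address
 * above the stack.
 */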
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long addr = VDSO_BASE;
	unsigned long size = vdso_size();

	if (current->flags & PF_RANDOMIZE)
		addr = vdso_addr(current->mm->start_stack + PAGE_SIZE, size);
	return map_vdso(addr, size);
}

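/*
 * Build a NULL-terminated page array for a vDSO image, suitable for the
 * .pages member of its struct vm_special_mapping.
 */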
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int pages = (end - start) >> PAGE_SHIFT;
	struct page **pagelist;
	int i;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);
	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
	return pagelist;
}

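/* Set up the page arrays for the built-in vDSO images once at boot. */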
static int __init vdso_init(void)
{
	vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
	if (IS_ENABLED(CONFIG_COMPAT))
		vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
	return 0;
}
arch_initcall(vdso_init);