v5.4 (arch/s390/kernel/vdso.c)
// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page **vdso_pagelist;
	unsigned long vdso_pages;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
	if (vma->vm_mm->context.compat_mm) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif

	if (vmf->pgoff >= vdso_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vdso_pagelist[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

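/*
 * Userspace is allowed to move the vDSO with mremap(); CRIU does
 * this when restoring a checkpointed task. The hook below records
 * the new location so that context.vdso_base stays valid.
 */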
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	unsigned long vdso_pages;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
	if (vma->vm_mm->context.compat_mm)
		vdso_pages = vdso32_pages;
#endif

	if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != vma->vm_mm))
		return -EFAULT;

	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};

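/*
 * "vdso=" command line parsing. kstrtobool() accepts the usual
 * boolean spellings ("1"/"0", "y"/"n", "on"/"off"); anything else
 * leaves vdso_enabled unchanged.
 */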
static int __init vdso_setup(char *str)
{
	bool enabled;

	if (!kstrtobool(str, &enabled))
		vdso_enabled = enabled;
	return 1;
}
__setup("vdso=", vdso_setup);

/*
 * The vdso data page
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Setup vdso data page.
 */
static void __init vdso_init_data(struct vdso_data *vd)
{
	vd->ectg_available = test_facility(31);
}
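/*
 * Note: facility bit 31 advertises the extract-CPU-time (ECTG)
 * instruction, which the 64-bit vdso uses for its thread CPU-clock
 * fast path.
 */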

/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER	2

/*
 * The initial vdso_data structure for the boot CPU. Eventually
 * it is replaced with a properly allocated structure in vdso_init.
 * This is necessary because a valid S390_lowcore.vdso_per_cpu_data
 * pointer is required to be able to return from an interrupt or
 * program check. See the exit paths in entry.S.
 */
struct vdso_data boot_vdso_data __initdata;

void __init vdso_alloc_boot_cpu(struct lowcore *lowcore)
{
	lowcore->vdso_per_cpu_data = (unsigned long) &boot_vdso_data;
}

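/*
 * Each CPU gets a small private address space for its vdso data:
 * one segment table (order 2, i.e. the 16KB minimum size of an
 * s390 region/segment table), one page table and one data page,
 * wired up by hand below. The resulting ASCE is installed on the
 * exit-to-user paths (see entry.S, as the comment above notes), so
 * the vdso code finds per-CPU data at one fixed virtual address.
 */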
int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	struct vdso_per_cpu_data *vd;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;
	arch_set_page_dat(virt_to_page(segment_table), SEGMENT_ORDER);
	arch_set_page_dat(virt_to_page(page_table), 0);

	/* Initialize per-cpu vdso data page */
	vd = (struct vdso_per_cpu_data *) page_frame;
	vd->cpu_nr = lowcore->cpu_nr;
	vd->node_id = cpu_to_node(vd->cpu_nr);

	/* Set up page table for the vdso address space */
	memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
	memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);

	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

	lowcore->vdso_asce = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}

void vdso_free_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;

	segment_table = lowcore->vdso_asce & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
	mm->context.compat_mm = is_compat_task();
	if (mm->context.compat_mm)
		vdso_pages = vdso32_pages;
#endif
	/*
	 * vDSO has a problem and was disabled, just don't "enable" it for
	 * the process
	 */
	if (vdso_pages == 0)
		return 0;

	/*
	 * pick a base address for the vDSO in process space. We try to put
	 * it at vdso_base which is the "natural" base for it, but we might
	 * fail and end up putting it elsewhere.
	 */
	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * our vma flags don't have VM_WRITE so by default, the process
	 * isn't allowed to write those pages.
	 * gdb can break that with ptrace interface, and thus trigger COW
	 * on those pages but it's then your responsibility to never do that
	 * on the "data" page of the vDSO or you'll stop getting kernel
	 * updates and your nice userland gettimeofday will be totally dead.
	 * It's fine to use that for setting breakpoints in the vDSO code
	 * pages though.
	 */
	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		rc = PTR_ERR(vma);
		goto out_up;
	}

	current->mm->context.vdso_base = vdso_base;
	rc = 0;

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
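/*
 * The base address recorded above is handed to the new program in
 * its auxiliary vector: ARCH_DLINFO in asm/elf.h emits it as
 * AT_SYSINFO_EHDR, which is where glibc looks for the vDSO.
 */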

static int __init vdso_init(void)
{
	int i;

	vdso_init_data(vdso_data);
#ifdef CONFIG_COMPAT_VDSO
	/* Calculate the size of the 32 bit vDSO */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kcalloc(vdso32_pages + 1, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);

		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);

		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();

	get_page(virt_to_page(vdso_data));

	return 0;
}
early_initcall(vdso_init);
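For comparison, the v4.17 version of the same file follows. The differences visible between the two listings: the compat code is guarded by CONFIG_COMPAT rather than CONFIG_COMPAT_VDSO; vdso_fault() and vdso_mremap() select the 31-bit pagelist with is_compat_task() instead of the compat_mm flag that v5.4 records in the mm at exec time; vdso_setup() parses "on"/"off" by hand where v5.4 uses kstrtobool(); the pagelists are allocated with kzalloc() and an open-coded multiplication rather than kcalloc(); the vdso pages are still marked with ClearPageReserved(); the fault handler returns plain int (vm_fault_t did not exist yet) and the file includes linux/bootmem.h rather than linux/memblock.h; and arch_setup_additional_pages() skips the mapping for statically linked binaries (!uses_interp), a check v5.4 has dropped.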
v4.17 (arch/s390/kernel/vdso.c)
// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

#ifdef CONFIG_COMPAT
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page **vdso_pagelist;
	unsigned long vdso_pages;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif

	if (vmf->pgoff >= vdso_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vdso_pagelist[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	unsigned long vdso_pages;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task())
		vdso_pages = vdso32_pages;
#endif

	if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != vma->vm_mm))
		return -EFAULT;

	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};

static int __init vdso_setup(char *s)
{
	unsigned long val;
	int rc;

	rc = 0;
	if (strncmp(s, "on", 3) == 0)
		vdso_enabled = 1;
	else if (strncmp(s, "off", 4) == 0)
		vdso_enabled = 0;
	else {
		rc = kstrtoul(s, 0, &val);
		vdso_enabled = rc ? 0 : !!val;
	}
	return !rc;
}
__setup("vdso=", vdso_setup);
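/*
 * Note the lengths above: strncmp(s, "on", 3) compares the
 * terminating NUL as well, so only the exact strings "on" and
 * "off" match; anything else falls through to kstrtoul().
 */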

/*
 * The vdso data page
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Setup vdso data page.
 */
static void __init vdso_init_data(struct vdso_data *vd)
{
	vd->ectg_available = test_facility(31);
}

/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER	2

/*
 * The initial vdso_data structure for the boot CPU. Eventually
 * it is replaced with a properly allocated structure in vdso_init.
 * This is necessary because a valid S390_lowcore.vdso_per_cpu_data
 * pointer is required to be able to return from an interrupt or
 * program check. See the exit paths in entry.S.
 */
struct vdso_data boot_vdso_data __initdata;

void __init vdso_alloc_boot_cpu(struct lowcore *lowcore)
{
	lowcore->vdso_per_cpu_data = (unsigned long) &boot_vdso_data;
}

int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	struct vdso_per_cpu_data *vd;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;
	arch_set_page_dat(virt_to_page(segment_table), SEGMENT_ORDER);
	arch_set_page_dat(virt_to_page(page_table), 0);

	/* Initialize per-cpu vdso data page */
	vd = (struct vdso_per_cpu_data *) page_frame;
	vd->cpu_nr = lowcore->cpu_nr;
	vd->node_id = cpu_to_node(vd->cpu_nr);

	/* Set up page table for the vdso address space */
	memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
	memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);

	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

	lowcore->vdso_asce = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}

void vdso_free_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;

	segment_table = lowcore->vdso_asce & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;
	/*
	 * Only map the vdso for dynamically linked elf binaries.
	 */
	if (!uses_interp)
		return 0;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task())
		vdso_pages = vdso32_pages;
#endif
	/*
	 * vDSO has a problem and was disabled, just don't "enable" it for
	 * the process
	 */
	if (vdso_pages == 0)
		return 0;

	/*
	 * pick a base address for the vDSO in process space. We try to put
	 * it at vdso_base which is the "natural" base for it, but we might
	 * fail and end up putting it elsewhere.
	 */
	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * our vma flags don't have VM_WRITE so by default, the process
	 * isn't allowed to write those pages.
	 * gdb can break that with ptrace interface, and thus trigger COW
	 * on those pages but it's then your responsibility to never do that
	 * on the "data" page of the vDSO or you'll stop getting kernel
	 * updates and your nice userland gettimeofday will be totally dead.
	 * It's fine to use that for setting breakpoints in the vDSO code
	 * pages though.
	 */
	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		rc = PTR_ERR(vma);
		goto out_up;
	}

	current->mm->context.vdso_base = vdso_base;
	rc = 0;

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}

static int __init vdso_init(void)
{
	int i;

	vdso_init_data(vdso_data);
#ifdef CONFIG_COMPAT
	/* Calculate the size of the 32 bit vDSO */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();

	get_page(virt_to_page(vdso_data));

	return 0;
}
early_initcall(vdso_init);
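
In both versions the mapped base address ends up in the new process's auxiliary vector as AT_SYSINFO_EHDR. A minimal userspace sketch to confirm the mapping from the other side; it assumes glibc 2.16 or later for getauxval(3), and the file name vdso-probe.c is just an example:

/* vdso-probe.c: print the vDSO base address of the current process. */
#include <stdio.h>
#include <sys/auxv.h>	/* getauxval(), AT_SYSINFO_EHDR */

int main(void)
{
	/* The kernel publishes the vDSO base in the aux vector. */
	unsigned long base = getauxval(AT_SYSINFO_EHDR);

	if (!base) {
		puts("no vDSO mapped (booted with vdso=0?)");
		return 1;
	}
	printf("[vdso] mapped at 0x%lx\n", base);
	return 0;
}

The same address shows up as the [vdso] line in /proc/self/maps, which is where the .name string of the vm_special_mapping above is reported.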