v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/time_namespace.h>
#include <linux/random.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>

extern char vdso64_start[], vdso64_end[];
extern char vdso32_start[], vdso32_end[];

static struct vm_special_mapping vvar_mapping;

static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;

struct vdso_data *vdso_data = vdso_data_store.data;

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The VVAR page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the VVAR
 * page tables are cleared and then they will be re-faulted with a
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
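/*
 * Illustrative summary of the two layouts, as implemented by
 * vvar_fault() below:
 *
 *   pgoff                     root time namespace   non-root time namespace
 *   VVAR_DATA_PAGE_OFFSET     vdso_data             per-namespace data page
 *   VVAR_TIMENS_PAGE_OFFSET   unmapped (SIGBUS)     vdso_data
 */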
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (!vma_is_special_mapping(vma, &vvar_mapping))
			continue;
		zap_page_range(vma, vma->vm_start, size);
		break;
	}
	mmap_read_unlock(mm);
	return 0;
}
#endif

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long addr, pfn;
	vm_fault_t err;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		pfn = virt_to_pfn(vdso_data);
		if (timens_page) {
			/*
			 * Fault in VVAR page too, since it will be accessed
			 * to get clock data anyway.
			 */
			addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;
			pfn = page_to_pfn(timens_page);
		}
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = virt_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}
	return vmf_insert_pfn(vma, vmf->address, pfn);
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

static struct vm_special_mapping vdso64_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

static struct vm_special_mapping vdso32_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

int vdso_getcpu_init(void)
{
	set_tod_programmable_field(smp_processor_id());
	return 0;
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */
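/*
 * Note (illustrative): the CPU number stored in the TOD programmable
 * field above is what the vDSO getcpu() code reads back in user space,
 * avoiding a system call; hence the need to run before SMP init.
 */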

static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
{
	unsigned long vvar_start, vdso_text_start, vdso_text_len;
	struct vm_special_mapping *vdso_mapping;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (is_compat_task()) {
		vdso_text_len = vdso32_end - vdso32_start;
		vdso_mapping = &vdso32_mapping;
	} else {
		vdso_text_len = vdso64_end - vdso64_start;
		vdso_mapping = &vdso64_mapping;
	}
	vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
	rc = vvar_start;
	if (IS_ERR_VALUE(vvar_start))
		goto out;
	vma = _install_special_mapping(mm, vvar_start, VVAR_NR_PAGES*PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);
	rc = PTR_ERR(vma);
	if (IS_ERR(vma))
		goto out;
	vdso_text_start = vvar_start + VVAR_NR_PAGES * PAGE_SIZE;
	/* VM_MAYWRITE for COW so gdb can set breakpoints */
	vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		do_munmap(mm, vvar_start, PAGE_SIZE, NULL);
		rc = PTR_ERR(vma);
	} else {
		current->mm->context.vdso_base = vdso_text_start;
		rc = 0;
	}
out:
	mmap_write_unlock(mm);
	return rc;
}
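/*
 * A sketch of the mapping map_vdso() establishes (illustrative):
 *
 *   vvar_start                               [vvar]  VVAR_NR_PAGES pages
 *   vvar_start + VVAR_NR_PAGES * PAGE_SIZE   [vdso]  vdso_text_len bytes
 */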

static unsigned long vdso_addr(unsigned long start, unsigned long len)
{
	unsigned long addr, end, offset;

	/*
	 * Round up the start address. It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= VDSO_BASE)
		end = VDSO_BASE;
	end -= len;

	if (end > start) {
		offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}
	return addr;
}
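/*
 * A sketch of the randomization above (illustrative): start is
 * page-aligned, end is rounded up to a PMD boundary, capped at
 * VDSO_BASE and reduced by len; then a page offset is drawn uniformly
 * from [0, (end - start) >> PAGE_SHIFT], so every page-aligned base
 * in [start, end] is a possible result.
 */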

unsigned long vdso_size(void)
{
	unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;

	if (is_compat_task())
		size += vdso32_end - vdso32_start;
	else
		size += vdso64_end - vdso64_start;
	return PAGE_ALIGN(size);
}

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long addr = VDSO_BASE;
	unsigned long size = vdso_size();

	if (current->flags & PF_RANDOMIZE)
		addr = vdso_addr(current->mm->start_stack + PAGE_SIZE, size);
	return map_vdso(addr, size);
}
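/*
 * Placement summary (illustrative): with PF_RANDOMIZE the base is a
 * randomized page-aligned address above the initial stack, chosen by
 * vdso_addr(); otherwise the fixed VDSO_BASE is passed to map_vdso()
 * as the placement hint.
 */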

static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int pages = (end - start) >> PAGE_SHIFT;
	struct page **pagelist;
	int i;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);
	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
	return pagelist;
}

static int __init vdso_init(void)
{
	vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
	if (IS_ENABLED(CONFIG_COMPAT))
		vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
	return 0;
}
arch_initcall(vdso_init);
v4.6
 
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

#ifdef CONFIG_COMPAT
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

static int __init vdso_setup(char *s)
{
	unsigned long val;
	int rc;

	rc = 0;
	if (strncmp(s, "on", 3) == 0)
		vdso_enabled = 1;
	else if (strncmp(s, "off", 4) == 0)
		vdso_enabled = 0;
	else {
		rc = kstrtoul(s, 0, &val);
		vdso_enabled = rc ? 0 : !!val;
	}
	return !rc;
}
__setup("vdso=", vdso_setup);
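/*
 * Usage example (illustrative): booting with "vdso=off" or "vdso=0"
 * disables the vDSO; "vdso=on" or any non-zero numeric value enables it.
 */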

/*
 * The vdso data page
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Setup vdso data page.
 */
static void __init vdso_init_data(struct vdso_data *vd)
{
	vd->ectg_available = test_facility(31);
}
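/*
 * Note (illustrative): facility bit 31 is the extract-CPU-time (ECTG)
 * facility; the flag set above lets the vDSO know whether ECTG can be
 * used.
 */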

/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER	2

int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	struct vdso_per_cpu_data *vd;
	u32 *psal, *aste;
	int i;

	lowcore->vdso_per_cpu_data = __LC_PASTE;

	if (!vdso_enabled)
		return 0;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;

	/* Initialize per-cpu vdso data page */
	vd = (struct vdso_per_cpu_data *) page_frame;
	vd->cpu_nr = lowcore->cpu_nr;
	vd->node_id = cpu_to_node(vd->cpu_nr);

	/* Set up access register mode page table */
	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
		    PAGE_SIZE << SEGMENT_ORDER);
	clear_table((unsigned long *) page_table, _PAGE_INVALID,
		    256*sizeof(unsigned long));

	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

	psal = (u32 *) (page_table + 256*sizeof(unsigned long));
	aste = psal + 32;

	for (i = 4; i < 32; i += 4)
		psal[i] = 0x80000000;

	lowcore->paste[4] = (u32)(addr_t) psal;
	psal[0] = 0x02000000;
	psal[2] = (u32)(addr_t) aste;
	*(unsigned long *) (aste + 2) = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	aste[4] = (u32)(addr_t) psal;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}
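/*
 * Illustrative view of the chain built above: lowcore->paste (reached
 * via control register 5, see vdso_init_cr5()) -> PSAL -> ASTE ->
 * segment table -> page table -> read-only per-cpu data page frame.
 * vdso_free_per_cpu() below walks the same chain to free the pieces.
 */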

void vdso_free_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;

	if (!vdso_enabled)
		return;

	psal = (u32 *)(addr_t) lowcore->paste[4];
	aste = (u32 *)(addr_t) psal[2];
	segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}

static void vdso_init_cr5(void)
{
	unsigned long cr5;

	if (!vdso_enabled)
		return;
	cr5 = offsetof(struct lowcore, paste);
	__ctl_load(cr5, 5, 5);
}

/*
 * This is called from binfmt_elf, we create the special vma for the
 * vDSO and insert it into the mm struct tree
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct page **vdso_pagelist;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;
	/*
	 * Only map the vdso for dynamically linked elf binaries.
	 */
	if (!uses_interp)
		return 0;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif
	/*
	 * vDSO has a problem and was disabled, just don't "enable" it for
	 * the process
	 */
	if (vdso_pages == 0)
		return 0;

	current->mm->context.vdso_base = 0;

	/*
	 * pick a base address for the vDSO in process space. We try to put
	 * it at vdso_base which is the "natural" base for it, but we might
	 * fail and end up putting it elsewhere.
	 */
	down_write(&mm->mmap_sem);
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	current->mm->context.vdso_base = vdso_base;

	/*
	 * our vma flags don't have VM_WRITE so by default, the process
	 * isn't allowed to write those pages.
	 * gdb can break that with ptrace interface, and thus trigger COW
	 * on those pages but it's then your responsibility to never do that
	 * on the "data" page of the vDSO or you'll stop getting kernel
	 * updates and your nice userland gettimeofday will be totally dead.
	 * It's fine to use that for setting breakpoints in the vDSO code
	 * pages though.
	 */
	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				     VM_READ|VM_EXEC|
				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				     vdso_pagelist);
	if (rc)
		current->mm->context.vdso_base = 0;
out_up:
	up_write(&mm->mmap_sem);
	return rc;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
		return "[vdso]";
	return NULL;
}

static int __init vdso_init(void)
{
	int i;

	if (!vdso_enabled)
		return 0;
	vdso_init_data(vdso_data);
#ifdef CONFIG_COMPAT
	/* Calculate the size of the 32 bit vDSO */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();
	vdso_init_cr5();

	get_page(virt_to_page(vdso_data));

	return 0;
}
early_initcall(vdso_init);