v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Regents of the University of California
 */

#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
#include <asm/vdso.h>
#include <linux/time_namespace.h>

#ifdef CONFIG_GENERIC_TIME_VSYSCALL
#include <vdso/datapage.h>
#else
struct vdso_data {
};
#endif

extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT
extern char compat_vdso_start[], compat_vdso_end[];
#endif

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

enum rv_vdso_map {
	RV_VDSO_MAP_VVAR,
	RV_VDSO_MAP_VDSO,
};

#define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

struct __vdso_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct __vdso_info vdso_info;
#ifdef CONFIG_COMPAT
static struct __vdso_info compat_vdso_info;
#endif

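/*
 * Invoked when userspace moves the vDSO mapping (e.g. during CRIU
 * restore): keep mm->context.vdso in sync so the signal-return
 * trampoline is still found at the new address.
 */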
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

static void __init __vdso_init(struct __vdso_info *vdso_info)
{
	unsigned int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info->vdso_code_start, "\177ELF", 4))
		panic("vDSO is not a valid ELF object!\n");

	vdso_info->vdso_pages = (
		vdso_info->vdso_code_end -
		vdso_info->vdso_code_start) >>
		PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info->vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		panic("vDSO kcalloc failed!\n");

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info->vdso_code_start);

	for (i = 0; i < vdso_info->vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info->cm->pages = vdso_pagelist;
}

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);

	for_each_vma(vmi, vma) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, vdso_info.dm))
			zap_page_range(vma, vma->vm_start, size);
#ifdef CONFIG_COMPAT
		if (vma_is_special_mapping(vma, compat_vdso_info.dm))
			zap_page_range(vma, vma->vm_start, size);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}
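
/*
 * Userspace view of the above (illustrative sketch, not part of this
 * file): once a privileged task has done unshare(CLONE_NEWTIME) and
 * written an offset to /proc/self/timens_offsets, its children read
 * the shifted clock through the vDSO alone:
 *
 *	unshare(CLONE_NEWTIME);
 *	f = fopen("/proc/self/timens_offsets", "w");
 *	fprintf(f, "monotonic 86400 0\n");
 *	fclose(f);
 *	if (fork() == 0)
 *		clock_gettime(CLOCK_MONOTONIC, &ts);  (offset applied here)
 */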
#endif

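/*
 * Fault handler for the [vvar] mapping: it is installed VM_PFNMAP with
 * no pages populated, so each page is inserted on first access,
 * substituting the time-namespace data page where one exists.
 */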
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
	[RV_VDSO_MAP_VVAR] = {
		.name   = "[vvar]",
		.fault = vvar_fault,
	},
	[RV_VDSO_MAP_VDSO] = {
		.name   = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static struct __vdso_info vdso_info __ro_after_init = {
	.name = "vdso",
	.vdso_code_start = vdso_start,
	.vdso_code_end = vdso_end,
	.dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR],
	.cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO],
};

#ifdef CONFIG_COMPAT
static struct vm_special_mapping rv_compat_vdso_maps[] __ro_after_init = {
	[RV_VDSO_MAP_VVAR] = {
		.name   = "[vvar]",
		.fault = vvar_fault,
	},
	[RV_VDSO_MAP_VDSO] = {
		.name   = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static struct __vdso_info compat_vdso_info __ro_after_init = {
	.name = "compat_vdso",
	.vdso_code_start = compat_vdso_start,
	.vdso_code_end = compat_vdso_end,
	.dm = &rv_compat_vdso_maps[RV_VDSO_MAP_VVAR],
	.cm = &rv_compat_vdso_maps[RV_VDSO_MAP_VDSO],
};
#endif

static int __init vdso_init(void)
{
	__vdso_init(&vdso_info);
#ifdef CONFIG_COMPAT
	__vdso_init(&compat_vdso_info);
#endif

	return 0;
}
arch_initcall(vdso_init);

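/*
 * Lay out the two special mappings back to back, vvar pages first:
 *
 *	[ vvar (VVAR_SIZE) ][ vDSO text ]
 *
 * mm->context.vdso points at the text, which is what the AT_SYSINFO_EHDR
 * auxiliary-vector entry hands to userspace.
 */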
static int __setup_additional_pages(struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp,
				    struct __vdso_info *vdso_info)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info->vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VVAR_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE,
		(VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info->dm);
	if (IS_ERR(ret))
		goto up_fail;

	vdso_base += VVAR_SIZE;
	mm->context.vdso = (void *)vdso_base;

	ret =
	   _install_special_mapping(mm, vdso_base, vdso_text_len,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_info->cm);

	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp,
							&compat_vdso_info);
	mmap_write_unlock(mm);

	return ret;
}
#endif

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso_info);
	mmap_write_unlock(mm);

	return ret;
}
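
For context: the pair of mappings installed above is advertised to userspace through the AT_SYSINFO_EHDR auxiliary-vector entry, from which the C library resolves symbols such as __vdso_clock_gettime(). A minimal standalone userspace sketch (not part of the kernel file above; assumes a glibc system):

#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>
#include <time.h>

int main(void)
{
	/* AT_SYSINFO_EHDR holds the base address of the mapped vDSO. */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
	struct timespec ts;

	printf("vDSO mapped at 0x%lx\n", vdso);

	/* The C library routes this through the vDSO when possible. */
	clock_gettime(CLOCK_MONOTONIC, &ts);
	printf("CLOCK_MONOTONIC: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);

	return 0;
}

Run under strace, the clock_gettime() call should not appear as a syscall, since it is satisfied entirely in userspace from the vvar data page.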
v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Regents of the University of California
 */

#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>

#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];

static unsigned int vdso_pages;
static struct page **vdso_pagelist;

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
static struct vdso_data *vdso_data = &vdso_data_store.data;

static int __init vdso_init(void)
{
	unsigned int i;

	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
	vdso_pagelist =
		kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(vdso_pagelist == NULL)) {
		pr_err("vdso: pagelist allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < vdso_pages; i++) {
		struct page *pg;

		pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
		vdso_pagelist[i] = pg;
	}
	vdso_pagelist[i] = virt_to_page(vdso_data);

	return 0;
}
arch_initcall(vdso_init);

int arch_setup_additional_pages(struct linux_binprm *bprm,
	int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base, vdso_len;
	int ret;

	vdso_len = (vdso_pages + 1) << PAGE_SHIFT;

	down_write(&mm->mmap_sem);
	vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = vdso_base;
		goto end;
	}

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	mm->context.vdso = (void *)vdso_base;

	ret = install_special_mapping(mm, vdso_base, vdso_len,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_pagelist);

	if (unlikely(ret))
		mm->context.vdso = NULL;

end:
	up_write(&mm->mmap_sem);
	return ret;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
		return "[vdso]";
	return NULL;
}
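
One user-visible effect of arch_vma_name() above is the "[vdso]" tag in /proc/<pid>/maps; the v6.2 code achieves the same through its named special mappings, which additionally show "[vvar]". A small standalone sketch that prints those lines for the current process:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *maps = fopen("/proc/self/maps", "r");
	char line[512];

	if (!maps)
		return 1;

	/* Print the vDSO-related mappings; on v5.4 only "[vdso]" appears,
	 * since the data page is folded into the same mapping. */
	while (fgets(line, sizeof(line), maps)) {
		if (strstr(line, "[vdso]") || strstr(line, "[vvar]"))
			fputs(line, stdout);
	}

	fclose(maps);
	return 0;
}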