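RISC-V vDSO mapping setup (arch/riscv/kernel/vdso.c), shown for two kernel versions.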
v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Regents of the University of California
 */

#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
#include <asm/vdso.h>
#include <linux/time_namespace.h>
#include <vdso/datapage.h>
#include <vdso/vsyscall.h>

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

enum rv_vdso_map {
	RV_VDSO_MAP_VVAR,
	RV_VDSO_MAP_VDSO,
};

#define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

struct __vdso_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct __vdso_info vdso_info;
#ifdef CONFIG_COMPAT
static struct __vdso_info compat_vdso_info;
#endif

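/*
 * mremap() hook for the [vdso] mapping: user space moved the vDSO, so
 * keep the base address cached in mm->context.vdso in sync.
 */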
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

static void __init __vdso_init(struct __vdso_info *vdso_info)
{
	unsigned int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info->vdso_code_start, "\177ELF", 4))
		panic("vDSO is not a valid ELF object!\n");

	vdso_info->vdso_pages = (
		vdso_info->vdso_code_end -
		vdso_info->vdso_code_start) >>
		PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info->vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		panic("vDSO kcalloc failed!\n");

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info->vdso_code_start);

	for (i = 0; i < vdso_info->vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info->cm->pages = vdso_pagelist;
}

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);

	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, vdso_info.dm))
			zap_vma_pages(vma);
#ifdef CONFIG_COMPAT
		if (vma_is_special_mapping(vma, compat_vdso_info.dm))
			zap_vma_pages(vma);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}
#endif

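/*
 * Fault handler for the [vvar] mapping: translate the faulting page
 * offset into the matching physical page, either the shared vdso_data
 * page or the per-namespace page when the task is in a time namespace.
 */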
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
	[RV_VDSO_MAP_VVAR] = {
		.name   = "[vvar]",
		.fault = vvar_fault,
	},
	[RV_VDSO_MAP_VDSO] = {
		.name   = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static struct __vdso_info vdso_info __ro_after_init = {
	.name = "vdso",
	.vdso_code_start = vdso_start,
	.vdso_code_end = vdso_end,
	.dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR],
	.cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO],
};

#ifdef CONFIG_COMPAT
static struct vm_special_mapping rv_compat_vdso_maps[] __ro_after_init = {
	[RV_VDSO_MAP_VVAR] = {
		.name   = "[vvar]",
		.fault = vvar_fault,
	},
	[RV_VDSO_MAP_VDSO] = {
		.name   = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static struct __vdso_info compat_vdso_info __ro_after_init = {
	.name = "compat_vdso",
	.vdso_code_start = compat_vdso_start,
	.vdso_code_end = compat_vdso_end,
	.dm = &rv_compat_vdso_maps[RV_VDSO_MAP_VVAR],
	.cm = &rv_compat_vdso_maps[RV_VDSO_MAP_VDSO],
};
#endif

static int __init vdso_init(void)
{
	__vdso_init(&vdso_info);
#ifdef CONFIG_COMPAT
	__vdso_init(&compat_vdso_info);
#endif

	return 0;
}
arch_initcall(vdso_init);

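/*
 * Lay out the vDSO in a new address space: the [vvar] data pages come
 * first, immediately followed by the [vdso] code pages, so the vDSO
 * code can reach its data at a fixed offset from its own base.
 */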
static int __setup_additional_pages(struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp,
				    struct __vdso_info *vdso_info)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info->vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VVAR_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE,
		(VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info->dm);
	if (IS_ERR(ret))
		goto up_fail;

	vdso_base += VVAR_SIZE;
	mm->context.vdso = (void *)vdso_base;

	ret =
	   _install_special_mapping(mm, vdso_base, vdso_text_len,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_info->cm);

	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp,
							&compat_vdso_info);
	mmap_write_unlock(mm);

	return ret;
}
#endif

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso_info);
	mmap_write_unlock(mm);

	return ret;
}
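For orientation, the two special mappings installed above appear in every process's address space under the names "[vvar]" and "[vdso]". Below is a minimal user-space sketch that prints those lines from /proc/self/maps; the mapping names are taken from the rv_vdso_maps table above, everything else is plain C:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *maps = fopen("/proc/self/maps", "r");
	char line[512];

	if (!maps)
		return 1;
	/* The kernel installs both mappings at execve() time. */
	while (fgets(line, sizeof(line), maps)) {
		if (strstr(line, "[vdso]") || strstr(line, "[vvar]"))
			fputs(line, stdout);
	}
	fclose(maps);
	return 0;
}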
v6.9.4
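This version is identical to v6.8 except for the vDSO data page: the open-coded page-sized union is replaced by the generic union vdso_data_store, and vdso_data is now initialized from the union's data member directly, without the address-of operator. The rest of the file matches v6.8 line for line; only the changed definition is shown: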
static union vdso_data_store vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;
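The initializer works without & because the data member of the consolidated union is an array. A sketch of the shared definition, reconstructed from memory of include/vdso/datapage.h around v6.9 (CS_BASES is the number of clocksource bases; treat the header itself as authoritative):

/* Approximate shape of the consolidated store; see include/vdso/datapage.h. */
union vdso_data_store {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
};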