v4.17
 
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Regents of the University of California
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>

#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];

static unsigned int vdso_pages;
static struct page **vdso_pagelist;

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

static int __init vdso_init(void)
{
	unsigned int i;

	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
	vdso_pagelist =
		kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(vdso_pagelist == NULL)) {
		pr_err("vdso: pagelist allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < vdso_pages; i++) {
		struct page *pg;

		pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
		ClearPageReserved(pg);
		vdso_pagelist[i] = pg;
	}
	vdso_pagelist[i] = virt_to_page(vdso_data);

	return 0;
}
arch_initcall(vdso_init);

int arch_setup_additional_pages(struct linux_binprm *bprm,
	int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base, vdso_len;
	int ret;

	vdso_len = (vdso_pages + 1) << PAGE_SHIFT;

	down_write(&mm->mmap_sem);
	vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = vdso_base;
		goto end;
	}

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	mm->context.vdso = (void *)vdso_base;

	ret = install_special_mapping(mm, vdso_base, vdso_len,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_pagelist);

	if (unlikely(ret))
		mm->context.vdso = NULL;

end:
	up_write(&mm->mmap_sem);
	return ret;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
		return "[vdso]";
	return NULL;
}

/*
 * Function stubs to prevent linker errors when AT_SYSINFO_EHDR is defined
 */

int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
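
The stubs above exist because defining AT_SYSINFO_EHDR makes the generic ELF loader advertise the vDSO base address to every new process through the auxiliary vector. As a hedged illustration (not part of the kernel source), a userspace program built against glibc can locate the mapping installed by arch_setup_additional_pages() like this:

/*
 * Userspace sketch: find the vDSO via the aux vector and check that
 * the mapping begins with an ordinary ELF header.
 */
#include <stdio.h>
#include <string.h>
#include <sys/auxv.h>	/* getauxval(), AT_SYSINFO_EHDR */

int main(void)
{
	/* The ELF loader passes the vDSO base address in the aux vector. */
	unsigned long base = getauxval(AT_SYSINFO_EHDR);

	if (!base) {
		puts("no vDSO advertised");
		return 1;
	}

	printf("vDSO at %#lx, ELF magic %s\n", base,
	       memcmp((const void *)base, "\177ELF", 4) ? "bad" : "ok");
	return 0;
}
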
v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Regents of the University of California
 */

#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
#include <asm/vdso.h>
#include <linux/time_namespace.h>
#include <vdso/datapage.h>
#include <vdso/vsyscall.h>

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

#define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)

static union vdso_data_store vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

struct __vdso_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct __vdso_info vdso_info;
#ifdef CONFIG_COMPAT
static struct __vdso_info compat_vdso_info;
#endif

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

static void __init __vdso_init(struct __vdso_info *vdso_info)
{
	unsigned int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info->vdso_code_start, "\177ELF", 4))
		panic("vDSO is not a valid ELF object!\n");

	vdso_info->vdso_pages = (
		vdso_info->vdso_code_end -
		vdso_info->vdso_code_start) >>
		PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info->vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		panic("vDSO kcalloc failed!\n");

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info->vdso_code_start);

	for (i = 0; i < vdso_info->vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info->cm->pages = vdso_pagelist;
}

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

static const struct vm_special_mapping rv_vvar_map;

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);

	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &rv_vvar_map))
			zap_vma_pages(vma);
	}

	mmap_read_unlock(mm);
	return 0;
}
#endif

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

static const struct vm_special_mapping rv_vvar_map = {
	.name   = "[vvar]",
	.fault = vvar_fault,
};

static struct vm_special_mapping rv_vdso_map __ro_after_init = {
	.name   = "[vdso]",
	.mremap = vdso_mremap,
};

static struct __vdso_info vdso_info __ro_after_init = {
	.name = "vdso",
	.vdso_code_start = vdso_start,
	.vdso_code_end = vdso_end,
	.cm = &rv_vdso_map,
};

#ifdef CONFIG_COMPAT
static struct vm_special_mapping rv_compat_vdso_map __ro_after_init = {
	.name   = "[vdso]",
	.mremap = vdso_mremap,
};

static struct __vdso_info compat_vdso_info __ro_after_init = {
	.name = "compat_vdso",
	.vdso_code_start = compat_vdso_start,
	.vdso_code_end = compat_vdso_end,
	.cm = &rv_compat_vdso_map,
};
#endif

static int __init vdso_init(void)
{
	__vdso_init(&vdso_info);
#ifdef CONFIG_COMPAT
	__vdso_init(&compat_vdso_info);
#endif

	return 0;
}
arch_initcall(vdso_init);

static int __setup_additional_pages(struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp,
				    struct __vdso_info *vdso_info)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info->vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VVAR_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE,
		(VM_READ | VM_MAYREAD | VM_PFNMAP), &rv_vvar_map);
	if (IS_ERR(ret))
		goto up_fail;

	vdso_base += VVAR_SIZE;
	mm->context.vdso = (void *)vdso_base;

	ret =
	   _install_special_mapping(mm, vdso_base, vdso_text_len,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_info->cm);

	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp,
							&compat_vdso_info);
	mmap_write_unlock(mm);

	return ret;
}
#endif

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso_info);
	mmap_write_unlock(mm);

	return ret;
}
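
Relative to the v4.17 code above, this version installs two special mappings per process: a VVAR_SIZE data area named "[vvar]" (faulted in lazily by vvar_fault(), with a per-time-namespace page when CONFIG_TIME_NS is active), followed immediately by the "[vdso]" text. As a hedged illustration (not part of the kernel source), the resulting layout can be observed from userspace:

/*
 * Userspace sketch: print the [vvar] and [vdso] lines from
 * /proc/self/maps; [vvar] should sit directly below [vdso].
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;

	/* The names come from the vm_special_mapping definitions above. */
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[vdso]") || strstr(line, "[vvar]"))
			fputs(line, stdout);

	fclose(f);
	return 0;
}
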