v4.17
 
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Regents of the University of California
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>

#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];

static unsigned int vdso_pages;
static struct page **vdso_pagelist;

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

static int __init vdso_init(void)
{
	unsigned int i;

	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
	vdso_pagelist =
		kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(vdso_pagelist == NULL)) {
		pr_err("vdso: pagelist allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < vdso_pages; i++) {
		struct page *pg;

		pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
		ClearPageReserved(pg);
		vdso_pagelist[i] = pg;
	}
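	/* The extra slot at the end of the list holds the vDSO data page. */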
	vdso_pagelist[i] = virt_to_page(vdso_data);

	return 0;
}
arch_initcall(vdso_init);

int arch_setup_additional_pages(struct linux_binprm *bprm,
	int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base, vdso_len;
	int ret;

	vdso_len = (vdso_pages + 1) << PAGE_SHIFT;

	down_write(&mm->mmap_sem);
	vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = vdso_base;
		goto end;
	}

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	mm->context.vdso = (void *)vdso_base;

	ret = install_special_mapping(mm, vdso_base, vdso_len,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_pagelist);

	if (unlikely(ret))
		mm->context.vdso = NULL;

end:
	up_write(&mm->mmap_sem);
	return ret;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
		return "[vdso]";
	return NULL;
}

/*
 * Function stubs to prevent linker errors when AT_SYSINFO_EHDR is defined
 */

int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
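
As an aside, here is a minimal userspace sketch (not part of the kernel file above) of how the mapping set up by arch_setup_additional_pages() becomes visible to a process: the kernel publishes the vDSO through the AT_SYSINFO_EHDR auxiliary-vector entry, and arch_vma_name() is what makes it appear as "[vdso]" in /proc/self/maps.

#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
	/* Address of the vDSO ELF header, as published in the auxiliary vector. */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso)
		printf("vDSO mapped at 0x%lx (see \"[vdso]\" in /proc/self/maps)\n", vdso);
	else
		printf("no vDSO advertised for this process\n");
	return 0;
}

On kernels or architectures without a vDSO the auxiliary-vector entry is simply absent and getauxval() returns 0.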
v5.14.15
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Regents of the University of California
 */

#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
#include <asm/vdso.h>

#ifdef CONFIG_GENERIC_TIME_VSYSCALL
#include <vdso/datapage.h>
#else
struct vdso_data {
};
#endif

extern char vdso_start[], vdso_end[];

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

#define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)

static unsigned int vdso_pages __ro_after_init;
static struct page **vdso_pagelist __ro_after_init;

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

static int __init vdso_init(void)
{
	unsigned int i;

	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
	vdso_pagelist =
		kcalloc(vdso_pages + VVAR_NR_PAGES, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(vdso_pagelist == NULL)) {
		pr_err("vdso: pagelist allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < vdso_pages; i++) {
		struct page *pg;

		pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
		vdso_pagelist[i] = pg;
	}
	vdso_pagelist[i] = virt_to_page(vdso_data);

	return 0;
}
arch_initcall(vdso_init);

int arch_setup_additional_pages(struct linux_binprm *bprm,
	int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base, vdso_len;
	int ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_len = (vdso_pages + VVAR_NR_PAGES) << PAGE_SHIFT;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = vdso_base;
		goto end;
	}

	mm->context.vdso = NULL;
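	/*
	 * Map the vvar data page(s) read-only at the start of the area,
	 * then the vDSO code pages (read/exec) immediately after them.
	 */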
	ret = install_special_mapping(mm, vdso_base, VVAR_SIZE,
		(VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
	if (unlikely(ret))
		goto end;

	ret =
	   install_special_mapping(mm, vdso_base + VVAR_SIZE,
		vdso_pages << PAGE_SHIFT,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_pagelist);

	if (unlikely(ret))
		goto end;

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	mm->context.vdso = (void *)vdso_base + VVAR_SIZE;

end:
	mmap_write_unlock(mm);
	return ret;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
		return "[vdso]";
	if (vma->vm_mm && (vma->vm_start ==
			   (long)vma->vm_mm->context.vdso - VVAR_SIZE))
		return "[vdso_data]";
	return NULL;
}
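
For the v5.14.15 layout, another illustrative userspace sketch (again, not part of the file above): the data pages and the code pages are two adjacent special mappings, and the arch_vma_name() above names them "[vdso_data]" and "[vdso]" respectively, so both can be spotted in /proc/self/maps.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Scan /proc/self/maps for the VMAs named by arch_vma_name(). */
	FILE *maps = fopen("/proc/self/maps", "r");
	char line[256];

	if (!maps)
		return 1;
	while (fgets(line, sizeof(line), maps)) {
		if (strstr(line, "[vdso]") || strstr(line, "[vdso_data]"))
			fputs(line, stdout);
	}
	fclose(maps);
	return 0;
}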