Linux kernel source: arch/sh/kernel/vsyscall/vsyscall.c
(two revisions of the file follow, labelled v5.4 and v6.13.7)
v5.4
 1// SPDX-License-Identifier: GPL-2.0
 2/*
 3 * arch/sh/kernel/vsyscall/vsyscall.c
 4 *
 5 *  Copyright (C) 2006 Paul Mundt
 6 *
 7 * vDSO randomization
 8 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 9 */
10#include <linux/mm.h>
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/gfp.h>
14#include <linux/module.h>
15#include <linux/elf.h>
16#include <linux/sched.h>
17#include <linux/err.h>
18
19/*
20 * Should the kernel map a VDSO page into processes and pass its
21 * address down to glibc upon exec()?
22 */
23unsigned int __read_mostly vdso_enabled = 1;
24EXPORT_SYMBOL_GPL(vdso_enabled);
25
/*
 * Parse the "vdso=" kernel command-line option.  The value (any base
 * accepted by simple_strtoul()) is stored in vdso_enabled; 0 disables
 * the per-process vDSO mapping set up in arch_setup_additional_pages().
 */
 26static int __init vdso_setup(char *s)
 27{
 28	vdso_enabled = simple_strtoul(s, NULL, 0);
	/* Non-zero return tells the __setup() machinery the option was consumed. */
 29	return 1;
 30}
 31__setup("vdso=", vdso_setup);
32
33/*
34 * These symbols are defined by vsyscall.o to mark the bounds
35 * of the ELF DSO images included therein.
36 */
 37extern const char vsyscall_trapa_start, vsyscall_trapa_end;
 38static struct page *syscall_pages[1];




 39
/*
 * Boot-time setup: copy the vsyscall trampoline image delimited by the
 * vsyscall_trapa_{start,end} marker symbols into a freshly zeroed page.
 * syscall_pages[0] records that page so arch_setup_additional_pages()
 * can map it into each new process.
 */
 40int __init vsyscall_init(void)
 41{
 42	void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
	/*
	 * NOTE(review): the allocation is not checked for failure;
	 * presumably an init-time page allocation is assumed to succeed —
	 * a NULL return would fault in the memcpy() below.
	 */
 43	syscall_pages[0] = virt_to_page(syscall_page);
 44
 45	/*
 46	 * XXX: Map this page to a fixmap entry if we get around
 47	 * to adding the page to ELF core dumps
 48	 */
 49
	/* Copy the DSO image lying between the two marker symbols. */
 50	memcpy(syscall_page,
 51	       &vsyscall_trapa_start,
 52	       &vsyscall_trapa_end - &vsyscall_trapa_start);
 53
 54	return 0;
 55}
56
57/* Setup a VMA at program startup for the vsyscall page */
/*
 * Called at exec() time to set up a VMA containing the vsyscall page.
 * The chosen address is recorded in mm->context.vdso (see the comment
 * at the top of the file about passing it down to glibc) and is what
 * arch_vma_name() matches against to report "[vdso]".
 */
 58int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 59{
 60	struct mm_struct *mm = current->mm;

 61	unsigned long addr;
 62	int ret;
 63
	/* A fatal signal while waiting for the mmap semaphore aborts exec. */
 64	if (down_write_killable(&mm->mmap_sem))
 65		return -EINTR;
 66
	/* Let the kernel pick any free single-page slot. */
 67	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
 68	if (IS_ERR_VALUE(addr)) {
 69		ret = addr;
 70		goto up_fail;
 71	}
 72
	/* Read+exec mapping; the VM_MAY* bits keep later mprotect() possible. */
 73	ret = install_special_mapping(mm, addr, PAGE_SIZE,

 74				      VM_READ | VM_EXEC |
 75				      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
 76				      syscall_pages);
 77	if (unlikely(ret))

 78		goto up_fail;
 79
 80	current->mm->context.vdso = (void *)addr;

 81
	/* Success path falls through with ret == 0. */
 82up_fail:
 83	up_write(&mm->mmap_sem);
 84	return ret;
 85}
86
87const char *arch_vma_name(struct vm_area_struct *vma)
88{
89	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
90		return "[vdso]";
91
92	return NULL;
93}
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * arch/sh/kernel/vsyscall/vsyscall.c
  4 *
  5 *  Copyright (C) 2006 Paul Mundt
  6 *
  7 * vDSO randomization
  8 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
  9 */
 10#include <linux/mm.h>
 11#include <linux/kernel.h>
 12#include <linux/init.h>
 13#include <linux/gfp.h>
 14#include <linux/module.h>
 15#include <linux/elf.h>
 16#include <linux/sched.h>
 17#include <linux/err.h>
 18
 19/*
 20 * Should the kernel map a VDSO page into processes and pass its
 21 * address down to glibc upon exec()?
 22 */
 23unsigned int __read_mostly vdso_enabled = 1;
 24EXPORT_SYMBOL_GPL(vdso_enabled);
 25
/*
 * Parse the "vdso=" kernel command-line option.  The value (any base
 * accepted by simple_strtoul()) is stored in vdso_enabled; 0 disables
 * the per-process vDSO mapping set up in arch_setup_additional_pages().
 */
  26static int __init vdso_setup(char *s)
  27{
  28	vdso_enabled = simple_strtoul(s, NULL, 0);
	/* Non-zero return tells the __setup() machinery the option was consumed. */
  29	return 1;
  30}
  31__setup("vdso=", vdso_setup);
 32
 33/*
 34 * These symbols are defined by vsyscall.o to mark the bounds
 35 * of the ELF DSO images included therein.
 36 */
  37extern const char vsyscall_trapa_start, vsyscall_trapa_end;
  38static struct page *syscall_pages[1];
/*
 * Descriptor handed to _install_special_mapping(): gives the VMA its
 * "[vdso]" name and points at the page populated by vsyscall_init().
 */
  39static struct vm_special_mapping vdso_mapping = {
  40	.name = "[vdso]",
  41	.pages = syscall_pages,
  42};
 43
/*
 * Boot-time setup: copy the vsyscall trampoline image delimited by the
 * vsyscall_trapa_{start,end} marker symbols into a freshly zeroed page.
 * syscall_pages[0] records that page so arch_setup_additional_pages()
 * can map it into each new process.
 */
  44int __init vsyscall_init(void)
  45{
  46	void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
	/*
	 * NOTE(review): the allocation is not checked for failure;
	 * presumably an init-time page allocation is assumed to succeed —
	 * a NULL return would fault in the memcpy() below.
	 */
  47	syscall_pages[0] = virt_to_page(syscall_page);
  48
  49	/*
  50	 * XXX: Map this page to a fixmap entry if we get around
  51	 * to adding the page to ELF core dumps
  52	 */
  53
	/* Copy the DSO image lying between the two marker symbols. */
  54	memcpy(syscall_page,
  55	       &vsyscall_trapa_start,
  56	       &vsyscall_trapa_end - &vsyscall_trapa_start);
  57
  58	return 0;
  59}
 60
 61/* Setup a VMA at program startup for the vsyscall page */
/*
 * Called at exec() time to set up a VMA containing the vsyscall page.
 * The chosen address is recorded in mm->context.vdso (see the comment
 * at the top of the file about passing it down to glibc) and is what
 * arch_vma_name() matches against to report "[vdso]".
 */
  62int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
  63{
  64	struct mm_struct *mm = current->mm;
  65	struct vm_area_struct *vma;
  66	unsigned long addr;
  67	int ret;
  68
	/* A fatal signal while waiting for the mmap lock aborts exec. */
  69	if (mmap_write_lock_killable(mm))
  70		return -EINTR;
  71
	/* Let the kernel pick any free single-page slot. */
  72	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
  73	if (IS_ERR_VALUE(addr)) {
  74		ret = addr;
  75		goto up_fail;
  76	}
  77
	/*
	 * NOTE(review): redundant — vdso_mapping's static initializer
	 * already sets .pages = syscall_pages.
	 */
  78	vdso_mapping.pages = syscall_pages;
	/* Read+exec mapping; the VM_MAY* bits keep later mprotect() possible. */
  79	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
  80				      VM_READ | VM_EXEC |
  81				      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
  82				      &vdso_mapping);
	/* Taking PTR_ERR() of a valid pointer is harmless: ret is reset below. */
  83	ret = PTR_ERR(vma);
  84	if (IS_ERR(vma))
  85		goto up_fail;
  86
  87	current->mm->context.vdso = (void *)addr;
  88	ret = 0;
  89
  90up_fail:
  91	mmap_write_unlock(mm);
  92	return ret;
  93}
 94
 95const char *arch_vma_name(struct vm_area_struct *vma)
 96{
 97	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
 98		return "[vdso]";
 99
100	return NULL;
101}