Loading...
Note: File does not exist in v6.13.7.
1/*
2 * Set up the VMAs to tell the VM about the vDSO.
3 * Copyright 2007 Andi Kleen, SUSE Labs.
4 * Subject to the GPL, v.2
5 */
6#include <linux/mm.h>
7#include <linux/err.h>
8#include <linux/sched.h>
9#include <linux/slab.h>
10#include <linux/init.h>
11#include <linux/random.h>
12#include <linux/elf.h>
13#include <asm/vsyscall.h>
14#include <asm/vgtod.h>
15#include <asm/proto.h>
16#include <asm/vdso.h>
17#include <asm/page.h>
18
19unsigned int __read_mostly vdso_enabled = 1;
20
21extern char vdso_start[], vdso_end[];
22extern unsigned short vdso_sync_cpuid;
23
24extern struct page *vdso_pages[];
25static unsigned vdso_size;
26
27static void __init patch_vdso(void *vdso, size_t len)
28{
29 Elf64_Ehdr *hdr = vdso;
30 Elf64_Shdr *sechdrs, *alt_sec = 0;
31 char *secstrings;
32 void *alt_data;
33 int i;
34
35 BUG_ON(len < sizeof(Elf64_Ehdr));
36 BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
37
38 sechdrs = (void *)hdr + hdr->e_shoff;
39 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
40
41 for (i = 1; i < hdr->e_shnum; i++) {
42 Elf64_Shdr *shdr = &sechdrs[i];
43 if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
44 alt_sec = shdr;
45 goto found;
46 }
47 }
48
49 /* If we get here, it's probably a bug. */
50 pr_warning("patch_vdso: .altinstructions not found\n");
51 return; /* nothing to patch */
52
53found:
54 alt_data = (void *)hdr + alt_sec->sh_offset;
55 apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
56}
57
58static int __init init_vdso(void)
59{
60 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
61 int i;
62
63 patch_vdso(vdso_start, vdso_end - vdso_start);
64
65 vdso_size = npages << PAGE_SHIFT;
66 for (i = 0; i < npages; i++)
67 vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
68
69 return 0;
70}
71subsys_initcall(init_vdso);
72
73struct linux_binprm;
74
75/* Put the vdso above the (randomized) stack with another randomized offset.
76 This way there is no hole in the middle of address space.
77 To save memory make sure it is still in the same PTE as the stack top.
78 This doesn't give that many random bits */
79static unsigned long vdso_addr(unsigned long start, unsigned len)
80{
81 unsigned long addr, end;
82 unsigned offset;
83 end = (start + PMD_SIZE - 1) & PMD_MASK;
84 if (end >= TASK_SIZE_MAX)
85 end = TASK_SIZE_MAX;
86 end -= len;
87 /* This loses some more bits than a modulo, but is cheaper */
88 offset = get_random_int() & (PTRS_PER_PTE - 1);
89 addr = start + (offset << PAGE_SHIFT);
90 if (addr >= end)
91 addr = end;
92 return addr;
93}
94
95/* Setup a VMA at program startup for the vsyscall page.
96 Not called for compat tasks */
97int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
98{
99 struct mm_struct *mm = current->mm;
100 unsigned long addr;
101 int ret;
102
103 if (!vdso_enabled)
104 return 0;
105
106 down_write(&mm->mmap_sem);
107 addr = vdso_addr(mm->start_stack, vdso_size);
108 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
109 if (IS_ERR_VALUE(addr)) {
110 ret = addr;
111 goto up_fail;
112 }
113
114 current->mm->context.vdso = (void *)addr;
115
116 ret = install_special_mapping(mm, addr, vdso_size,
117 VM_READ|VM_EXEC|
118 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
119 VM_ALWAYSDUMP,
120 vdso_pages);
121 if (ret) {
122 current->mm->context.vdso = NULL;
123 goto up_fail;
124 }
125
126up_fail:
127 up_write(&mm->mmap_sem);
128 return ret;
129}
130
/*
 * "vdso=" boot parameter handler: vdso=0 disables mapping the vDSO into
 * new processes; any non-zero value (the default is 1) enables it.
 */
static __init int vdso_setup(char *s)
{
	/* NOTE(review): unparsable input yields 0 here, silently disabling
	   the vDSO — confirm that is the intended failure mode. */
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);