v3.1
/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>

unsigned int __read_mostly vdso_enabled = 1;

extern char vdso_start[], vdso_end[];
extern unsigned short vdso_sync_cpuid;

extern struct page *vdso_pages[];
static unsigned vdso_size;

static void __init patch_vdso(void *vdso, size_t len)
{
	Elf64_Ehdr *hdr = vdso;
	Elf64_Shdr *sechdrs, *alt_sec = 0;
	char *secstrings;
	void *alt_data;
	int i;

	BUG_ON(len < sizeof(Elf64_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (i = 1; i < hdr->e_shnum; i++) {
		Elf64_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			goto found;
		}
	}

	/* If we get here, it's probably a bug. */
	pr_warning("patch_vdso: .altinstructions not found\n");
	return;  /* nothing to patch */

found:
	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}

static int __init init_vdso(void)
{
	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;

	patch_vdso(vdso_start, vdso_end - vdso_start);

	vdso_size = npages << PAGE_SHIFT;
	for (i = 0; i < npages; i++)
		vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);

	return 0;
}
subsys_initcall(init_vdso);

struct linux_binprm;

/* Put the vdso above the (randomized) stack with another randomized offset.
   This way there is no hole in the middle of address space.
   To save memory make sure it is still in the same PTE as the stack top.
   This doesn't give that many random bits */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;
	return addr;
}

/* Setup a VMA at program startup for the vsyscall page.
   Not called for compat tasks */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	if (!vdso_enabled)
		return 0;

	down_write(&mm->mmap_sem);
	addr = vdso_addr(mm->start_stack, vdso_size);
	addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void *)addr;

	ret = install_special_mapping(mm, addr, vdso_size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				      VM_ALWAYSDUMP,
				      vdso_pages);
	if (ret) {
		current->mm->context.vdso = NULL;
		goto up_fail;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

static __init int vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
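In the v3.1 file above, arch_setup_additional_pages() picks a randomized address near the stack top, maps the vDSO pages there with install_special_mapping(), and records the base in mm->context.vdso; the ELF loader then passes that base to the new process in the AT_SYSINFO_EHDR auxiliary-vector entry. A minimal userspace sketch (not part of this kernel source; it assumes glibc 2.16 or later for getauxval()) that locates the mapping and checks the same ELF magic patch_vdso() asserts:

/* Userspace sketch: find the vDSO mapped by arch_setup_additional_pages(). */
#include <stdio.h>
#include <string.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
	/* The kernel publishes the vDSO base via the auxiliary vector. */
	unsigned long base = getauxval(AT_SYSINFO_EHDR);

	if (!base) {			/* e.g. booted with vdso=0 */
		puts("no vDSO mapped");
		return 1;
	}

	/* The mapping starts with an ELF header, as patch_vdso() asserts. */
	Elf64_Ehdr *hdr = (Elf64_Ehdr *)base;
	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0) {
		puts("no ELF magic at vDSO base");
		return 1;
	}

	printf("vDSO mapped at %#lx, %u section headers\n",
	       base, (unsigned)hdr->e_shnum);
	return 0;
}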
v3.15
/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso_enabled = 1;

DECLARE_VDSO_IMAGE(vdso);
extern unsigned short vdso_sync_cpuid;
static unsigned vdso_size;

#ifdef CONFIG_X86_X32_ABI
DECLARE_VDSO_IMAGE(vdsox32);
static unsigned vdsox32_size;
#endif
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_X86_X32_ABI) || \
	defined(CONFIG_COMPAT)
void __init patch_vdso32(void *vdso, size_t len)
{
	Elf32_Ehdr *hdr = vdso;
	Elf32_Shdr *sechdrs, *alt_sec = 0;
	char *secstrings;
	void *alt_data;
	int i;

	BUG_ON(len < sizeof(Elf32_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (i = 1; i < hdr->e_shnum; i++) {
		Elf32_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			goto found;
		}
	}

	/* If we get here, it's probably a bug. */
	pr_warning("patch_vdso32: .altinstructions not found\n");
	return;  /* nothing to patch */

found:
	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}
#endif

#if defined(CONFIG_X86_64)
static void __init patch_vdso64(void *vdso, size_t len)
{
	Elf64_Ehdr *hdr = vdso;
	Elf64_Shdr *sechdrs, *alt_sec = 0;
	char *secstrings;
	void *alt_data;
	int i;

	BUG_ON(len < sizeof(Elf64_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (i = 1; i < hdr->e_shnum; i++) {
		Elf64_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			goto found;
		}
	}

	/* If we get here, it's probably a bug. */
	pr_warning("patch_vdso64: .altinstructions not found\n");
	return;  /* nothing to patch */

found:
	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}

static int __init init_vdso(void)
{
	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;

	patch_vdso64(vdso_start, vdso_end - vdso_start);

	vdso_size = npages << PAGE_SHIFT;
	for (i = 0; i < npages; i++)
		vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);

#ifdef CONFIG_X86_X32_ABI
	patch_vdso32(vdsox32_start, vdsox32_end - vdsox32_start);
	npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
	vdsox32_size = npages << PAGE_SHIFT;
	for (i = 0; i < npages; i++)
		vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
#endif

	return 0;
}
subsys_initcall(init_vdso);

struct linux_binprm;

/* Put the vdso above the (randomized) stack with another randomized offset.
   This way there is no hole in the middle of address space.
   To save memory make sure it is still in the same PTE as the stack top.
   This doesn't give that many random bits */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;

	/*
	 * page-align it here so that get_unmapped_area doesn't
	 * align it wrongfully again to the next page. addr can come in 4K
	 * unaligned here as a result of stack start randomization.
	 */
	addr = PAGE_ALIGN(addr);
	addr = align_vdso_addr(addr);

	return addr;
}
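/*
 * Illustrative note (not in the original source): with 4 KiB pages and
 * PTRS_PER_PTE == 512 on x86-64, the masked get_random_int() above yields a
 * page offset of 0..511, so the vDSO lands at most about 2 MiB above the
 * stack top and is clamped to stay below the next PMD boundary; that is
 * roughly 9 bits of randomness, as the comment before vdso_addr() warns.
 */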

/* Setup a VMA at program startup for the vsyscall page.
   Not called for compat tasks */
static int setup_additional_pages(struct linux_binprm *bprm,
				  int uses_interp,
				  struct page **pages,
				  unsigned size)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	if (!vdso_enabled)
		return 0;

	down_write(&mm->mmap_sem);
	addr = vdso_addr(mm->start_stack, size);
	addr = get_unmapped_area(NULL, addr, size, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void *)addr;

	ret = install_special_mapping(mm, addr, size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				      pages);
	if (ret) {
		current->mm->context.vdso = NULL;
		goto up_fail;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp, vdso_pages,
				      vdso_size);
}

#ifdef CONFIG_X86_X32_ABI
int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
				      vdsox32_size);
}
#endif

static __init int vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif
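In the v3.15 version the mapping logic is shared: setup_additional_pages() installs either the 64-bit image or, under CONFIG_X86_X32_ABI, the x32 image, and vdso_setup() lets vdso=0 on the kernel command line disable the mapping entirely. On x86-64 the resulting VMA is normally reported as "[vdso]" in /proc/<pid>/maps, so a quick userspace check (a sketch, not part of this file) is to scan that file:

/* Userspace sketch: look for the "[vdso]" entry created by install_special_mapping(). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *maps = fopen("/proc/self/maps", "r");
	char line[512];
	int found = 0;

	if (!maps) {
		perror("fopen /proc/self/maps");
		return 1;
	}

	while (fgets(line, sizeof(line), maps)) {
		if (strstr(line, "[vdso]")) {
			fputs(line, stdout);	/* prints the matching maps line */
			found = 1;
		}
	}
	fclose(maps);

	if (!found)
		puts("no [vdso] mapping (perhaps booted with vdso=0)");
	return 0;
}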