v6.9.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 */

/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/cacheflush.h>
#include <asm/spitfire.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>

unsigned int __read_mostly vdso_enabled = 1;

static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]"
};

#ifdef CONFIG_SPARC64
static struct vm_special_mapping vdso_mapping64 = {
	.name = "[vdso]"
};
#endif

#ifdef CONFIG_COMPAT
static struct vm_special_mapping vdso_mapping32 = {
	.name = "[vdso]"
};
#endif

struct vvar_data *vvar_data;

struct vdso_elfinfo32 {
	Elf32_Ehdr	*hdr;
	Elf32_Sym	*dynsym;
	unsigned long	dynsymsize;
	const char	*dynstr;
	unsigned long	text;
};

struct vdso_elfinfo64 {
	Elf64_Ehdr	*hdr;
	Elf64_Sym	*dynsym;
	unsigned long	dynsymsize;
	const char	*dynstr;
	unsigned long	text;
};

struct vdso_elfinfo {
	union {
		struct vdso_elfinfo32 elf32;
		struct vdso_elfinfo64 elf64;
	} u;
};

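/*
 * Return a pointer to the payload of the ELF section named @name in the
 * built-in vDSO image, and optionally its size.  Index 0 is the reserved
 * null section, so the scan starts at 1.
 */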
static void *one_section64(struct vdso_elfinfo64 *e, const char *name,
			   unsigned long *size)
{
	const char *snames;
	Elf64_Shdr *shdrs;
	unsigned int i;

	shdrs = (void *)e->hdr + e->hdr->e_shoff;
	snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset;
	for (i = 1; i < e->hdr->e_shnum; i++) {
		if (!strcmp(snames+shdrs[i].sh_name, name)) {
			if (size)
				*size = shdrs[i].sh_size;
			return (void *)e->hdr + shdrs[i].sh_offset;
		}
	}
	return NULL;
}

static int find_sections64(const struct vdso_image *image, struct vdso_elfinfo *_e)
{
	struct vdso_elfinfo64 *e = &_e->u.elf64;

	e->hdr = image->data;
	e->dynsym = one_section64(e, ".dynsym", &e->dynsymsize);
	e->dynstr = one_section64(e, ".dynstr", NULL);

	if (!e->dynsym || !e->dynstr) {
		pr_err("VDSO64: Missing symbol sections.\n");
		return -ENODEV;
	}
	return 0;
}

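/* Linear scan of .dynsym for the dynamic symbol named @name. */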
static Elf64_Sym *find_sym64(const struct vdso_elfinfo64 *e, const char *name)
{
	unsigned int i;

	for (i = 0; i < (e->dynsymsize / sizeof(Elf64_Sym)); i++) {
		Elf64_Sym *s = &e->dynsym[i];
		if (s->st_name == 0)
			continue;
		if (!strcmp(e->dynstr + s->st_name, name))
			return s;
	}
	return NULL;
}

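/*
 * Redirect @orig to the code behind @new by copying @new's symbol table
 * entry over @orig's, so that userspace lookups of @orig land on the
 * alternate implementation.
 */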
static int patchsym64(struct vdso_elfinfo *_e, const char *orig,
		      const char *new)
{
	struct vdso_elfinfo64 *e = &_e->u.elf64;
	Elf64_Sym *osym = find_sym64(e, orig);
	Elf64_Sym *nsym = find_sym64(e, new);

	if (!nsym || !osym) {
		pr_err("VDSO64: Missing symbols.\n");
		return -ENODEV;
	}
	osym->st_value = nsym->st_value;
	osym->st_size = nsym->st_size;
	osym->st_info = nsym->st_info;
	osym->st_other = nsym->st_other;
	osym->st_shndx = nsym->st_shndx;

	return 0;
}

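/* 32-bit (compat) counterparts of the ELF helpers above. */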
static void *one_section32(struct vdso_elfinfo32 *e, const char *name,
			   unsigned long *size)
{
	const char *snames;
	Elf32_Shdr *shdrs;
	unsigned int i;

	shdrs = (void *)e->hdr + e->hdr->e_shoff;
	snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset;
	for (i = 1; i < e->hdr->e_shnum; i++) {
		if (!strcmp(snames+shdrs[i].sh_name, name)) {
			if (size)
				*size = shdrs[i].sh_size;
			return (void *)e->hdr + shdrs[i].sh_offset;
		}
	}
	return NULL;
}

static int find_sections32(const struct vdso_image *image, struct vdso_elfinfo *_e)
{
	struct vdso_elfinfo32 *e = &_e->u.elf32;

	e->hdr = image->data;
	e->dynsym = one_section32(e, ".dynsym", &e->dynsymsize);
	e->dynstr = one_section32(e, ".dynstr", NULL);

	if (!e->dynsym || !e->dynstr) {
		pr_err("VDSO32: Missing symbol sections.\n");
		return -ENODEV;
	}
	return 0;
}

static Elf32_Sym *find_sym32(const struct vdso_elfinfo32 *e, const char *name)
{
	unsigned int i;

	for (i = 0; i < (e->dynsymsize / sizeof(Elf32_Sym)); i++) {
		Elf32_Sym *s = &e->dynsym[i];
		if (s->st_name == 0)
			continue;
		if (!strcmp(e->dynstr + s->st_name, name))
			return s;
	}
	return NULL;
}

static int patchsym32(struct vdso_elfinfo *_e, const char *orig,
		      const char *new)
{
	struct vdso_elfinfo32 *e = &_e->u.elf32;
	Elf32_Sym *osym = find_sym32(e, orig);
	Elf32_Sym *nsym = find_sym32(e, new);

	if (!nsym || !osym) {
		pr_err("VDSO32: Missing symbols.\n");
		return -ENODEV;
	}
	osym->st_value = nsym->st_value;
	osym->st_size = nsym->st_size;
	osym->st_info = nsym->st_info;
	osym->st_other = nsym->st_other;
	osym->st_shndx = nsym->st_shndx;

	return 0;
}

static int find_sections(const struct vdso_image *image, struct vdso_elfinfo *e,
			 bool elf64)
{
	if (elf64)
		return find_sections64(image, e);
	else
		return find_sections32(image, e);
}

static int patch_one_symbol(struct vdso_elfinfo *e, const char *orig,
			    const char *new_target, bool elf64)
{
	if (elf64)
		return patchsym64(e, orig, new_target);
	else
		return patchsym32(e, orig, new_target);
}

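/*
 * On CPUs that provide the STICK counter (anything newer than spitfire),
 * repoint __vdso_gettimeofday and __vdso_clock_gettime at their *_stick
 * variants before the vDSO text is copied into its pages.
 */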
static int stick_patch(const struct vdso_image *image, struct vdso_elfinfo *e, bool elf64)
{
	int err;

	err = find_sections(image, e, elf64);
	if (err)
		return err;

	err = patch_one_symbol(e,
			       "__vdso_gettimeofday",
			       "__vdso_gettimeofday_stick", elf64);
	if (err)
		return err;

	return patch_one_symbol(e,
				"__vdso_clock_gettime",
				"__vdso_clock_gettime_stick", elf64);
}

/*
 * Allocate pages for the vdso and vvar, and copy in the vdso text from the
 * kernel image.
 */
int __init init_vdso_image(const struct vdso_image *image,
			   struct vm_special_mapping *vdso_mapping, bool elf64)
{
	int cnpages = (image->size) / PAGE_SIZE;
	struct page *dp, **dpp = NULL;
	struct page *cp, **cpp = NULL;
	struct vdso_elfinfo ei;
	int i, dnpages = 0;

	if (tlb_type != spitfire) {
		int err = stick_patch(image, &ei, elf64);
		if (err)
			return err;
	}

	/*
	 * First, the vdso text.  This is initialized data, an integral number
	 * of pages long.
	 */
	if (WARN_ON(image->size % PAGE_SIZE != 0))
		goto oom;

	cpp = kcalloc(cnpages, sizeof(struct page *), GFP_KERNEL);
	vdso_mapping->pages = cpp;

	if (!cpp)
		goto oom;

	for (i = 0; i < cnpages; i++) {
		cp = alloc_page(GFP_KERNEL);
		if (!cp)
			goto oom;
		cpp[i] = cp;
		copy_page(page_address(cp), image->data + i * PAGE_SIZE);
	}

	/*
	 * Now the vvar page.  This is uninitialized data.
	 */

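	/* Allocated once; shared by the 64-bit and compat images. */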
	if (vvar_data == NULL) {
		dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1;
		if (WARN_ON(dnpages != 1))
			goto oom;
		dpp = kcalloc(dnpages, sizeof(struct page *), GFP_KERNEL);
		vvar_mapping.pages = dpp;

		if (!dpp)
			goto oom;

		dp = alloc_page(GFP_KERNEL);
		if (!dp)
			goto oom;

		dpp[0] = dp;
		vvar_data = page_address(dp);
		memset(vvar_data, 0, PAGE_SIZE);

		vvar_data->seq = 0;
	}

	return 0;
 oom:
	if (cpp != NULL) {
		for (i = 0; i < cnpages; i++) {
			if (cpp[i] != NULL)
				__free_page(cpp[i]);
		}
		kfree(cpp);
		vdso_mapping->pages = NULL;
	}

	if (dpp != NULL) {
		for (i = 0; i < dnpages; i++) {
			if (dpp[i] != NULL)
				__free_page(dpp[i]);
		}
		kfree(dpp);
		vvar_mapping.pages = NULL;
	}

	pr_warn("Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}

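/*
 * Build the built-in vDSO image(s) at boot: the 64-bit image and, with
 * CONFIG_COMPAT, the 32-bit compat image.
 */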
static int __init init_vdso(void)
{
	int err = 0;
#ifdef CONFIG_SPARC64
	err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64, true);
	if (err)
		return err;
#endif

#ifdef CONFIG_COMPAT
	err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32, false);
#endif
	return err;

}
subsys_initcall(init_vdso);

struct linux_binprm;

/* Shuffle the vdso up a bit, randomly. */
static unsigned long vdso_addr(unsigned long start, unsigned int len)
{
	unsigned int offset;

	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_u32_below(PTRS_PER_PTE);
	return start + (offset << PAGE_SHIFT);
}

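/*
 * Map the vvar data and the vdso text into the current process.
 * image->sym_vvar_start is negative, so @addr is the base of the combined
 * region and the vdso text begins -sym_vvar_start bytes above it.
 */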
static int map_vdso(const struct vdso_image *image,
		struct vm_special_mapping *vdso_mapping)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start, addr = 0;
	int ret = 0;

	mmap_write_lock(mm);

	/*
	 * First, get an unmapped region: then randomize it, and make sure that
	 * region is free.
	 */
	if (current->flags & PF_RANDOMIZE) {
		addr = get_unmapped_area(NULL, 0,
					 image->size - image->sym_vvar_start,
					 0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
		addr = vdso_addr(addr, image->size - image->sym_vvar_start);
	}
	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	mmap_write_unlock(mm);
	return ret;
}

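/*
 * Called by the ELF loader at exec time: map the vDSO into the new process
 * unless it has been disabled with vdso=0.
 */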
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{

	if (!vdso_enabled)
		return 0;

#if defined CONFIG_COMPAT
	if (!(is_32bit_task()))
		return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
	else
		return map_vdso(&vdso_image_32_builtin, &vdso_mapping32);
#else
	return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
#endif

}

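/* Parse the "vdso=" kernel command-line parameter; vdso=0 disables the vDSO. */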
static __init int vdso_setup(char *s)
{
	int err;
	unsigned long val;

	err = kstrtoul(s, 10, &val);
	if (!err)
		vdso_enabled = val;
	return 1;
}
__setup("vdso=", vdso_setup);