v5.4
 1// SPDX-License-Identifier: GPL-2.0-only
 2/*
 3 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 4 *                    <benh@kernel.crashing.org>
 5 * Copyright (C) 2012 ARM Limited
 6 * Copyright (C) 2015 Regents of the University of California
 7 */
 8
 9#include <linux/elf.h>
10#include <linux/mm.h>
11#include <linux/slab.h>
12#include <linux/binfmts.h>
13#include <linux/err.h>
14
15#include <asm/vdso.h>
16
 
 
 
 
 
 
 
/* Start/end of the vDSO image, provided by the vDSO linker script. */
extern char vdso_start[], vdso_end[];

static unsigned int vdso_pages;		/* number of vDSO code pages */
static struct page **vdso_pagelist;	/* code pages + one trailing data page */

/*
 * The vDSO data page: a vdso_data structure padded to exactly one page
 * and placed in page-aligned data so it can be mapped into userspace.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
static struct vdso_data *vdso_data = &vdso_data_store.data;
30
31static int __init vdso_init(void)
32{
33	unsigned int i;
34
35	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
36	vdso_pagelist =
37		kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
38	if (unlikely(vdso_pagelist == NULL)) {
39		pr_err("vdso: pagelist allocation failed\n");
40		return -ENOMEM;
41	}
42
43	for (i = 0; i < vdso_pages; i++) {
44		struct page *pg;
45
46		pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
47		vdso_pagelist[i] = pg;
48	}
49	vdso_pagelist[i] = virt_to_page(vdso_data);
50
51	return 0;
52}
53arch_initcall(vdso_init);
54
55int arch_setup_additional_pages(struct linux_binprm *bprm,
56	int uses_interp)
57{
58	struct mm_struct *mm = current->mm;
59	unsigned long vdso_base, vdso_len;
60	int ret;
61
62	vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
 
 
 
 
 
63
64	down_write(&mm->mmap_sem);
65	vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
66	if (IS_ERR_VALUE(vdso_base)) {
67		ret = vdso_base;
68		goto end;
69	}
70
71	/*
72	 * Put vDSO base into mm struct. We need to do this before calling
73	 * install_special_mapping or the perf counter mmap tracking code
74	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
75	 */
76	mm->context.vdso = (void *)vdso_base;
77
78	ret = install_special_mapping(mm, vdso_base, vdso_len,
 
 
79		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
80		vdso_pagelist);
81
82	if (unlikely(ret))
83		mm->context.vdso = NULL;
 
 
 
 
 
 
 
84
85end:
86	up_write(&mm->mmap_sem);
87	return ret;
88}
89
90const char *arch_vma_name(struct vm_area_struct *vma)
91{
92	if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
93		return "[vdso]";
 
 
 
94	return NULL;
95}
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
  4 *                    <benh@kernel.crashing.org>
  5 * Copyright (C) 2012 ARM Limited
  6 * Copyright (C) 2015 Regents of the University of California
  7 */
  8
  9#include <linux/elf.h>
 10#include <linux/mm.h>
 11#include <linux/slab.h>
 12#include <linux/binfmts.h>
 13#include <linux/err.h>
 14#include <asm/page.h>
 15#include <asm/vdso.h>
 16
/*
 * With the generic time vsyscall, the vdso_data layout comes from the
 * common vDSO datapage header; otherwise an empty placeholder is used.
 */
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
#include <vdso/datapage.h>
#else
struct vdso_data {
};
#endif

/* Start/end of the vDSO image, provided by the vDSO linker script. */
extern char vdso_start[], vdso_end[];

/* Page offsets of the vvar pages mapped just below the vDSO code. */
enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

#define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)

static unsigned int vdso_pages __ro_after_init;		/* number of vDSO code pages */
static struct page **vdso_pagelist __ro_after_init;	/* code pages + vvar page(s) */

/*
 * The vDSO data page: a vdso_data structure padded to exactly one page
 * and placed in page-aligned data so it can be mapped into userspace.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;
 44
 45static int __init vdso_init(void)
 46{
 47	unsigned int i;
 48
 49	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
 50	vdso_pagelist =
 51		kcalloc(vdso_pages + VVAR_NR_PAGES, sizeof(struct page *), GFP_KERNEL);
 52	if (unlikely(vdso_pagelist == NULL)) {
 53		pr_err("vdso: pagelist allocation failed\n");
 54		return -ENOMEM;
 55	}
 56
 57	for (i = 0; i < vdso_pages; i++) {
 58		struct page *pg;
 59
 60		pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
 61		vdso_pagelist[i] = pg;
 62	}
 63	vdso_pagelist[i] = virt_to_page(vdso_data);
 64
 65	return 0;
 66}
 67arch_initcall(vdso_init);
 68
/*
 * Map the vDSO into the address space of a newly exec'd process: one
 * contiguous region containing the vvar data page(s) first, immediately
 * followed by the vDSO code pages.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm,
	int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base, vdso_len;
	int ret;

	/* The vvar page count must match what the vDSO image was built for. */
	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_len = (vdso_pages + VVAR_NR_PAGES) << PAGE_SHIFT;

	/* Taking the mmap lock may be interrupted by a fatal signal. */
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = vdso_base;
		goto end;
	}

	/* Clear first so a failed setup never leaves a stale pointer behind. */
	mm->context.vdso = NULL;
	/* vvar page(s): read-only data shared with the kernel, never executable. */
	ret = install_special_mapping(mm, vdso_base, VVAR_SIZE,
		(VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
	if (unlikely(ret))
		goto end;

	/* vDSO code pages, mapped directly after the vvar area. */
	ret =
	   install_special_mapping(mm, vdso_base + VVAR_SIZE,
		vdso_pages << PAGE_SHIFT,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_pagelist);

	if (unlikely(ret))
		goto end;

	/*
	 * Record the vDSO base (which points past the vvar area) in the mm
	 * so that perf mmap tracking and arch_vma_name() recognise the
	 * mapping.  NOTE(review): the original comment here claimed this
	 * must happen *before* install_special_mapping, but in this version
	 * it is deliberately set only after both mappings succeed.
	 */
	mm->context.vdso = (void *)vdso_base + VVAR_SIZE;

end:
	mmap_write_unlock(mm);
	return ret;
}
115
116const char *arch_vma_name(struct vm_area_struct *vma)
117{
118	if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
119		return "[vdso]";
120	if (vma->vm_mm && (vma->vm_start ==
121			   (long)vma->vm_mm->context.vdso - VVAR_SIZE))
122		return "[vdso_data]";
123	return NULL;
124}