LoongArch vDSO setup, arch/loongarch/kernel/vdso.c as of v6.8:

// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>

#include <asm/page.h>
#include <asm/vdso.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>
#include <generated/vdso-offsets.h>

extern char vdso_start[], vdso_end[];

/* Kernel-provided data used by the VDSO. */
static union {
	u8 page[PAGE_SIZE];
	struct vdso_data data[CS_BASES];
} generic_vdso_data __page_aligned_data;

static union {
	u8 page[LOONGARCH_VDSO_DATA_SIZE];
	struct loongarch_vdso_data vdata;
} loongarch_vdso_data __page_aligned_data;

static struct page *vdso_pages[] = { NULL };
struct vdso_data *vdso_data = generic_vdso_data.data;
struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;

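/*
 * Keep the cached vDSO base in sync when userspace moves the mapping
 * with mremap(), so later lookups (e.g. locating the sigreturn
 * trampoline via offset_sigreturn) resolve against the new address.
 */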
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)(new_vma->vm_start);

	return 0;
}

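/*
 * Fault handler for the [vvar] mapping: data pages are not populated
 * up front but inserted as PFN mappings on first access, which also
 * lets time-namespace tasks see their own copy of the generic page.
 */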
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn;
	struct page *timens_page = find_timens_vvar_page(vma);

	switch (vmf->pgoff) {
	case VVAR_GENERIC_PAGE_OFFSET:
		if (!timens_page)
			pfn = sym_to_pfn(vdso_data);
		else
			pfn = page_to_pfn(timens_page);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace-specific
		 * VVAR page is mapped at VVAR_GENERIC_PAGE_OFFSET and the real
		 * VVAR page is mapped at the VVAR_TIMENS_PAGE_OFFSET offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	case VVAR_LOONGARCH_PAGES_START ... VVAR_LOONGARCH_PAGES_END:
		pfn = sym_to_pfn(&loongarch_vdso_data) + vmf->pgoff - VVAR_LOONGARCH_PAGES_START;
		break;
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

struct loongarch_vdso_info vdso_info = {
	.vdso = vdso_start,
	.size = PAGE_SIZE,
	.code_mapping = {
		.name = "[vdso]",
		.pages = vdso_pages,
		.mremap = vdso_mremap,
	},
	.data_mapping = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	.offset_sigreturn = vdso_offset_sigreturn,
};

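/*
 * Boot-time setup: record the NUMA node of every possible CPU in the
 * per-CPU vDSO data and fill in the page array backing the code mapping.
 */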
static int __init init_vdso(void)
{
	unsigned long i, cpu, pfn;

	BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));
	BUG_ON(!PAGE_ALIGNED(vdso_info.size));

	for_each_possible_cpu(cpu)
		vdso_pdata[cpu].node = cpu_to_node(cpu);

	pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
	for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
		vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);

	return 0;
}
subsys_initcall(init_vdso);

#ifdef CONFIG_TIME_NS
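/*
 * A vvar page handed to this helper holds the vdso_data directly,
 * so the conversion is just a cast.
 */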
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a
 * task changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vdso_info.data_mapping))
			zap_vma_pages(vma);
	}
	mmap_read_unlock(mm);

	return 0;
}
#endif

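/*
 * Pick the base address for the vDSO area: just above the stack top,
 * plus a page-aligned random offset when ASLR is in effect.
 */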
static unsigned long vdso_base(void)
{
	unsigned long base = STACK_TOP;

	if (current->flags & PF_RANDOMIZE) {
		base += get_random_u32_below(VDSO_RANDOMIZE_SIZE);
		base = PAGE_ALIGN(base);
	}

	return base;
}

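/*
 * Map the vDSO area at exec time: the [vvar] data pages come first,
 * immediately followed by the [vdso] code pages.
 */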
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int ret;
	unsigned long size, data_addr, vdso_addr;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct loongarch_vdso_info *info = current->thread.vdso;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/*
	 * Determine total area size. This includes the VDSO code pages
	 * and the vvar data pages.
	 */
	size = VVAR_SIZE + info->size;

	data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
	if (IS_ERR_VALUE(data_addr)) {
		ret = data_addr;
		goto out;
	}

	vma = _install_special_mapping(mm, data_addr, VVAR_SIZE,
				       VM_READ | VM_MAYREAD | VM_PFNMAP,
				       &info->data_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	vdso_addr = data_addr + VVAR_SIZE;
	vma = _install_special_mapping(mm, vdso_addr, info->size,
				       VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       &info->code_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	mm->context.vdso = (void *)vdso_addr;
	ret = 0;

out:
	mmap_write_unlock(mm);
	return ret;
}
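
Userspace never calls into this file directly: at exec time the kernel
publishes the [vdso] base through the ELF auxiliary vector, and libc
resolves symbols out of that mapping. As a minimal sketch (ordinary
userspace C, not part of this kernel source), a program can observe the
mapping that arch_setup_additional_pages() created:

#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
	/* AT_SYSINFO_EHDR holds the address of the [vdso] ELF image. */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso)
		printf("[vdso] mapped at 0x%lx\n", vdso);
	else
		puts("no vDSO reported in the auxiliary vector");
	return 0;
}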

For comparison, the same file as of v6.2, which mapped the vvar data
eagerly and predates the time-namespace support seen above:

// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>

#include <asm/page.h>
#include <asm/vdso.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>
#include <generated/vdso-offsets.h>

extern char vdso_start[], vdso_end[];

/* Kernel-provided data used by the VDSO. */
static union {
	u8 page[VDSO_DATA_SIZE];
	struct loongarch_vdso_data vdata;
} loongarch_vdso_data __page_aligned_data;

static struct page *vdso_pages[] = { NULL };
struct vdso_data *vdso_data = loongarch_vdso_data.vdata.data;
struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;

static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)(new_vma->vm_start);

	return 0;
}

struct loongarch_vdso_info vdso_info = {
	.vdso = vdso_start,
	.size = PAGE_SIZE,
	.code_mapping = {
		.name = "[vdso]",
		.pages = vdso_pages,
		.mremap = vdso_mremap,
	},
	.data_mapping = {
		.name = "[vvar]",
	},
	.offset_sigreturn = vdso_offset_sigreturn,
};

static int __init init_vdso(void)
{
	unsigned long i, cpu, pfn;

	BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));
	BUG_ON(!PAGE_ALIGNED(vdso_info.size));

	for_each_possible_cpu(cpu)
		vdso_pdata[cpu].node = cpu_to_node(cpu);

	pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
	for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
		vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);

	return 0;
}
subsys_initcall(init_vdso);

static unsigned long vdso_base(void)
{
	unsigned long base = STACK_TOP;

	if (current->flags & PF_RANDOMIZE) {
		base += get_random_u32_below(VDSO_RANDOMIZE_SIZE);
		base = PAGE_ALIGN(base);
	}

	return base;
}

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int ret;
	unsigned long vvar_size, size, data_addr, vdso_addr;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct loongarch_vdso_info *info = current->thread.vdso;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/*
	 * Determine total area size. This includes the vvar data pages
	 * and the VDSO code pages.
	 */
	vvar_size = VDSO_DATA_SIZE;
	size = vvar_size + info->size;

	data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
	if (IS_ERR_VALUE(data_addr)) {
		ret = data_addr;
		goto out;
	}
	vdso_addr = data_addr + VDSO_DATA_SIZE;

	vma = _install_special_mapping(mm, data_addr, vvar_size,
				       VM_READ | VM_MAYREAD,
				       &info->data_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/*
	 * Map the VDSO data page eagerly; unlike v6.8 above, there is
	 * no fault handler on the [vvar] mapping.
	 */
	ret = remap_pfn_range(vma, data_addr,
			      virt_to_phys(&loongarch_vdso_data) >> PAGE_SHIFT,
			      vvar_size, PAGE_READONLY);
	if (ret)
		goto out;

	/* Map VDSO code page. */
	vma = _install_special_mapping(mm, vdso_addr, info->size,
				       VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       &info->code_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	mm->context.vdso = (void *)vdso_addr;
	ret = 0;

out:
	mmap_write_unlock(mm);
	return ret;
}
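
In both versions the payoff is the same: once the [vdso] code and data
are mapped, hot paths such as clock_gettime() can run entirely in
userspace, reading the timekeeping data the kernel maintains in the
vvar area. A minimal sketch (plain userspace C, assuming a libc that
uses the vDSO, as glibc does on LoongArch):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* With a mapped vDSO, libc typically services this without a
	 * syscall, using the shared timekeeping data pages. */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}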