v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>

#include <asm/page.h>
#include <asm/vdso.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>
#include <generated/vdso-offsets.h>

extern char vdso_start[], vdso_end[];

/* Kernel-provided data used by the VDSO. */
static union {
	u8 page[PAGE_SIZE];
	struct vdso_data data[CS_BASES];
} generic_vdso_data __page_aligned_data;

static union {
	u8 page[LOONGARCH_VDSO_DATA_SIZE];
	struct loongarch_vdso_data vdata;
} loongarch_vdso_data __page_aligned_data;

static struct page *vdso_pages[] = { NULL };
struct vdso_data *vdso_data = generic_vdso_data.data;
struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;

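/*
 * Keep the cached vDSO base in sync when userspace moves the mapping
 * with mremap() (checkpoint/restore tools such as CRIU do this), so
 * the sigreturn trampoline can still be found.
 */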
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)(new_vma->vm_start);

	return 0;
}

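/*
 * Fault handler for the [vvar] mapping: resolve the faulting offset to
 * the generic vDSO data page, the time-namespace page, or one of the
 * LoongArch-specific data pages.
 */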
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn;
	struct page *timens_page = find_timens_vvar_page(vma);

	switch (vmf->pgoff) {
	case VVAR_GENERIC_PAGE_OFFSET:
		if (!timens_page)
			pfn = sym_to_pfn(vdso_data);
		else
			pfn = page_to_pfn(timens_page);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace specific
		 * VVAR is mapped with the VVAR_GENERIC_PAGE_OFFSET and the real
		 * VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	case VVAR_LOONGARCH_PAGES_START ... VVAR_LOONGARCH_PAGES_END:
		pfn = sym_to_pfn(&loongarch_vdso_data) + vmf->pgoff - VVAR_LOONGARCH_PAGES_START;
		break;
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

struct loongarch_vdso_info vdso_info = {
	.vdso = vdso_start,
	.size = PAGE_SIZE,
	.code_mapping = {
		.name = "[vdso]",
		.pages = vdso_pages,
		.mremap = vdso_mremap,
	},
	.data_mapping = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	.offset_sigreturn = vdso_offset_sigreturn,
};

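/*
 * Early setup: record each possible CPU's NUMA node for the vDSO
 * getcpu() fast path and populate the struct page array backing the
 * vDSO code mapping.
 */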
static int __init init_vdso(void)
{
	unsigned long i, cpu, pfn;

	BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));
	BUG_ON(!PAGE_ALIGNED(vdso_info.size));

	for_each_possible_cpu(cpu)
		vdso_pdata[cpu].node = cpu_to_node(cpu);

	pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
	for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
		vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);

	return 0;
}
subsys_initcall(init_vdso);

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a
 * task changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vdso_info.data_mapping))
			zap_vma_pages(vma);
	}
	mmap_read_unlock(mm);

	return 0;
}
#endif

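/*
 * Place the vDSO area just above the stack, with a page-aligned random
 * offset of up to VDSO_RANDOMIZE_SIZE when ASLR is enabled.
 */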
static unsigned long vdso_base(void)
{
	unsigned long base = STACK_TOP;

	if (current->flags & PF_RANDOMIZE) {
		base += get_random_u32_below(VDSO_RANDOMIZE_SIZE);
		base = PAGE_ALIGN(base);
	}

	return base;
}

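/*
 * Called from the ELF loader to map the [vvar] data pages followed by
 * the [vdso] code into a new process image.
 */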
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int ret;
	unsigned long size, data_addr, vdso_addr;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct loongarch_vdso_info *info = current->thread.vdso;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/*
	 * Determine the total area size: the [vvar] data pages plus the
	 * vDSO code itself.
	 */
	size = VVAR_SIZE + info->size;

	data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
	if (IS_ERR_VALUE(data_addr)) {
		ret = data_addr;
		goto out;
	}

	vma = _install_special_mapping(mm, data_addr, VVAR_SIZE,
				       VM_READ | VM_MAYREAD | VM_PFNMAP,
				       &info->data_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	vdso_addr = data_addr + VVAR_SIZE;
	vma = _install_special_mapping(mm, vdso_addr, info->size,
				       VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       &info->code_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	mm->context.vdso = (void *)vdso_addr;
	ret = 0;

out:
	mmap_write_unlock(mm);
	return ret;
}
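
As an aside: the [vdso] mapping installed by arch_setup_additional_pages() is advertised to each new process through the AT_SYSINFO_EHDR auxiliary-vector entry, filled from mm->context.vdso. A minimal userspace sketch (not part of this kernel file; it relies only on the standard getauxval(3) and AT_SYSINFO_EHDR interfaces) to locate the mapping:

/* Sketch: print the base address of the vDSO the kernel mapped for us. */
#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* AT_SYSINFO_EHDR carries the address of the vDSO's ELF header. */
	unsigned long vdso_base = getauxval(AT_SYSINFO_EHDR);

	if (!vdso_base) {
		fprintf(stderr, "no vDSO advertised by the kernel\n");
		return 1;
	}

	/* The same region appears as [vdso] in /proc/self/maps. */
	printf("vDSO mapped at 0x%lx\n", vdso_base);
	return 0;
}
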
v6.9.4
// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>

#include <asm/page.h>
#include <asm/vdso.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>
#include <vdso/datapage.h>
#include <generated/vdso-offsets.h>

extern char vdso_start[], vdso_end[];

/* Kernel-provided data used by the VDSO. */
static union vdso_data_store generic_vdso_data __page_aligned_data;

static union {
	u8 page[LOONGARCH_VDSO_DATA_SIZE];
	struct loongarch_vdso_data vdata;
} loongarch_vdso_data __page_aligned_data;

static struct page *vdso_pages[] = { NULL };
struct vdso_data *vdso_data = generic_vdso_data.data;
struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;

(The remainder of the file, from vdso_mremap() through arch_setup_additional_pages(), is unchanged from v6.8; see the full listing above.)
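
The only functional difference from v6.8 is in the include list and the generic_vdso_data declaration: the open-coded page/data union was replaced by the generic union vdso_data_store from <vdso/datapage.h>, introduced for all architectures in the v6.9 cycle. A sketch of that union's shape (simplified from memory; include/vdso/datapage.h is authoritative):

/* Generic vDSO data store, roughly as defined in include/vdso/datapage.h (v6.9). */
union vdso_data_store {
	struct vdso_data	data[CS_BASES];
	u8			page[1U << CONFIG_PAGE_SHIFT];
};

Because the union provides the same data[] member, the assignment vdso_data = generic_vdso_data.data above is unchanged from v6.8.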