// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>

#include <asm/page.h>
#include <asm/vdso.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>
#include <generated/vdso-offsets.h>

extern char vdso_start[], vdso_end[];

/* Kernel-provided data used by the VDSO. */
static union {
	u8 page[PAGE_SIZE];
	struct vdso_data data[CS_BASES];
} generic_vdso_data __page_aligned_data;

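/* Architecture-specific vDSO data, including the per-CPU node set up in init_vdso(). */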
static union {
	u8 page[LOONGARCH_VDSO_DATA_SIZE];
	struct loongarch_vdso_data vdata;
} loongarch_vdso_data __page_aligned_data;

static struct page *vdso_pages[] = { NULL };
struct vdso_data *vdso_data = generic_vdso_data.data;
struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;

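/* Keep mm->context.vdso in sync when userspace moves the mapping with mremap(). */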
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)(new_vma->vm_start);

	return 0;
}

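/*
 * Demand fault handler for the [vvar] mapping: translate the page offset
 * within the mapping to the backing physical page (generic vdso data, the
 * time namespace page, or the LoongArch-specific data).
 */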
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn;
	struct page *timens_page = find_timens_vvar_page(vma);

	switch (vmf->pgoff) {
	case VVAR_GENERIC_PAGE_OFFSET:
		if (!timens_page)
			pfn = sym_to_pfn(vdso_data);
		else
			pfn = page_to_pfn(timens_page);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then the namespace-specific
		 * VVAR page is mapped at VVAR_GENERIC_PAGE_OFFSET and the real
		 * VVAR page is mapped at VVAR_TIMENS_PAGE_OFFSET.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	case VVAR_LOONGARCH_PAGES_START ... VVAR_LOONGARCH_PAGES_END:
		pfn = sym_to_pfn(&loongarch_vdso_data) + vmf->pgoff - VVAR_LOONGARCH_PAGES_START;
		break;
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

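/*
 * Describes the vDSO image: its code pages, the two special mappings,
 * and the offset of the sigreturn trampoline.
 */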
struct loongarch_vdso_info vdso_info = {
	.vdso = vdso_start,
	.size = PAGE_SIZE,
	.code_mapping = {
		.name = "[vdso]",
		.pages = vdso_pages,
		.mremap = vdso_mremap,
	},
	.data_mapping = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	.offset_sigreturn = vdso_offset_sigreturn,
};

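/*
 * One-time setup: record each possible CPU's NUMA node for the vDSO and
 * fill in the page array backing the vDSO code mapping.
 */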
static int __init init_vdso(void)
{
	unsigned long i, cpu, pfn;

	BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));
	BUG_ON(!PAGE_ALIGNED(vdso_info.size));

	for_each_possible_cpu(cpu)
		vdso_pdata[cpu].node = cpu_to_node(cpu);

	pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
	for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
		vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);

	return 0;
}
subsys_initcall(init_vdso);

#ifdef CONFIG_TIME_NS
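/* Lets the generic time namespace code find the vdso_data inside a vvar page. */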
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a
 * task changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vdso_info.data_mapping))
			zap_vma_pages(vma);
	}
	mmap_read_unlock(mm);

	return 0;
}
#endif /* CONFIG_TIME_NS */

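/*
 * Choose the base address for the vDSO area: STACK_TOP, pushed up by a
 * random page-aligned offset when address space randomization is enabled.
 */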
static unsigned long vdso_base(void)
{
	unsigned long base = STACK_TOP;

	if (current->flags & PF_RANDOMIZE) {
		base += get_random_u32_below(VDSO_RANDOMIZE_SIZE);
		base = PAGE_ALIGN(base);
	}

	return base;
}

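/*
 * Called from the ELF loader at exec time: map the vvar data pages and,
 * right above them, the vDSO code into the new address space.
 */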
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int ret;
	unsigned long size, data_addr, vdso_addr;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct loongarch_vdso_info *info = current->thread.vdso;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/*
	 * Determine the total area size: the vvar data pages followed by
	 * the vDSO code pages.
	 */
	size = VVAR_SIZE + info->size;

	data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
	if (IS_ERR_VALUE(data_addr)) {
		ret = data_addr;
		goto out;
	}

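	/* Map the data pages read-only; they are populated on demand by vvar_fault(). */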
	vma = _install_special_mapping(mm, data_addr, VVAR_SIZE,
				       VM_READ | VM_MAYREAD | VM_PFNMAP,
				       &info->data_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

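	/* Map the vDSO code executable, immediately above the data pages. */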
	vdso_addr = data_addr + VVAR_SIZE;
	vma = _install_special_mapping(mm, vdso_addr, info->size,
				       VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       &info->code_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	mm->context.vdso = (void *)vdso_addr;
	ret = 0;

out:
	mmap_write_unlock(mm);
	return ret;
}