arch/s390/kernel/vdso.c
v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/time_namespace.h>
#include <linux/random.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>

extern char vdso64_start[], vdso64_end[];
extern char vdso32_start[], vdso32_end[];

static struct vm_special_mapping vvar_mapping;

static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;

struct vdso_data *vdso_data = vdso_data_store.data;

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The VVAR page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the VVAR
 * page tables are cleared and then they will be re-faulted with a
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (!vma_is_special_mapping(vma, &vvar_mapping))
			continue;
		zap_page_range(vma, vma->vm_start, size);
		break;
	}
	mmap_read_unlock(mm);
	return 0;
}
#endif

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long addr, pfn;
	vm_fault_t err;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		pfn = virt_to_pfn(vdso_data);
		if (timens_page) {
			/*
			 * Fault in VVAR page too, since it will be accessed
			 * to get clock data anyway.
			 */
			addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;
			pfn = page_to_pfn(timens_page);
		}
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = virt_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}
	return vmf_insert_pfn(vma, vmf->address, pfn);
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

static struct vm_special_mapping vdso64_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

static struct vm_special_mapping vdso32_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

int vdso_getcpu_init(void)
{
	set_tod_programmable_field(smp_processor_id());
	return 0;
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */

static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
{
	unsigned long vvar_start, vdso_text_start, vdso_text_len;
	struct vm_special_mapping *vdso_mapping;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (is_compat_task()) {
		vdso_text_len = vdso32_end - vdso32_start;
		vdso_mapping = &vdso32_mapping;
	} else {
		vdso_text_len = vdso64_end - vdso64_start;
		vdso_mapping = &vdso64_mapping;
	}
	vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
	rc = vvar_start;
	if (IS_ERR_VALUE(vvar_start))
		goto out;
	vma = _install_special_mapping(mm, vvar_start, VVAR_NR_PAGES*PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);
	rc = PTR_ERR(vma);
	if (IS_ERR(vma))
		goto out;
	vdso_text_start = vvar_start + VVAR_NR_PAGES * PAGE_SIZE;
	/* VM_MAYWRITE for COW so gdb can set breakpoints */
	vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		do_munmap(mm, vvar_start, PAGE_SIZE, NULL);
		rc = PTR_ERR(vma);
	} else {
		current->mm->context.vdso_base = vdso_text_start;
		rc = 0;
	}
out:
	mmap_write_unlock(mm);
	return rc;
}

static unsigned long vdso_addr(unsigned long start, unsigned long len)
{
	unsigned long addr, end, offset;

	/*
	 * Round up the start address. It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= VDSO_BASE)
		end = VDSO_BASE;
	end -= len;

	if (end > start) {
		offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}
	return addr;
}

unsigned long vdso_size(void)
{
	unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;

	if (is_compat_task())
		size += vdso32_end - vdso32_start;
	else
		size += vdso64_end - vdso64_start;
	return PAGE_ALIGN(size);
}

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long addr = VDSO_BASE;
	unsigned long size = vdso_size();

	if (current->flags & PF_RANDOMIZE)
		addr = vdso_addr(current->mm->start_stack + PAGE_SIZE, size);
	return map_vdso(addr, size);
}

static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int pages = (end - start) >> PAGE_SHIFT;
	struct page **pagelist;
	int i;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);
	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
	return pagelist;
}

static int __init vdso_init(void)
{
	vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
	if (IS_ENABLED(CONFIG_COMPAT))
		vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
	return 0;
}
arch_initcall(vdso_init);
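
Aside (not part of the kernel source): the result of arch_setup_additional_pages() above is visible from userspace as two adjacent special mappings, "[vvar]" and "[vdso]", in /proc/self/maps. A minimal sketch that prints them for the current process; with randomization in effect (PF_RANDOMIZE set, the usual default), the base address chosen by vdso_addr() differs between runs:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *maps = fopen("/proc/self/maps", "r");

	if (!maps) {
		perror("fopen");
		return 1;
	}
	/* Print only the lines for the mappings map_vdso() installed. */
	while (fgets(line, sizeof(line), maps)) {
		if (strstr(line, "[vdso]") || strstr(line, "[vvar]"))
			fputs(line, stdout);
	}
	fclose(maps);
	return 0;
}
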
v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/time_namespace.h>
#include <linux/random.h>
#include <vdso/datapage.h>
#include <asm/vdso/vsyscall.h>
#include <asm/alternative.h>
#include <asm/vdso.h>

extern char vdso64_start[], vdso64_end[];
extern char vdso32_start[], vdso32_end[];

static struct vm_special_mapping vvar_mapping;

static union vdso_data_store vdso_data_store __page_aligned_data;

struct vdso_data *vdso_data = vdso_data_store.data;

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The VVAR page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the VVAR
 * page tables are cleared and then they will be re-faulted with a
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (!vma_is_special_mapping(vma, &vvar_mapping))
			continue;
		zap_vma_pages(vma);
		break;
	}
	mmap_read_unlock(mm);
	return 0;
}
#endif

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long addr, pfn;
	vm_fault_t err;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		pfn = virt_to_pfn(vdso_data);
		if (timens_page) {
			/*
			 * Fault in VVAR page too, since it will be accessed
			 * to get clock data anyway.
			 */
			addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;
			pfn = page_to_pfn(timens_page);
		}
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = virt_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}
	return vmf_insert_pfn(vma, vmf->address, pfn);
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

static struct vm_special_mapping vdso64_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

static struct vm_special_mapping vdso32_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

int vdso_getcpu_init(void)
{
	set_tod_programmable_field(smp_processor_id());
	return 0;
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */

static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
{
	unsigned long vvar_start, vdso_text_start, vdso_text_len;
	struct vm_special_mapping *vdso_mapping;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (is_compat_task()) {
		vdso_text_len = vdso32_end - vdso32_start;
		vdso_mapping = &vdso32_mapping;
	} else {
		vdso_text_len = vdso64_end - vdso64_start;
		vdso_mapping = &vdso64_mapping;
	}
	vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
	rc = vvar_start;
	if (IS_ERR_VALUE(vvar_start))
		goto out;
	vma = _install_special_mapping(mm, vvar_start, VVAR_NR_PAGES*PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);
	rc = PTR_ERR(vma);
	if (IS_ERR(vma))
		goto out;
	vdso_text_start = vvar_start + VVAR_NR_PAGES * PAGE_SIZE;
	/* VM_MAYWRITE for COW so gdb can set breakpoints */
	vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		do_munmap(mm, vvar_start, PAGE_SIZE, NULL);
		rc = PTR_ERR(vma);
	} else {
		current->mm->context.vdso_base = vdso_text_start;
		rc = 0;
	}
out:
	mmap_write_unlock(mm);
	return rc;
}

static unsigned long vdso_addr(unsigned long start, unsigned long len)
{
	unsigned long addr, end, offset;

	/*
	 * Round up the start address. It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= VDSO_BASE)
		end = VDSO_BASE;
	end -= len;

	if (end > start) {
		offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}
	return addr;
}

unsigned long vdso_text_size(void)
{
	unsigned long size;

	if (is_compat_task())
		size = vdso32_end - vdso32_start;
	else
		size = vdso64_end - vdso64_start;
	return PAGE_ALIGN(size);
}

unsigned long vdso_size(void)
{
	return vdso_text_size() + VVAR_NR_PAGES * PAGE_SIZE;
}

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long addr = VDSO_BASE;
	unsigned long size = vdso_size();

	if (current->flags & PF_RANDOMIZE)
		addr = vdso_addr(current->mm->start_stack + PAGE_SIZE, size);
	return map_vdso(addr, size);
}

static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int pages = (end - start) >> PAGE_SHIFT;
	struct page **pagelist;
	int i;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);
	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
	return pagelist;
}

static void vdso_apply_alternatives(void)
{
	const struct elf64_shdr *alt, *shdr;
	struct alt_instr *start, *end;
	const struct elf64_hdr *hdr;

	hdr = (struct elf64_hdr *)vdso64_start;
	shdr = (void *)hdr + hdr->e_shoff;
	alt = find_section(hdr, shdr, ".altinstructions");
	if (!alt)
		return;
	start = (void *)hdr + alt->sh_offset;
	end = (void *)hdr + alt->sh_offset + alt->sh_size;
	apply_alternatives(start, end);
}

static int __init vdso_init(void)
{
	vdso_apply_alternatives();
	vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
	if (IS_ENABLED(CONFIG_COMPAT))
		vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
	return 0;
}
arch_initcall(vdso_init);
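
Aside (not part of the kernel source): the address that map_vdso() stores in mm->context.vdso_base is handed to each new program through the ELF auxiliary vector as AT_SYSINFO_EHDR, where it points at the vDSO's ELF header. A minimal userspace sketch, assuming glibc's getauxval():

#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long base = getauxval(AT_SYSINFO_EHDR);
	const unsigned char *p = (const unsigned char *)base;

	if (!base) {
		fputs("no vDSO mapped\n", stderr);
		return 1;
	}
	/* The mapping starts with the ELF magic: 0x7f 'E' 'L' 'F'. */
	printf("vDSO ELF header at 0x%lx: %02x %c %c %c\n",
	       base, p[0], p[1], p[2], p[3]);
	return 0;
}

The C library parses this ELF image at startup to locate vDSO symbols, so hot calls such as clock_gettime() can run entirely in userspace instead of entering the kernel.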