v6.2: arch/x86/entry/vdso/vma.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/time_namespace.h>

#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <clocksource/hyperv_timer.h>

#undef _ASM_X86_VVAR_H
#define EMIT_VVAR(name, offset)	\
	const size_t name ## _offset = offset;
#include <asm/vvar.h>

struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page + _vdso_data_offset);
}
#undef EMIT_VVAR
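/*
 * Illustrative note (not part of the kernel source): the block above is an
 * X-macro trick. <asm/vvar.h> describes each vvar via EMIT_VVAR(name, offset);
 * dropping the include guard and redefining EMIT_VVAR makes the re-include
 * expand every entry into an offset constant. Assuming vvar.h carries an
 * entry equivalent to EMIT_VVAR(_vdso_data, 128), the re-include emits:
 *
 *	const size_t _vdso_data_offset = 128;
 *
 * which is the constant arch_get_vdso_data() adds to the vvar page base.
 */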

unsigned int vclocks_used __read_mostly;

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}
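/*
 * Illustrative note: apply_alternatives() rewrites the alternative
 * instruction sites recorded in the image's alternatives section
 * (image->alt .. image->alt + image->alt_len within the blob), so the
 * vdso text handed to userspace is already patched for the boot CPU's
 * feature set.
 */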

static const struct vm_special_mapping vvar_mapping;
struct linux_binprm;

static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}
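/*
 * Illustrative note: vdso text pages are not copied per process; each fault
 * resolves vmf->pgoff (the page offset within the [vdso] VMA) to the matching
 * page of the in-kernel image blob and takes a reference on it. Since the
 * mapping is installed with VM_MAYWRITE (see map_vdso() below), a debugger
 * writing a breakpoint triggers an ordinary COW of only the touched page.
 */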

static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix up the userspace landing address; see do_fast_syscall_32() */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	const struct vdso_image *image = current->mm->context.vdso_image;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}
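/*
 * Illustrative note: the mremap hook exists so tools like CRIU can relocate
 * the vdso of a restored task. A sketch of the userspace side, assuming
 * old_addr/new_addr point at the current and desired [vdso] locations:
 *
 *	mremap(old_addr, vdso_size, vdso_size,
 *	       MREMAP_MAYMOVE | MREMAP_FIXED, new_addr);
 *
 * The hook then updates mm->context.vdso and, for the 32-bit vdso, fixes a
 * return IP that still points into the old int80 landing pad.
 */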
#ifdef CONFIG_TIME_NS
/*
 * The vvar page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the VVAR
 * page tables are cleared and then they will be re-faulted with a
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, &vvar_mapping))
			zap_page_range(vma, vma->vm_start, size);
	}
	mmap_read_unlock(mm);

	return 0;
}
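/*
 * Illustrative note: zapping only clears the PTEs of the [vvar] mapping; no
 * VMA is unmapped. The next vvar access inside the new time namespace goes
 * through vvar_fault() again, which now finds a timens page and installs the
 * namespace-specific layout.
 */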
#endif

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	unsigned long pfn;
	long sym_offset;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;

		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the sym_vvar_page offset and
		 * the real VVAR page is mapped with the sym_timens_page
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (timens_page) {
			unsigned long addr;
			vm_fault_t err;

			/*
			 * Optimization: inside a time namespace, pre-fault
			 * the real VVAR page too. The timens page carries
			 * only per-clock offsets relative to VVAR, so the
			 * VVAR page will be faulted in shortly by the vDSO
			 * code anyway.
			 */
			addr = vmf->address + (image->sym_timens_page - sym_offset);
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;

			pfn = page_to_pfn(timens_page);
		}

		return vmf_insert_pfn(vma, vmf->address, pfn);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();
		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) {
			return vmf_insert_pfn_prot(vma, vmf->address,
					__pa(pvti) >> PAGE_SHIFT,
					pgprot_decrypted(vma->vm_page_prot));
		}
	} else if (sym_offset == image->sym_hvclock_page) {
		pfn = hv_get_tsc_pfn();

		if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
			return vmf_insert_pfn(vma, vmf->address, pfn);
	} else if (sym_offset == image->sym_timens_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		if (!timens_page)
			return VM_FAULT_SIGBUS;

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
		return vmf_insert_pfn(vma, vmf->address, pfn);
	}

	return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};
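/*
 * Illustrative layout recap (the exact offsets come from the vdso linker
 * script; the page ordering below is an assumption for illustration).
 * sym_vvar_start is negative, so the vvar area sits directly below the text:
 *
 *	text_start + sym_vvar_start	[vvar]	vdso_data (__vvar_page)
 *	      ...			[vvar]	pvclock page (KVM paravirt clock)
 *	      ...			[vvar]	hvclock page (Hyper-V TSC page)
 *	      ...			[vvar]	timens page (time namespaces only)
 *	text_start			[vdso]	image->data, image->size bytes
 */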

/*
 * Add vdso and vvar mappings to current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to map at free addr)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	mmap_write_unlock(mm);
	return ret;
}
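/*
 * Worked example with illustrative numbers: for image->size == 8 KiB and
 * image->sym_vvar_start == -16 KiB, get_unmapped_area() is asked for
 * 8 KiB - (-16 KiB) = 24 KiB. The [vvar] mapping covers the first
 * -sym_vvar_start = 16 KiB at 'addr', and the [vdso] text is installed at
 * text_start = addr - sym_vvar_start = addr + 16 KiB.
 */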

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}
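/*
 * Worked example with illustrative numbers: start = 0x7ffd10002000 (already
 * page aligned), len = 0x5000. start + len rounds up to the next 2 MiB
 * boundary, 0x7ffd10200000; subtracting len gives end = 0x7ffd101fb000.
 * get_random_u32_below() then picks one of ((end - start) >> PAGE_SHIFT) + 1
 * = 506 page-granular slots between start and end inclusive.
 */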

static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif

int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_write_lock(mm);
	/*
	 * Check if we have already mapped the vdso blob - fail to prevent
	 * userspace from abusing install_special_mapping(), which may
	 * not do accounting and rlimits right.
	 * We could search the VMAs near context.vdso, but it's a slowpath,
	 * so let's explicitly check all VMAs to be completely sure.
	 */
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			mmap_write_unlock(mm);
			return -EEXIST;
		}
	}
	mmap_write_unlock(mm);

	return map_vdso(image, addr);
}
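/*
 * Illustrative note: besides the exec path, this is reachable from userspace
 * via arch_prctl() with ARCH_MAP_VDSO_32/ARCH_MAP_VDSO_64 (and
 * ARCH_MAP_VDSO_X32 where that ABI exists), which CRIU uses to re-insert a
 * vdso blob at a chosen address during restore; hence the -EEXIST guard.
 */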

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp, bool x32)
{
#ifdef CONFIG_X86_X32_ABI
	if (x32) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
{
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
	const struct vdso_image *image = current->mm->context.vdso_image;
	unsigned long vdso = (unsigned long) current->mm->context.vdso;

	if (in_ia32_syscall() && image == &vdso_image_32) {
		if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad ||
		    regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad)
			return true;
	}
#endif
	return false;
}

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("vdso=", vdso_setup);
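/*
 * Illustrative note: "vdso=" parses the classic boot parameter, so booting
 * with vdso=0 disables the 64-bit vdso mapping and vdso=1 (the default)
 * keeps it enabled.
 */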

static int __init init_vdso(void)
{
	BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32);

	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */
v5.9: arch/x86/entry/vdso/vma.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/time_namespace.h>

#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <clocksource/hyperv_timer.h>

#undef _ASM_X86_VVAR_H
#define EMIT_VVAR(name, offset)	\
	const size_t name ## _offset = offset;
#include <asm/vvar.h>

struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page + _vdso_data_offset);
}
#undef EMIT_VVAR

unsigned int vclocks_used __read_mostly;

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

static const struct vm_special_mapping vvar_mapping;
struct linux_binprm;

static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix up the userspace landing address; see do_fast_syscall_32() */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

static int vvar_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	const struct vdso_image *image = new_vma->vm_mm->context.vdso_image;
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;

	if (new_size != -image->sym_vvar_start)
		return -EINVAL;

	return 0;
}
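/*
 * Illustrative note: these hooks reject any mremap() that would resize the
 * [vdso] or [vvar] mapping; only a same-size move is allowed. Later kernels
 * (see the v6.2 listing above) dropped the size checks after the generic
 * special-mapping code started forbidding splits and resizes itself.
 */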

#ifdef CONFIG_TIME_NS
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect the .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops().
	 * For more details see check_vma_flags() and __access_remote_vm().
	 */

	WARN(1, "vvar_page accessed remotely");

	return NULL;
}

/*
 * The vvar page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the VVAR
 * page tables are cleared and then they will be re-faulted with a
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, &vvar_mapping))
			zap_page_range(vma, vma->vm_start, size);
	}

	mmap_read_unlock(mm);
	return 0;
}
#else
static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	unsigned long pfn;
	long sym_offset;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;

		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the sym_vvar_page offset and
		 * the real VVAR page is mapped with the sym_timens_page
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (timens_page) {
			unsigned long addr;
			vm_fault_t err;

			/*
			 * Optimization: inside a time namespace, pre-fault
			 * the real VVAR page too. The timens page carries
			 * only per-clock offsets relative to VVAR, so the
			 * VVAR page will be faulted in shortly by the vDSO
			 * code anyway.
			 */
			addr = vmf->address + (image->sym_timens_page - sym_offset);
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;

			pfn = page_to_pfn(timens_page);
		}

		return vmf_insert_pfn(vma, vmf->address, pfn);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();
		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) {
			return vmf_insert_pfn_prot(vma, vmf->address,
					__pa(pvti) >> PAGE_SHIFT,
					pgprot_decrypted(vma->vm_page_prot));
		}
	} else if (sym_offset == image->sym_hvclock_page) {
		struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();

		if (tsc_pg && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
			return vmf_insert_pfn(vma, vmf->address,
					virt_to_phys(tsc_pg) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_timens_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		if (!timens_page)
			return VM_FAULT_SIGBUS;

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
		return vmf_insert_pfn(vma, vmf->address, pfn);
	}

	return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
	.mremap = vvar_mremap,
};

/*
 * Add vdso and vvar mappings to current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to map at free addr)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	mmap_write_unlock(mm);
	return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}
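/*
 * Illustrative note: get_random_int() % n is slightly biased whenever n is
 * not a power of two. The v6.2 listing above uses get_random_u32_below(n)
 * instead, which draws a uniformly distributed value in [0, n).
 */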

static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif

int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	mmap_write_lock(mm);
	/*
	 * Check if we have already mapped the vdso blob - fail to prevent
	 * userspace from abusing install_special_mapping(), which may
	 * not do accounting and rlimits right.
	 * We could search the VMAs near context.vdso, but it's a slowpath,
	 * so let's explicitly check all VMAs to be completely sure.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			mmap_write_unlock(mm);
			return -EEXIST;
		}
	}
	mmap_write_unlock(mm);

	return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
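/*
 * Illustrative note: here the x32 case is detected via the TIF_X32 thread
 * flag; later kernels (see the v6.2 listing above) pass an explicit 'x32'
 * bool from the ELF loader instead.
 */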
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);

static int __init init_vdso(void)
{
	BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32);

	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */