v4.6
  1/*
  2 * Copyright 2007 Andi Kleen, SUSE Labs.
  3 * Subject to the GPL, v.2
  4 *
  5 * This contains most of the x86 vDSO kernel-side code.
  6 */
  7#include <linux/mm.h>
  8#include <linux/err.h>
  9#include <linux/sched.h>
 10#include <linux/slab.h>
 11#include <linux/init.h>
 12#include <linux/random.h>
 13#include <linux/elf.h>
 14#include <linux/cpu.h>
 15#include <asm/pvclock.h>
 16#include <asm/vgtod.h>
 17#include <asm/proto.h>
 18#include <asm/vdso.h>
 19#include <asm/vvar.h>
 20#include <asm/page.h>
 21#include <asm/hpet.h>
 22#include <asm/desc.h>
 23#include <asm/cpufeature.h>
 24
 25#if defined(CONFIG_X86_64)
 26unsigned int __read_mostly vdso64_enabled = 1;
 27#endif
 28
 29void __init init_vdso_image(const struct vdso_image *image)
 30{
 31	BUG_ON(image->size % PAGE_SIZE != 0);
 32
 33	apply_alternatives((struct alt_instr *)(image->data + image->alt),
 34			   (struct alt_instr *)(image->data + image->alt +
 35						image->alt_len));
 36}
 37
 38struct linux_binprm;
 39
 40/*
 41 * Put the vdso above the (randomized) stack with another randomized
 42 * offset.  This way there is no hole in the middle of address space.
 43 * To save memory make sure it is still in the same PTE as the stack
 44 * top.  This doesn't give that many random bits.
 45 *
 46 * Note that this algorithm is imperfect: the distribution of the vdso
 47 * start address within a PMD is biased toward the end.
 48 *
 49 * Only used for the 64-bit and x32 vdsos.
 50 */
 51static unsigned long vdso_addr(unsigned long start, unsigned len)
 52{
 53#ifdef CONFIG_X86_32
 54	return 0;
 55#else
 56	unsigned long addr, end;
 57	unsigned offset;
 58
 59	/*
 60	 * Round up the start address.  It can start out unaligned as a result
 61	 * of stack start randomization.
 62	 */
 63	start = PAGE_ALIGN(start);
 64
 65	/* Round the lowest possible end address up to a PMD boundary. */
 66	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
 67	if (end >= TASK_SIZE_MAX)
 68		end = TASK_SIZE_MAX;
 69	end -= len;
 70
 71	if (end > start) {
 72		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
 73		addr = start + (offset << PAGE_SHIFT);
 74	} else {
 75		addr = start;
 76	}
 77
 78	/*
 79	 * Forcibly align the final address in case we have a hardware
 80	 * issue that requires alignment for performance reasons.
 81	 */
 82	addr = align_vdso_addr(addr);
 83
 84	return addr;
 85#endif
 86}
 87
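The placement policy described above can be observed from userspace: with ASLR enabled, the [vdso] mapping lands at a page-aligned, randomized address above the stack, and it moves between runs of the same binary. A minimal sketch that prints the relevant ranges from /proc/self/maps (standard C only; nothing kernel-specific is assumed beyond the proc file):

#include <stdio.h>
#include <string.h>

/* Print the [stack], [vvar] and [vdso] lines from /proc/self/maps so the
 * randomized placement chosen by vdso_addr() can be observed across runs. */
int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (strstr(line, "[vdso]") || strstr(line, "[vvar]") ||
		    strstr(line, "[stack]"))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}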
 88static int vdso_fault(const struct vm_special_mapping *sm,
 89		      struct vm_area_struct *vma, struct vm_fault *vmf)
 90{
 91	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
 92
 93	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
 94		return VM_FAULT_SIGBUS;
 95
 96	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
 97	get_page(vmf->page);
 98	return 0;
 99}
100
101static const struct vm_special_mapping text_mapping = {
102	.name = "[vdso]",
103	.fault = vdso_fault,
104};
105
106static int vvar_fault(const struct vm_special_mapping *sm,
107		      struct vm_area_struct *vma, struct vm_fault *vmf)
108{
109	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
110	long sym_offset;
111	int ret = -EFAULT;
112
113	if (!image)
114		return VM_FAULT_SIGBUS;
115
116	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
117		image->sym_vvar_start;
118
119	/*
120	 * Sanity check: a symbol offset of zero means that the page
121	 * does not exist for this vdso image, not that the page is at
122	 * offset zero relative to the text mapping.  This should be
123	 * impossible here, because sym_offset should only be zero for
124	 * the page past the end of the vvar mapping.
125	 */
126	if (sym_offset == 0)
127		return VM_FAULT_SIGBUS;
128
129	if (sym_offset == image->sym_vvar_page) {
130		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
131				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
132	} else if (sym_offset == image->sym_hpet_page) {
133#ifdef CONFIG_HPET_TIMER
134		if (hpet_address && vclock_was_used(VCLOCK_HPET)) {
135			ret = vm_insert_pfn_prot(
136				vma,
137				(unsigned long)vmf->virtual_address,
138				hpet_address >> PAGE_SHIFT,
139				pgprot_noncached(PAGE_READONLY));
140		}
141#endif
142	} else if (sym_offset == image->sym_pvclock_page) {
143		struct pvclock_vsyscall_time_info *pvti =
144			pvclock_pvti_cpu0_va();
145		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
146			ret = vm_insert_pfn(
147				vma,
148				(unsigned long)vmf->virtual_address,
149				__pa(pvti) >> PAGE_SHIFT);
150		}
151	}
152
153	if (ret == 0 || ret == -EBUSY)
154		return VM_FAULT_NOPAGE;
155
156	return VM_FAULT_SIGBUS;
157}
158
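The pages wired up in vvar_fault() (the vvar page, and where configured the HPET or pvclock pages) are what allow __vdso_clock_gettime() and friends to run entirely in userspace. A quick way to see this, as a sketch using only standard POSIX calls, is to run a tight clock_gettime() loop under strace: once glibc has resolved the vDSO symbol, no clock_gettime syscalls appear for the loop.

#include <stdio.h>
#include <time.h>

/* Hammer clock_gettime(); with a working vDSO (backed by the pages
 * faulted in above) strace shows no clock_gettime syscalls for the loop. */
int main(void)
{
	struct timespec ts;
	int i;

	for (i = 0; i < 1000000; i++)
		clock_gettime(CLOCK_MONOTONIC, &ts);
	printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}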
159static int map_vdso(const struct vdso_image *image, bool calculate_addr)
160{
161	struct mm_struct *mm = current->mm;
162	struct vm_area_struct *vma;
163	unsigned long addr, text_start;
164	int ret = 0;
165	static const struct vm_special_mapping vvar_mapping = {
166		.name = "[vvar]",
167		.fault = vvar_fault,
168	};
169
170	if (calculate_addr) {
171		addr = vdso_addr(current->mm->start_stack,
172				 image->size - image->sym_vvar_start);
173	} else {
174		addr = 0;
175	}
176
177	down_write(&mm->mmap_sem);
178
179	addr = get_unmapped_area(NULL, addr,
180				 image->size - image->sym_vvar_start, 0, 0);
181	if (IS_ERR_VALUE(addr)) {
182		ret = addr;
183		goto up_fail;
184	}
185
186	text_start = addr - image->sym_vvar_start;
187	current->mm->context.vdso = (void __user *)text_start;
188	current->mm->context.vdso_image = image;
189
190	/*
191	 * MAYWRITE to allow gdb to COW and set breakpoints
192	 */
193	vma = _install_special_mapping(mm,
194				       text_start,
195				       image->size,
196				       VM_READ|VM_EXEC|
197				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
198				       &text_mapping);
199
200	if (IS_ERR(vma)) {
201		ret = PTR_ERR(vma);
202		goto up_fail;
203	}
204
205	vma = _install_special_mapping(mm,
206				       addr,
207				       -image->sym_vvar_start,
208				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
209				       VM_PFNMAP,
210				       &vvar_mapping);
211
212	if (IS_ERR(vma)) {
213		ret = PTR_ERR(vma);
214		goto up_fail;
215	}
216
217up_fail:
218	if (ret)
219		current->mm->context.vdso = NULL;
220
221	up_write(&mm->mmap_sem);
222	return ret;
223}
224
225#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
226static int load_vdso32(void)
227{
228	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
229		return 0;
230
231	return map_vdso(&vdso_image_32, false);
232}
233#endif
234
235#ifdef CONFIG_X86_64
236int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
237{
238	if (!vdso64_enabled)
239		return 0;
240
241	return map_vdso(&vdso_image_64, true);
242}
243
244#ifdef CONFIG_COMPAT
245int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
246				       int uses_interp)
247{
248#ifdef CONFIG_X86_X32_ABI
249	if (test_thread_flag(TIF_X32)) {
250		if (!vdso64_enabled)
251			return 0;
252
253		return map_vdso(&vdso_image_x32, true);
254	}
255#endif
256#ifdef CONFIG_IA32_EMULATION
257	return load_vdso32();
258#else
259	return 0;
260#endif
261}
262#endif
263#else
264int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
265{
266	return load_vdso32();
267}
268#endif
269
270#ifdef CONFIG_X86_64
271static __init int vdso_setup(char *s)
272{
273	vdso64_enabled = simple_strtoul(s, NULL, 0);
274	return 0;
275}
276__setup("vdso=", vdso_setup);
277#endif
278
279#ifdef CONFIG_X86_64
280static void vgetcpu_cpu_init(void *arg)
281{
282	int cpu = smp_processor_id();
283	struct desc_struct d = { };
284	unsigned long node = 0;
285#ifdef CONFIG_NUMA
286	node = cpu_to_node(cpu);
287#endif
288	if (static_cpu_has(X86_FEATURE_RDTSCP))
289		write_rdtscp_aux((node << 12) | cpu);
290
291	/*
292	 * Store cpu number in limit so that it can be loaded
293	 * quickly in user space in vgetcpu. (12 bits for the CPU
294	 * and 8 bits for the node)
295	 */
296	d.limit0 = cpu | ((node & 0xf) << 12);
297	d.limit = node >> 4;
298	d.type = 5;		/* RO data, expand down, accessed */
299	d.dpl = 3;		/* Visible to user code */
300	d.s = 1;		/* Not a system segment */
301	d.p = 1;		/* Present */
302	d.d = 1;		/* 32-bit */
303
304	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
305}
306
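Because write_rdtscp_aux() above stores (node << 12) | cpu in the IA32_TSC_AUX MSR, userspace can recover both values with a single RDTSCP instruction. A sketch of the decode, mirroring the 12-bit CPU / upper-bits node split used in this function (assumes an x86_64 CPU that supports RDTSCP):

#include <stdio.h>

int main(void)
{
	unsigned int lo, hi, aux;

	/* RDTSCP returns the TSC in EDX:EAX and IA32_TSC_AUX in ECX. */
	__asm__ volatile("rdtscp" : "=a"(lo), "=d"(hi), "=c"(aux));
	printf("cpu=%u node=%u\n", aux & 0xfff, aux >> 12);
	return 0;
}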
307static int
308vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
309{
310	long cpu = (long)arg;
311
312	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
313		smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
314
315	return NOTIFY_DONE;
316}
317
318static int __init init_vdso(void)
319{
320	init_vdso_image(&vdso_image_64);
321
322#ifdef CONFIG_X86_X32_ABI
323	init_vdso_image(&vdso_image_x32);
324#endif
325
326	cpu_notifier_register_begin();
327
328	on_each_cpu(vgetcpu_cpu_init, NULL, 1);
329	/* notifier priority > KVM */
330	__hotcpu_notifier(vgetcpu_cpu_notifier, 30);
331
332	cpu_notifier_register_done();
333
334	return 0;
335}
336subsys_initcall(init_vdso);
337#endif /* CONFIG_X86_64 */
v4.10.11
  1/*
  2 * Copyright 2007 Andi Kleen, SUSE Labs.
  3 * Subject to the GPL, v.2
  4 *
  5 * This contains most of the x86 vDSO kernel-side code.
  6 */
  7#include <linux/mm.h>
  8#include <linux/err.h>
  9#include <linux/sched.h>
 10#include <linux/slab.h>
 11#include <linux/init.h>
 12#include <linux/random.h>
 13#include <linux/elf.h>
 14#include <linux/cpu.h>
 15#include <linux/ptrace.h>
 16#include <asm/pvclock.h>
 17#include <asm/vgtod.h>
 18#include <asm/proto.h>
 19#include <asm/vdso.h>
 20#include <asm/vvar.h>
 21#include <asm/page.h>
 22#include <asm/desc.h>
 23#include <asm/cpufeature.h>
 24
 25#if defined(CONFIG_X86_64)
 26unsigned int __read_mostly vdso64_enabled = 1;
 27#endif
 28
 29void __init init_vdso_image(const struct vdso_image *image)
 30{
 31	BUG_ON(image->size % PAGE_SIZE != 0);
 32
 33	apply_alternatives((struct alt_instr *)(image->data + image->alt),
 34			   (struct alt_instr *)(image->data + image->alt +
 35						image->alt_len));
 36}
 37
 38struct linux_binprm;
 39
 40static int vdso_fault(const struct vm_special_mapping *sm,
 41		      struct vm_area_struct *vma, struct vm_fault *vmf)
 42{
 43	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
 44
 45	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
 46		return VM_FAULT_SIGBUS;
 47
 48	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
 49	get_page(vmf->page);
 50	return 0;
 51}
 52
 53static void vdso_fix_landing(const struct vdso_image *image,
 54		struct vm_area_struct *new_vma)
 55{
 56#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
 57	if (in_ia32_syscall() && image == &vdso_image_32) {
 58		struct pt_regs *regs = current_pt_regs();
 59		unsigned long vdso_land = image->sym_int80_landing_pad;
 60		unsigned long old_land_addr = vdso_land +
 61			(unsigned long)current->mm->context.vdso;
 62
 63		/* Fix up the userspace landing address - see do_fast_syscall_32() */
 64		if (regs->ip == old_land_addr)
 65			regs->ip = new_vma->vm_start + vdso_land;
 66	}
 67#endif
 68}
 69
 70static int vdso_mremap(const struct vm_special_mapping *sm,
 71		struct vm_area_struct *new_vma)
 72{
 73	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
 74	const struct vdso_image *image = current->mm->context.vdso_image;
 75
 76	if (image->size != new_size)
 77		return -EINVAL;
 78
 79	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
 80		return -EFAULT;
 81
 82	vdso_fix_landing(image, new_vma);
 83	current->mm->context.vdso = (void __user *)new_vma->vm_start;
 84
 85	return 0;
 86}
 87
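The mremap hook above exists so the vDSO can be relocated after exec, checkpoint/restore being the motivating user: without it, context.vdso would go stale after a move. A sketch of relocating one's own vDSO from userspace; the [vdso] range is taken from /proc/self/maps, and the destination address used here is arbitrary and purely illustrative.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	unsigned long start = 0, end = 0;
	char line[512];
	void *moved;
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (strstr(line, "[vdso]")) {
			sscanf(line, "%lx-%lx", &start, &end);
			break;
		}
	}
	fclose(f);
	if (!start)
		return 1;

	/* The destination is an arbitrary free address for illustration.
	 * The kernel's vdso_mremap() hook updates context.vdso so the
	 * moved mapping keeps working (e.g. the int80 landing pad). */
	moved = mremap((void *)start, end - start, end - start,
		       MREMAP_MAYMOVE | MREMAP_FIXED,
		       (void *)0x700000000000UL);
	if (moved == MAP_FAILED)
		return 1;
	printf("vdso moved from %#lx to %p\n", start, moved);
	return 0;
}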
 88static int vvar_fault(const struct vm_special_mapping *sm,
 89		      struct vm_area_struct *vma, struct vm_fault *vmf)
 90{
 91	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
 92	long sym_offset;
 93	int ret = -EFAULT;
 94
 95	if (!image)
 96		return VM_FAULT_SIGBUS;
 97
 98	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
 99		image->sym_vvar_start;
100
101	/*
102	 * Sanity check: a symbol offset of zero means that the page
103	 * does not exist for this vdso image, not that the page is at
104	 * offset zero relative to the text mapping.  This should be
105	 * impossible here, because sym_offset should only be zero for
106	 * the page past the end of the vvar mapping.
107	 */
108	if (sym_offset == 0)
109		return VM_FAULT_SIGBUS;
110
111	if (sym_offset == image->sym_vvar_page) {
112		ret = vm_insert_pfn(vma, vmf->address,
113				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
114	} else if (sym_offset == image->sym_pvclock_page) {
115		struct pvclock_vsyscall_time_info *pvti =
116			pvclock_pvti_cpu0_va();
117		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
118			ret = vm_insert_pfn(
119				vma,
120				vmf->address,
121				__pa(pvti) >> PAGE_SHIFT);
122		}
123	}
124
125	if (ret == 0 || ret == -EBUSY)
126		return VM_FAULT_NOPAGE;
127
128	return VM_FAULT_SIGBUS;
129}
130
131static const struct vm_special_mapping vdso_mapping = {
132	.name = "[vdso]",
133	.fault = vdso_fault,
134	.mremap = vdso_mremap,
135};
136static const struct vm_special_mapping vvar_mapping = {
137	.name = "[vvar]",
138	.fault = vvar_fault,
139};
140
141/*
142 * Add vdso and vvar mappings to current process.
143 * @image          - blob to map
144 * @addr           - request a specific address (zero to map at free addr)
145 */
146static int map_vdso(const struct vdso_image *image, unsigned long addr)
147{
148	struct mm_struct *mm = current->mm;
149	struct vm_area_struct *vma;
150	unsigned long text_start;
151	int ret = 0;
152
153	if (down_write_killable(&mm->mmap_sem))
154		return -EINTR;
155
156	addr = get_unmapped_area(NULL, addr,
157				 image->size - image->sym_vvar_start, 0, 0);
158	if (IS_ERR_VALUE(addr)) {
159		ret = addr;
160		goto up_fail;
161	}
162
163	text_start = addr - image->sym_vvar_start;
164
165	/*
166	 * MAYWRITE to allow gdb to COW and set breakpoints
167	 */
168	vma = _install_special_mapping(mm,
169				       text_start,
170				       image->size,
171				       VM_READ|VM_EXEC|
172				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
173				       &vdso_mapping);
174
175	if (IS_ERR(vma)) {
176		ret = PTR_ERR(vma);
177		goto up_fail;
178	}
179
180	vma = _install_special_mapping(mm,
181				       addr,
182				       -image->sym_vvar_start,
183				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
184				       VM_PFNMAP,
185				       &vvar_mapping);
186
187	if (IS_ERR(vma)) {
188		ret = PTR_ERR(vma);
189		do_munmap(mm, text_start, image->size);
190	} else {
191		current->mm->context.vdso = (void __user *)text_start;
192		current->mm->context.vdso_image = image;
193	}
194
195up_fail:
196	up_write(&mm->mmap_sem);
197	return ret;
198}
199
200#ifdef CONFIG_X86_64
201/*
202 * Put the vdso above the (randomized) stack with another randomized
203 * offset.  This way there is no hole in the middle of address space.
204 * To save memory make sure it is still in the same PTE as the stack
205 * top.  This doesn't give that many random bits.
206 *
207 * Note that this algorithm is imperfect: the distribution of the vdso
208 * start address within a PMD is biased toward the end.
209 *
210 * Only used for the 64-bit and x32 vdsos.
211 */
212static unsigned long vdso_addr(unsigned long start, unsigned len)
213{
214	unsigned long addr, end;
215	unsigned offset;
216
217	/*
218	 * Round up the start address.  It can start out unaligned as a result
219	 * of stack start randomization.
220	 */
221	start = PAGE_ALIGN(start);
222
223	/* Round the lowest possible end address up to a PMD boundary. */
224	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
225	if (end >= TASK_SIZE_MAX)
226		end = TASK_SIZE_MAX;
227	end -= len;
228
229	if (end > start) {
230		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
231		addr = start + (offset << PAGE_SHIFT);
232	} else {
233		addr = start;
234	}
235
236	/*
237	 * Forcibly align the final address in case we have a hardware
238	 * issue that requires alignment for performance reasons.
239	 */
240	addr = align_vdso_addr(addr);
241
242	return addr;
243}
244
245static int map_vdso_randomized(const struct vdso_image *image)
246{
247	unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);
248
249	return map_vdso(image, addr);
250}
251#endif
252
253int map_vdso_once(const struct vdso_image *image, unsigned long addr)
254{
255	struct mm_struct *mm = current->mm;
256	struct vm_area_struct *vma;
257
258	down_write(&mm->mmap_sem);
259	/*
260	 * Check if we have already mapped the vdso blob - fail to prevent
261	 * abuse of install_special_mapping() from userspace, which may
262	 * not do accounting and rlimit checks correctly.
263	 * We could search for the VMA near context.vdso, but this is a
264	 * slow path, so explicitly check all VMAs to be completely sure.
265	 */
266	for (vma = mm->mmap; vma; vma = vma->vm_next) {
267		if (vma_is_special_mapping(vma, &vdso_mapping) ||
268				vma_is_special_mapping(vma, &vvar_mapping)) {
269			up_write(&mm->mmap_sem);
270			return -EEXIST;
271		}
272	}
273	up_write(&mm->mmap_sem);
274
275	return map_vdso(image, addr);
276}
277
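map_vdso_once() is the backend for the ARCH_MAP_VDSO_* arch_prctl() commands (added for checkpoint/restore), which map a vDSO image at a caller-chosen address at most once per mm. A sketch of invoking it follows; the 0x2003 value for ARCH_MAP_VDSO_64 is an assumption taken from asm/prctl.h. In a normal process, which already has a vDSO mapped at exec time, the call is expected to fail with EEXIST, exercising the duplicate check above.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef ARCH_MAP_VDSO_64
#define ARCH_MAP_VDSO_64 0x2003	/* from asm/prctl.h; assumed if not exported */
#endif

int main(void)
{
	/* Ask the kernel to map the 64-bit vDSO at an arbitrary hint address.
	 * Since this process was exec'ed with a vDSO already present,
	 * map_vdso_once() should return -EEXIST. */
	long ret = syscall(SYS_arch_prctl, ARCH_MAP_VDSO_64, 0x700000000000UL);

	if (ret < 0)
		printf("arch_prctl(ARCH_MAP_VDSO_64): %s\n", strerror(errno));
	else
		printf("vDSO mapped\n");
	return 0;
}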
278#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
279static int load_vdso32(void)
280{
281	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
282		return 0;
283
284	return map_vdso(&vdso_image_32, 0);
285}
286#endif
287
288#ifdef CONFIG_X86_64
289int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
290{
291	if (!vdso64_enabled)
292		return 0;
293
294	return map_vdso_randomized(&vdso_image_64);
295}
296
297#ifdef CONFIG_COMPAT
298int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
299				       int uses_interp)
300{
301#ifdef CONFIG_X86_X32_ABI
302	if (test_thread_flag(TIF_X32)) {
303		if (!vdso64_enabled)
304			return 0;
305		return map_vdso_randomized(&vdso_image_x32);
306	}
307#endif
308#ifdef CONFIG_IA32_EMULATION
309	return load_vdso32();
310#else
311	return 0;
312#endif
313}
314#endif
315#else
316int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
317{
318	return load_vdso32();
319}
320#endif
321
322#ifdef CONFIG_X86_64
323static __init int vdso_setup(char *s)
324{
325	vdso64_enabled = simple_strtoul(s, NULL, 0);
326	return 0;
327}
328__setup("vdso=", vdso_setup);
329#endif
330
331#ifdef CONFIG_X86_64
332static void vgetcpu_cpu_init(void *arg)
333{
334	int cpu = smp_processor_id();
335	struct desc_struct d = { };
336	unsigned long node = 0;
337#ifdef CONFIG_NUMA
338	node = cpu_to_node(cpu);
339#endif
340	if (static_cpu_has(X86_FEATURE_RDTSCP))
341		write_rdtscp_aux((node << 12) | cpu);
342
343	/*
344	 * Store cpu number in limit so that it can be loaded
345	 * quickly in user space in vgetcpu. (12 bits for the CPU
346	 * and 8 bits for the node)
347	 */
348	d.limit0 = cpu | ((node & 0xf) << 12);
349	d.limit = node >> 4;
350	d.type = 5;		/* RO data, expand down, accessed */
351	d.dpl = 3;		/* Visible to user code */
352	d.s = 1;		/* Not a system segment */
353	d.p = 1;		/* Present */
354	d.d = 1;		/* 32-bit */
355
356	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
357}
358
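The GDT descriptor written here is the fallback that the vDSO's getcpu path reads with LSL when RDTSCP is not available: the CPU sits in the low 12 bits of the segment limit and the node in the bits above. A userspace sketch of that decode; the 0x7b selector (GDT_ENTRY_PER_CPU * 8 | 3) is an assumption taken from the x86_64 asm/segment.h layout.

#include <stdio.h>

int main(void)
{
	unsigned int limit;

	/* LSL loads the segment limit of the per-CPU GDT entry set up in
	 * vgetcpu_cpu_init(): bits 0-11 hold the CPU, bits 12+ the node. */
	__asm__ volatile("lsl %1, %0" : "=r"(limit) : "r"(0x7bU));
	printf("cpu=%u node=%u\n", limit & 0xfff, limit >> 12);
	return 0;
}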
359static int vgetcpu_online(unsigned int cpu)
360{
361	return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
362}
363
364static int __init init_vdso(void)
365{
366	init_vdso_image(&vdso_image_64);
367
368#ifdef CONFIG_X86_X32_ABI
369	init_vdso_image(&vdso_image_x32);
370#endif
371
372	/* notifier priority > KVM */
373	return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
374				 "x86/vdso/vma:online", vgetcpu_online, NULL);
375}
376subsys_initcall(init_vdso);
377#endif /* CONFIG_X86_64 */