arch/x86/kernel/process_64.c (v3.15)
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>

asmlinkage extern void ret_from_fork(void);

__visible DEFINE_PER_CPU(unsigned long, old_rsp);

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))
		return;

	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt,
				dead_task->mm->context.size);
			BUG();
		}
	}
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	childregs = task_pt_regs(p);
	p->thread.sp = (unsigned long) childregs;
	p->thread.usersp = me->thread.usersp;
	set_tsk_thread_flag(p, TIF_FORK);
	p->thread.fpu_counter = 0;
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
	savesegment(fs, p->thread.fsindex);
	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->sp = (unsigned long)childregs;
		childregs->ss = __KERNEL_DS;
		childregs->bx = sp; /* function */
		childregs->bp = arg;
		childregs->orig_ax = -1;
		childregs->cs = __KERNEL_CS | get_kernel_rpl();
		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
		return 0;
	}
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	current->thread.usersp	= new_sp;
	regs->ip		= new_ip;
	regs->sp		= new_sp;
	this_cpu_write(old_rsp, new_sp);
	regs->cs		= _cs;
	regs->ss		= _ss;
	regs->flags		= X86_EFLAGS_IF;
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	fpu_switch_t fpu;

	fpu = switch_fpu_prepare(prev_p, next_p, cpu);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload.  Also
	 * reload when it has changed.  When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 *  clear 64bit base, since overloaded base is always
		 *  mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	switch_fpu_finish(next_p, fpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = this_cpu_read(old_rsp);
	this_cpu_write(old_rsp, next->usersp);
	this_cpu_write(current_task, next_p);

	/*
	 * If it were not for PREEMPT_ACTIVE we could guarantee that the
	 * preempt_count of all tasks was equal here and this would not be
	 * needed.
	 */
	task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
	this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);

	this_cpu_write(kernel_stack,
		  (unsigned long)task_stack_page(next_p) +
		  THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	return prev_p;
}
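/*
 * The old_rsp/usersp bookkeeping above is what KSTK_ESP() at the bottom
 * of this file relies on: a 64-bit task's user stack pointer is reported
 * from thread.usersp rather than from pt_regs.
 */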

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(bool x32)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	/* Mark the associated mm as containing 32-bit tasks. */
	if (current->mm)
		current->mm->context.ia32_compat = 1;

	if (x32) {
		clear_thread_flag(TIF_IA32);
		set_thread_flag(TIF_X32);
		current->personality &= ~READ_IMPLIES_EXEC;
		/* is_compat_task() uses the presence of the x32
		   syscall bit flag to determine compat status */
		current_thread_info()->status &= ~TS_COMPAT;
	} else {
		set_thread_flag(TIF_IA32);
		clear_thread_flag(TIF_X32);
		current->personality |= force_personality32;
		/* Prepare the first "return" to user space */
		current_thread_info()->status |= TS_COMPAT;
	}
}
EXPORT_SYMBOL_GPL(set_personality_ia32);
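
/*
 * get_wchan() below walks the sleeping task's saved frame-pointer chain
 * and returns the first return address outside scheduler code; this is
 * the value surfaced to user space as /proc/<pid>/wchan.
 */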
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = wrmsrl_safe(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}

unsigned long KSTK_ESP(struct task_struct *task)
{
	return (test_tsk_thread_flag(task, TIF_IA32)) ?
			(task_pt_regs(task)->sp) : ((task)->thread.usersp);
}
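
As a minimal illustration of the ARCH_GET_FS path above (a userspace sketch,
not part of the kernel source; it assumes glibc on x86-64, where arch_prctl(2)
has no library wrapper and must be invoked via syscall(2)):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>		/* ARCH_GET_FS, ARCH_SET_FS, ... */

int main(void)
{
	unsigned long fsbase = 0;

	/* Lands in do_arch_prctl(current, ARCH_GET_FS, &fsbase) above. */
	if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase) != 0) {
		perror("arch_prctl(ARCH_GET_FS)");
		return 1;
	}
	printf("FS base (TLS pointer): %#lx\n", fsbase);
	return 0;
}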
arch/x86/kernel/process_64.c (v5.14.15)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/processor.h>
#include <asm/pkru.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/resctrl.h>
#include <asm/unistd.h>
#include <asm/fsgsbase.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif

#include "process.h"

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
		 const char *log_lvl)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, es;

	show_iret_regs(regs, log_lvl);

	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");

	printk("%sRAX: %016lx RBX: %016lx RCX: %016lx\n",
	       log_lvl, regs->ax, regs->bx, regs->cx);
	printk("%sRDX: %016lx RSI: %016lx RDI: %016lx\n",
	       log_lvl, regs->dx, regs->si, regs->di);
	printk("%sRBP: %016lx R08: %016lx R09: %016lx\n",
	       log_lvl, regs->bp, regs->r8, regs->r9);
	printk("%sR10: %016lx R11: %016lx R12: %016lx\n",
	       log_lvl, regs->r10, regs->r11, regs->r12);
	printk("%sR13: %016lx R14: %016lx R15: %016lx\n",
	       log_lvl, regs->r13, regs->r14, regs->r15);

	if (mode == SHOW_REGS_SHORT)
		return;

	if (mode == SHOW_REGS_USER) {
		rdmsrl(MSR_FS_BASE, fs);
		rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
		printk("%sFS:  %016lx GS:  %016lx\n",
		       log_lvl, fs, shadowgs);
		return;
	}

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = __read_cr3();
	cr4 = __read_cr4();

	printk("%sFS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       log_lvl, fs, fsindex, gs, gsindex, shadowgs);
	printk("%sCS:  %04lx DS: %04x ES: %04x CR0: %016lx\n",
		log_lvl, regs->cs, ds, es, cr0);
	printk("%sCR2: %016lx CR3: %016lx CR4: %016lx\n",
		log_lvl, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk("%sDR0: %016lx DR1: %016lx DR2: %016lx\n",
		       log_lvl, d0, d1, d2);
		printk("%sDR3: %016lx DR6: %016lx DR7: %016lx\n",
		       log_lvl, d3, d6, d7);
	}

	if (cpu_feature_enabled(X86_FEATURE_OSPKE))
		printk("%sPKRU: %08x\n", log_lvl, read_pkru());
}

void release_thread(struct task_struct *dead_task)
{
	WARN_ON(dead_task->mm);
}

enum which_selector {
	FS,
	GS
};

/*
 * Out of line to be protected from kprobes and tracing. If this would be
 * traced or probed then any access to a per CPU variable happens with
 * the wrong GS.
 *
 * It is not used on Xen paravirt. When paravirt support is needed, it
 * needs to be renamed with native_ prefix.
 */
static noinstr unsigned long __rdgsbase_inactive(void)
{
	unsigned long gsbase;

	lockdep_assert_irqs_disabled();

	if (!static_cpu_has(X86_FEATURE_XENPV)) {
		native_swapgs();
		gsbase = rdgsbase();
		native_swapgs();
	} else {
		instrumentation_begin();
		rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
		instrumentation_end();
	}

	return gsbase;
}

/*
 * Out of line to be protected from kprobes and tracing. If this would be
 * traced or probed then any access to a per CPU variable happens with
 * the wrong GS.
 *
 * It is not used on Xen paravirt. When paravirt support is needed, it
 * needs to be renamed with native_ prefix.
 */
static noinstr void __wrgsbase_inactive(unsigned long gsbase)
{
	lockdep_assert_irqs_disabled();

	if (!static_cpu_has(X86_FEATURE_XENPV)) {
		native_swapgs();
		wrgsbase(gsbase);
		native_swapgs();
	} else {
		instrumentation_begin();
		wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
		instrumentation_end();
	}
}

/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available.  The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
					     unsigned short selector,
					     enum which_selector which)
{
	if (likely(selector == 0)) {
		/*
		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
		 * be the pre-existing saved base or it could be zero.  On AMD
		 * (with X86_BUG_NULL_SEG), the segment base could be almost
		 * anything.
		 *
		 * This branch is very hot (it's hit twice on almost every
		 * context switch between 64-bit programs), and avoiding
		 * the RDMSR helps a lot, so we just assume that whatever
		 * value is already saved is correct.  This matches historical
		 * Linux behavior, so it won't break existing applications.
		 *
		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
		 * report that the base is zero, it needs to actually be zero:
		 * see the corresponding logic in load_seg_legacy.
		 */
	} else {
		/*
		 * If the selector is 1, 2, or 3, then the base is zero on
		 * !X86_BUG_NULL_SEG CPUs and could be anything on
		 * X86_BUG_NULL_SEG CPUs.  In the latter case, Linux
		 * has never attempted to preserve the base across context
		 * switches.
		 *
		 * If selector > 3, then it refers to a real segment, and
		 * saving the base isn't necessary.
		 */
		if (which == FS)
			prev_p->thread.fsbase = 0;
		else
			prev_p->thread.gsbase = 0;
	}
}

static __always_inline void save_fsgs(struct task_struct *task)
{
	savesegment(fs, task->thread.fsindex);
	savesegment(gs, task->thread.gsindex);
	if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
		/*
		 * If FSGSBASE is enabled, we can't make any useful guesses
		 * about the base, and user code expects us to save the current
		 * value.  Fortunately, reading the base directly is efficient.
		 */
		task->thread.fsbase = rdfsbase();
		task->thread.gsbase = __rdgsbase_inactive();
	} else {
		save_base_legacy(task, task->thread.fsindex, FS);
		save_base_legacy(task, task->thread.gsindex, GS);
	}
}

/*
 * While a process is running, current->thread.fsbase and current->thread.gsbase
 * may not match the corresponding CPU registers (see save_base_legacy()).
 */
void current_save_fsgs(void)
{
	unsigned long flags;

	/* Interrupts need to be off for FSGSBASE */
	local_irq_save(flags);
	save_fsgs(current);
	local_irq_restore(flags);
}
#if IS_ENABLED(CONFIG_KVM)
EXPORT_SYMBOL_GPL(current_save_fsgs);
#endif

static __always_inline void loadseg(enum which_selector which,
				    unsigned short sel)
{
	if (which == FS)
		loadsegment(fs, sel);
	else
		load_gs_index(sel);
}

static __always_inline void load_seg_legacy(unsigned short prev_index,
					    unsigned long prev_base,
					    unsigned short next_index,
					    unsigned long next_base,
					    enum which_selector which)
{
	if (likely(next_index <= 3)) {
		/*
		 * The next task is using 64-bit TLS, is not using this
		 * segment at all, or is having fun with arcane CPU features.
		 */
		if (next_base == 0) {
			/*
			 * Nasty case: on AMD CPUs, we need to forcibly zero
			 * the base.
			 */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				loadseg(which, __USER_DS);
				loadseg(which, next_index);
			} else {
				/*
				 * We could try to exhaustively detect cases
				 * under which we can skip the segment load,
				 * but there's really only one case that matters
				 * for performance: if both the previous and
				 * next states are fully zeroed, we can skip
				 * the load.
				 *
				 * (This assumes that prev_base == 0 has no
				 * false positives.  This is the case on
				 * Intel-style CPUs.)
				 */
				if (likely(prev_index | next_index | prev_base))
					loadseg(which, next_index);
			}
		} else {
			if (prev_index != next_index)
				loadseg(which, next_index);
			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
			       next_base);
		}
	} else {
		/*
		 * The next task is using a real segment.  Loading the selector
		 * is sufficient.
		 */
		loadseg(which, next_index);
	}
}

/*
 * Store prev's PKRU value and load next's PKRU value if they differ. PKRU
 * is not XSTATE managed on context switch because that would require a
 * lookup in the task's FPU xsave buffer and require to keep that updated
 * in various places.
 */
static __always_inline void x86_pkru_load(struct thread_struct *prev,
					  struct thread_struct *next)
{
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/* Stash the prev task's value: */
	prev->pkru = rdpkru();

	/*
	 * PKRU writes are slightly expensive.  Avoid them when not
	 * strictly necessary:
	 */
	if (prev->pkru != next->pkru)
		wrpkru(next->pkru);
}

static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
					      struct thread_struct *next)
{
	if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
		/* Update the FS and GS selectors if they could have changed. */
		if (unlikely(prev->fsindex || next->fsindex))
			loadseg(FS, next->fsindex);
		if (unlikely(prev->gsindex || next->gsindex))
			loadseg(GS, next->gsindex);

		/* Update the bases. */
		wrfsbase(next->fsbase);
		__wrgsbase_inactive(next->gsbase);
	} else {
		load_seg_legacy(prev->fsindex, prev->fsbase,
				next->fsindex, next->fsbase, FS);
		load_seg_legacy(prev->gsindex, prev->gsbase,
				next->gsindex, next->gsbase, GS);
	}
}

unsigned long x86_fsgsbase_read_task(struct task_struct *task,
				     unsigned short selector)
{
	unsigned short idx = selector >> 3;
	unsigned long base;

	if (likely((selector & SEGMENT_TI_MASK) == 0)) {
		if (unlikely(idx >= GDT_ENTRIES))
			return 0;

		/*
		 * There are no user segments in the GDT with nonzero bases
		 * other than the TLS segments.
		 */
		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
			return 0;

		idx -= GDT_ENTRY_TLS_MIN;
		base = get_desc_base(&task->thread.tls_array[idx]);
	} else {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		struct ldt_struct *ldt;

		/*
		 * If performance here mattered, we could protect the LDT
		 * with RCU.  This is a slow path, though, so we can just
		 * take the mutex.
		 */
		mutex_lock(&task->mm->context.lock);
		ldt = task->mm->context.ldt;
		if (unlikely(!ldt || idx >= ldt->nr_entries))
			base = 0;
		else
			base = get_desc_base(ldt->entries + idx);
		mutex_unlock(&task->mm->context.lock);
#else
		base = 0;
#endif
	}

	return base;
}
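
/*
 * Illustration (not kernel code): x86_fsgsbase_read_task() above decodes
 * a segment selector by hand.  An x86 selector packs three fields:
 *
 *	idx = selector >> 3;	bits 15:3, descriptor-table index
 *	ti  = selector & 0x4;	bit 2 (SEGMENT_TI_MASK), LDT if set
 *	rpl = selector & 0x3;	bits 1:0, requested privilege level
 *
 * which is why load_seg_legacy() can treat any selector value above 3 as
 * naming a real descriptor.
 */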

unsigned long x86_gsbase_read_cpu_inactive(void)
{
	unsigned long gsbase;

	if (boot_cpu_has(X86_FEATURE_FSGSBASE)) {
		unsigned long flags;

		local_irq_save(flags);
		gsbase = __rdgsbase_inactive();
		local_irq_restore(flags);
	} else {
		rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
	}

	return gsbase;
}

void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
{
	if (boot_cpu_has(X86_FEATURE_FSGSBASE)) {
		unsigned long flags;

		local_irq_save(flags);
		__wrgsbase_inactive(gsbase);
		local_irq_restore(flags);
	} else {
		wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
	}
}

unsigned long x86_fsbase_read_task(struct task_struct *task)
{
	unsigned long fsbase;

	if (task == current)
		fsbase = x86_fsbase_read_cpu();
	else if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		 (task->thread.fsindex == 0))
		fsbase = task->thread.fsbase;
	else
		fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);

	return fsbase;
}

unsigned long x86_gsbase_read_task(struct task_struct *task)
{
	unsigned long gsbase;

	if (task == current)
		gsbase = x86_gsbase_read_cpu_inactive();
	else if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		 (task->thread.gsindex == 0))
		gsbase = task->thread.gsbase;
	else
		gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);

	return gsbase;
}

void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.fsbase = fsbase;
}

void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.gsbase = gsbase;
}

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	WARN_ON_ONCE(regs != current_pt_regs());

	if (static_cpu_has(X86_BUG_NULL_SEG)) {
		/* Loading zero below won't clear the base. */
		loadsegment(fs, __USER_DS);
		load_gs_index(__USER_DS);
	}

	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);

	regs->ip		= new_ip;
	regs->sp		= new_sp;
	regs->cs		= _cs;
	regs->ss		= _ss;
	regs->flags		= X86_EFLAGS_IF;
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp, bool x32)
{
	start_thread_common(regs, new_ip, new_sp,
			    x32 ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();

	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
		     this_cpu_read(hardirq_stack_inuse));

	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
		switch_fpu_prepare(prev_fpu, cpu);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	save_fsgs(prev_p);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.  This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them.
	 */
	arch_end_context_switch(next_p);

	/* Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT.  The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	x86_fsgsbase_load(prev, next);

	x86_pkru_load(prev, next);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);
	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

	switch_fpu_finish(next_fpu);

	/* Reload sp0. */
	update_task_stack(next_p);

	switch_to_extra(prev_p, next_p);

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor.  As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths.  Instead, we ensure that SS is never NULL in
		 * system call context.  We do this by replacing NULL SS
		 * selectors at every context switch.  SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt.  Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes.  Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	/* Load the Intel cache allocation PQR MSR. */
	resctrl_sched_in();

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_ADDR32);
	/* Pretend that this comes from a 64bit execve */
	task_pt_regs(current)->orig_ax = __NR_execve;
	current_thread_info()->status &= ~TS_COMPAT;
	if (current->mm)
		current->mm->context.flags = MM_CONTEXT_HAS_VSYSCALL;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
	if (current->mm)
		current->mm->context.flags = 0;

	current->personality &= ~READ_IMPLIES_EXEC;
	/*
	 * in_32bit_syscall() uses the presence of the x32 syscall bit
	 * flag to determine compat status.  The x86 mmap() code relies on
	 * the syscall bitness so set x32 syscall bit right here to make
	 * in_32bit_syscall() work during exec().
	 *
	 * Pretend to come from an x32 execve.
	 */
	task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
	current_thread_info()->status &= ~TS_COMPAT;
#endif
}

static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
	if (current->mm) {
		/*
		 * uprobes applied to this MM need to know this and
		 * cannot use user_64bit_mode() at that time.
		 */
		current->mm->context.flags = MM_CONTEXT_UPROBE_IA32;
	}

	current->personality |= force_personality32;
	/* Prepare the first "return" to user space */
	task_pt_regs(current)->orig_ax = __NR_ia32_execve;
	current_thread_info()->status |= TS_COMPAT;
#endif
}

void set_personality_ia32(bool x32)
{
	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	if (x32)
		__set_personality_x32();
	else
		__set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif

long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
	int ret = 0;

	switch (option) {
	case ARCH_SET_GS: {
		if (unlikely(arg2 >= TASK_SIZE_MAX))
			return -EPERM;

		preempt_disable();
		/*
		 * ARCH_SET_GS has always overwritten the index
		 * and the base. Zero is the most sensible value
		 * to put in the index, and is the only value that
		 * makes any sense if FSGSBASE is unavailable.
		 */
		if (task == current) {
			loadseg(GS, 0);
			x86_gsbase_write_cpu_inactive(arg2);

			/*
			 * On non-FSGSBASE systems, save_base_legacy() expects
			 * that we also fill in thread.gsbase.
			 */
			task->thread.gsbase = arg2;

		} else {
			task->thread.gsindex = 0;
			x86_gsbase_write_task(task, arg2);
		}
		preempt_enable();
		break;
	}
	case ARCH_SET_FS: {
		/*
		 * Not strictly needed for %fs, but do it for symmetry
		 * with %gs
		 */
		if (unlikely(arg2 >= TASK_SIZE_MAX))
			return -EPERM;

		preempt_disable();
		/*
		 * Set the selector to 0 for the same reason
		 * as %gs above.
		 */
		if (task == current) {
			loadseg(FS, 0);
			x86_fsbase_write_cpu(arg2);

			/*
			 * On non-FSGSBASE systems, save_base_legacy() expects
			 * that we also fill in thread.fsbase.
			 */
			task->thread.fsbase = arg2;
		} else {
			task->thread.fsindex = 0;
			x86_fsbase_write_task(task, arg2);
		}
		preempt_enable();
		break;
	}
	case ARCH_GET_FS: {
		unsigned long base = x86_fsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base = x86_gsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, arg2);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, arg2);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	long ret;

	ret = do_arch_prctl_64(current, option, arg2);
	if (ret == -EINVAL)
		ret = do_arch_prctl_common(current, option, arg2);

	return ret;
}

#ifdef CONFIG_IA32_EMULATION
COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return do_arch_prctl_common(current, option, arg2);
}
#endif

unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}
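
The v5.14 code prefers the RDFSBASE/WRGSBASE instruction family over MSR
accesses whenever X86_FEATURE_FSGSBASE is available. User space can reach the
same instructions through compiler intrinsics. A minimal sketch (not from the
kernel tree; assumes a CPU and kernel with FSGSBASE enabled and compilation
with -mfsgsbase, otherwise the instruction raises SIGILL):

#include <stdio.h>
#include <immintrin.h>

int main(void)
{
	/* Reads the FS base directly in user mode, no arch_prctl needed. */
	unsigned long long fsbase = _readfsbase_u64();

	printf("FS base via RDFSBASE: %#llx\n", fsbase);
	return 0;
}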