v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  Copyright (C) 1995  Linus Torvalds
  4 *
  5 *  Pentium III FXSR, SSE support
  6 *	Gareth Hughes <gareth@valinux.com>, May 2000
  7 *
  8 *  X86-64 port
  9 *	Andi Kleen.
 10 *
 11 *	CPU hotplug support - ashok.raj@intel.com
 12 */
 13
 14/*
  15 * This file handles the architecture-dependent parts of process handling.
 16 */
 17
 18#include <linux/cpu.h>
 19#include <linux/errno.h>
 20#include <linux/sched.h>
 21#include <linux/sched/task.h>
 22#include <linux/sched/task_stack.h>
 23#include <linux/fs.h>
 24#include <linux/kernel.h>
 25#include <linux/mm.h>
 26#include <linux/elfcore.h>
 27#include <linux/smp.h>
 28#include <linux/slab.h>
 29#include <linux/user.h>
 30#include <linux/interrupt.h>
 31#include <linux/delay.h>
 32#include <linux/export.h>
 33#include <linux/ptrace.h>
 34#include <linux/notifier.h>
 35#include <linux/kprobes.h>
 36#include <linux/kdebug.h>
 37#include <linux/prctl.h>
 38#include <linux/uaccess.h>
 39#include <linux/io.h>
 40#include <linux/ftrace.h>
 41#include <linux/syscalls.h>
 42
 43#include <asm/pgtable.h>
 44#include <asm/processor.h>
 45#include <asm/fpu/internal.h>
 46#include <asm/mmu_context.h>
 47#include <asm/prctl.h>
 48#include <asm/desc.h>
 49#include <asm/proto.h>
 50#include <asm/ia32.h>
 51#include <asm/syscalls.h>
 52#include <asm/debugreg.h>
 53#include <asm/switch_to.h>
 54#include <asm/xen/hypervisor.h>
 55#include <asm/vdso.h>
 56#include <asm/resctrl_sched.h>
 57#include <asm/unistd.h>
 58#include <asm/fsgsbase.h>
 59#ifdef CONFIG_IA32_EMULATION
 60/* Not included via unistd.h */
 61#include <asm/unistd_32_ia32.h>
 62#endif
 63
 64#include "process.h"
 65
 66/* Prints also some state that isn't saved in the pt_regs */
 67void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 68{
 69	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
 70	unsigned long d0, d1, d2, d3, d6, d7;
 71	unsigned int fsindex, gsindex;
 72	unsigned int ds, es;
 73
 74	show_iret_regs(regs);
 75
 76	if (regs->orig_ax != -1)
 77		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
 78	else
 79		pr_cont("\n");
 80
 81	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
 82	       regs->ax, regs->bx, regs->cx);
 83	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
 84	       regs->dx, regs->si, regs->di);
 85	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
 86	       regs->bp, regs->r8, regs->r9);
 87	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
 88	       regs->r10, regs->r11, regs->r12);
 89	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
 90	       regs->r13, regs->r14, regs->r15);
 91
 92	if (mode == SHOW_REGS_SHORT)
 93		return;
 94
 95	if (mode == SHOW_REGS_USER) {
 96		rdmsrl(MSR_FS_BASE, fs);
 97		rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
 98		printk(KERN_DEFAULT "FS:  %016lx GS:  %016lx\n",
 99		       fs, shadowgs);
100		return;
101	}
102
103	asm("movl %%ds,%0" : "=r" (ds));
104	asm("movl %%es,%0" : "=r" (es));
105	asm("movl %%fs,%0" : "=r" (fsindex));
106	asm("movl %%gs,%0" : "=r" (gsindex));
107
108	rdmsrl(MSR_FS_BASE, fs);
109	rdmsrl(MSR_GS_BASE, gs);
110	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
111
112	cr0 = read_cr0();
113	cr2 = read_cr2();
114	cr3 = __read_cr3();
115	cr4 = __read_cr4();
116
117	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
118	       fs, fsindex, gs, gsindex, shadowgs);
119	printk(KERN_DEFAULT "CS:  %04lx DS: %04x ES: %04x CR0: %016lx\n", regs->cs, ds,
120			es, cr0);
121	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
122			cr4);
123
124	get_debugreg(d0, 0);
125	get_debugreg(d1, 1);
126	get_debugreg(d2, 2);
127	get_debugreg(d3, 3);
128	get_debugreg(d6, 6);
129	get_debugreg(d7, 7);
130
131	/* Only print out debug registers if they are in their non-default state. */
132	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
133	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
134		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
135		       d0, d1, d2);
136		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
137		       d3, d6, d7);
138	}
139
140	if (boot_cpu_has(X86_FEATURE_OSPKE))
141		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
142}
143
144void release_thread(struct task_struct *dead_task)
145{
146	WARN_ON(dead_task->mm);
147}
148
149enum which_selector {
150	FS,
151	GS
152};
153
154/*
155 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
156 * not available.  The goal is to be reasonably fast on non-FSGSBASE systems.
157 * It's forcibly inlined because it'll generate better code and this function
158 * is hot.
159 */
160static __always_inline void save_base_legacy(struct task_struct *prev_p,
161					     unsigned short selector,
162					     enum which_selector which)
163{
164	if (likely(selector == 0)) {
165		/*
166		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
167		 * be the pre-existing saved base or it could be zero.  On AMD
168		 * (with X86_BUG_NULL_SEG), the segment base could be almost
169		 * anything.
170		 *
171		 * This branch is very hot (it's hit twice on almost every
172		 * context switch between 64-bit programs), and avoiding
173		 * the RDMSR helps a lot, so we just assume that whatever
174		 * value is already saved is correct.  This matches historical
175		 * Linux behavior, so it won't break existing applications.
176		 *
177		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
178		 * report that the base is zero, it needs to actually be zero:
179		 * see the corresponding logic in load_seg_legacy.
180		 */
181	} else {
182		/*
183		 * If the selector is 1, 2, or 3, then the base is zero on
184		 * !X86_BUG_NULL_SEG CPUs and could be anything on
185		 * X86_BUG_NULL_SEG CPUs.  In the latter case, Linux
186		 * has never attempted to preserve the base across context
187		 * switches.
188		 *
189		 * If selector > 3, then it refers to a real segment, and
190		 * saving the base isn't necessary.
191		 */
192		if (which == FS)
193			prev_p->thread.fsbase = 0;
194		else
195			prev_p->thread.gsbase = 0;
196	}
197}
198
199static __always_inline void save_fsgs(struct task_struct *task)
200{
201	savesegment(fs, task->thread.fsindex);
202	savesegment(gs, task->thread.gsindex);
203	save_base_legacy(task, task->thread.fsindex, FS);
204	save_base_legacy(task, task->thread.gsindex, GS);
205}
206
207#if IS_ENABLED(CONFIG_KVM)
208/*
 209 * While a process is running, current->thread.fsbase and current->thread.gsbase
210 * may not match the corresponding CPU registers (see save_base_legacy()). KVM
211 * wants an efficient way to save and restore FSBASE and GSBASE.
212 * When FSGSBASE extensions are enabled, this will have to use RD{FS,GS}BASE.
213 */
214void save_fsgs_for_kvm(void)
215{
216	save_fsgs(current);
217}
218EXPORT_SYMBOL_GPL(save_fsgs_for_kvm);
219#endif
220
221static __always_inline void loadseg(enum which_selector which,
222				    unsigned short sel)
223{
224	if (which == FS)
225		loadsegment(fs, sel);
226	else
227		load_gs_index(sel);
228}
229
230static __always_inline void load_seg_legacy(unsigned short prev_index,
231					    unsigned long prev_base,
232					    unsigned short next_index,
233					    unsigned long next_base,
234					    enum which_selector which)
235{
236	if (likely(next_index <= 3)) {
237		/*
238		 * The next task is using 64-bit TLS, is not using this
239		 * segment at all, or is having fun with arcane CPU features.
240		 */
241		if (next_base == 0) {
242			/*
243			 * Nasty case: on AMD CPUs, we need to forcibly zero
244			 * the base.
245			 */
246			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
247				loadseg(which, __USER_DS);
248				loadseg(which, next_index);
249			} else {
250				/*
251				 * We could try to exhaustively detect cases
252				 * under which we can skip the segment load,
253				 * but there's really only one case that matters
254				 * for performance: if both the previous and
255				 * next states are fully zeroed, we can skip
256				 * the load.
257				 *
258				 * (This assumes that prev_base == 0 has no
259				 * false positives.  This is the case on
260				 * Intel-style CPUs.)
261				 */
262				if (likely(prev_index | next_index | prev_base))
263					loadseg(which, next_index);
264			}
265		} else {
266			if (prev_index != next_index)
267				loadseg(which, next_index);
268			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
269			       next_base);
270		}
271	} else {
272		/*
273		 * The next task is using a real segment.  Loading the selector
274		 * is sufficient.
275		 */
276		loadseg(which, next_index);
277	}
278}
279
280static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
281					      struct thread_struct *next)
282{
283	load_seg_legacy(prev->fsindex, prev->fsbase,
284			next->fsindex, next->fsbase, FS);
285	load_seg_legacy(prev->gsindex, prev->gsbase,
286			next->gsindex, next->gsbase, GS);
287}
288
289static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
290					    unsigned short selector)
291{
292	unsigned short idx = selector >> 3;
293	unsigned long base;
294
295	if (likely((selector & SEGMENT_TI_MASK) == 0)) {
296		if (unlikely(idx >= GDT_ENTRIES))
297			return 0;
298
299		/*
300		 * There are no user segments in the GDT with nonzero bases
301		 * other than the TLS segments.
302		 */
303		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
304			return 0;
305
306		idx -= GDT_ENTRY_TLS_MIN;
307		base = get_desc_base(&task->thread.tls_array[idx]);
308	} else {
309#ifdef CONFIG_MODIFY_LDT_SYSCALL
310		struct ldt_struct *ldt;
311
312		/*
313		 * If performance here mattered, we could protect the LDT
314		 * with RCU.  This is a slow path, though, so we can just
315		 * take the mutex.
316		 */
317		mutex_lock(&task->mm->context.lock);
318		ldt = task->mm->context.ldt;
319		if (unlikely(idx >= ldt->nr_entries))
320			base = 0;
321		else
322			base = get_desc_base(ldt->entries + idx);
323		mutex_unlock(&task->mm->context.lock);
324#else
325		base = 0;
326#endif
327	}
328
329	return base;
330}
331
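
The lookup in x86_fsgsbase_read_task() above relies on the standard x86 segment-selector layout: bits 0-1 hold the requested privilege level (RPL), bit 2 is the table indicator (TI, 0 = GDT, 1 = LDT, which is what SEGMENT_TI_MASK tests), and bits 3-15 are the descriptor-table index, hence the "selector >> 3". A minimal user-space sketch, separate from this file, that decodes a selector the same way (the 0x33 value is assumed purely for illustration; it matches Linux's 64-bit __USER_CS):

#include <stdio.h>

/*
 * Illustrative sketch only (not part of process_64.c): decode an x86
 * segment selector the same way x86_fsgsbase_read_task() does.
 */
#define SEL_RPL(sel)	((sel) & 0x3)		/* requested privilege level */
#define SEL_TI(sel)	(((sel) >> 2) & 0x1)	/* 0 = GDT, 1 = LDT */
#define SEL_INDEX(sel)	((sel) >> 3)		/* descriptor table index */

int main(void)
{
	unsigned int sel = 0x33;	/* assumed example: Linux __USER_CS */

	printf("index=%u table=%s rpl=%u\n",
	       SEL_INDEX(sel), SEL_TI(sel) ? "LDT" : "GDT", SEL_RPL(sel));
	return 0;
}
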
332unsigned long x86_fsbase_read_task(struct task_struct *task)
333{
334	unsigned long fsbase;
335
336	if (task == current)
337		fsbase = x86_fsbase_read_cpu();
338	else if (task->thread.fsindex == 0)
339		fsbase = task->thread.fsbase;
340	else
341		fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);
342
343	return fsbase;
344}
345
346unsigned long x86_gsbase_read_task(struct task_struct *task)
347{
348	unsigned long gsbase;
349
350	if (task == current)
351		gsbase = x86_gsbase_read_cpu_inactive();
352	else if (task->thread.gsindex == 0)
353		gsbase = task->thread.gsbase;
354	else
355		gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);
356
357	return gsbase;
358}
359
360void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
361{
362	WARN_ON_ONCE(task == current);
363
364	task->thread.fsbase = fsbase;
365}
366
367void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
368{
369	WARN_ON_ONCE(task == current);
370
371	task->thread.gsbase = gsbase;
372}
373
374int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
375		unsigned long arg, struct task_struct *p, unsigned long tls)
376{
377	int err;
378	struct pt_regs *childregs;
379	struct fork_frame *fork_frame;
380	struct inactive_task_frame *frame;
381	struct task_struct *me = current;
382
383	childregs = task_pt_regs(p);
384	fork_frame = container_of(childregs, struct fork_frame, regs);
385	frame = &fork_frame->frame;
386
387	frame->bp = 0;
388	frame->ret_addr = (unsigned long) ret_from_fork;
389	p->thread.sp = (unsigned long) fork_frame;
390	p->thread.io_bitmap_ptr = NULL;
391
392	savesegment(gs, p->thread.gsindex);
393	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
394	savesegment(fs, p->thread.fsindex);
395	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
396	savesegment(es, p->thread.es);
397	savesegment(ds, p->thread.ds);
398	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
399
400	if (unlikely(p->flags & PF_KTHREAD)) {
401		/* kernel thread */
402		memset(childregs, 0, sizeof(struct pt_regs));
403		frame->bx = sp;		/* function */
404		frame->r12 = arg;
405		return 0;
406	}
407	frame->bx = 0;
408	*childregs = *current_pt_regs();
409
410	childregs->ax = 0;
411	if (sp)
412		childregs->sp = sp;
413
414	err = -ENOMEM;
415	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
416		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
417						  IO_BITMAP_BYTES, GFP_KERNEL);
418		if (!p->thread.io_bitmap_ptr) {
419			p->thread.io_bitmap_max = 0;
420			return -ENOMEM;
421		}
422		set_tsk_thread_flag(p, TIF_IO_BITMAP);
423	}
424
425	/*
426	 * Set a new TLS for the child thread?
427	 */
428	if (clone_flags & CLONE_SETTLS) {
429#ifdef CONFIG_IA32_EMULATION
430		if (in_ia32_syscall())
431			err = do_set_thread_area(p, -1,
432				(struct user_desc __user *)tls, 0);
433		else
434#endif
435			err = do_arch_prctl_64(p, ARCH_SET_FS, tls);
436		if (err)
437			goto out;
438	}
439	err = 0;
440out:
441	if (err && p->thread.io_bitmap_ptr) {
442		kfree(p->thread.io_bitmap_ptr);
443		p->thread.io_bitmap_max = 0;
444	}
445
446	return err;
447}
448
449static void
450start_thread_common(struct pt_regs *regs, unsigned long new_ip,
451		    unsigned long new_sp,
452		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
453{
454	WARN_ON_ONCE(regs != current_pt_regs());
455
456	if (static_cpu_has(X86_BUG_NULL_SEG)) {
457		/* Loading zero below won't clear the base. */
458		loadsegment(fs, __USER_DS);
459		load_gs_index(__USER_DS);
460	}
461
462	loadsegment(fs, 0);
463	loadsegment(es, _ds);
464	loadsegment(ds, _ds);
465	load_gs_index(0);
466
467	regs->ip		= new_ip;
468	regs->sp		= new_sp;
469	regs->cs		= _cs;
470	regs->ss		= _ss;
471	regs->flags		= X86_EFLAGS_IF;
472	force_iret();
473}
474
475void
476start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
477{
478	start_thread_common(regs, new_ip, new_sp,
479			    __USER_CS, __USER_DS, 0);
480}
481EXPORT_SYMBOL_GPL(start_thread);
482
483#ifdef CONFIG_COMPAT
484void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
485{
486	start_thread_common(regs, new_ip, new_sp,
487			    test_thread_flag(TIF_X32)
488			    ? __USER_CS : __USER32_CS,
489			    __USER_DS, __USER_DS);
490}
491#endif
492
493/*
494 *	switch_to(x,y) should switch tasks from x to y.
495 *
496 * This could still be optimized:
497 * - fold all the options into a flag word and test it with a single test.
498 * - could test fs/gs bitsliced
499 *
500 * Kprobes not supported here. Set the probe on schedule instead.
501 * Function graph tracer not supported too.
502 */
503__visible __notrace_funcgraph struct task_struct *
504__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
505{
506	struct thread_struct *prev = &prev_p->thread;
507	struct thread_struct *next = &next_p->thread;
508	struct fpu *prev_fpu = &prev->fpu;
509	struct fpu *next_fpu = &next->fpu;
510	int cpu = smp_processor_id();
511
512	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
513		     this_cpu_read(irq_count) != -1);
514
515	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
516		switch_fpu_prepare(prev_fpu, cpu);
517
518	/* We must save %fs and %gs before load_TLS() because
519	 * %fs and %gs may be cleared by load_TLS().
520	 *
521	 * (e.g. xen_load_tls())
522	 */
523	save_fsgs(prev_p);
524
525	/*
526	 * Load TLS before restoring any segments so that segment loads
527	 * reference the correct GDT entries.
528	 */
529	load_TLS(next, cpu);
530
531	/*
532	 * Leave lazy mode, flushing any hypercalls made here.  This
533	 * must be done after loading TLS entries in the GDT but before
534	 * loading segments that might reference them.
535	 */
536	arch_end_context_switch(next_p);
537
538	/* Switch DS and ES.
539	 *
540	 * Reading them only returns the selectors, but writing them (if
541	 * nonzero) loads the full descriptor from the GDT or LDT.  The
542	 * LDT for next is loaded in switch_mm, and the GDT is loaded
543	 * above.
544	 *
545	 * We therefore need to write new values to the segment
546	 * registers on every context switch unless both the new and old
547	 * values are zero.
548	 *
549	 * Note that we don't need to do anything for CS and SS, as
550	 * those are saved and restored as part of pt_regs.
551	 */
552	savesegment(es, prev->es);
553	if (unlikely(next->es | prev->es))
554		loadsegment(es, next->es);
555
556	savesegment(ds, prev->ds);
557	if (unlikely(next->ds | prev->ds))
558		loadsegment(ds, next->ds);
559
560	x86_fsgsbase_load(prev, next);
561
562	/*
563	 * Switch the PDA and FPU contexts.
564	 */
565	this_cpu_write(current_task, next_p);
566	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
567
568	switch_fpu_finish(next_fpu);
569
570	/* Reload sp0. */
571	update_task_stack(next_p);
572
573	switch_to_extra(prev_p, next_p);
574
575#ifdef CONFIG_XEN_PV
576	/*
577	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
578	 * current_pt_regs()->flags may not match the current task's
579	 * intended IOPL.  We need to switch it manually.
580	 */
581	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
582		     prev->iopl != next->iopl))
583		xen_set_iopl_mask(next->iopl);
584#endif
585
586	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
587		/*
588		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
589		 * does not update the cached descriptor.  As a result, if we
590		 * do SYSRET while SS is NULL, we'll end up in user mode with
591		 * SS apparently equal to __USER_DS but actually unusable.
592		 *
593		 * The straightforward workaround would be to fix it up just
594		 * before SYSRET, but that would slow down the system call
595		 * fast paths.  Instead, we ensure that SS is never NULL in
596		 * system call context.  We do this by replacing NULL SS
597		 * selectors at every context switch.  SYSCALL sets up a valid
598		 * SS, so the only way to get NULL is to re-enter the kernel
599		 * from CPL 3 through an interrupt.  Since that can't happen
600		 * in the same task as a running syscall, we are guaranteed to
601		 * context switch between every interrupt vector entry and a
602		 * subsequent SYSRET.
603		 *
604		 * We read SS first because SS reads are much faster than
605		 * writes.  Out of caution, we force SS to __KERNEL_DS even if
606		 * it previously had a different non-NULL value.
607		 */
608		unsigned short ss_sel;
609		savesegment(ss, ss_sel);
610		if (ss_sel != __KERNEL_DS)
611			loadsegment(ss, __KERNEL_DS);
612	}
613
614	/* Load the Intel cache allocation PQR MSR. */
615	resctrl_sched_in();
616
617	return prev_p;
618}
619
620void set_personality_64bit(void)
621{
622	/* inherit personality from parent */
623
624	/* Make sure to be in 64bit mode */
625	clear_thread_flag(TIF_IA32);
626	clear_thread_flag(TIF_ADDR32);
627	clear_thread_flag(TIF_X32);
628	/* Pretend that this comes from a 64bit execve */
629	task_pt_regs(current)->orig_ax = __NR_execve;
630	current_thread_info()->status &= ~TS_COMPAT;
631
632	/* Ensure the corresponding mm is not marked. */
633	if (current->mm)
634		current->mm->context.ia32_compat = 0;
635
636	/* TBD: overwrites user setup. Should have two bits.
637	   But 64bit processes have always behaved this way,
638	   so it's not too bad. The main problem is just that
639	   32bit children are affected again. */
640	current->personality &= ~READ_IMPLIES_EXEC;
641}
642
643static void __set_personality_x32(void)
644{
645#ifdef CONFIG_X86_X32
646	clear_thread_flag(TIF_IA32);
647	set_thread_flag(TIF_X32);
648	if (current->mm)
649		current->mm->context.ia32_compat = TIF_X32;
650	current->personality &= ~READ_IMPLIES_EXEC;
651	/*
652	 * in_32bit_syscall() uses the presence of the x32 syscall bit
653	 * flag to determine compat status.  The x86 mmap() code relies on
654	 * the syscall bitness so set x32 syscall bit right here to make
655	 * in_32bit_syscall() work during exec().
656	 *
657	 * Pretend to come from a x32 execve.
658	 */
659	task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
660	current_thread_info()->status &= ~TS_COMPAT;
661#endif
662}
663
664static void __set_personality_ia32(void)
665{
666#ifdef CONFIG_IA32_EMULATION
667	set_thread_flag(TIF_IA32);
668	clear_thread_flag(TIF_X32);
669	if (current->mm)
670		current->mm->context.ia32_compat = TIF_IA32;
671	current->personality |= force_personality32;
672	/* Prepare the first "return" to user space */
673	task_pt_regs(current)->orig_ax = __NR_ia32_execve;
674	current_thread_info()->status |= TS_COMPAT;
675#endif
676}
677
678void set_personality_ia32(bool x32)
679{
680	/* Make sure to be in 32bit mode */
681	set_thread_flag(TIF_ADDR32);
682
683	if (x32)
684		__set_personality_x32();
685	else
686		__set_personality_ia32();
687}
688EXPORT_SYMBOL_GPL(set_personality_ia32);
689
690#ifdef CONFIG_CHECKPOINT_RESTORE
691static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
692{
693	int ret;
694
695	ret = map_vdso_once(image, addr);
696	if (ret)
697		return ret;
698
699	return (long)image->size;
700}
701#endif
702
703long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
704{
705	int ret = 0;
706
707	switch (option) {
708	case ARCH_SET_GS: {
709		if (unlikely(arg2 >= TASK_SIZE_MAX))
710			return -EPERM;
711
712		preempt_disable();
713		/*
714		 * ARCH_SET_GS has always overwritten the index
715		 * and the base. Zero is the most sensible value
716		 * to put in the index, and is the only value that
717		 * makes any sense if FSGSBASE is unavailable.
718		 */
719		if (task == current) {
720			loadseg(GS, 0);
721			x86_gsbase_write_cpu_inactive(arg2);
722
723			/*
724			 * On non-FSGSBASE systems, save_base_legacy() expects
725			 * that we also fill in thread.gsbase.
726			 */
727			task->thread.gsbase = arg2;
728
729		} else {
730			task->thread.gsindex = 0;
731			x86_gsbase_write_task(task, arg2);
732		}
733		preempt_enable();
734		break;
735	}
736	case ARCH_SET_FS: {
737		/*
738		 * Not strictly needed for %fs, but do it for symmetry
739		 * with %gs
740		 */
741		if (unlikely(arg2 >= TASK_SIZE_MAX))
742			return -EPERM;
743
744		preempt_disable();
745		/*
746		 * Set the selector to 0 for the same reason
747		 * as %gs above.
748		 */
749		if (task == current) {
750			loadseg(FS, 0);
751			x86_fsbase_write_cpu(arg2);
752
753			/*
754			 * On non-FSGSBASE systems, save_base_legacy() expects
755			 * that we also fill in thread.fsbase.
756			 */
757			task->thread.fsbase = arg2;
758		} else {
759			task->thread.fsindex = 0;
760			x86_fsbase_write_task(task, arg2);
761		}
762		preempt_enable();
763		break;
764	}
765	case ARCH_GET_FS: {
766		unsigned long base = x86_fsbase_read_task(task);
767
768		ret = put_user(base, (unsigned long __user *)arg2);
769		break;
770	}
771	case ARCH_GET_GS: {
772		unsigned long base = x86_gsbase_read_task(task);
773
774		ret = put_user(base, (unsigned long __user *)arg2);
775		break;
776	}
777
778#ifdef CONFIG_CHECKPOINT_RESTORE
779# ifdef CONFIG_X86_X32_ABI
780	case ARCH_MAP_VDSO_X32:
781		return prctl_map_vdso(&vdso_image_x32, arg2);
782# endif
783# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
784	case ARCH_MAP_VDSO_32:
785		return prctl_map_vdso(&vdso_image_32, arg2);
786# endif
787	case ARCH_MAP_VDSO_64:
788		return prctl_map_vdso(&vdso_image_64, arg2);
789#endif
790
791	default:
792		ret = -EINVAL;
793		break;
794	}
795
796	return ret;
797}
798
799SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
800{
801	long ret;
802
803	ret = do_arch_prctl_64(current, option, arg2);
804	if (ret == -EINVAL)
805		ret = do_arch_prctl_common(current, option, arg2);
806
807	return ret;
808}
809
810#ifdef CONFIG_IA32_EMULATION
811COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
812{
813	return do_arch_prctl_common(current, option, arg2);
814}
815#endif
816
817unsigned long KSTK_ESP(struct task_struct *task)
818{
819	return task_pt_regs(task)->sp;
820}
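
The ARCH_SET_FS/ARCH_SET_GS/ARCH_GET_FS/ARCH_GET_GS cases of do_arch_prctl_64() above are reached from user space through the SYSCALL_DEFINE2(arch_prctl, ...) entry point in the same file. A minimal user-space sketch, assuming an x86-64 glibc system where <asm/prctl.h> defines the ARCH_* constants and <sys/syscall.h> defines SYS_arch_prctl; it only reads the bases, which is safe to run:

/*
 * User-space sketch (not part of process_64.c): exercise the
 * ARCH_GET_FS/ARCH_GET_GS paths handled by do_arch_prctl_64() above.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>

int main(void)
{
	unsigned long fsbase = 0, gsbase = 0;

	/* The kernel copies the base out with put_user() in both cases. */
	if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase) ||
	    syscall(SYS_arch_prctl, ARCH_GET_GS, &gsbase))
		return 1;

	/* FS normally points at the thread's TLS block; GS is usually 0. */
	printf("FS base: %#lx\nGS base: %#lx\n", fsbase, gsbase);
	return 0;
}
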
v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  Copyright (C) 1995  Linus Torvalds
  4 *
  5 *  Pentium III FXSR, SSE support
  6 *	Gareth Hughes <gareth@valinux.com>, May 2000
  7 *
  8 *  X86-64 port
  9 *	Andi Kleen.
 10 *
 11 *	CPU hotplug support - ashok.raj@intel.com
 12 */
 13
 14/*
  15 * This file handles the architecture-dependent parts of process handling.
 16 */
 17
 18#include <linux/cpu.h>
 19#include <linux/errno.h>
 20#include <linux/sched.h>
 21#include <linux/sched/task.h>
 22#include <linux/sched/task_stack.h>
 23#include <linux/fs.h>
 24#include <linux/kernel.h>
 25#include <linux/mm.h>
 26#include <linux/elfcore.h>
 27#include <linux/smp.h>
 28#include <linux/slab.h>
 29#include <linux/user.h>
 30#include <linux/interrupt.h>
 31#include <linux/delay.h>
 32#include <linux/export.h>
 33#include <linux/ptrace.h>
 34#include <linux/notifier.h>
 35#include <linux/kprobes.h>
 36#include <linux/kdebug.h>
 37#include <linux/prctl.h>
 38#include <linux/uaccess.h>
 39#include <linux/io.h>
 40#include <linux/ftrace.h>
 41#include <linux/syscalls.h>
 42#include <linux/iommu.h>
 43
 44#include <asm/processor.h>
 45#include <asm/pkru.h>
 46#include <asm/fpu/sched.h>
 47#include <asm/mmu_context.h>
 48#include <asm/prctl.h>
 49#include <asm/desc.h>
 50#include <asm/proto.h>
 51#include <asm/ia32.h>
 52#include <asm/debugreg.h>
 53#include <asm/switch_to.h>
 54#include <asm/xen/hypervisor.h>
 55#include <asm/vdso.h>
 56#include <asm/resctrl.h>
 57#include <asm/unistd.h>
 58#include <asm/fsgsbase.h>
 59#ifdef CONFIG_IA32_EMULATION
 60/* Not included via unistd.h */
 61#include <asm/unistd_32_ia32.h>
 62#endif
 63
 64#include "process.h"
 65
 66/* Prints also some state that isn't saved in the pt_regs */
 67void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
 68		 const char *log_lvl)
 69{
 70	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
 71	unsigned long d0, d1, d2, d3, d6, d7;
 72	unsigned int fsindex, gsindex;
 73	unsigned int ds, es;
 74
 75	show_iret_regs(regs, log_lvl);
 76
 77	if (regs->orig_ax != -1)
 78		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
 79	else
 80		pr_cont("\n");
 81
 82	printk("%sRAX: %016lx RBX: %016lx RCX: %016lx\n",
 83	       log_lvl, regs->ax, regs->bx, regs->cx);
 84	printk("%sRDX: %016lx RSI: %016lx RDI: %016lx\n",
 85	       log_lvl, regs->dx, regs->si, regs->di);
 86	printk("%sRBP: %016lx R08: %016lx R09: %016lx\n",
 87	       log_lvl, regs->bp, regs->r8, regs->r9);
 88	printk("%sR10: %016lx R11: %016lx R12: %016lx\n",
 89	       log_lvl, regs->r10, regs->r11, regs->r12);
 90	printk("%sR13: %016lx R14: %016lx R15: %016lx\n",
 91	       log_lvl, regs->r13, regs->r14, regs->r15);
 92
 93	if (mode == SHOW_REGS_SHORT)
 94		return;
 95
 96	if (mode == SHOW_REGS_USER) {
 97		rdmsrl(MSR_FS_BASE, fs);
 98		rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
 99		printk("%sFS:  %016lx GS:  %016lx\n",
100		       log_lvl, fs, shadowgs);
101		return;
102	}
103
104	asm("movl %%ds,%0" : "=r" (ds));
105	asm("movl %%es,%0" : "=r" (es));
106	asm("movl %%fs,%0" : "=r" (fsindex));
107	asm("movl %%gs,%0" : "=r" (gsindex));
108
109	rdmsrl(MSR_FS_BASE, fs);
110	rdmsrl(MSR_GS_BASE, gs);
111	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
112
113	cr0 = read_cr0();
114	cr2 = read_cr2();
115	cr3 = __read_cr3();
116	cr4 = __read_cr4();
117
118	printk("%sFS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
119	       log_lvl, fs, fsindex, gs, gsindex, shadowgs);
120	printk("%sCS:  %04lx DS: %04x ES: %04x CR0: %016lx\n",
121		log_lvl, regs->cs, ds, es, cr0);
122	printk("%sCR2: %016lx CR3: %016lx CR4: %016lx\n",
123		log_lvl, cr2, cr3, cr4);
124
125	get_debugreg(d0, 0);
126	get_debugreg(d1, 1);
127	get_debugreg(d2, 2);
128	get_debugreg(d3, 3);
129	get_debugreg(d6, 6);
130	get_debugreg(d7, 7);
131
132	/* Only print out debug registers if they are in their non-default state. */
133	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
134	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
135		printk("%sDR0: %016lx DR1: %016lx DR2: %016lx\n",
136		       log_lvl, d0, d1, d2);
137		printk("%sDR3: %016lx DR6: %016lx DR7: %016lx\n",
138		       log_lvl, d3, d6, d7);
139	}
140
141	if (cpu_feature_enabled(X86_FEATURE_OSPKE))
142		printk("%sPKRU: %08x\n", log_lvl, read_pkru());
143}
144
145void release_thread(struct task_struct *dead_task)
146{
147	WARN_ON(dead_task->mm);
148}
149
150enum which_selector {
151	FS,
152	GS
153};
154
155/*
156 * Out of line to be protected from kprobes and tracing. If this would be
 157 * traced or probed, then any access to a per CPU variable happens with
158 * the wrong GS.
159 *
160 * It is not used on Xen paravirt. When paravirt support is needed, it
161 * needs to be renamed with native_ prefix.
162 */
163static noinstr unsigned long __rdgsbase_inactive(void)
164{
165	unsigned long gsbase;
166
167	lockdep_assert_irqs_disabled();
168
169	if (!cpu_feature_enabled(X86_FEATURE_XENPV)) {
170		native_swapgs();
171		gsbase = rdgsbase();
172		native_swapgs();
173	} else {
174		instrumentation_begin();
175		rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
176		instrumentation_end();
177	}
178
179	return gsbase;
180}
181
182/*
183 * Out of line to be protected from kprobes and tracing. If this would be
 184 * traced or probed, then any access to a per CPU variable happens with
185 * the wrong GS.
186 *
187 * It is not used on Xen paravirt. When paravirt support is needed, it
188 * needs to be renamed with native_ prefix.
189 */
190static noinstr void __wrgsbase_inactive(unsigned long gsbase)
191{
192	lockdep_assert_irqs_disabled();
193
194	if (!cpu_feature_enabled(X86_FEATURE_XENPV)) {
195		native_swapgs();
196		wrgsbase(gsbase);
197		native_swapgs();
198	} else {
199		instrumentation_begin();
200		wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
201		instrumentation_end();
202	}
203}
204
205/*
206 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
207 * not available.  The goal is to be reasonably fast on non-FSGSBASE systems.
208 * It's forcibly inlined because it'll generate better code and this function
209 * is hot.
210 */
211static __always_inline void save_base_legacy(struct task_struct *prev_p,
212					     unsigned short selector,
213					     enum which_selector which)
214{
215	if (likely(selector == 0)) {
216		/*
217		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
218		 * be the pre-existing saved base or it could be zero.  On AMD
219		 * (with X86_BUG_NULL_SEG), the segment base could be almost
220		 * anything.
221		 *
222		 * This branch is very hot (it's hit twice on almost every
223		 * context switch between 64-bit programs), and avoiding
224		 * the RDMSR helps a lot, so we just assume that whatever
225		 * value is already saved is correct.  This matches historical
226		 * Linux behavior, so it won't break existing applications.
227		 *
228		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
229		 * report that the base is zero, it needs to actually be zero:
230		 * see the corresponding logic in load_seg_legacy.
231		 */
232	} else {
233		/*
234		 * If the selector is 1, 2, or 3, then the base is zero on
235		 * !X86_BUG_NULL_SEG CPUs and could be anything on
236		 * X86_BUG_NULL_SEG CPUs.  In the latter case, Linux
237		 * has never attempted to preserve the base across context
238		 * switches.
239		 *
240		 * If selector > 3, then it refers to a real segment, and
241		 * saving the base isn't necessary.
242		 */
243		if (which == FS)
244			prev_p->thread.fsbase = 0;
245		else
246			prev_p->thread.gsbase = 0;
247	}
248}
249
250static __always_inline void save_fsgs(struct task_struct *task)
251{
252	savesegment(fs, task->thread.fsindex);
253	savesegment(gs, task->thread.gsindex);
254	if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
255		/*
256		 * If FSGSBASE is enabled, we can't make any useful guesses
257		 * about the base, and user code expects us to save the current
258		 * value.  Fortunately, reading the base directly is efficient.
259		 */
260		task->thread.fsbase = rdfsbase();
261		task->thread.gsbase = __rdgsbase_inactive();
262	} else {
263		save_base_legacy(task, task->thread.fsindex, FS);
264		save_base_legacy(task, task->thread.gsindex, GS);
265	}
266}
267
268/*
 269 * While a process is running, current->thread.fsbase and current->thread.gsbase
270 * may not match the corresponding CPU registers (see save_base_legacy()).
271 */
272void current_save_fsgs(void)
273{
274	unsigned long flags;
275
276	/* Interrupts need to be off for FSGSBASE */
277	local_irq_save(flags);
278	save_fsgs(current);
279	local_irq_restore(flags);
280}
281#if IS_ENABLED(CONFIG_KVM)
282EXPORT_SYMBOL_GPL(current_save_fsgs);
283#endif
284
285static __always_inline void loadseg(enum which_selector which,
286				    unsigned short sel)
287{
288	if (which == FS)
289		loadsegment(fs, sel);
290	else
291		load_gs_index(sel);
292}
293
294static __always_inline void load_seg_legacy(unsigned short prev_index,
295					    unsigned long prev_base,
296					    unsigned short next_index,
297					    unsigned long next_base,
298					    enum which_selector which)
299{
300	if (likely(next_index <= 3)) {
301		/*
302		 * The next task is using 64-bit TLS, is not using this
303		 * segment at all, or is having fun with arcane CPU features.
304		 */
305		if (next_base == 0) {
306			/*
307			 * Nasty case: on AMD CPUs, we need to forcibly zero
308			 * the base.
309			 */
310			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
311				loadseg(which, __USER_DS);
312				loadseg(which, next_index);
313			} else {
314				/*
315				 * We could try to exhaustively detect cases
316				 * under which we can skip the segment load,
317				 * but there's really only one case that matters
318				 * for performance: if both the previous and
319				 * next states are fully zeroed, we can skip
320				 * the load.
321				 *
322				 * (This assumes that prev_base == 0 has no
323				 * false positives.  This is the case on
324				 * Intel-style CPUs.)
325				 */
326				if (likely(prev_index | next_index | prev_base))
327					loadseg(which, next_index);
328			}
329		} else {
330			if (prev_index != next_index)
331				loadseg(which, next_index);
332			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
333			       next_base);
334		}
335	} else {
336		/*
337		 * The next task is using a real segment.  Loading the selector
338		 * is sufficient.
339		 */
340		loadseg(which, next_index);
341	}
342}
343
344/*
345 * Store prev's PKRU value and load next's PKRU value if they differ. PKRU
346 * is not XSTATE managed on context switch because that would require a
347 * lookup in the task's FPU xsave buffer and require to keep that updated
348 * in various places.
349 */
350static __always_inline void x86_pkru_load(struct thread_struct *prev,
351					  struct thread_struct *next)
352{
353	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
354		return;
355
356	/* Stash the prev task's value: */
357	prev->pkru = rdpkru();
358
359	/*
360	 * PKRU writes are slightly expensive.  Avoid them when not
361	 * strictly necessary:
362	 */
363	if (prev->pkru != next->pkru)
364		wrpkru(next->pkru);
365}
366
367static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
368					      struct thread_struct *next)
369{
370	if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
371		/* Update the FS and GS selectors if they could have changed. */
372		if (unlikely(prev->fsindex || next->fsindex))
373			loadseg(FS, next->fsindex);
374		if (unlikely(prev->gsindex || next->gsindex))
375			loadseg(GS, next->gsindex);
376
377		/* Update the bases. */
378		wrfsbase(next->fsbase);
379		__wrgsbase_inactive(next->gsbase);
380	} else {
381		load_seg_legacy(prev->fsindex, prev->fsbase,
382				next->fsindex, next->fsbase, FS);
383		load_seg_legacy(prev->gsindex, prev->gsbase,
384				next->gsindex, next->gsbase, GS);
385	}
386}
387
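
When X86_FEATURE_FSGSBASE is available, the v6.8 context switch above moves the bases with WRFSBASE and __wrgsbase_inactive() (SWAPGS + WRGSBASE) instead of MSR writes, because once the kernel sets CR4.FSGSBASE user space can read and write the bases directly. A minimal user-space sketch, assuming GCC or Clang with -mfsgsbase and a kernel that has enabled the feature; where it has not, the instruction raises SIGILL, so a real program would first check for the feature (e.g. "fsgsbase" in /proc/cpuinfo or HWCAP2_FSGSBASE):

#include <stdio.h>
#include <immintrin.h>

int main(void)
{
	/*
	 * Illustrative only (not part of process_64.c): RDFSBASE reads the
	 * same value that save_fsgs() stores into thread.fsbase on the
	 * FSGSBASE path above.  Requires -mfsgsbase and CR4.FSGSBASE.
	 */
	unsigned long long fsbase = _readfsbase_u64();

	printf("FS base via RDFSBASE: %#llx\n", fsbase);
	return 0;
}
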
388unsigned long x86_fsgsbase_read_task(struct task_struct *task,
389				     unsigned short selector)
390{
391	unsigned short idx = selector >> 3;
392	unsigned long base;
393
394	if (likely((selector & SEGMENT_TI_MASK) == 0)) {
395		if (unlikely(idx >= GDT_ENTRIES))
396			return 0;
397
398		/*
399		 * There are no user segments in the GDT with nonzero bases
400		 * other than the TLS segments.
401		 */
402		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
403			return 0;
404
405		idx -= GDT_ENTRY_TLS_MIN;
406		base = get_desc_base(&task->thread.tls_array[idx]);
407	} else {
408#ifdef CONFIG_MODIFY_LDT_SYSCALL
409		struct ldt_struct *ldt;
410
411		/*
412		 * If performance here mattered, we could protect the LDT
413		 * with RCU.  This is a slow path, though, so we can just
414		 * take the mutex.
415		 */
416		mutex_lock(&task->mm->context.lock);
417		ldt = task->mm->context.ldt;
418		if (unlikely(!ldt || idx >= ldt->nr_entries))
419			base = 0;
420		else
421			base = get_desc_base(ldt->entries + idx);
422		mutex_unlock(&task->mm->context.lock);
423#else
424		base = 0;
425#endif
426	}
427
428	return base;
429}
430
431unsigned long x86_gsbase_read_cpu_inactive(void)
432{
433	unsigned long gsbase;
434
435	if (boot_cpu_has(X86_FEATURE_FSGSBASE)) {
436		unsigned long flags;
437
438		local_irq_save(flags);
439		gsbase = __rdgsbase_inactive();
440		local_irq_restore(flags);
441	} else {
442		rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
443	}
444
445	return gsbase;
446}
447
448void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
449{
450	if (boot_cpu_has(X86_FEATURE_FSGSBASE)) {
451		unsigned long flags;
452
453		local_irq_save(flags);
454		__wrgsbase_inactive(gsbase);
455		local_irq_restore(flags);
456	} else {
457		wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
458	}
459}
460
461unsigned long x86_fsbase_read_task(struct task_struct *task)
462{
463	unsigned long fsbase;
464
465	if (task == current)
466		fsbase = x86_fsbase_read_cpu();
467	else if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
468		 (task->thread.fsindex == 0))
469		fsbase = task->thread.fsbase;
470	else
471		fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);
472
473	return fsbase;
474}
475
476unsigned long x86_gsbase_read_task(struct task_struct *task)
477{
478	unsigned long gsbase;
479
480	if (task == current)
481		gsbase = x86_gsbase_read_cpu_inactive();
482	else if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
483		 (task->thread.gsindex == 0))
484		gsbase = task->thread.gsbase;
485	else
486		gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);
487
488	return gsbase;
489}
490
491void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
492{
493	WARN_ON_ONCE(task == current);
494
495	task->thread.fsbase = fsbase;
496}
497
498void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
499{
500	WARN_ON_ONCE(task == current);
501
502	task->thread.gsbase = gsbase;
503}
504
505static void
506start_thread_common(struct pt_regs *regs, unsigned long new_ip,
507		    unsigned long new_sp,
508		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
509{
510	WARN_ON_ONCE(regs != current_pt_regs());
511
512	if (static_cpu_has(X86_BUG_NULL_SEG)) {
513		/* Loading zero below won't clear the base. */
514		loadsegment(fs, __USER_DS);
515		load_gs_index(__USER_DS);
516	}
517
518	reset_thread_features();
519
520	loadsegment(fs, 0);
521	loadsegment(es, _ds);
522	loadsegment(ds, _ds);
523	load_gs_index(0);
524
525	regs->ip		= new_ip;
526	regs->sp		= new_sp;
527	regs->cs		= _cs;
528	regs->ss		= _ss;
529	regs->flags		= X86_EFLAGS_IF;
530}
531
532void
533start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
534{
535	start_thread_common(regs, new_ip, new_sp,
536			    __USER_CS, __USER_DS, 0);
537}
538EXPORT_SYMBOL_GPL(start_thread);
539
540#ifdef CONFIG_COMPAT
541void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp, bool x32)
542{
543	start_thread_common(regs, new_ip, new_sp,
544			    x32 ? __USER_CS : __USER32_CS,
545			    __USER_DS, __USER_DS);
546}
547#endif
548
549/*
550 *	switch_to(x,y) should switch tasks from x to y.
551 *
552 * This could still be optimized:
553 * - fold all the options into a flag word and test it with a single test.
554 * - could test fs/gs bitsliced
555 *
556 * Kprobes not supported here. Set the probe on schedule instead.
557 * Function graph tracer not supported too.
558 */
559__no_kmsan_checks
560__visible __notrace_funcgraph struct task_struct *
561__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
562{
563	struct thread_struct *prev = &prev_p->thread;
564	struct thread_struct *next = &next_p->thread;
565	struct fpu *prev_fpu = &prev->fpu;
566	int cpu = smp_processor_id();
567
568	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
569		     this_cpu_read(pcpu_hot.hardirq_stack_inuse));
570
571	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
572		switch_fpu_prepare(prev_fpu, cpu);
573
574	/* We must save %fs and %gs before load_TLS() because
575	 * %fs and %gs may be cleared by load_TLS().
576	 *
577	 * (e.g. xen_load_tls())
578	 */
579	save_fsgs(prev_p);
580
581	/*
582	 * Load TLS before restoring any segments so that segment loads
583	 * reference the correct GDT entries.
584	 */
585	load_TLS(next, cpu);
586
587	/*
588	 * Leave lazy mode, flushing any hypercalls made here.  This
589	 * must be done after loading TLS entries in the GDT but before
590	 * loading segments that might reference them.
591	 */
592	arch_end_context_switch(next_p);
593
594	/* Switch DS and ES.
595	 *
596	 * Reading them only returns the selectors, but writing them (if
597	 * nonzero) loads the full descriptor from the GDT or LDT.  The
598	 * LDT for next is loaded in switch_mm, and the GDT is loaded
599	 * above.
600	 *
601	 * We therefore need to write new values to the segment
602	 * registers on every context switch unless both the new and old
603	 * values are zero.
604	 *
605	 * Note that we don't need to do anything for CS and SS, as
606	 * those are saved and restored as part of pt_regs.
607	 */
608	savesegment(es, prev->es);
609	if (unlikely(next->es | prev->es))
610		loadsegment(es, next->es);
611
612	savesegment(ds, prev->ds);
613	if (unlikely(next->ds | prev->ds))
614		loadsegment(ds, next->ds);
615
616	x86_fsgsbase_load(prev, next);
617
618	x86_pkru_load(prev, next);
619
620	/*
621	 * Switch the PDA and FPU contexts.
622	 */
623	raw_cpu_write(pcpu_hot.current_task, next_p);
624	raw_cpu_write(pcpu_hot.top_of_stack, task_top_of_stack(next_p));
625
626	switch_fpu_finish();
627
628	/* Reload sp0. */
629	update_task_stack(next_p);
630
631	switch_to_extra(prev_p, next_p);
632
633	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
634		/*
635		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
636		 * does not update the cached descriptor.  As a result, if we
637		 * do SYSRET while SS is NULL, we'll end up in user mode with
638		 * SS apparently equal to __USER_DS but actually unusable.
639		 *
640		 * The straightforward workaround would be to fix it up just
641		 * before SYSRET, but that would slow down the system call
642		 * fast paths.  Instead, we ensure that SS is never NULL in
643		 * system call context.  We do this by replacing NULL SS
644		 * selectors at every context switch.  SYSCALL sets up a valid
645		 * SS, so the only way to get NULL is to re-enter the kernel
646		 * from CPL 3 through an interrupt.  Since that can't happen
647		 * in the same task as a running syscall, we are guaranteed to
648		 * context switch between every interrupt vector entry and a
649		 * subsequent SYSRET.
650		 *
651		 * We read SS first because SS reads are much faster than
652		 * writes.  Out of caution, we force SS to __KERNEL_DS even if
653		 * it previously had a different non-NULL value.
654		 */
655		unsigned short ss_sel;
656		savesegment(ss, ss_sel);
657		if (ss_sel != __KERNEL_DS)
658			loadsegment(ss, __KERNEL_DS);
659	}
660
661	/* Load the Intel cache allocation PQR MSR. */
662	resctrl_sched_in(next_p);
663
664	return prev_p;
665}
666
667void set_personality_64bit(void)
668{
669	/* inherit personality from parent */
670
671	/* Make sure to be in 64bit mode */
672	clear_thread_flag(TIF_ADDR32);
673	/* Pretend that this comes from a 64bit execve */
674	task_pt_regs(current)->orig_ax = __NR_execve;
675	current_thread_info()->status &= ~TS_COMPAT;
676	if (current->mm)
677		__set_bit(MM_CONTEXT_HAS_VSYSCALL, &current->mm->context.flags);
678
679	/* TBD: overwrites user setup. Should have two bits.
680	   But 64bit processes have always behaved this way,
681	   so it's not too bad. The main problem is just that
682	   32bit children are affected again. */
683	current->personality &= ~READ_IMPLIES_EXEC;
684}
685
686static void __set_personality_x32(void)
687{
688#ifdef CONFIG_X86_X32_ABI
689	if (current->mm)
690		current->mm->context.flags = 0;
691
692	current->personality &= ~READ_IMPLIES_EXEC;
693	/*
694	 * in_32bit_syscall() uses the presence of the x32 syscall bit
695	 * flag to determine compat status.  The x86 mmap() code relies on
696	 * the syscall bitness so set x32 syscall bit right here to make
697	 * in_32bit_syscall() work during exec().
698	 *
699	 * Pretend to come from a x32 execve.
700	 */
701	task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
702	current_thread_info()->status &= ~TS_COMPAT;
703#endif
704}
705
706static void __set_personality_ia32(void)
707{
708#ifdef CONFIG_IA32_EMULATION
709	if (current->mm) {
710		/*
711		 * uprobes applied to this MM need to know this and
712		 * cannot use user_64bit_mode() at that time.
713		 */
714		__set_bit(MM_CONTEXT_UPROBE_IA32, &current->mm->context.flags);
715	}
716
717	current->personality |= force_personality32;
718	/* Prepare the first "return" to user space */
719	task_pt_regs(current)->orig_ax = __NR_ia32_execve;
720	current_thread_info()->status |= TS_COMPAT;
721#endif
722}
723
724void set_personality_ia32(bool x32)
725{
726	/* Make sure to be in 32bit mode */
727	set_thread_flag(TIF_ADDR32);
728
729	if (x32)
730		__set_personality_x32();
731	else
732		__set_personality_ia32();
733}
734EXPORT_SYMBOL_GPL(set_personality_ia32);
735
736#ifdef CONFIG_CHECKPOINT_RESTORE
737static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
738{
739	int ret;
740
741	ret = map_vdso_once(image, addr);
742	if (ret)
743		return ret;
744
745	return (long)image->size;
746}
747#endif
748
749#ifdef CONFIG_ADDRESS_MASKING
750
751#define LAM_U57_BITS 6
752
753static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
754{
755	if (!cpu_feature_enabled(X86_FEATURE_LAM))
756		return -ENODEV;
757
758	/* PTRACE_ARCH_PRCTL */
759	if (current->mm != mm)
760		return -EINVAL;
761
762	if (mm_valid_pasid(mm) &&
763	    !test_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &mm->context.flags))
764		return -EINVAL;
765
766	if (mmap_write_lock_killable(mm))
767		return -EINTR;
768
769	if (test_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags)) {
770		mmap_write_unlock(mm);
771		return -EBUSY;
772	}
773
774	if (!nr_bits) {
775		mmap_write_unlock(mm);
776		return -EINVAL;
777	} else if (nr_bits <= LAM_U57_BITS) {
778		mm->context.lam_cr3_mask = X86_CR3_LAM_U57;
779		mm->context.untag_mask =  ~GENMASK(62, 57);
780	} else {
781		mmap_write_unlock(mm);
782		return -EINVAL;
783	}
784
785	write_cr3(__read_cr3() | mm->context.lam_cr3_mask);
786	set_tlbstate_lam_mode(mm);
787	set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags);
788
789	mmap_write_unlock(mm);
790
791	return 0;
792}
793#endif
794
795long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
796{
797	int ret = 0;
798
799	switch (option) {
800	case ARCH_SET_GS: {
801		if (unlikely(arg2 >= TASK_SIZE_MAX))
802			return -EPERM;
803
804		preempt_disable();
805		/*
806		 * ARCH_SET_GS has always overwritten the index
807		 * and the base. Zero is the most sensible value
808		 * to put in the index, and is the only value that
809		 * makes any sense if FSGSBASE is unavailable.
810		 */
811		if (task == current) {
812			loadseg(GS, 0);
813			x86_gsbase_write_cpu_inactive(arg2);
814
815			/*
816			 * On non-FSGSBASE systems, save_base_legacy() expects
817			 * that we also fill in thread.gsbase.
818			 */
819			task->thread.gsbase = arg2;
820
821		} else {
822			task->thread.gsindex = 0;
823			x86_gsbase_write_task(task, arg2);
824		}
825		preempt_enable();
826		break;
827	}
828	case ARCH_SET_FS: {
829		/*
830		 * Not strictly needed for %fs, but do it for symmetry
831		 * with %gs
832		 */
833		if (unlikely(arg2 >= TASK_SIZE_MAX))
834			return -EPERM;
835
836		preempt_disable();
837		/*
838		 * Set the selector to 0 for the same reason
839		 * as %gs above.
840		 */
841		if (task == current) {
842			loadseg(FS, 0);
843			x86_fsbase_write_cpu(arg2);
844
845			/*
846			 * On non-FSGSBASE systems, save_base_legacy() expects
847			 * that we also fill in thread.fsbase.
848			 */
849			task->thread.fsbase = arg2;
850		} else {
851			task->thread.fsindex = 0;
852			x86_fsbase_write_task(task, arg2);
853		}
854		preempt_enable();
855		break;
856	}
857	case ARCH_GET_FS: {
858		unsigned long base = x86_fsbase_read_task(task);
859
860		ret = put_user(base, (unsigned long __user *)arg2);
861		break;
862	}
863	case ARCH_GET_GS: {
864		unsigned long base = x86_gsbase_read_task(task);
865
866		ret = put_user(base, (unsigned long __user *)arg2);
867		break;
868	}
869
870#ifdef CONFIG_CHECKPOINT_RESTORE
871# ifdef CONFIG_X86_X32_ABI
872	case ARCH_MAP_VDSO_X32:
873		return prctl_map_vdso(&vdso_image_x32, arg2);
874# endif
875# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
876	case ARCH_MAP_VDSO_32:
877		return prctl_map_vdso(&vdso_image_32, arg2);
878# endif
879	case ARCH_MAP_VDSO_64:
880		return prctl_map_vdso(&vdso_image_64, arg2);
881#endif
882#ifdef CONFIG_ADDRESS_MASKING
883	case ARCH_GET_UNTAG_MASK:
884		return put_user(task->mm->context.untag_mask,
885				(unsigned long __user *)arg2);
886	case ARCH_ENABLE_TAGGED_ADDR:
887		return prctl_enable_tagged_addr(task->mm, arg2);
888	case ARCH_FORCE_TAGGED_SVA:
889		if (current != task)
890			return -EINVAL;
891		set_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &task->mm->context.flags);
892		return 0;
893	case ARCH_GET_MAX_TAG_BITS:
894		if (!cpu_feature_enabled(X86_FEATURE_LAM))
895			return put_user(0, (unsigned long __user *)arg2);
896		else
897			return put_user(LAM_U57_BITS, (unsigned long __user *)arg2);
898#endif
899	case ARCH_SHSTK_ENABLE:
900	case ARCH_SHSTK_DISABLE:
901	case ARCH_SHSTK_LOCK:
902	case ARCH_SHSTK_UNLOCK:
903	case ARCH_SHSTK_STATUS:
904		return shstk_prctl(task, option, arg2);
905	default:
906		ret = -EINVAL;
907		break;
908	}
909
910	return ret;
911}
912
913SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
914{
915	long ret;
916
917	ret = do_arch_prctl_64(current, option, arg2);
918	if (ret == -EINVAL)
919		ret = do_arch_prctl_common(option, arg2);
920
921	return ret;
922}
923
924#ifdef CONFIG_IA32_EMULATION
925COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
926{
927	return do_arch_prctl_common(option, arg2);
928}
929#endif
930
931unsigned long KSTK_ESP(struct task_struct *task)
932{
933	return task_pt_regs(task)->sp;
934}