1// SPDX-License-Identifier: GPL-2.0
2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3
4#include <linux/errno.h>
5#include <linux/kernel.h>
6#include <linux/mm.h>
7#include <linux/smp.h>
8#include <linux/prctl.h>
9#include <linux/slab.h>
10#include <linux/sched.h>
11#include <linux/sched/idle.h>
12#include <linux/sched/debug.h>
13#include <linux/sched/task.h>
14#include <linux/sched/task_stack.h>
15#include <linux/init.h>
16#include <linux/export.h>
17#include <linux/pm.h>
18#include <linux/tick.h>
19#include <linux/random.h>
20#include <linux/user-return-notifier.h>
21#include <linux/dmi.h>
22#include <linux/utsname.h>
23#include <linux/stackprotector.h>
24#include <linux/cpuidle.h>
25#include <linux/acpi.h>
26#include <linux/elf-randomize.h>
27#include <trace/events/power.h>
28#include <linux/hw_breakpoint.h>
29#include <asm/cpu.h>
30#include <asm/apic.h>
31#include <asm/syscalls.h>
32#include <linux/uaccess.h>
33#include <asm/mwait.h>
34#include <asm/fpu/internal.h>
35#include <asm/debugreg.h>
36#include <asm/nmi.h>
37#include <asm/tlbflush.h>
38#include <asm/mce.h>
39#include <asm/vm86.h>
40#include <asm/switch_to.h>
41#include <asm/desc.h>
42#include <asm/prctl.h>
43#include <asm/spec-ctrl.h>
44#include <asm/proto.h>
45
46#include "process.h"
47
48/*
49 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
50 * no more per-task TSS's. The TSS size is kept cacheline-aligned
51 * so they are allowed to end up in the .data..cacheline_aligned
52 * section. Since TSS's are completely CPU-local, we want them
53 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
54 */
55__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
56 .x86_tss = {
57 /*
58 * .sp0 is only used when entering ring 0 from a lower
59 * privilege level. Since the init task never runs anything
60 * but ring 0 code, there is no need for a valid value here.
61 * Poison it.
62 */
63 .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
64
65 /*
66 * .sp1 is cpu_current_top_of_stack. The init task never
67 * runs user code, but cpu_current_top_of_stack should still
68 * be well defined before the first context switch.
69 */
70 .sp1 = TOP_OF_INIT_STACK,
71
72#ifdef CONFIG_X86_32
73 .ss0 = __KERNEL_DS,
74 .ss1 = __KERNEL_CS,
75 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
76#endif
77 },
78#ifdef CONFIG_X86_32
79 /*
80 * Note that the .io_bitmap member must be extra-big. This is because
81 * the CPU will access an additional byte beyond the end of the IO
82 * permission bitmap. The extra byte must be all 1 bits, and must
83 * be within the limit.
84 */
85 .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
86#endif
87};
88EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
89
90DEFINE_PER_CPU(bool, __tss_limit_invalid);
91EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
92
93/*
94 * this gets called so that we can store lazy state into memory and copy the
95 * current task into the new thread.
96 */
97int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
98{
99 memcpy(dst, src, arch_task_struct_size);
100#ifdef CONFIG_VM86
101 dst->thread.vm86 = NULL;
102#endif
103
104 return fpu__copy(dst, src);
105}
106
107/*
108 * Free the current thread's data structures, etc.
109 */
110void exit_thread(struct task_struct *tsk)
111{
112 struct thread_struct *t = &tsk->thread;
113 unsigned long *bp = t->io_bitmap_ptr;
114 struct fpu *fpu = &t->fpu;
115
116 if (bp) {
117 struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());
118
119 t->io_bitmap_ptr = NULL;
120 clear_thread_flag(TIF_IO_BITMAP);
121 /*
122 * Careful, clear this in the TSS too:
123 */
124 memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
125 t->io_bitmap_max = 0;
126 put_cpu();
127 kfree(bp);
128 }
129
130 free_vm86(t);
131
132 fpu__drop(fpu);
133}
134
135void flush_thread(void)
136{
137 struct task_struct *tsk = current;
138
139 flush_ptrace_hw_breakpoint(tsk);
140 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
141
142 fpu__clear(&tsk->thread.fpu);
143}
144
145void disable_TSC(void)
146{
147 preempt_disable();
148 if (!test_and_set_thread_flag(TIF_NOTSC))
149 /*
150 * Must flip the CPU state synchronously with
151 * TIF_NOTSC in the current running context.
152 */
153 cr4_set_bits(X86_CR4_TSD);
154 preempt_enable();
155}
156
157static void enable_TSC(void)
158{
159 preempt_disable();
160 if (test_and_clear_thread_flag(TIF_NOTSC))
161 /*
162 * Must flip the CPU state synchronously with
163 * TIF_NOTSC in the current running context.
164 */
165 cr4_clear_bits(X86_CR4_TSD);
166 preempt_enable();
167}
168
169int get_tsc_mode(unsigned long adr)
170{
171 unsigned int val;
172
173 if (test_thread_flag(TIF_NOTSC))
174 val = PR_TSC_SIGSEGV;
175 else
176 val = PR_TSC_ENABLE;
177
178 return put_user(val, (unsigned int __user *)adr);
179}
180
181int set_tsc_mode(unsigned int val)
182{
183 if (val == PR_TSC_SIGSEGV)
184 disable_TSC();
185 else if (val == PR_TSC_ENABLE)
186 enable_TSC();
187 else
188 return -EINVAL;
189
190 return 0;
191}
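/*
 * get_tsc_mode() and set_tsc_mode() back the PR_GET_TSC / PR_SET_TSC
 * prctl(2) options. A minimal userspace sketch (illustrative only, error
 * handling omitted):
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	int mode;
 *	prctl(PR_GET_TSC, &mode);		// PR_TSC_ENABLE or PR_TSC_SIGSEGV
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	// RDTSC now raises SIGSEGV
 */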
192
193DEFINE_PER_CPU(u64, msr_misc_features_shadow);
194
195static void set_cpuid_faulting(bool on)
196{
197 u64 msrval;
198
199 msrval = this_cpu_read(msr_misc_features_shadow);
200 msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
201 msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
202 this_cpu_write(msr_misc_features_shadow, msrval);
203 wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
204}
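/*
 * With CPUID faulting enabled in MSR_MISC_FEATURES_ENABLES, a CPUID
 * instruction executed at CPL > 0 raises #GP, which the kernel delivers
 * to the task as SIGSEGV. disable_cpuid()/enable_cpuid() below toggle
 * this per task via TIF_NOCPUID.
 */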
205
206static void disable_cpuid(void)
207{
208 preempt_disable();
209 if (!test_and_set_thread_flag(TIF_NOCPUID)) {
210 /*
211 * Must flip the CPU state synchronously with
212 * TIF_NOCPUID in the current running context.
213 */
214 set_cpuid_faulting(true);
215 }
216 preempt_enable();
217}
218
219static void enable_cpuid(void)
220{
221 preempt_disable();
222 if (test_and_clear_thread_flag(TIF_NOCPUID)) {
223 /*
224 * Must flip the CPU state synchronously with
225 * TIF_NOCPUID in the current running context.
226 */
227 set_cpuid_faulting(false);
228 }
229 preempt_enable();
230}
231
232static int get_cpuid_mode(void)
233{
234 return !test_thread_flag(TIF_NOCPUID);
235}
236
237static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
238{
239 if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
240 return -ENODEV;
241
242 if (cpuid_enabled)
243 enable_cpuid();
244 else
245 disable_cpuid();
246
247 return 0;
248}
249
250/*
251 * Called immediately after a successful exec.
252 */
253void arch_setup_new_exec(void)
254{
255 /* If cpuid was previously disabled for this task, re-enable it. */
256 if (test_thread_flag(TIF_NOCPUID))
257 enable_cpuid();
258
259 /*
260 * Don't inherit TIF_SSBD across exec boundary when
261 * PR_SPEC_DISABLE_NOEXEC is used.
262 */
263 if (test_thread_flag(TIF_SSBD) &&
264 task_spec_ssb_noexec(current)) {
265 clear_thread_flag(TIF_SSBD);
266 task_clear_spec_ssb_disable(current);
267 task_clear_spec_ssb_noexec(current);
268 speculation_ctrl_update(task_thread_info(current)->flags);
269 }
270}
271
272static inline void switch_to_bitmap(struct thread_struct *prev,
273 struct thread_struct *next,
274 unsigned long tifp, unsigned long tifn)
275{
276 struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
277
278 if (tifn & _TIF_IO_BITMAP) {
279 /*
280 * Copy the relevant range of the IO bitmap.
281 * Normally this is 128 bytes or less:
282 */
283 memcpy(tss->io_bitmap, next->io_bitmap_ptr,
284 max(prev->io_bitmap_max, next->io_bitmap_max));
285 /*
286 * Make sure that the TSS limit is correct for the CPU
287 * to notice the IO bitmap.
288 */
289 refresh_tss_limit();
290 } else if (tifp & _TIF_IO_BITMAP) {
291 /*
292 * Clear any possible leftover bits:
293 */
294 memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
295 }
296}
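/*
 * A task acquires an I/O bitmap (and TIF_IO_BITMAP) through the ioperm(2)
 * syscall. Illustrative userspace sketch (needs CAP_SYS_RAWIO, error
 * handling omitted):
 *
 *	#include <sys/io.h>
 *
 *	ioperm(0x378, 3, 1);	// allow ports 0x378-0x37a, sets TIF_IO_BITMAP
 *	outb(0x00, 0x378);	// now permitted without a #GP
 */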
297
298#ifdef CONFIG_SMP
299
300struct ssb_state {
301 struct ssb_state *shared_state;
302 raw_spinlock_t lock;
303 unsigned int disable_state;
304 unsigned long local_state;
305};
306
307#define LSTATE_SSB 0
308
309static DEFINE_PER_CPU(struct ssb_state, ssb_state);
310
311void speculative_store_bypass_ht_init(void)
312{
313 struct ssb_state *st = this_cpu_ptr(&ssb_state);
314 unsigned int this_cpu = smp_processor_id();
315 unsigned int cpu;
316
317 st->local_state = 0;
318
319 /*
320 * Shared state setup happens once on the first bringup
321 * of the CPU. It's not destroyed on CPU hotunplug.
322 */
323 if (st->shared_state)
324 return;
325
326 raw_spin_lock_init(&st->lock);
327
328 /*
329 * Go over HT siblings and check whether one of them has set up the
330 * shared state pointer already.
331 */
332 for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
333 if (cpu == this_cpu)
334 continue;
335
336 if (!per_cpu(ssb_state, cpu).shared_state)
337 continue;
338
339 /* Link it to the state of the sibling: */
340 st->shared_state = per_cpu(ssb_state, cpu).shared_state;
341 return;
342 }
343
344 /*
345 * First HT sibling to come up on the core. Link shared state of
346 * the first HT sibling to itself. The siblings on the same core
347 * which come up later will see the shared state pointer and link
348 * themselves to the state of this CPU.
349 */
350 st->shared_state = st;
351}
352
353/*
354 * Logic is: First HT sibling enables SSBD for both siblings in the core
355 * and the last sibling to disable it disables it for the whole core. This is how
356 * MSR_SPEC_CTRL works in "hardware":
357 *
358 * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
359 */
360static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
361{
362 struct ssb_state *st = this_cpu_ptr(&ssb_state);
363 u64 msr = x86_amd_ls_cfg_base;
364
365 if (!static_cpu_has(X86_FEATURE_ZEN)) {
366 msr |= ssbd_tif_to_amd_ls_cfg(tifn);
367 wrmsrl(MSR_AMD64_LS_CFG, msr);
368 return;
369 }
370
371 if (tifn & _TIF_SSBD) {
372 /*
373 * Since this can race with prctl(), block reentry on the
374 * same CPU.
375 */
376 if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
377 return;
378
379 msr |= x86_amd_ls_cfg_ssbd_mask;
380
381 raw_spin_lock(&st->shared_state->lock);
382 /* First sibling enables SSBD: */
383 if (!st->shared_state->disable_state)
384 wrmsrl(MSR_AMD64_LS_CFG, msr);
385 st->shared_state->disable_state++;
386 raw_spin_unlock(&st->shared_state->lock);
387 } else {
388 if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
389 return;
390
391 raw_spin_lock(&st->shared_state->lock);
392 st->shared_state->disable_state--;
393 if (!st->shared_state->disable_state)
394 wrmsrl(MSR_AMD64_LS_CFG, msr);
395 raw_spin_unlock(&st->shared_state->lock);
396 }
397}
398#else
399static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
400{
401 u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
402
403 wrmsrl(MSR_AMD64_LS_CFG, msr);
404}
405#endif
406
407static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
408{
409 /*
410 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
411 * so ssbd_tif_to_spec_ctrl() just works.
412 */
413 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
414}
415
416/*
417 * Update the MSRs managing speculation control, during context switch.
418 *
419 * tifp: Previous task's thread flags
420 * tifn: Next task's thread flags
421 */
422static __always_inline void __speculation_ctrl_update(unsigned long tifp,
423 unsigned long tifn)
424{
425 unsigned long tif_diff = tifp ^ tifn;
426 u64 msr = x86_spec_ctrl_base;
427 bool updmsr = false;
428
429 lockdep_assert_irqs_disabled();
430
431 /*
432 * If TIF_SSBD is different, select the proper mitigation
433 * method. Note that if SSBD mitigation is disabled or permanently
434 * enabled this branch can't be taken because nothing can set
435 * TIF_SSBD.
436 */
437 if (tif_diff & _TIF_SSBD) {
438 if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
439 amd_set_ssb_virt_state(tifn);
440 } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
441 amd_set_core_ssb_state(tifn);
442 } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
443 static_cpu_has(X86_FEATURE_AMD_SSBD)) {
444 msr |= ssbd_tif_to_spec_ctrl(tifn);
445 updmsr = true;
446 }
447 }
448
449 /*
450 * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
451 * otherwise avoid the MSR write.
452 */
453 if (IS_ENABLED(CONFIG_SMP) &&
454 static_branch_unlikely(&switch_to_cond_stibp)) {
455 updmsr |= !!(tif_diff & _TIF_SPEC_IB);
456 msr |= stibp_tif_to_spec_ctrl(tifn);
457 }
458
459 if (updmsr)
460 wrmsrl(MSR_IA32_SPEC_CTRL, msr);
461}
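/*
 * For reference: in MSR_IA32_SPEC_CTRL bit 0 is IBRS, bit 1 is STIBP and
 * bit 2 is SSBD. ssbd_tif_to_spec_ctrl()/stibp_tif_to_spec_ctrl() (see
 * asm/spec-ctrl.h) shift the corresponding TIF_* bits of the next task
 * into those MSR bit positions, so the single wrmsrl() above covers both
 * mitigations.
 */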
462
463static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
464{
465 if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
466 if (task_spec_ssb_disable(tsk))
467 set_tsk_thread_flag(tsk, TIF_SSBD);
468 else
469 clear_tsk_thread_flag(tsk, TIF_SSBD);
470
471 if (task_spec_ib_disable(tsk))
472 set_tsk_thread_flag(tsk, TIF_SPEC_IB);
473 else
474 clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
475 }
476 /* Return the updated thread_info flags. */
477 return task_thread_info(tsk)->flags;
478}
479
480void speculation_ctrl_update(unsigned long tif)
481{
482 unsigned long flags;
483
484 /* Forced update. Make sure all relevant TIF flags are different */
485 local_irq_save(flags);
486 __speculation_ctrl_update(~tif, tif);
487 local_irq_restore(flags);
488}
489
490/* Called from seccomp/prctl update */
491void speculation_ctrl_update_current(void)
492{
493 preempt_disable();
494 speculation_ctrl_update(speculation_ctrl_update_tif(current));
495 preempt_enable();
496}
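/*
 * The prctl() path mentioned above: a task can opt into the SSB mitigation
 * for itself, which ends up here via the TIF_SSBD update. Illustrative
 * userspace sketch:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 */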
497
498void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
499{
500 struct thread_struct *prev, *next;
501 unsigned long tifp, tifn;
502
503 prev = &prev_p->thread;
504 next = &next_p->thread;
505
506 tifn = READ_ONCE(task_thread_info(next_p)->flags);
507 tifp = READ_ONCE(task_thread_info(prev_p)->flags);
508 switch_to_bitmap(prev, next, tifp, tifn);
509
510 propagate_user_return_notify(prev_p, next_p);
511
512 if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
513 arch_has_block_step()) {
514 unsigned long debugctl, msk;
515
516 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
517 debugctl &= ~DEBUGCTLMSR_BTF;
518 msk = tifn & _TIF_BLOCKSTEP;
519 debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
520 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
521 }
522
523 if ((tifp ^ tifn) & _TIF_NOTSC)
524 cr4_toggle_bits_irqsoff(X86_CR4_TSD);
525
526 if ((tifp ^ tifn) & _TIF_NOCPUID)
527 set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
528
529 if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
530 __speculation_ctrl_update(tifp, tifn);
531 } else {
532 speculation_ctrl_update_tif(prev_p);
533 tifn = speculation_ctrl_update_tif(next_p);
534
535 /* Enforce MSR update to ensure consistent state */
536 __speculation_ctrl_update(~tifn, tifn);
537 }
538}
539
540/*
541 * Idle related variables and functions
542 */
543unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
544EXPORT_SYMBOL(boot_option_idle_override);
545
546static void (*x86_idle)(void);
547
548#ifndef CONFIG_SMP
549static inline void play_dead(void)
550{
551 BUG();
552}
553#endif
554
555void arch_cpu_idle_enter(void)
556{
557 tsc_verify_tsc_adjust(false);
558 local_touch_nmi();
559}
560
561void arch_cpu_idle_dead(void)
562{
563 play_dead();
564}
565
566/*
567 * Called from the generic idle code.
568 */
569void arch_cpu_idle(void)
570{
571 x86_idle();
572}
573
574/*
575 * We use this if we don't have any better idle routine.
576 */
577void __cpuidle default_idle(void)
578{
579 trace_cpu_idle_rcuidle(1, smp_processor_id());
580 safe_halt();
581 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
582}
583#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
584EXPORT_SYMBOL(default_idle);
585#endif
586
587#ifdef CONFIG_XEN
588bool xen_set_default_idle(void)
589{
590 bool ret = !!x86_idle;
591
592 x86_idle = default_idle;
593
594 return ret;
595}
596#endif
597
598void stop_this_cpu(void *dummy)
599{
600 local_irq_disable();
601 /*
602 * Remove this CPU:
603 */
604 set_cpu_online(smp_processor_id(), false);
605 disable_local_APIC();
606 mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
607
608 /*
609 * Use wbinvd on processors that support SME. This provides support
610 * for performing a successful kexec when going from SME inactive
611 * to SME active (or vice-versa). The cache must be cleared so that
612 * if there are entries with the same physical address, both with and
613 * without the encryption bit, they don't race each other when flushed
614 * and potentially end up with the wrong entry being committed to
615 * memory.
616 */
617 if (boot_cpu_has(X86_FEATURE_SME))
618 native_wbinvd();
619 for (;;) {
620 /*
621 * Use native_halt() so that memory contents don't change
622 * (stack usage and variables) after possibly issuing the
623 * native_wbinvd() above.
624 */
625 native_halt();
626 }
627}
628
629/*
630 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
631 * states (local apic timer and TSC stop).
632 */
633static void amd_e400_idle(void)
634{
635 /*
636 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
637 * gets set after static_cpu_has() places have been converted via
638 * alternatives.
639 */
640 if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
641 default_idle();
642 return;
643 }
644
645 tick_broadcast_enter();
646
647 default_idle();
648
649 /*
650 * The switch back from broadcast mode needs to be called with
651 * interrupts disabled.
652 */
653 local_irq_disable();
654 tick_broadcast_exit();
655 local_irq_enable();
656}
657
658/*
659 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
660 * We can't rely on cpuidle installing MWAIT, because it will not load
661 * on systems that support only C1 -- so the boot default must be MWAIT.
662 *
663 * Some AMD machines are the opposite, they depend on using HALT.
664 *
665 * So for default C1, which is used during boot until cpuidle loads,
666 * use MWAIT-C1 on Intel HW that has it, else use HALT.
667 */
668static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
669{
670 if (c->x86_vendor != X86_VENDOR_INTEL)
671 return 0;
672
673 if (!cpu_has(c, X86_FEATURE_MWAIT) || boot_cpu_has_bug(X86_BUG_MONITOR))
674 return 0;
675
676 return 1;
677}
678
679/*
680 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
681 * with interrupts enabled and no flags, which is backwards compatible with the
682 * original MWAIT implementation.
683 */
684static __cpuidle void mwait_idle(void)
685{
686 if (!current_set_polling_and_test()) {
687 trace_cpu_idle_rcuidle(1, smp_processor_id());
688 if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
689 mb(); /* quirk */
690 clflush((void *)&current_thread_info()->flags);
691 mb(); /* quirk */
692 }
693
694 __monitor((void *)&current_thread_info()->flags, 0, 0);
695 if (!need_resched())
696 __sti_mwait(0, 0);
697 else
698 local_irq_enable();
699 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
700 } else {
701 local_irq_enable();
702 }
703 __current_clr_polling();
704}
705
706void select_idle_routine(const struct cpuinfo_x86 *c)
707{
708#ifdef CONFIG_SMP
709 if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
710 pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
711#endif
712 if (x86_idle || boot_option_idle_override == IDLE_POLL)
713 return;
714
715 if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
716 pr_info("using AMD E400 aware idle routine\n");
717 x86_idle = amd_e400_idle;
718 } else if (prefer_mwait_c1_over_halt(c)) {
719 pr_info("using mwait in idle threads\n");
720 x86_idle = mwait_idle;
721 } else
722 x86_idle = default_idle;
723}
724
725void amd_e400_c1e_apic_setup(void)
726{
727 if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
728 pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
729 local_irq_disable();
730 tick_broadcast_force();
731 local_irq_enable();
732 }
733}
734
735void __init arch_post_acpi_subsys_init(void)
736{
737 u32 lo, hi;
738
739 if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
740 return;
741
742 /*
743 * AMD E400 detection needs to happen after ACPI has been enabled. If
744 * the machine is affected, K8_INTP_C1E_ACTIVE_MASK bits are set in
745 * MSR_K8_INT_PENDING_MSG.
746 */
747 rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
748 if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
749 return;
750
751 boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);
752
753 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
754 mark_tsc_unstable("TSC halt in AMD C1E");
755 pr_info("System has AMD C1E enabled\n");
756}
757
758static int __init idle_setup(char *str)
759{
760 if (!str)
761 return -EINVAL;
762
763 if (!strcmp(str, "poll")) {
764 pr_info("using polling idle threads\n");
765 boot_option_idle_override = IDLE_POLL;
766 cpu_idle_poll_ctrl(true);
767 } else if (!strcmp(str, "halt")) {
768 /*
769 * When the boot option of idle=halt is added, halt is
770 * forced to be used for CPU idle. In such case CPU C2/C3
771 * won't be used again.
772 * To continue to load the CPU idle driver, don't touch
773 * the boot_option_idle_override.
774 */
775 x86_idle = default_idle;
776 boot_option_idle_override = IDLE_HALT;
777 } else if (!strcmp(str, "nomwait")) {
778 /*
779 * If the boot option of "idle=nomwait" is added,
780 * it means that mwait will be disabled for CPU C2/C3
781 * states. In such case it won't touch the variable
782 * of boot_option_idle_override.
783 */
784 boot_option_idle_override = IDLE_NOMWAIT;
785 } else
786 return -1;
787
788 return 0;
789}
790early_param("idle", idle_setup);
791
792unsigned long arch_align_stack(unsigned long sp)
793{
794 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
795 sp -= get_random_int() % 8192;
796 return sp & ~0xf;
797}
798
799unsigned long arch_randomize_brk(struct mm_struct *mm)
800{
801 return randomize_page(mm->brk, 0x02000000);
802}
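/*
 * Both helpers above randomize userspace layout at execve() time:
 * arch_align_stack() shifts the initial stack down by up to 8 KiB and
 * keeps it 16-byte aligned, while arch_randomize_brk() places the heap
 * start somewhere in a 0x02000000-byte (32 MiB) window above mm->brk.
 */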
803
804/*
805 * Called from fs/proc with a reference on @p to find the function
806 * which called into schedule(). This needs to be done carefully
807 * because the task might wake up and we might look at a stack
808 * changing under us.
809 */
810unsigned long get_wchan(struct task_struct *p)
811{
812 unsigned long start, bottom, top, sp, fp, ip, ret = 0;
813 int count = 0;
814
815 if (p == current || p->state == TASK_RUNNING)
816 return 0;
817
818 if (!try_get_task_stack(p))
819 return 0;
820
821 start = (unsigned long)task_stack_page(p);
822 if (!start)
823 goto out;
824
825 /*
826 * Layout of the stack page:
827 *
828 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
829 * PADDING
830 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
831 * stack
832 * ----------- bottom = start
833 *
834 * The task's stack pointer points at the location where the
835 * frame pointer is stored. The data on the stack is:
836 * ... IP FP ... IP FP
837 *
838 * We need to read FP and IP, so we need to adjust the upper
839 * bound by another unsigned long.
840 */
841 top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
842 top -= 2 * sizeof(unsigned long);
843 bottom = start;
844
845 sp = READ_ONCE(p->thread.sp);
846 if (sp < bottom || sp > top)
847 goto out;
848
849 fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
850 do {
851 if (fp < bottom || fp > top)
852 goto out;
853 ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
854 if (!in_sched_functions(ip)) {
855 ret = ip;
856 goto out;
857 }
858 fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
859 } while (count++ < 16 && p->state != TASK_RUNNING);
860
861out:
862 put_task_stack(p);
863 return ret;
864}
865
866long do_arch_prctl_common(struct task_struct *task, int option,
867 unsigned long cpuid_enabled)
868{
869 switch (option) {
870 case ARCH_GET_CPUID:
871 return get_cpuid_mode();
872 case ARCH_SET_CPUID:
873 return set_cpuid_mode(task, cpuid_enabled);
874 }
875
876 return -EINVAL;
877}
1// SPDX-License-Identifier: GPL-2.0
2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3
4#include <linux/errno.h>
5#include <linux/kernel.h>
6#include <linux/mm.h>
7#include <linux/smp.h>
8#include <linux/prctl.h>
9#include <linux/slab.h>
10#include <linux/sched.h>
11#include <linux/sched/idle.h>
12#include <linux/sched/debug.h>
13#include <linux/sched/task.h>
14#include <linux/sched/task_stack.h>
15#include <linux/init.h>
16#include <linux/export.h>
17#include <linux/pm.h>
18#include <linux/tick.h>
19#include <linux/random.h>
20#include <linux/user-return-notifier.h>
21#include <linux/dmi.h>
22#include <linux/utsname.h>
23#include <linux/stackprotector.h>
24#include <linux/cpuidle.h>
25#include <linux/acpi.h>
26#include <linux/elf-randomize.h>
27#include <trace/events/power.h>
28#include <linux/hw_breakpoint.h>
29#include <asm/cpu.h>
30#include <asm/apic.h>
31#include <linux/uaccess.h>
32#include <asm/mwait.h>
33#include <asm/fpu/internal.h>
34#include <asm/debugreg.h>
35#include <asm/nmi.h>
36#include <asm/tlbflush.h>
37#include <asm/mce.h>
38#include <asm/vm86.h>
39#include <asm/switch_to.h>
40#include <asm/desc.h>
41#include <asm/prctl.h>
42#include <asm/spec-ctrl.h>
43#include <asm/io_bitmap.h>
44#include <asm/proto.h>
45#include <asm/frame.h>
46
47#include "process.h"
48
49/*
50 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
51 * no more per-task TSS's. The TSS size is kept cacheline-aligned
52 * so they are allowed to end up in the .data..cacheline_aligned
53 * section. Since TSS's are completely CPU-local, we want them
54 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
55 */
56__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
57 .x86_tss = {
58 /*
59 * .sp0 is only used when entering ring 0 from a lower
60 * privilege level. Since the init task never runs anything
61 * but ring 0 code, there is no need for a valid value here.
62 * Poison it.
63 */
64 .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
65
66 /*
67 * .sp1 is cpu_current_top_of_stack. The init task never
68 * runs user code, but cpu_current_top_of_stack should still
69 * be well defined before the first context switch.
70 */
71 .sp1 = TOP_OF_INIT_STACK,
72
73#ifdef CONFIG_X86_32
74 .ss0 = __KERNEL_DS,
75 .ss1 = __KERNEL_CS,
76#endif
77 .io_bitmap_base = IO_BITMAP_OFFSET_INVALID,
78 },
79};
80EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
81
82DEFINE_PER_CPU(bool, __tss_limit_invalid);
83EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
84
85/*
86 * this gets called so that we can store lazy state into memory and copy the
87 * current task into the new thread.
88 */
89int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
90{
91 memcpy(dst, src, arch_task_struct_size);
92#ifdef CONFIG_VM86
93 dst->thread.vm86 = NULL;
94#endif
95
96 return fpu__copy(dst, src);
97}
98
99/*
100 * Free thread data structures, etc.
101 */
102void exit_thread(struct task_struct *tsk)
103{
104 struct thread_struct *t = &tsk->thread;
105 struct fpu *fpu = &t->fpu;
106
107 if (test_thread_flag(TIF_IO_BITMAP))
108 io_bitmap_exit(tsk);
109
110 free_vm86(t);
111
112 fpu__drop(fpu);
113}
114
115static int set_new_tls(struct task_struct *p, unsigned long tls)
116{
117 struct user_desc __user *utls = (struct user_desc __user *)tls;
118
119 if (in_ia32_syscall())
120 return do_set_thread_area(p, -1, utls, 0);
121 else
122 return do_set_thread_area_64(p, ARCH_SET_FS, tls);
123}
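/*
 * set_new_tls() is reached from copy_thread() below when userspace passes
 * CLONE_SETTLS. The meaning of @tls differs by ABI: 32-bit (ia32) callers
 * pass a pointer to a struct user_desc describing a GDT TLS entry, 64-bit
 * callers pass the new FS base. A rough illustration of the 64-bit case,
 * using the glibc clone() wrapper (glibc normally does this for you in
 * pthread_create()):
 *
 *	clone(child_fn, child_stack, CLONE_VM | CLONE_SETTLS | SIGCHLD,
 *	      arg, NULL, tls_block, NULL);	// tls_block becomes the FS base
 */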
124
125int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
126 struct task_struct *p, unsigned long tls)
127{
128 struct inactive_task_frame *frame;
129 struct fork_frame *fork_frame;
130 struct pt_regs *childregs;
131 int ret = 0;
132
133 childregs = task_pt_regs(p);
134 fork_frame = container_of(childregs, struct fork_frame, regs);
135 frame = &fork_frame->frame;
136
137 frame->bp = encode_frame_pointer(childregs);
138 frame->ret_addr = (unsigned long) ret_from_fork;
139 p->thread.sp = (unsigned long) fork_frame;
140 p->thread.io_bitmap = NULL;
141 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
142
143#ifdef CONFIG_X86_64
144 current_save_fsgs();
145 p->thread.fsindex = current->thread.fsindex;
146 p->thread.fsbase = current->thread.fsbase;
147 p->thread.gsindex = current->thread.gsindex;
148 p->thread.gsbase = current->thread.gsbase;
149
150 savesegment(es, p->thread.es);
151 savesegment(ds, p->thread.ds);
152#else
153 p->thread.sp0 = (unsigned long) (childregs + 1);
154 /*
155 * Clear all status flags including IF and set fixed bit. 64bit
156 * does not have this initialization as the frame does not contain
157 * flags. There, the consistency of the flags (especially vs. AC) is
158 * ensured via objtool, which lacks 32bit support.
159 */
160 frame->flags = X86_EFLAGS_FIXED;
161#endif
162
163 /* Kernel thread ? */
164 if (unlikely(p->flags & PF_KTHREAD)) {
165 memset(childregs, 0, sizeof(struct pt_regs));
166 kthread_frame_init(frame, sp, arg);
167 return 0;
168 }
169
170 frame->bx = 0;
171 *childregs = *current_pt_regs();
172 childregs->ax = 0;
173 if (sp)
174 childregs->sp = sp;
175
176#ifdef CONFIG_X86_32
177 task_user_gs(p) = get_user_gs(current_pt_regs());
178#endif
179
180 /* Set a new TLS for the child thread? */
181 if (clone_flags & CLONE_SETTLS)
182 ret = set_new_tls(p, tls);
183
184 if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
185 io_bitmap_share(p);
186
187 return ret;
188}
189
190void flush_thread(void)
191{
192 struct task_struct *tsk = current;
193
194 flush_ptrace_hw_breakpoint(tsk);
195 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
196
197 fpu__clear_all(&tsk->thread.fpu);
198}
199
200void disable_TSC(void)
201{
202 preempt_disable();
203 if (!test_and_set_thread_flag(TIF_NOTSC))
204 /*
205 * Must flip the CPU state synchronously with
206 * TIF_NOTSC in the current running context.
207 */
208 cr4_set_bits(X86_CR4_TSD);
209 preempt_enable();
210}
211
212static void enable_TSC(void)
213{
214 preempt_disable();
215 if (test_and_clear_thread_flag(TIF_NOTSC))
216 /*
217 * Must flip the CPU state synchronously with
218 * TIF_NOTSC in the current running context.
219 */
220 cr4_clear_bits(X86_CR4_TSD);
221 preempt_enable();
222}
223
224int get_tsc_mode(unsigned long adr)
225{
226 unsigned int val;
227
228 if (test_thread_flag(TIF_NOTSC))
229 val = PR_TSC_SIGSEGV;
230 else
231 val = PR_TSC_ENABLE;
232
233 return put_user(val, (unsigned int __user *)adr);
234}
235
236int set_tsc_mode(unsigned int val)
237{
238 if (val == PR_TSC_SIGSEGV)
239 disable_TSC();
240 else if (val == PR_TSC_ENABLE)
241 enable_TSC();
242 else
243 return -EINVAL;
244
245 return 0;
246}
247
248DEFINE_PER_CPU(u64, msr_misc_features_shadow);
249
250static void set_cpuid_faulting(bool on)
251{
252 u64 msrval;
253
254 msrval = this_cpu_read(msr_misc_features_shadow);
255 msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
256 msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
257 this_cpu_write(msr_misc_features_shadow, msrval);
258 wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
259}
260
261static void disable_cpuid(void)
262{
263 preempt_disable();
264 if (!test_and_set_thread_flag(TIF_NOCPUID)) {
265 /*
266 * Must flip the CPU state synchronously with
267 * TIF_NOCPUID in the current running context.
268 */
269 set_cpuid_faulting(true);
270 }
271 preempt_enable();
272}
273
274static void enable_cpuid(void)
275{
276 preempt_disable();
277 if (test_and_clear_thread_flag(TIF_NOCPUID)) {
278 /*
279 * Must flip the CPU state synchronously with
280 * TIF_NOCPUID in the current running context.
281 */
282 set_cpuid_faulting(false);
283 }
284 preempt_enable();
285}
286
287static int get_cpuid_mode(void)
288{
289 return !test_thread_flag(TIF_NOCPUID);
290}
291
292static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
293{
294 if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
295 return -ENODEV;
296
297 if (cpuid_enabled)
298 enable_cpuid();
299 else
300 disable_cpuid();
301
302 return 0;
303}
304
305/*
306 * Called immediately after a successful exec.
307 */
308void arch_setup_new_exec(void)
309{
310 /* If cpuid was previously disabled for this task, re-enable it. */
311 if (test_thread_flag(TIF_NOCPUID))
312 enable_cpuid();
313
314 /*
315 * Don't inherit TIF_SSBD across exec boundary when
316 * PR_SPEC_DISABLE_NOEXEC is used.
317 */
318 if (test_thread_flag(TIF_SSBD) &&
319 task_spec_ssb_noexec(current)) {
320 clear_thread_flag(TIF_SSBD);
321 task_clear_spec_ssb_disable(current);
322 task_clear_spec_ssb_noexec(current);
323 speculation_ctrl_update(task_thread_info(current)->flags);
324 }
325}
326
327#ifdef CONFIG_X86_IOPL_IOPERM
328static inline void switch_to_bitmap(unsigned long tifp)
329{
330 /*
331 * Invalidate I/O bitmap if the previous task used it. This prevents
332 * any possible leakage of an active I/O bitmap.
333 *
334 * If the next task has an I/O bitmap it will handle it on exit to
335 * user mode.
336 */
337 if (tifp & _TIF_IO_BITMAP)
338 tss_invalidate_io_bitmap();
339}
340
341static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
342{
343 /*
344 * Copy at least the byte range of the incoming task's bitmap which
345 * covers the permitted I/O ports.
346 *
347 * If the previous task which used an I/O bitmap had more bits
348 * permitted, then the copy needs to cover those as well so they
349 * get turned off.
350 */
351 memcpy(tss->io_bitmap.bitmap, iobm->bitmap,
352 max(tss->io_bitmap.prev_max, iobm->max));
353
354 /*
355 * Store the new max and the sequence number of this bitmap
356 * and a pointer to the bitmap itself.
357 */
358 tss->io_bitmap.prev_max = iobm->max;
359 tss->io_bitmap.prev_sequence = iobm->sequence;
360}
361
362/**
363 * native_tss_update_io_bitmap - Update I/O bitmap before exiting to usermode
364 */
365void native_tss_update_io_bitmap(void)
366{
367 struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
368 struct thread_struct *t = &current->thread;
369 u16 *base = &tss->x86_tss.io_bitmap_base;
370
371 if (!test_thread_flag(TIF_IO_BITMAP)) {
372 native_tss_invalidate_io_bitmap();
373 return;
374 }
375
376 if (IS_ENABLED(CONFIG_X86_IOPL_IOPERM) && t->iopl_emul == 3) {
377 *base = IO_BITMAP_OFFSET_VALID_ALL;
378 } else {
379 struct io_bitmap *iobm = t->io_bitmap;
380
381 /*
382 * Only copy bitmap data when the sequence number differs. The
383 * update time is accounted to the incoming task.
384 */
385 if (tss->io_bitmap.prev_sequence != iobm->sequence)
386 tss_copy_io_bitmap(tss, iobm);
387
388 /* Enable the bitmap */
389 *base = IO_BITMAP_OFFSET_VALID_MAP;
390 }
391
392 /*
393 * Make sure that the TSS limit covers the I/O bitmap. It might have
394 * been cut down by a VMEXIT to 0x67, which would cause a subsequent I/O
395 * access from user space to trigger a #GP because the bitmap is outside
396 * the TSS limit.
397 */
398 refresh_tss_limit();
399}
400#else /* CONFIG_X86_IOPL_IOPERM */
401static inline void switch_to_bitmap(unsigned long tifp) { }
402#endif
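/*
 * With CONFIG_X86_IOPL_IOPERM the TSS bitmap is no longer copied on every
 * context switch: switch_to_bitmap() merely invalidates it, and
 * native_tss_update_io_bitmap() re-installs it lazily on return to user
 * mode, and only when the bitmap sequence number changed. A per-task
 * bitmap comes from ioperm(2), while emulated iopl(3) selects the
 * all-permitted map (IO_BITMAP_OFFSET_VALID_ALL). Illustrative:
 *
 *	ioperm(0x3f8, 8, 1);	// grant ports 0x3f8-0x3ff to this task
 */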
403
404#ifdef CONFIG_SMP
405
406struct ssb_state {
407 struct ssb_state *shared_state;
408 raw_spinlock_t lock;
409 unsigned int disable_state;
410 unsigned long local_state;
411};
412
413#define LSTATE_SSB 0
414
415static DEFINE_PER_CPU(struct ssb_state, ssb_state);
416
417void speculative_store_bypass_ht_init(void)
418{
419 struct ssb_state *st = this_cpu_ptr(&ssb_state);
420 unsigned int this_cpu = smp_processor_id();
421 unsigned int cpu;
422
423 st->local_state = 0;
424
425 /*
426 * Shared state setup happens once on the first bringup
427 * of the CPU. It's not destroyed on CPU hotunplug.
428 */
429 if (st->shared_state)
430 return;
431
432 raw_spin_lock_init(&st->lock);
433
434 /*
435 * Go over HT siblings and check whether one of them has set up the
436 * shared state pointer already.
437 */
438 for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
439 if (cpu == this_cpu)
440 continue;
441
442 if (!per_cpu(ssb_state, cpu).shared_state)
443 continue;
444
445 /* Link it to the state of the sibling: */
446 st->shared_state = per_cpu(ssb_state, cpu).shared_state;
447 return;
448 }
449
450 /*
451 * First HT sibling to come up on the core. Link shared state of
452 * the first HT sibling to itself. The siblings on the same core
453 * which come up later will see the shared state pointer and link
454 * themselves to the state of this CPU.
455 */
456 st->shared_state = st;
457}
458
459/*
460 * Logic is: First HT sibling enables SSBD for both siblings in the core
461 * and the last sibling to disable it disables it for the whole core. This is how
462 * MSR_SPEC_CTRL works in "hardware":
463 *
464 * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
465 */
466static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
467{
468 struct ssb_state *st = this_cpu_ptr(&ssb_state);
469 u64 msr = x86_amd_ls_cfg_base;
470
471 if (!static_cpu_has(X86_FEATURE_ZEN)) {
472 msr |= ssbd_tif_to_amd_ls_cfg(tifn);
473 wrmsrl(MSR_AMD64_LS_CFG, msr);
474 return;
475 }
476
477 if (tifn & _TIF_SSBD) {
478 /*
479 * Since this can race with prctl(), block reentry on the
480 * same CPU.
481 */
482 if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
483 return;
484
485 msr |= x86_amd_ls_cfg_ssbd_mask;
486
487 raw_spin_lock(&st->shared_state->lock);
488 /* First sibling enables SSBD: */
489 if (!st->shared_state->disable_state)
490 wrmsrl(MSR_AMD64_LS_CFG, msr);
491 st->shared_state->disable_state++;
492 raw_spin_unlock(&st->shared_state->lock);
493 } else {
494 if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
495 return;
496
497 raw_spin_lock(&st->shared_state->lock);
498 st->shared_state->disable_state--;
499 if (!st->shared_state->disable_state)
500 wrmsrl(MSR_AMD64_LS_CFG, msr);
501 raw_spin_unlock(&st->shared_state->lock);
502 }
503}
504#else
505static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
506{
507 u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
508
509 wrmsrl(MSR_AMD64_LS_CFG, msr);
510}
511#endif
512
513static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
514{
515 /*
516 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
517 * so ssbd_tif_to_spec_ctrl() just works.
518 */
519 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
520}
521
522/*
523 * Update the MSRs managing speculation control, during context switch.
524 *
525 * tifp: Previous task's thread flags
526 * tifn: Next task's thread flags
527 */
528static __always_inline void __speculation_ctrl_update(unsigned long tifp,
529 unsigned long tifn)
530{
531 unsigned long tif_diff = tifp ^ tifn;
532 u64 msr = x86_spec_ctrl_base;
533 bool updmsr = false;
534
535 lockdep_assert_irqs_disabled();
536
537 /* Handle change of TIF_SSBD depending on the mitigation method. */
538 if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
539 if (tif_diff & _TIF_SSBD)
540 amd_set_ssb_virt_state(tifn);
541 } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
542 if (tif_diff & _TIF_SSBD)
543 amd_set_core_ssb_state(tifn);
544 } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
545 static_cpu_has(X86_FEATURE_AMD_SSBD)) {
546 updmsr |= !!(tif_diff & _TIF_SSBD);
547 msr |= ssbd_tif_to_spec_ctrl(tifn);
548 }
549
550 /* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
551 if (IS_ENABLED(CONFIG_SMP) &&
552 static_branch_unlikely(&switch_to_cond_stibp)) {
553 updmsr |= !!(tif_diff & _TIF_SPEC_IB);
554 msr |= stibp_tif_to_spec_ctrl(tifn);
555 }
556
557 if (updmsr)
558 wrmsrl(MSR_IA32_SPEC_CTRL, msr);
559}
560
561static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
562{
563 if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
564 if (task_spec_ssb_disable(tsk))
565 set_tsk_thread_flag(tsk, TIF_SSBD);
566 else
567 clear_tsk_thread_flag(tsk, TIF_SSBD);
568
569 if (task_spec_ib_disable(tsk))
570 set_tsk_thread_flag(tsk, TIF_SPEC_IB);
571 else
572 clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
573 }
574 /* Return the updated thread_info flags. */
575 return task_thread_info(tsk)->flags;
576}
577
578void speculation_ctrl_update(unsigned long tif)
579{
580 unsigned long flags;
581
582 /* Forced update. Make sure all relevant TIF flags are different */
583 local_irq_save(flags);
584 __speculation_ctrl_update(~tif, tif);
585 local_irq_restore(flags);
586}
587
588/* Called from seccomp/prctl update */
589void speculation_ctrl_update_current(void)
590{
591 preempt_disable();
592 speculation_ctrl_update(speculation_ctrl_update_tif(current));
593 preempt_enable();
594}
595
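/*
 * Toggle the CR4 bits in @mask. Must be called with interrupts disabled:
 * the shadow copy in cpu_tlbstate.cr4 and the hardware register are
 * updated non-atomically and must not be observed out of sync.
 */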
596static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
597{
598 unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);
599
600 newval = cr4 ^ mask;
601 if (newval != cr4) {
602 this_cpu_write(cpu_tlbstate.cr4, newval);
603 __write_cr4(newval);
604 }
605}
606
607void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
608{
609 unsigned long tifp, tifn;
610
611 tifn = READ_ONCE(task_thread_info(next_p)->flags);
612 tifp = READ_ONCE(task_thread_info(prev_p)->flags);
613
614 switch_to_bitmap(tifp);
615
616 propagate_user_return_notify(prev_p, next_p);
617
618 if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
619 arch_has_block_step()) {
620 unsigned long debugctl, msk;
621
622 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
623 debugctl &= ~DEBUGCTLMSR_BTF;
624 msk = tifn & _TIF_BLOCKSTEP;
625 debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
626 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
627 }
628
629 if ((tifp ^ tifn) & _TIF_NOTSC)
630 cr4_toggle_bits_irqsoff(X86_CR4_TSD);
631
632 if ((tifp ^ tifn) & _TIF_NOCPUID)
633 set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
634
635 if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
636 __speculation_ctrl_update(tifp, tifn);
637 } else {
638 speculation_ctrl_update_tif(prev_p);
639 tifn = speculation_ctrl_update_tif(next_p);
640
641 /* Enforce MSR update to ensure consistent state */
642 __speculation_ctrl_update(~tifn, tifn);
643 }
644
645 if ((tifp ^ tifn) & _TIF_SLD)
646 switch_to_sld(tifn);
647}
648
649/*
650 * Idle related variables and functions
651 */
652unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
653EXPORT_SYMBOL(boot_option_idle_override);
654
655static void (*x86_idle)(void);
656
657#ifndef CONFIG_SMP
658static inline void play_dead(void)
659{
660 BUG();
661}
662#endif
663
664void arch_cpu_idle_enter(void)
665{
666 tsc_verify_tsc_adjust(false);
667 local_touch_nmi();
668}
669
670void arch_cpu_idle_dead(void)
671{
672 play_dead();
673}
674
675/*
676 * Called from the generic idle code.
677 */
678void arch_cpu_idle(void)
679{
680 x86_idle();
681}
682
683/*
684 * We use this if we don't have any better idle routine..
685 */
686void __cpuidle default_idle(void)
687{
688 safe_halt();
689}
690#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
691EXPORT_SYMBOL(default_idle);
692#endif
693
694#ifdef CONFIG_XEN
695bool xen_set_default_idle(void)
696{
697 bool ret = !!x86_idle;
698
699 x86_idle = default_idle;
700
701 return ret;
702}
703#endif
704
705void stop_this_cpu(void *dummy)
706{
707 local_irq_disable();
708 /*
709 * Remove this CPU:
710 */
711 set_cpu_online(smp_processor_id(), false);
712 disable_local_APIC();
713 mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
714
715 /*
716 * Use wbinvd on processors that support SME. This provides support
717 * for performing a successful kexec when going from SME inactive
718 * to SME active (or vice-versa). The cache must be cleared so that
719 * if there are entries with the same physical address, both with and
720 * without the encryption bit, they don't race each other when flushed
721 * and potentially end up with the wrong entry being committed to
722 * memory.
723 */
724 if (boot_cpu_has(X86_FEATURE_SME))
725 native_wbinvd();
726 for (;;) {
727 /*
728 * Use native_halt() so that memory contents don't change
729 * (stack usage and variables) after possibly issuing the
730 * native_wbinvd() above.
731 */
732 native_halt();
733 }
734}
735
736/*
737 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
738 * states (local apic timer and TSC stop).
739 */
740static void amd_e400_idle(void)
741{
742 /*
743 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
744 * gets set after static_cpu_has() places have been converted via
745 * alternatives.
746 */
747 if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
748 default_idle();
749 return;
750 }
751
752 tick_broadcast_enter();
753
754 default_idle();
755
756 /*
757 * The switch back from broadcast mode needs to be called with
758 * interrupts disabled.
759 */
760 local_irq_disable();
761 tick_broadcast_exit();
762 local_irq_enable();
763}
764
765/*
766 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
767 * We can't rely on cpuidle installing MWAIT, because it will not load
768 * on systems that support only C1 -- so the boot default must be MWAIT.
769 *
770 * Some AMD machines are the opposite, they depend on using HALT.
771 *
772 * So for default C1, which is used during boot until cpuidle loads,
773 * use MWAIT-C1 on Intel HW that has it, else use HALT.
774 */
775static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
776{
777 if (c->x86_vendor != X86_VENDOR_INTEL)
778 return 0;
779
780 if (!cpu_has(c, X86_FEATURE_MWAIT) || boot_cpu_has_bug(X86_BUG_MONITOR))
781 return 0;
782
783 return 1;
784}
785
786/*
787 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
788 * with interrupts enabled and no flags, which is backwards compatible with the
789 * original MWAIT implementation.
790 */
791static __cpuidle void mwait_idle(void)
792{
793 if (!current_set_polling_and_test()) {
794 if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
795 mb(); /* quirk */
796 clflush((void *)&current_thread_info()->flags);
797 mb(); /* quirk */
798 }
799
800 __monitor((void *)&current_thread_info()->flags, 0, 0);
801 if (!need_resched())
802 __sti_mwait(0, 0);
803 else
804 local_irq_enable();
805 } else {
806 local_irq_enable();
807 }
808 __current_clr_polling();
809}
810
811void select_idle_routine(const struct cpuinfo_x86 *c)
812{
813#ifdef CONFIG_SMP
814 if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
815 pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
816#endif
817 if (x86_idle || boot_option_idle_override == IDLE_POLL)
818 return;
819
820 if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
821 pr_info("using AMD E400 aware idle routine\n");
822 x86_idle = amd_e400_idle;
823 } else if (prefer_mwait_c1_over_halt(c)) {
824 pr_info("using mwait in idle threads\n");
825 x86_idle = mwait_idle;
826 } else
827 x86_idle = default_idle;
828}
829
830void amd_e400_c1e_apic_setup(void)
831{
832 if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
833 pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
834 local_irq_disable();
835 tick_broadcast_force();
836 local_irq_enable();
837 }
838}
839
840void __init arch_post_acpi_subsys_init(void)
841{
842 u32 lo, hi;
843
844 if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
845 return;
846
847 /*
848 * AMD E400 detection needs to happen after ACPI has been enabled. If
849 * the machine is affected, K8_INTP_C1E_ACTIVE_MASK bits are set in
850 * MSR_K8_INT_PENDING_MSG.
851 */
852 rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
853 if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
854 return;
855
856 boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);
857
858 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
859 mark_tsc_unstable("TSC halt in AMD C1E");
860 pr_info("System has AMD C1E enabled\n");
861}
862
863static int __init idle_setup(char *str)
864{
865 if (!str)
866 return -EINVAL;
867
868 if (!strcmp(str, "poll")) {
869 pr_info("using polling idle threads\n");
870 boot_option_idle_override = IDLE_POLL;
871 cpu_idle_poll_ctrl(true);
872 } else if (!strcmp(str, "halt")) {
873 /*
874 * When the boot option of idle=halt is added, halt is
875 * forced to be used for CPU idle. In such case CPU C2/C3
876 * won't be used again.
877 * To continue to load the CPU idle driver, don't touch
878 * the boot_option_idle_override.
879 */
880 x86_idle = default_idle;
881 boot_option_idle_override = IDLE_HALT;
882 } else if (!strcmp(str, "nomwait")) {
883 /*
884 * If the boot option of "idle=nomwait" is added,
885 * it means that mwait will be disabled for CPU C2/C3
886 * states. In such case it won't touch the variable
887 * of boot_option_idle_override.
888 */
889 boot_option_idle_override = IDLE_NOMWAIT;
890 } else
891 return -1;
892
893 return 0;
894}
895early_param("idle", idle_setup);
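/*
 * "idle=" is a kernel command line parameter: booting with "idle=poll",
 * "idle=halt" or "idle=nomwait" selects the corresponding behaviour
 * parsed by idle_setup() above.
 */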
896
897unsigned long arch_align_stack(unsigned long sp)
898{
899 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
900 sp -= get_random_int() % 8192;
901 return sp & ~0xf;
902}
903
904unsigned long arch_randomize_brk(struct mm_struct *mm)
905{
906 return randomize_page(mm->brk, 0x02000000);
907}
908
909/*
910 * Called from fs/proc with a reference on @p to find the function
911 * which called into schedule(). This needs to be done carefully
912 * because the task might wake up and we might look at a stack
913 * changing under us.
914 */
915unsigned long get_wchan(struct task_struct *p)
916{
917 unsigned long start, bottom, top, sp, fp, ip, ret = 0;
918 int count = 0;
919
920 if (p == current || p->state == TASK_RUNNING)
921 return 0;
922
923 if (!try_get_task_stack(p))
924 return 0;
925
926 start = (unsigned long)task_stack_page(p);
927 if (!start)
928 goto out;
929
930 /*
931 * Layout of the stack page:
932 *
933 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
934 * PADDING
935 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
936 * stack
937 * ----------- bottom = start
938 *
939 * The task's stack pointer points at the location where the
940 * frame pointer is stored. The data on the stack is:
941 * ... IP FP ... IP FP
942 *
943 * We need to read FP and IP, so we need to adjust the upper
944 * bound by another unsigned long.
945 */
946 top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
947 top -= 2 * sizeof(unsigned long);
948 bottom = start;
949
950 sp = READ_ONCE(p->thread.sp);
951 if (sp < bottom || sp > top)
952 goto out;
953
954 fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
955 do {
956 if (fp < bottom || fp > top)
957 goto out;
958 ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
959 if (!in_sched_functions(ip)) {
960 ret = ip;
961 goto out;
962 }
963 fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
964 } while (count++ < 16 && p->state != TASK_RUNNING);
965
966out:
967 put_task_stack(p);
968 return ret;
969}
970
971long do_arch_prctl_common(struct task_struct *task, int option,
972 unsigned long cpuid_enabled)
973{
974 switch (option) {
975 case ARCH_GET_CPUID:
976 return get_cpuid_mode();
977 case ARCH_SET_CPUID:
978 return set_cpuid_mode(task, cpuid_enabled);
979 }
980
981 return -EINVAL;
982}
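/*
 * do_arch_prctl_common() is the backend for the arch_prctl(2) CPUID
 * faulting controls. Illustrative userspace sketch (x86-only, no glibc
 * wrapper for arch_prctl, error handling omitted):
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_CPUID, 0);	// CPUID now faults
 *	int on = syscall(SYS_arch_prctl, ARCH_GET_CPUID, 0);
 */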