/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SYNC_CORE_H
#define _ASM_X86_SYNC_CORE_H

#include <linux/preempt.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

#ifdef CONFIG_X86_32
static inline void iret_to_self(void)
{
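	/*
	 * Build the three-word frame that IRET pops on 32-bit: EFLAGS,
	 * CS and the return EIP (label 1 below).  IRET is a serializing
	 * instruction, so execution resumes at 1: with the instruction
	 * stream synchronized.
	 */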
	asm volatile (
		"pushfl\n\t"
		"pushl %%cs\n\t"
		"pushl $1f\n\t"
		"iret\n\t"
		"1:"
		: ASM_CALL_CONSTRAINT : : "memory");
}
#else
static inline void iret_to_self(void)
{
	unsigned int tmp;

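	/*
	 * Build the five-word frame that IRETQ pops on 64-bit: SS, RSP,
	 * RFLAGS, CS and the return RIP (label 1 below).  The pushed RSP
	 * was sampled after the SS push, so add 8 to the saved copy to
	 * make IRETQ restore the stack pointer to its pre-frame value.
	 */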
	asm volatile (
		"mov %%ss, %0\n\t"
		"pushq %q0\n\t"
		"pushq %%rsp\n\t"
		"addq $8, (%%rsp)\n\t"
		"pushfq\n\t"
		"mov %%cs, %0\n\t"
		"pushq %q0\n\t"
		"pushq $1f\n\t"
		"iretq\n\t"
		"1:"
		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
}
#endif /* CONFIG_X86_32 */

/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 *  a) Text was modified using one virtual address and is about to be executed
 *     from the same physical page at a different virtual address.
 *
 *  b) Text was modified on a different CPU, may subsequently be
 *     executed on this CPU, and you want to make sure the new version
 *     gets executed.  This generally means you're calling this in an IPI.
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 *
 * Like all of Linux's memory ordering operations, this is a
 * compiler barrier as well.
 */
static inline void sync_core(void)
{
	/*
	 * The SERIALIZE instruction is the most straightforward way to
	 * do this, but it is not universally available.
	 */
	if (static_cpu_has(X86_FEATURE_SERIALIZE)) {
		serialize();
		return;
	}

	/*
	 * For all other processors, there are quite a few ways to do this.
	 * IRET-to-self is nice because it works on every CPU, at any CPL
	 * (so it's compatible with paravirtualization), and it never exits
	 * to a hypervisor.  The only downsides are that it's a bit slow
	 * (it seems to be a bit more than 2x slower than the fastest
	 * options) and that it unmasks NMIs.  The "push %cs" is needed,
	 * because in paravirtual environments __KERNEL_CS may not be a
	 * valid CS value when we do IRET directly.
	 *
	 * In case NMI unmasking or performance ever becomes a problem,
	 * the next best option appears to be MOV-to-CR2 and an
	 * unconditional jump.  That sequence also works on all CPUs,
	 * but it will fault at CPL3 (i.e. Xen PV).
	 *
	 * CPUID is the conventional way, but it's nasty: it doesn't
	 * exist on some 486-like CPUs, and it usually exits to a
	 * hypervisor.
	 */
	iret_to_self();
}
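
/*
 * Illustrative sketch (not part of this header): case (b) above is the
 * cross-modifying-code situation, normally handled by sending an IPI to
 * every CPU after the text has been patched, e.g.:
 *
 *	static void do_sync_core(void *info)
 *	{
 *		sync_core();
 *	}
 *
 *	on_each_cpu(do_sync_core, NULL, 1);
 *
 * on_each_cpu() comes from <linux/smp.h>; arch/x86/kernel/alternative.c
 * uses this pattern after patching kernel text.
 */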

/*
 * Ensure that a core serializing instruction is issued before returning
 * to user-mode. x86 implements return to user-space through sysexit,
 * sysretl, and sysretq, which are not core serializing.
 */
static inline void sync_core_before_usermode(void)
{
	/* With PTI, we unconditionally serialize before running user code. */
	if (static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * Even if we're in an interrupt, we might reschedule before returning,
	 * in which case we could switch to a different thread in the same mm
	 * and return using SYSRET or SYSEXIT.  Instead of trying to keep
	 * track of our need to sync the core, just sync right away.
	 */
	sync_core();
}
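
/*
 * Illustrative sketch (not part of this header): the main consumer of
 * sync_core_before_usermode() is the membarrier() system call's
 * MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE command, which must
 * guarantee that cross-modified code is serialized before any thread
 * of the mm executes user instructions.  A hypothetical exit path
 * could look like:
 *
 *	// hypothetical helper and predicate, for illustration only
 *	static void example_exit_to_usermode(void)
 *	{
 *		if (mm_needs_core_sync(current->mm))
 *			sync_core_before_usermode();
 *	}
 */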

#endif /* _ASM_X86_SYNC_CORE_H */