v4.17
// SPDX-License-Identifier: GPL-2.0
#include <asm/paravirt.h>

/*
 * Native instruction sequences that can be patched directly over the
 * paravirt call sites at boot, replacing the indirect calls.
 */
DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
DEF_NATIVE(pv_cpu_ops, iret, "iret");
DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
#endif

unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	/* arg in %eax, return in %eax */
	return 0;
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	/* arg in %edx:%eax, return in %edx:%eax */
	return 0;
}

extern bool pv_is_native_spin_unlock(void);
extern bool pv_is_native_vcpu_is_preempted(void);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
	const unsigned char *start, *end;
	unsigned ret;

#define PATCH_SITE(ops, x)					\
		case PARAVIRT_PATCH(ops.x):			\
			start = start_##ops##_##x;		\
			end = end_##ops##_##x;			\
			goto patch_site
	switch (type) {
		PATCH_SITE(pv_irq_ops, irq_disable);
		PATCH_SITE(pv_irq_ops, irq_enable);
		PATCH_SITE(pv_irq_ops, restore_fl);
		PATCH_SITE(pv_irq_ops, save_fl);
		PATCH_SITE(pv_cpu_ops, iret);
		PATCH_SITE(pv_mmu_ops, read_cr2);
		PATCH_SITE(pv_mmu_ops, read_cr3);
		PATCH_SITE(pv_mmu_ops, write_cr3);
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
		/*
		 * The lock ops are only patched to their native form when
		 * no hypervisor has overridden them; otherwise fall back
		 * to the default patcher.
		 */
		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
			if (pv_is_native_spin_unlock()) {
				start = start_pv_lock_ops_queued_spin_unlock;
				end   = end_pv_lock_ops_queued_spin_unlock;
				goto patch_site;
			}
			goto patch_default;

		case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
			if (pv_is_native_vcpu_is_preempted()) {
				start = start_pv_lock_ops_vcpu_is_preempted;
				end   = end_pv_lock_ops_vcpu_is_preempted;
				goto patch_site;
			}
			goto patch_default;
#endif

	default:
patch_default: __maybe_unused
		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
		break;

patch_site:
		ret = paravirt_patch_insns(ibuf, len, start, end);
		break;
	}
#undef PATCH_SITE
	return ret;
}
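
For context, this is the 32-bit paravirt patching table (in kernels of this era, arch/x86/kernel/paravirt_patch_32.c). The DEF_NATIVE and PARAVIRT_PATCH macros it relies on come from arch/x86/include/asm/paravirt_types.h. The following is a simplified sketch of what they expand to in this vintage, reconstructed from memory with details such as the __visible attribute elided; treat it as an approximation rather than the verbatim header:

/* Index of a patchable op: its pointer-sized slot offset within
 * struct paravirt_patch_template. */
#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))

/* Emit a global label before and after the literal native
 * instruction(s), so native_patch() can address the byte range
 * start_*..end_*. */
#define NATIVE_LABEL(a, x, b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"

#define DEF_NATIVE(ops, name, code)					\
	extern const char start_##ops##_##name[], end_##ops##_##name[];\
	asm(NATIVE_LABEL("start_", ops, name) code			\
	    NATIVE_LABEL("end_", ops, name))

With those definitions, each PATCH_SITE(ops, x) case in native_patch() resolves to the slot index of the op and points start/end at the bytes emitted by the matching DEF_NATIVE; the patch_site path then copies those bytes over the call site.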
v4.6
 
#include <asm/paravirt.h>

DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
DEF_NATIVE(pv_cpu_ops, iret, "iret");
DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
DEF_NATIVE(pv_cpu_ops, clts, "clts");

#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
#endif

unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	/* arg in %eax, return in %eax */
	return 0;
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	/* arg in %edx:%eax, return in %edx:%eax */
	return 0;
}

extern bool pv_is_native_spin_unlock(void);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
	const unsigned char *start, *end;
	unsigned ret;

#define PATCH_SITE(ops, x)					\
		case PARAVIRT_PATCH(ops.x):			\
			start = start_##ops##_##x;		\
			end = end_##ops##_##x;			\
			goto patch_site
	switch (type) {
		PATCH_SITE(pv_irq_ops, irq_disable);
		PATCH_SITE(pv_irq_ops, irq_enable);
		PATCH_SITE(pv_irq_ops, restore_fl);
		PATCH_SITE(pv_irq_ops, save_fl);
		PATCH_SITE(pv_cpu_ops, iret);
		PATCH_SITE(pv_mmu_ops, read_cr2);
		PATCH_SITE(pv_mmu_ops, read_cr3);
		PATCH_SITE(pv_mmu_ops, write_cr3);
		PATCH_SITE(pv_cpu_ops, clts);
#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
			if (pv_is_native_spin_unlock()) {
				start = start_pv_lock_ops_queued_spin_unlock;
				end   = end_pv_lock_ops_queued_spin_unlock;
				goto patch_site;
			}
#endif

	default:
		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
		break;

patch_site:
		ret = paravirt_patch_insns(ibuf, len, start, end);
		break;
	}
#undef PATCH_SITE
	return ret;
}
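
Comparing the two versions: v4.6 still patches clts (CR0.TS handling used by lazy FPU switching, which later kernels dropped) and additionally requires CONFIG_QUEUED_SPINLOCKS, while v4.17 adds the vcpu_is_preempted lock op and an explicit patch_default label so the spinlock cases can fall back cleanly to the default patcher.

The helpers that do the actual byte copying, paravirt_patch_insns() and paravirt_patch_default(), live in arch/x86/kernel/paravirt.c. A close paraphrase of the copy helper (from memory, so an approximation) shows the length check that decides whether the native sequence can be inlined:

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end)
{
	unsigned insn_len = end - start;

	/* If there is no native sequence, or it does not fit at the
	 * call site, write nothing (the indirect call remains) and
	 * report the site's full length. */
	if (insn_len > len || start == NULL)
		insn_len = len;
	else
		memcpy(insnbuf, start, insn_len);

	return insn_len;
}

So for each patched op, the worst case is simply keeping the original paravirt indirect call; the best case replaces it with one or two native instructions such as cli or sti.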