/*
 * NOTE(review): this capture is a web scrape of paravirt_patch_32.c and
 * contains TWO concatenated revisions of the file; the per-line numbers
 * fused into the text are page artifacts, not source.
 */
1#include <asm/paravirt.h>
2
3DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
4DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
5DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
6DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
7DEF_NATIVE(pv_cpu_ops, iret, "iret");
8DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
9DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
10DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
11DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
12DEF_NATIVE(pv_cpu_ops, clts, "clts");
13DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
14
/*
 * Identity patch for a 32-bit value.  On 32-bit x86 the argument is
 * already in %eax, which is also the return register, so no replacement
 * instructions are needed: return 0 bytes patched.
 */
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	/* arg in %eax, return in %eax */
	return 0;
}
20
/*
 * Identity patch for a 64-bit value.  The argument arrives in the
 * %edx:%eax pair, which is also where the result is returned, so no
 * instructions are emitted: return 0 bytes patched.
 */
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	/* arg in %edx:%eax, return in %edx:%eax */
	return 0;
}
26
27unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
28 unsigned long addr, unsigned len)
29{
30 const unsigned char *start, *end;
31 unsigned ret;
32
33#define PATCH_SITE(ops, x) \
34 case PARAVIRT_PATCH(ops.x): \
35 start = start_##ops##_##x; \
36 end = end_##ops##_##x; \
37 goto patch_site
38 switch (type) {
39 PATCH_SITE(pv_irq_ops, irq_disable);
40 PATCH_SITE(pv_irq_ops, irq_enable);
41 PATCH_SITE(pv_irq_ops, restore_fl);
42 PATCH_SITE(pv_irq_ops, save_fl);
43 PATCH_SITE(pv_cpu_ops, iret);
44 PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
45 PATCH_SITE(pv_mmu_ops, read_cr2);
46 PATCH_SITE(pv_mmu_ops, read_cr3);
47 PATCH_SITE(pv_mmu_ops, write_cr3);
48 PATCH_SITE(pv_cpu_ops, clts);
49 PATCH_SITE(pv_cpu_ops, read_tsc);
50
51 patch_site:
52 ret = paravirt_patch_insns(ibuf, len, start, end);
53 break;
54
55 default:
56 ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
57 break;
58 }
59#undef PATCH_SITE
60 return ret;
61}
1#include <asm/paravirt.h>
2
3DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
4DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
5DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
6DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
7DEF_NATIVE(pv_cpu_ops, iret, "iret");
8DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
9DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
10DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
11DEF_NATIVE(pv_cpu_ops, clts, "clts");
12
13#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
14DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
15#endif
16
/*
 * Identity patch for a 32-bit value.  On 32-bit x86 the argument is
 * already in %eax, which is also the return register, so no replacement
 * instructions are needed: return 0 bytes patched.
 */
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	/* arg in %eax, return in %eax */
	return 0;
}
22
/*
 * Identity patch for a 64-bit value.  The argument arrives in the
 * %edx:%eax pair, which is also where the result is returned, so no
 * instructions are emitted: return 0 bytes patched.
 */
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	/* arg in %edx:%eax, return in %edx:%eax */
	return 0;
}
28
29extern bool pv_is_native_spin_unlock(void);
30
31unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
32 unsigned long addr, unsigned len)
33{
34 const unsigned char *start, *end;
35 unsigned ret;
36
37#define PATCH_SITE(ops, x) \
38 case PARAVIRT_PATCH(ops.x): \
39 start = start_##ops##_##x; \
40 end = end_##ops##_##x; \
41 goto patch_site
42 switch (type) {
43 PATCH_SITE(pv_irq_ops, irq_disable);
44 PATCH_SITE(pv_irq_ops, irq_enable);
45 PATCH_SITE(pv_irq_ops, restore_fl);
46 PATCH_SITE(pv_irq_ops, save_fl);
47 PATCH_SITE(pv_cpu_ops, iret);
48 PATCH_SITE(pv_mmu_ops, read_cr2);
49 PATCH_SITE(pv_mmu_ops, read_cr3);
50 PATCH_SITE(pv_mmu_ops, write_cr3);
51 PATCH_SITE(pv_cpu_ops, clts);
52#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
53 case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
54 if (pv_is_native_spin_unlock()) {
55 start = start_pv_lock_ops_queued_spin_unlock;
56 end = end_pv_lock_ops_queued_spin_unlock;
57 goto patch_site;
58 }
59#endif
60
61 default:
62 ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
63 break;
64
65patch_site:
66 ret = paravirt_patch_insns(ibuf, len, start, end);
67 break;
68 }
69#undef PATCH_SITE
70 return ret;
71}