/* Paravirtualization interfaces
   Copyright (C) 2006 Rusty Russell IBM Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

   2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
*/

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>

#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/special_insns.h>

/*
 * nop stub, which must not clobber anything *including the stack* to
 * avoid confusing the entry prologues.
 */
extern void _paravirt_nop(void);
asm (".pushsection .entry.text, \"ax\"\n"
     ".global _paravirt_nop\n"
     "_paravirt_nop:\n\t"
     "ret\n\t"
     ".size _paravirt_nop, . - _paravirt_nop\n\t"
     ".type _paravirt_nop, @function\n\t"
     ".popsection");

/* identity function, which can be inlined */
u32 notrace _paravirt_ident_32(u32 x)
{
	return x;
}

u64 notrace _paravirt_ident_64(u64 x)
{
	return x;
}
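
/*
 * Note: when a patch site is long enough, callers of these identity
 * helpers are normally collapsed to a single register move by
 * paravirt_patch_ident_32/64() rather than left as real calls; see
 * paravirt_patch_default() below.
 */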

void __init default_banner(void)
{
	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       pv_info.name);
}

/* Undefined instruction for dealing with missing ops pointers. */
static const unsigned char ud2a[] = { 0x0f, 0x0b };

struct branch {
	unsigned char opcode;
	u32 delta;
} __attribute__((packed));

unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len)
{
	struct branch *b = insnbuf;
	unsigned long delta = (unsigned long)target - (addr+5);

	if (tgt_clobbers & ~site_clobbers)
		return len;	/* target would clobber too much for this site */
	if (len < 5)
		return len;	/* call too long for patch site */

	b->opcode = 0xe8; /* call */
	b->delta = delta;
	BUILD_BUG_ON(sizeof(*b) != 5);

	return 5;
}
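
/*
 * Illustration (hypothetical addresses): for a patch site at addr and a
 * target 0x100 bytes further on, the five bytes written are
 *
 *	e8 fb 00 00 00		call target	; rel32 = target - (addr + 5)
 *
 * Any unused tail of the patch site is expected to be padded with nops
 * by the caller (apply_paravirt()).
 */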

unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len)
{
	struct branch *b = insnbuf;
	unsigned long delta = (unsigned long)target - (addr+5);

	if (len < 5)
		return len;	/* jump too long for patch site */

	b->opcode = 0xe9; /* jmp */
	b->delta = delta;

	return 5;
}

DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);

void __init native_pv_lock_init(void)
{
	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_disable(&virt_spin_lock_key);
}
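
/*
 * virt_spin_lock_key starts out true; on bare metal (no
 * X86_FEATURE_HYPERVISOR) it is disabled here so queued spinlocks run
 * their native slow path.  When it stays enabled, virt_spin_lock() is
 * expected to fall back to a simple test-and-set lock, which degrades
 * more gracefully when the vCPU holding a lock gets preempted.
 */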

/*
 * Neat trick to map patch type back to the call within the
 * corresponding structure.
 */
static void *get_call_destination(u8 type)
{
	struct paravirt_patch_template tmpl = {
		.pv_init_ops = pv_init_ops,
		.pv_time_ops = pv_time_ops,
		.pv_cpu_ops = pv_cpu_ops,
		.pv_irq_ops = pv_irq_ops,
		.pv_mmu_ops = pv_mmu_ops,
#ifdef CONFIG_PARAVIRT_SPINLOCKS
		.pv_lock_ops = pv_lock_ops,
#endif
	};
	return *((void **)&tmpl + type);
}
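
/*
 * The patch type is the word offset of the op inside struct
 * paravirt_patch_template (PARAVIRT_PATCH(x) is roughly
 * offsetof(struct paravirt_patch_template, x) / sizeof(void *)), so
 * indexing this on-stack copy of the template as an array of pointers
 * yields whatever function is currently installed for that patch site.
 */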

unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len)
{
	void *opfunc = get_call_destination(type);
	unsigned ret;

	if (opfunc == NULL)
		/* If there's no function, patch it with a ud2a (BUG) */
		ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
	else if (opfunc == _paravirt_nop)
		ret = 0;

	/* identity functions just return their single argument */
	else if (opfunc == _paravirt_ident_32)
		ret = paravirt_patch_ident_32(insnbuf, len);
	else if (opfunc == _paravirt_ident_64)
		ret = paravirt_patch_ident_64(insnbuf, len);

	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
		 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
		/* If operation requires a jmp, then jmp */
		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
	else
		/* Otherwise call the function; assume target could
		   clobber any caller-save reg */
		ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY,
					  addr, clobbers, len);

	return ret;
}

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end)
{
	unsigned insn_len = end - start;

	if (insn_len > len || start == NULL)
		insn_len = len;
	else
		memcpy(insnbuf, start, insn_len);

	return insn_len;
}

static void native_flush_tlb(void)
{
	__native_flush_tlb();
}

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
static void native_flush_tlb_global(void)
{
	__native_flush_tlb_global();
}

static void native_flush_tlb_one_user(unsigned long addr)
{
	__native_flush_tlb_one_user(addr);
}

struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

static u64 native_steal_clock(int cpu)
{
	return 0;
}

/* These are in entry.S */
extern void native_iret(void);
extern void native_usergs_sysret64(void);

static struct resource reserve_ioports = {
	.start = 0,
	.end = IO_SPACE_LIMIT,
	.name = "paravirt-ioport",
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware. This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 */
int paravirt_disable_iospace(void)
{
	return request_resource(&ioport_resource, &reserve_ioports);
}

static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;

static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);

	this_cpu_write(paravirt_lazy_mode, mode);
}

static void leave_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);

	this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}

void paravirt_enter_lazy_mmu(void)
{
	enter_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_leave_lazy_mmu(void)
{
	leave_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_flush_lazy_mmu(void)
{
	preempt_disable();

	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}

	preempt_enable();
}

void paravirt_start_context_switch(struct task_struct *prev)
{
	BUG_ON(preemptible());

	if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
	}
	enter_lazy(PARAVIRT_LAZY_CPU);
}

void paravirt_end_context_switch(struct task_struct *next)
{
	BUG_ON(preemptible());

	leave_lazy(PARAVIRT_LAZY_CPU);

	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
		arch_enter_lazy_mmu_mode();
}

enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
	if (in_interrupt())
		return PARAVIRT_LAZY_NONE;

	return this_cpu_read(paravirt_lazy_mode);
}
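
/*
 * Lazy MMU/CPU mode lets a hypervisor batch a run of page table or
 * context switch updates into fewer hypercalls; the native lazy_mode
 * hooks below are plain nops.  Interrupt context is never treated as
 * lazy (see the in_interrupt() check above), since updates made from an
 * interrupt have to take effect immediately.
 */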

struct pv_info pv_info = {
	.name = "bare hardware",
	.kernel_rpl = 0,
	.shared_kernel_pmd = 1,	/* Only used when CONFIG_X86_PAE is set */

#ifdef CONFIG_X86_64
	.extra_user_64bit_cs = __USER_CS,
#endif
};

struct pv_init_ops pv_init_ops = {
	.patch = native_patch,
};

struct pv_time_ops pv_time_ops = {
	.sched_clock = native_sched_clock,
	.steal_clock = native_steal_clock,
};

__visible struct pv_irq_ops pv_irq_ops = {
	.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
	.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
	.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
	.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
	.safe_halt = native_safe_halt,
	.halt = native_halt,
};

__visible struct pv_cpu_ops pv_cpu_ops = {
	.cpuid = native_cpuid,
	.get_debugreg = native_get_debugreg,
	.set_debugreg = native_set_debugreg,
	.read_cr0 = native_read_cr0,
	.write_cr0 = native_write_cr0,
	.write_cr4 = native_write_cr4,
#ifdef CONFIG_X86_64
	.read_cr8 = native_read_cr8,
	.write_cr8 = native_write_cr8,
#endif
	.wbinvd = native_wbinvd,
	.read_msr = native_read_msr,
	.write_msr = native_write_msr,
	.read_msr_safe = native_read_msr_safe,
	.write_msr_safe = native_write_msr_safe,
	.read_pmc = native_read_pmc,
	.load_tr_desc = native_load_tr_desc,
	.set_ldt = native_set_ldt,
	.load_gdt = native_load_gdt,
	.load_idt = native_load_idt,
	.store_tr = native_store_tr,
	.load_tls = native_load_tls,
#ifdef CONFIG_X86_64
	.load_gs_index = native_load_gs_index,
#endif
	.write_ldt_entry = native_write_ldt_entry,
	.write_gdt_entry = native_write_gdt_entry,
	.write_idt_entry = native_write_idt_entry,

	.alloc_ldt = paravirt_nop,
	.free_ldt = paravirt_nop,

	.load_sp0 = native_load_sp0,

#ifdef CONFIG_X86_64
	.usergs_sysret64 = native_usergs_sysret64,
#endif
	.iret = native_iret,
	.swapgs = native_swapgs,

	.set_iopl_mask = native_set_iopl_mask,
	.io_delay = native_io_delay,

	.start_context_switch = paravirt_nop,
	.end_context_switch = paravirt_nop,
};

/* At this point, native_get/set_debugreg has real function entries */
NOKPROBE_SYMBOL(native_get_debugreg);
NOKPROBE_SYMBOL(native_set_debugreg);
NOKPROBE_SYMBOL(native_load_idt);

#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
/* 32-bit pagetable entries */
#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_32)
#else
/* 64-bit pagetable entries */
#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_64)
#endif
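
/*
 * On !PAE 32-bit and on 64-bit configurations, pte_t and friends are
 * plain native-width integers, so the pte_val/make_pte style conversions
 * below can simply be the identity helpers defined at the top of this
 * file (and get patched away at boot).
 */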

struct pv_mmu_ops pv_mmu_ops __ro_after_init = {

	.read_cr2 = native_read_cr2,
	.write_cr2 = native_write_cr2,
	.read_cr3 = __native_read_cr3,
	.write_cr3 = native_write_cr3,

	.flush_tlb_user = native_flush_tlb,
	.flush_tlb_kernel = native_flush_tlb_global,
	.flush_tlb_one_user = native_flush_tlb_one_user,
	.flush_tlb_others = native_flush_tlb_others,

	.pgd_alloc = __paravirt_pgd_alloc,
	.pgd_free = paravirt_nop,

	.alloc_pte = paravirt_nop,
	.alloc_pmd = paravirt_nop,
	.alloc_pud = paravirt_nop,
	.alloc_p4d = paravirt_nop,
	.release_pte = paravirt_nop,
	.release_pmd = paravirt_nop,
	.release_pud = paravirt_nop,
	.release_p4d = paravirt_nop,

	.set_pte = native_set_pte,
	.set_pte_at = native_set_pte_at,
	.set_pmd = native_set_pmd,

	.ptep_modify_prot_start = __ptep_modify_prot_start,
	.ptep_modify_prot_commit = __ptep_modify_prot_commit,

#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	.set_pte_atomic = native_set_pte_atomic,
	.pte_clear = native_pte_clear,
	.pmd_clear = native_pmd_clear,
#endif
	.set_pud = native_set_pud,

	.pmd_val = PTE_IDENT,
	.make_pmd = PTE_IDENT,

#if CONFIG_PGTABLE_LEVELS >= 4
	.pud_val = PTE_IDENT,
	.make_pud = PTE_IDENT,

	.set_p4d = native_set_p4d,

#if CONFIG_PGTABLE_LEVELS >= 5
	.p4d_val = PTE_IDENT,
	.make_p4d = PTE_IDENT,

	.set_pgd = native_set_pgd,
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
#endif /* CONFIG_PGTABLE_LEVELS >= 4 */
#endif /* CONFIG_PGTABLE_LEVELS >= 3 */

	.pte_val = PTE_IDENT,
	.pgd_val = PTE_IDENT,

	.make_pte = PTE_IDENT,
	.make_pgd = PTE_IDENT,

	.dup_mmap = paravirt_nop,
	.exit_mmap = paravirt_nop,
	.activate_mm = paravirt_nop,

	.lazy_mode = {
		.enter = paravirt_nop,
		.leave = paravirt_nop,
		.flush = paravirt_nop,
	},

	.set_fixmap = native_set_fixmap,
};
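
/*
 * pv_mmu_ops is __ro_after_init, so alternative implementations (Xen and
 * friends) have to install their hooks before the structure is made
 * read-only at the end of boot.
 */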

EXPORT_SYMBOL_GPL(pv_time_ops);
EXPORT_SYMBOL    (pv_cpu_ops);
EXPORT_SYMBOL    (pv_mmu_ops);
EXPORT_SYMBOL_GPL(pv_info);
EXPORT_SYMBOL    (pv_irq_ops);