v4.6
 
  1#ifndef _ASM_X86_PARAVIRT_H
  2#define _ASM_X86_PARAVIRT_H
  3/* Various instructions on x86 need to be replaced for
  4 * para-virtualization: those hooks are defined here. */
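/*
 * Illustrative sketch, not part of the original header: each wrapper
 * below expands to an indirect call through a pv_*_ops structure, and
 * the call site is recorded in .parainstructions so it can be patched
 * at boot.  For example, the read_cr2() wrapper defined further down,
 *
 *	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
 *
 * is patched to the native "mov %cr2, %rax" on bare metal, while a
 * Xen PV guest keeps the call into its own read_cr2 implementation.
 */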
  5
  6#ifdef CONFIG_PARAVIRT
  7#include <asm/pgtable_types.h>
  8#include <asm/asm.h>
  9
 10#include <asm/paravirt_types.h>
 11
 12#ifndef __ASSEMBLY__
 13#include <linux/bug.h>
 14#include <linux/types.h>
 15#include <linux/cpumask.h>
 16#include <asm/frame.h>
 17
 18static inline int paravirt_enabled(void)
 19{
 20	return pv_info.paravirt_enabled;
 21}
 22
 23static inline int paravirt_has_feature(unsigned int feature)
 24{
 25	WARN_ON_ONCE(!pv_info.paravirt_enabled);
 26	return (pv_info.features & feature);
 27}
 28
 29static inline void load_sp0(struct tss_struct *tss,
 30			     struct thread_struct *thread)
 31{
 32	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
 33}
 34
 35/* The paravirtualized CPUID instruction. */
 36static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
 37			   unsigned int *ecx, unsigned int *edx)
 38{
 39	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
 40}
 41
 42/*
 43 * These special macros can be used to get or set a debugging register
 44 */
 45static inline unsigned long paravirt_get_debugreg(int reg)
 46{
 47	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
 48}
 49#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
 50static inline void set_debugreg(unsigned long val, int reg)
 51{
 52	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
 53}
 54
 55static inline void clts(void)
 56{
 57	PVOP_VCALL0(pv_cpu_ops.clts);
 58}
 59
 60static inline unsigned long read_cr0(void)
 61{
 62	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
 63}
 64
 65static inline void write_cr0(unsigned long x)
 66{
 67	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
 68}
 69
 70static inline unsigned long read_cr2(void)
 71{
 72	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
 73}
 74
 75static inline void write_cr2(unsigned long x)
 76{
 77	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
 78}
 79
 80static inline unsigned long read_cr3(void)
 81{
 82	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
 83}
 84
 85static inline void write_cr3(unsigned long x)
 86{
 87	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
 88}
 89
 90static inline unsigned long __read_cr4(void)
 91{
 92	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
 93}
 94static inline unsigned long __read_cr4_safe(void)
 95{
 96	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
 97}
 98
 99static inline void __write_cr4(unsigned long x)
100{
101	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
102}
103
104#ifdef CONFIG_X86_64
105static inline unsigned long read_cr8(void)
106{
107	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
108}
109
110static inline void write_cr8(unsigned long x)
111{
112	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
113}
114#endif
115
116static inline void arch_safe_halt(void)
117{
118	PVOP_VCALL0(pv_irq_ops.safe_halt);
119}
120
121static inline void halt(void)
122{
123	PVOP_VCALL0(pv_irq_ops.halt);
124}
125
126static inline void wbinvd(void)
127{
128	PVOP_VCALL0(pv_cpu_ops.wbinvd);
129}
130
131#define get_kernel_rpl()  (pv_info.kernel_rpl)
132
133static inline u64 paravirt_read_msr(unsigned msr, int *err)
134{
135	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
136}
137
138static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
139{
140	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
141}
142
143/* These should all do BUG_ON(_err), but our headers are too tangled. */
144#define rdmsr(msr, val1, val2)			\
145do {						\
146	int _err;				\
147	u64 _l = paravirt_read_msr(msr, &_err);	\
148	val1 = (u32)_l;				\
149	val2 = _l >> 32;			\
150} while (0)
151
152#define wrmsr(msr, val1, val2)			\
153do {						\
154	paravirt_write_msr(msr, val1, val2);	\
155} while (0)
156
157#define rdmsrl(msr, val)			\
158do {						\
159	int _err;				\
160	val = paravirt_read_msr(msr, &_err);	\
161} while (0)
162
163static inline void wrmsrl(unsigned msr, u64 val)
164{
165	wrmsr(msr, (u32)val, (u32)(val>>32));
166}
167
168#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)
169
170/* rdmsr with exception handling */
171#define rdmsr_safe(msr, a, b)			\
172({						\
173	int _err;				\
174	u64 _l = paravirt_read_msr(msr, &_err);	\
175	(*a) = (u32)_l;				\
176	(*b) = _l >> 32;			\
177	_err;					\
178})
179
180static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
181{
182	int err;
183
184	*p = paravirt_read_msr(msr, &err);
185	return err;
186}
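/*
 * Usage sketch (hypothetical caller; MSR_EFER and EFER_NX are real
 * constants from <asm/msr-index.h>):
 *
 *	u64 efer;
 *
 *	rdmsrl(MSR_EFER, efer);			// read fault silently ignored
 *	wrmsrl(MSR_EFER, efer | EFER_NX);
 *
 *	if (rdmsrl_safe(MSR_EFER, &efer))	// non-zero if the MSR faults
 *		pr_warn("EFER not readable\n");
 */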
187
188static inline unsigned long long paravirt_sched_clock(void)
189{
190	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
191}
192
193struct static_key;
194extern struct static_key paravirt_steal_enabled;
195extern struct static_key paravirt_steal_rq_enabled;
196
197static inline u64 paravirt_steal_clock(int cpu)
198{
199	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
200}
201
202static inline unsigned long long paravirt_read_pmc(int counter)
203{
204	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
205}
206
207#define rdpmc(counter, low, high)		\
208do {						\
209	u64 _l = paravirt_read_pmc(counter);	\
210	low = (u32)_l;				\
211	high = _l >> 32;			\
212} while (0)
213
214#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
215
216static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
217{
218	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
219}
220
221static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
222{
223	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
224}
225
226static inline void load_TR_desc(void)
227{
228	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
229}
230static inline void load_gdt(const struct desc_ptr *dtr)
231{
232	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
233}
234static inline void load_idt(const struct desc_ptr *dtr)
235{
236	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
237}
238static inline void set_ldt(const void *addr, unsigned entries)
239{
240	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
241}
242static inline void store_idt(struct desc_ptr *dtr)
243{
244	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
245}
246static inline unsigned long paravirt_store_tr(void)
247{
248	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
249}
250#define store_tr(tr)	((tr) = paravirt_store_tr())
251static inline void load_TLS(struct thread_struct *t, unsigned cpu)
252{
253	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
254}
255
256#ifdef CONFIG_X86_64
257static inline void load_gs_index(unsigned int gs)
258{
259	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
260}
261#endif
262
263static inline void write_ldt_entry(struct desc_struct *dt, int entry,
264				   const void *desc)
265{
266	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
267}
268
269static inline void write_gdt_entry(struct desc_struct *dt, int entry,
270				   void *desc, int type)
271{
272	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
273}
274
275static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
276{
277	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
278}
279static inline void set_iopl_mask(unsigned mask)
280{
281	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
282}
283
284/* The paravirtualized I/O functions */
285static inline void slow_down_io(void)
286{
287	pv_cpu_ops.io_delay();
288#ifdef REALLY_SLOW_IO
289	pv_cpu_ops.io_delay();
290	pv_cpu_ops.io_delay();
291	pv_cpu_ops.io_delay();
292#endif
293}
294
295static inline void paravirt_activate_mm(struct mm_struct *prev,
296					struct mm_struct *next)
297{
298	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
299}
300
301static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
302					  struct mm_struct *mm)
303{
304	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
305}
306
307static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
308{
309	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
310}
311
312static inline void __flush_tlb(void)
313{
314	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
315}
316static inline void __flush_tlb_global(void)
317{
318	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
319}
320static inline void __flush_tlb_single(unsigned long addr)
321{
322	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
323}
324
325static inline void flush_tlb_others(const struct cpumask *cpumask,
326				    struct mm_struct *mm,
327				    unsigned long start,
328				    unsigned long end)
329{
330	PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
331}
332
333static inline int paravirt_pgd_alloc(struct mm_struct *mm)
334{
335	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
336}
337
338static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
339{
340	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
341}
342
343static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
344{
345	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
346}
347static inline void paravirt_release_pte(unsigned long pfn)
348{
349	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
350}
351
352static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
353{
354	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
355}
356
357static inline void paravirt_release_pmd(unsigned long pfn)
358{
359	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
360}
361
362static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
363{
364	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
365}
366static inline void paravirt_release_pud(unsigned long pfn)
367{
368	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
369}
370
371static inline void pte_update(struct mm_struct *mm, unsigned long addr,
372			      pte_t *ptep)
373{
374	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
375}
376
377static inline pte_t __pte(pteval_t val)
378{
379	pteval_t ret;
380
381	if (sizeof(pteval_t) > sizeof(long))
382		ret = PVOP_CALLEE2(pteval_t,
383				   pv_mmu_ops.make_pte,
384				   val, (u64)val >> 32);
385	else
386		ret = PVOP_CALLEE1(pteval_t,
387				   pv_mmu_ops.make_pte,
388				   val);
389
390	return (pte_t) { .pte = ret };
391}
392
393static inline pteval_t pte_val(pte_t pte)
394{
395	pteval_t ret;
396
397	if (sizeof(pteval_t) > sizeof(long))
398		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
399				   pte.pte, (u64)pte.pte >> 32);
400	else
401		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
402				   pte.pte);
403
404	return ret;
405}
406
407static inline pgd_t __pgd(pgdval_t val)
408{
409	pgdval_t ret;
410
411	if (sizeof(pgdval_t) > sizeof(long))
412		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
413				   val, (u64)val >> 32);
414	else
415		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
416				   val);
417
418	return (pgd_t) { ret };
419}
420
421static inline pgdval_t pgd_val(pgd_t pgd)
422{
423	pgdval_t ret;
424
425	if (sizeof(pgdval_t) > sizeof(long))
426		ret =  PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
427				    pgd.pgd, (u64)pgd.pgd >> 32);
428	else
429		ret =  PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
430				    pgd.pgd);
431
432	return ret;
433}
434
435#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
436static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
437					   pte_t *ptep)
438{
439	pteval_t ret;
440
441	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
442			 mm, addr, ptep);
443
444	return (pte_t) { .pte = ret };
445}
446
447static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
448					   pte_t *ptep, pte_t pte)
449{
450	if (sizeof(pteval_t) > sizeof(long))
451		/* 5 arg words */
452		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
453	else
454		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
455			    mm, addr, ptep, pte.pte);
456}
457
458static inline void set_pte(pte_t *ptep, pte_t pte)
459{
460	if (sizeof(pteval_t) > sizeof(long))
461		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
462			    pte.pte, (u64)pte.pte >> 32);
463	else
464		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
465			    pte.pte);
466}
467
468static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
469			      pte_t *ptep, pte_t pte)
470{
471	if (sizeof(pteval_t) > sizeof(long))
472		/* 5 arg words */
473		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
474	else
475		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
476}
477
478static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
479			      pmd_t *pmdp, pmd_t pmd)
480{
481	if (sizeof(pmdval_t) > sizeof(long))
482		/* 5 arg words */
483		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
484	else
485		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
486			    native_pmd_val(pmd));
487}
488
489static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
490{
491	pmdval_t val = native_pmd_val(pmd);
492
493	if (sizeof(pmdval_t) > sizeof(long))
494		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
495	else
496		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
497}
498
499#if CONFIG_PGTABLE_LEVELS >= 3
500static inline pmd_t __pmd(pmdval_t val)
501{
502	pmdval_t ret;
503
504	if (sizeof(pmdval_t) > sizeof(long))
505		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
506				   val, (u64)val >> 32);
507	else
508		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
509				   val);
510
511	return (pmd_t) { ret };
512}
513
514static inline pmdval_t pmd_val(pmd_t pmd)
515{
516	pmdval_t ret;
517
518	if (sizeof(pmdval_t) > sizeof(long))
519		ret =  PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
520				    pmd.pmd, (u64)pmd.pmd >> 32);
521	else
522		ret =  PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
523				    pmd.pmd);
524
525	return ret;
526}
527
528static inline void set_pud(pud_t *pudp, pud_t pud)
529{
530	pudval_t val = native_pud_val(pud);
531
532	if (sizeof(pudval_t) > sizeof(long))
533		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
534			    val, (u64)val >> 32);
535	else
536		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
537			    val);
538}
539#if CONFIG_PGTABLE_LEVELS == 4
540static inline pud_t __pud(pudval_t val)
541{
542	pudval_t ret;
543
544	if (sizeof(pudval_t) > sizeof(long))
545		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
546				   val, (u64)val >> 32);
547	else
548		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
549				   val);
550
551	return (pud_t) { ret };
552}
553
554static inline pudval_t pud_val(pud_t pud)
555{
556	pudval_t ret;
557
558	if (sizeof(pudval_t) > sizeof(long))
559		ret =  PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
560				    pud.pud, (u64)pud.pud >> 32);
561	else
562		ret =  PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
563				    pud.pud);
564
565	return ret;
566}
567
568static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
569{
570	pgdval_t val = native_pgd_val(pgd);
571
572	if (sizeof(pgdval_t) > sizeof(long))
573		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
574			    val, (u64)val >> 32);
575	else
576		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
577			    val);
578}
579
580static inline void pgd_clear(pgd_t *pgdp)
581{
582	set_pgd(pgdp, __pgd(0));
583}
584
585static inline void pud_clear(pud_t *pudp)
586{
587	set_pud(pudp, __pud(0));
588}
589
590#endif	/* CONFIG_PGTABLE_LEVELS == 4 */
591
592#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */
593
594#ifdef CONFIG_X86_PAE
595/* Special-case pte-setting operations for PAE, which can't update a
596   64-bit pte atomically */
597static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
598{
599	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
600		    pte.pte, pte.pte >> 32);
601}
602
603static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
604			     pte_t *ptep)
605{
606	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
607}
608
609static inline void pmd_clear(pmd_t *pmdp)
610{
611	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
612}
613#else  /* !CONFIG_X86_PAE */
614static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
615{
616	set_pte(ptep, pte);
617}
618
619static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
620			     pte_t *ptep)
621{
622	set_pte_at(mm, addr, ptep, __pte(0));
623}
624
625static inline void pmd_clear(pmd_t *pmdp)
626{
627	set_pmd(pmdp, __pmd(0));
628}
629#endif	/* CONFIG_X86_PAE */
630
631#define  __HAVE_ARCH_START_CONTEXT_SWITCH
632static inline void arch_start_context_switch(struct task_struct *prev)
633{
634	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
635}
636
637static inline void arch_end_context_switch(struct task_struct *next)
638{
639	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
640}
641
642#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
643static inline void arch_enter_lazy_mmu_mode(void)
644{
645	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
646}
647
648static inline void arch_leave_lazy_mmu_mode(void)
649{
650	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
651}
652
653static inline void arch_flush_lazy_mmu_mode(void)
654{
655	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
656}
657
658static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
659				phys_addr_t phys, pgprot_t flags)
660{
661	pv_mmu_ops.set_fixmap(idx, phys, flags);
662}
663
664#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
665
666#ifdef CONFIG_QUEUED_SPINLOCKS
667
668static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
669							u32 val)
670{
671	PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
672}
673
674static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
675{
676	PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
677}
678
679static __always_inline void pv_wait(u8 *ptr, u8 val)
680{
681	PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
682}
683
684static __always_inline void pv_kick(int cpu)
685{
686	PVOP_VCALL1(pv_lock_ops.kick, cpu);
687}
688
689#else /* !CONFIG_QUEUED_SPINLOCKS */
690
691static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
692							__ticket_t ticket)
693{
694	PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
695}
696
697static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
698							__ticket_t ticket)
699{
700	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
701}
702
703#endif /* CONFIG_QUEUED_SPINLOCKS */
704
705#endif /* SMP && PARAVIRT_SPINLOCKS */
706
707#ifdef CONFIG_X86_32
708#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
709#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
710
711/* save and restore all caller-save registers, except return value */
712#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
713#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"
714
715#define PV_FLAGS_ARG "0"
716#define PV_EXTRA_CLOBBERS
717#define PV_VEXTRA_CLOBBERS
718#else
719/* save and restore all caller-save registers, except return value */
720#define PV_SAVE_ALL_CALLER_REGS						\
721	"push %rcx;"							\
722	"push %rdx;"							\
723	"push %rsi;"							\
724	"push %rdi;"							\
725	"push %r8;"							\
726	"push %r9;"							\
727	"push %r10;"							\
728	"push %r11;"
729#define PV_RESTORE_ALL_CALLER_REGS					\
730	"pop %r11;"							\
731	"pop %r10;"							\
732	"pop %r9;"							\
733	"pop %r8;"							\
734	"pop %rdi;"							\
735	"pop %rsi;"							\
736	"pop %rdx;"							\
737	"pop %rcx;"
738
739/* We save some registers, but all of them, that's too much. We clobber all
740 * caller saved registers but the argument parameter */
741#define PV_SAVE_REGS "pushq %%rdi;"
742#define PV_RESTORE_REGS "popq %%rdi;"
743#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
744#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
745#define PV_FLAGS_ARG "D"
746#endif
747
748/*
749 * Generate a thunk around a function which saves all caller-save
750 * registers except for the return value.  This allows C functions to
751 * be called from assembler code where fewer than normal registers are
752 * available.  It may also help code generation around calls from C
753 * code if the common case doesn't use many registers.
754 *
755 * When a callee is wrapped in a thunk, the caller can assume that all
756 * arg regs and all scratch registers are preserved across the
757 * call. The return value in rax/eax will not be saved, even for void
758 * functions.
759 */
760#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
761#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
762	extern typeof(func) __raw_callee_save_##func;			\
763									\
764	asm(".pushsection .text;"					\
765	    ".globl " PV_THUNK_NAME(func) ";"				\
766	    ".type " PV_THUNK_NAME(func) ", @function;"			\
767	    PV_THUNK_NAME(func) ":"					\
768	    FRAME_BEGIN							\
769	    PV_SAVE_ALL_CALLER_REGS					\
770	    "call " #func ";"						\
771	    PV_RESTORE_ALL_CALLER_REGS					\
772	    FRAME_END							\
773	    "ret;"							\
774	    ".popsection")
775
776/* Get a reference to a callee-save function */
777#define PV_CALLEE_SAVE(func)						\
778	((struct paravirt_callee_save) { __raw_callee_save_##func })
779
780/* Promise that "func" already uses the right calling convention */
781#define __PV_IS_CALLEE_SAVE(func)			\
782	((struct paravirt_callee_save) { func })
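/*
 * Wiring sketch (my_save_fl is a hypothetical backend function; real
 * backends such as the Xen code register their callbacks this way):
 *
 *	static unsigned long my_save_fl(void)
 *	{
 *		// return the guest's virtual IF state
 *	}
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 */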
783
784static inline notrace unsigned long arch_local_save_flags(void)
785{
786	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
787}
788
789static inline notrace void arch_local_irq_restore(unsigned long f)
790{
791	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
792}
793
794static inline notrace void arch_local_irq_disable(void)
795{
796	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
797}
798
799static inline notrace void arch_local_irq_enable(void)
800{
801	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
802}
803
804static inline notrace unsigned long arch_local_irq_save(void)
805{
806	unsigned long f;
807
808	f = arch_local_save_flags();
809	arch_local_irq_disable();
810	return f;
811}
812
813
814/* Make sure as little as possible of this mess escapes. */
815#undef PARAVIRT_CALL
816#undef __PVOP_CALL
817#undef __PVOP_VCALL
818#undef PVOP_VCALL0
819#undef PVOP_CALL0
820#undef PVOP_VCALL1
821#undef PVOP_CALL1
822#undef PVOP_VCALL2
823#undef PVOP_CALL2
824#undef PVOP_VCALL3
825#undef PVOP_CALL3
826#undef PVOP_VCALL4
827#undef PVOP_CALL4
828
829extern void default_banner(void);
830
831#else  /* __ASSEMBLY__ */
832
833#define _PVSITE(ptype, clobbers, ops, word, algn)	\
834771:;						\
835	ops;					\
836772:;						\
837	.pushsection .parainstructions,"a";	\
838	 .align	algn;				\
839	 word 771b;				\
840	 .byte ptype;				\
841	 .byte 772b-771b;			\
842	 .short clobbers;			\
843	.popsection
844
845
846#define COND_PUSH(set, mask, reg)			\
847	.if ((~(set)) & mask); push %reg; .endif
848#define COND_POP(set, mask, reg)			\
849	.if ((~(set)) & mask); pop %reg; .endif
850
851#ifdef CONFIG_X86_64
852
853#define PV_SAVE_REGS(set)			\
854	COND_PUSH(set, CLBR_RAX, rax);		\
855	COND_PUSH(set, CLBR_RCX, rcx);		\
856	COND_PUSH(set, CLBR_RDX, rdx);		\
857	COND_PUSH(set, CLBR_RSI, rsi);		\
858	COND_PUSH(set, CLBR_RDI, rdi);		\
859	COND_PUSH(set, CLBR_R8, r8);		\
860	COND_PUSH(set, CLBR_R9, r9);		\
861	COND_PUSH(set, CLBR_R10, r10);		\
862	COND_PUSH(set, CLBR_R11, r11)
863#define PV_RESTORE_REGS(set)			\
864	COND_POP(set, CLBR_R11, r11);		\
865	COND_POP(set, CLBR_R10, r10);		\
866	COND_POP(set, CLBR_R9, r9);		\
867	COND_POP(set, CLBR_R8, r8);		\
868	COND_POP(set, CLBR_RDI, rdi);		\
869	COND_POP(set, CLBR_RSI, rsi);		\
870	COND_POP(set, CLBR_RDX, rdx);		\
871	COND_POP(set, CLBR_RCX, rcx);		\
872	COND_POP(set, CLBR_RAX, rax)
873
874#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
875#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
876#define PARA_INDIRECT(addr)	*addr(%rip)
877#else
878#define PV_SAVE_REGS(set)			\
879	COND_PUSH(set, CLBR_EAX, eax);		\
880	COND_PUSH(set, CLBR_EDI, edi);		\
881	COND_PUSH(set, CLBR_ECX, ecx);		\
882	COND_PUSH(set, CLBR_EDX, edx)
883#define PV_RESTORE_REGS(set)			\
884	COND_POP(set, CLBR_EDX, edx);		\
885	COND_POP(set, CLBR_ECX, ecx);		\
886	COND_POP(set, CLBR_EDI, edi);		\
887	COND_POP(set, CLBR_EAX, eax)
888
889#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
890#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
891#define PARA_INDIRECT(addr)	*%cs:addr
892#endif
893
894#define INTERRUPT_RETURN						\
895	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
896		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
897
898#define DISABLE_INTERRUPTS(clobbers)					\
899	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
900		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
901		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
902		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
903
904#define ENABLE_INTERRUPTS(clobbers)					\
905	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
906		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
907		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
908		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
909
910#ifdef CONFIG_X86_32
911#define GET_CR0_INTO_EAX				\
912	push %ecx; push %edx;				\
913	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
914	pop %edx; pop %ecx
915#else	/* !CONFIG_X86_32 */
916
917/*
918 * If swapgs is used while the userspace stack is still current,
919 * there's no way to call a pvop.  The PV replacement *must* be
920 * inlined, or the swapgs instruction must be trapped and emulated.
921 */
922#define SWAPGS_UNSAFE_STACK						\
923	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
924		  swapgs)
925
926/*
927 * Note: swapgs is very special, and in practise is either going to be
928 * implemented with a single "swapgs" instruction or something very
929 * special.  Either way, we don't need to save any registers for
930 * it.
931 */
932#define SWAPGS								\
933	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
934		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
935		 )
936
937#define GET_CR2_INTO_RAX				\
938	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
939
940#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
941	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
942		  CLBR_NONE,						\
943		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
944
945#define USERGS_SYSRET64							\
946	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
947		  CLBR_NONE,						\
948		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
949#endif	/* CONFIG_X86_32 */
950
951#endif /* __ASSEMBLY__ */
952#else  /* CONFIG_PARAVIRT */
953# define default_banner x86_init_noop
954#ifndef __ASSEMBLY__
955static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
956					  struct mm_struct *mm)
957{
958}
959
960static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
961{
962}
963#endif /* __ASSEMBLY__ */
964#endif /* !CONFIG_PARAVIRT */
965#endif /* _ASM_X86_PARAVIRT_H */
v6.8
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _ASM_X86_PARAVIRT_H
  3#define _ASM_X86_PARAVIRT_H
  4/* Various instructions on x86 need to be replaced for
  5 * para-virtualization: those hooks are defined here. */
  6
  7#include <asm/paravirt_types.h>
  8
  9#ifndef __ASSEMBLY__
 10struct mm_struct;
 11#endif
 12
 13#ifdef CONFIG_PARAVIRT
 14#include <asm/pgtable_types.h>
 15#include <asm/asm.h>
 16#include <asm/nospec-branch.h>
 17
 18#ifndef __ASSEMBLY__
 19#include <linux/bug.h>
 20#include <linux/types.h>
 21#include <linux/cpumask.h>
 22#include <linux/static_call_types.h>
 23#include <asm/frame.h>
 24
 25u64 dummy_steal_clock(int cpu);
 26u64 dummy_sched_clock(void);
 27
 28DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
 29DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock);
 30
 31void paravirt_set_sched_clock(u64 (*func)(void));
 32
 33static __always_inline u64 paravirt_sched_clock(void)
 34{
 35	return static_call(pv_sched_clock)();
 36}
 37
 38struct static_key;
 39extern struct static_key paravirt_steal_enabled;
 40extern struct static_key paravirt_steal_rq_enabled;
 41
 42__visible void __native_queued_spin_unlock(struct qspinlock *lock);
 43bool pv_is_native_spin_unlock(void);
 44__visible bool __native_vcpu_is_preempted(long cpu);
 45bool pv_is_native_vcpu_is_preempted(void);
 46
 47static inline u64 paravirt_steal_clock(int cpu)
 48{
 49	return static_call(pv_steal_clock)(cpu);
 50}
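/*
 * Hook-up sketch (function names are hypothetical; a hypervisor guest
 * backend wires the static calls above roughly like this):
 *
 *	static u64 my_steal_clock(int cpu)	{ ... }
 *	static u64 my_sched_clock(void)		{ ... }
 *
 *	static_call_update(pv_steal_clock, my_steal_clock);
 *	paravirt_set_sched_clock(my_sched_clock);
 */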
 51
 52#ifdef CONFIG_PARAVIRT_SPINLOCKS
 53void __init paravirt_set_cap(void);
 54#endif
 55
 56/* The paravirtualized I/O functions */
 57static inline void slow_down_io(void)
 58{
 59	PVOP_VCALL0(cpu.io_delay);
 60#ifdef REALLY_SLOW_IO
 61	PVOP_VCALL0(cpu.io_delay);
 62	PVOP_VCALL0(cpu.io_delay);
 63	PVOP_VCALL0(cpu.io_delay);
 64#endif
 65}
 66
 67void native_flush_tlb_local(void);
 68void native_flush_tlb_global(void);
 69void native_flush_tlb_one_user(unsigned long addr);
 70void native_flush_tlb_multi(const struct cpumask *cpumask,
 71			     const struct flush_tlb_info *info);
 72
 73static inline void __flush_tlb_local(void)
 74{
 75	PVOP_VCALL0(mmu.flush_tlb_user);
 76}
 77
 78static inline void __flush_tlb_global(void)
 79{
 80	PVOP_VCALL0(mmu.flush_tlb_kernel);
 81}
 82
 83static inline void __flush_tlb_one_user(unsigned long addr)
 84{
 85	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
 86}
 87
 88static inline void __flush_tlb_multi(const struct cpumask *cpumask,
 89				      const struct flush_tlb_info *info)
 90{
 91	PVOP_VCALL2(mmu.flush_tlb_multi, cpumask, info);
 92}
 93
 94static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
 95{
 96	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
 97}
 98
 99static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
100{
101	PVOP_VCALL1(mmu.exit_mmap, mm);
102}
103
104static inline void notify_page_enc_status_changed(unsigned long pfn,
105						  int npages, bool enc)
106{
107	PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
108}
109
110#ifdef CONFIG_PARAVIRT_XXL
111static inline void load_sp0(unsigned long sp0)
112{
113	PVOP_VCALL1(cpu.load_sp0, sp0);
114}
115
116/* The paravirtualized CPUID instruction. */
117static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
118			   unsigned int *ecx, unsigned int *edx)
119{
120	PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
121}
122
123/*
124 * These special macros can be used to get or set a debugging register
125 */
126static __always_inline unsigned long paravirt_get_debugreg(int reg)
127{
128	return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
129}
130#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
131static __always_inline void set_debugreg(unsigned long val, int reg)
132{
133	PVOP_VCALL2(cpu.set_debugreg, reg, val);
134}
135
136static inline unsigned long read_cr0(void)
137{
138	return PVOP_CALL0(unsigned long, cpu.read_cr0);
139}
140
141static inline void write_cr0(unsigned long x)
142{
143	PVOP_VCALL1(cpu.write_cr0, x);
144}
145
146static __always_inline unsigned long read_cr2(void)
147{
148	return PVOP_ALT_CALLEE0(unsigned long, mmu.read_cr2,
149				"mov %%cr2, %%rax;", ALT_NOT_XEN);
150}
151
152static __always_inline void write_cr2(unsigned long x)
153{
154	PVOP_VCALL1(mmu.write_cr2, x);
155}
156
157static inline unsigned long __read_cr3(void)
158{
159	return PVOP_ALT_CALL0(unsigned long, mmu.read_cr3,
160			      "mov %%cr3, %%rax;", ALT_NOT_XEN);
161}
162
163static inline void write_cr3(unsigned long x)
164{
165	PVOP_ALT_VCALL1(mmu.write_cr3, x, "mov %%rdi, %%cr3", ALT_NOT_XEN);
166}
167
168static inline void __write_cr4(unsigned long x)
169{
170	PVOP_VCALL1(cpu.write_cr4, x);
171}
172
173static __always_inline void arch_safe_halt(void)
174{
175	PVOP_VCALL0(irq.safe_halt);
176}
177
178static inline void halt(void)
179{
180	PVOP_VCALL0(irq.halt);
181}
182
183extern noinstr void pv_native_wbinvd(void);
184
185static __always_inline void wbinvd(void)
186{
187	PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT_XEN);
188}
189
190static inline u64 paravirt_read_msr(unsigned msr)
191{
192	return PVOP_CALL1(u64, cpu.read_msr, msr);
193}
194
195static inline void paravirt_write_msr(unsigned msr,
196				      unsigned low, unsigned high)
197{
198	PVOP_VCALL3(cpu.write_msr, msr, low, high);
199}
200
201static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
202{
203	return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
204}
205
206static inline int paravirt_write_msr_safe(unsigned msr,
207					  unsigned low, unsigned high)
208{
209	return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
210}
211
212#define rdmsr(msr, val1, val2)			\
213do {						\
214	u64 _l = paravirt_read_msr(msr);	\
215	val1 = (u32)_l;				\
216	val2 = _l >> 32;			\
217} while (0)
218
219#define wrmsr(msr, val1, val2)			\
220do {						\
221	paravirt_write_msr(msr, val1, val2);	\
222} while (0)
223
224#define rdmsrl(msr, val)			\
225do {						\
226	val = paravirt_read_msr(msr);		\
227} while (0)
228
229static inline void wrmsrl(unsigned msr, u64 val)
230{
231	wrmsr(msr, (u32)val, (u32)(val>>32));
232}
233
234#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)
235
236/* rdmsr with exception handling */
237#define rdmsr_safe(msr, a, b)				\
238({							\
239	int _err;					\
240	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
241	(*a) = (u32)_l;					\
242	(*b) = _l >> 32;				\
243	_err;						\
244})
245
246static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
247{
248	int err;
249
250	*p = paravirt_read_msr_safe(msr, &err);
251	return err;
252}
253
254static inline unsigned long long paravirt_read_pmc(int counter)
255{
256	return PVOP_CALL1(u64, cpu.read_pmc, counter);
257}
258
259#define rdpmc(counter, low, high)		\
260do {						\
261	u64 _l = paravirt_read_pmc(counter);	\
262	low = (u32)_l;				\
263	high = _l >> 32;			\
264} while (0)
265
266#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
267
268static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
269{
270	PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
271}
272
273static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
274{
275	PVOP_VCALL2(cpu.free_ldt, ldt, entries);
276}
277
278static inline void load_TR_desc(void)
279{
280	PVOP_VCALL0(cpu.load_tr_desc);
281}
282static inline void load_gdt(const struct desc_ptr *dtr)
283{
284	PVOP_VCALL1(cpu.load_gdt, dtr);
285}
286static inline void load_idt(const struct desc_ptr *dtr)
287{
288	PVOP_VCALL1(cpu.load_idt, dtr);
289}
290static inline void set_ldt(const void *addr, unsigned entries)
291{
292	PVOP_VCALL2(cpu.set_ldt, addr, entries);
293}
294static inline unsigned long paravirt_store_tr(void)
295{
296	return PVOP_CALL0(unsigned long, cpu.store_tr);
297}
298
299#define store_tr(tr)	((tr) = paravirt_store_tr())
300static inline void load_TLS(struct thread_struct *t, unsigned cpu)
301{
302	PVOP_VCALL2(cpu.load_tls, t, cpu);
303}
304
305static inline void load_gs_index(unsigned int gs)
306{
307	PVOP_VCALL1(cpu.load_gs_index, gs);
308}
309
310static inline void write_ldt_entry(struct desc_struct *dt, int entry,
311				   const void *desc)
312{
313	PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
314}
315
316static inline void write_gdt_entry(struct desc_struct *dt, int entry,
317				   void *desc, int type)
318{
319	PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
320}
321
322static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
323{
324	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
325}
326
327#ifdef CONFIG_X86_IOPL_IOPERM
328static inline void tss_invalidate_io_bitmap(void)
329{
330	PVOP_VCALL0(cpu.invalidate_io_bitmap);
331}
332
333static inline void tss_update_io_bitmap(void)
334{
335	PVOP_VCALL0(cpu.update_io_bitmap);
336}
337#endif
338
339static inline void paravirt_enter_mmap(struct mm_struct *next)
340{
341	PVOP_VCALL1(mmu.enter_mmap, next);
342}
343
344static inline int paravirt_pgd_alloc(struct mm_struct *mm)
345{
346	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
347}
348
349static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
350{
351	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
352}
353
354static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
355{
356	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
357}
358static inline void paravirt_release_pte(unsigned long pfn)
359{
360	PVOP_VCALL1(mmu.release_pte, pfn);
361}
362
363static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
364{
365	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
366}
367
368static inline void paravirt_release_pmd(unsigned long pfn)
369{
370	PVOP_VCALL1(mmu.release_pmd, pfn);
371}
372
373static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
374{
375	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
376}
377static inline void paravirt_release_pud(unsigned long pfn)
378{
379	PVOP_VCALL1(mmu.release_pud, pfn);
380}
381
382static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
383{
384	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
385}
386
387static inline void paravirt_release_p4d(unsigned long pfn)
388{
389	PVOP_VCALL1(mmu.release_p4d, pfn);
390}
391
392static inline pte_t __pte(pteval_t val)
393{
394	return (pte_t) { PVOP_ALT_CALLEE1(pteval_t, mmu.make_pte, val,
395					  "mov %%rdi, %%rax", ALT_NOT_XEN) };
396}
397
398static inline pteval_t pte_val(pte_t pte)
399{
400	return PVOP_ALT_CALLEE1(pteval_t, mmu.pte_val, pte.pte,
401				"mov %%rdi, %%rax", ALT_NOT_XEN);
402}
403
404static inline pgd_t __pgd(pgdval_t val)
405{
406	return (pgd_t) { PVOP_ALT_CALLEE1(pgdval_t, mmu.make_pgd, val,
407					  "mov %%rdi, %%rax", ALT_NOT_XEN) };
408}
409
410static inline pgdval_t pgd_val(pgd_t pgd)
411{
412	return PVOP_ALT_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd,
413				"mov %%rdi, %%rax", ALT_NOT_XEN);
414}
415
416#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
417static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
418					   pte_t *ptep)
419{
420	pteval_t ret;
421
422	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);
423
424	return (pte_t) { .pte = ret };
425}
426
427static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
428					   pte_t *ptep, pte_t old_pte, pte_t pte)
429{
430
431	PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
432}
433
434static inline void set_pte(pte_t *ptep, pte_t pte)
435{
436	PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
437}
438
439static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
440{
441	PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd));
442}
443
444static inline pmd_t __pmd(pmdval_t val)
445{
446	return (pmd_t) { PVOP_ALT_CALLEE1(pmdval_t, mmu.make_pmd, val,
447					  "mov %%rdi, %%rax", ALT_NOT_XEN) };
448}
449
450static inline pmdval_t pmd_val(pmd_t pmd)
451{
452	return PVOP_ALT_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd,
453				"mov %%rdi, %%rax", ALT_NOT_XEN);
454}
455
456static inline void set_pud(pud_t *pudp, pud_t pud)
457{
458	PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud));
459}
460
461static inline pud_t __pud(pudval_t val)
462{
463	pudval_t ret;
464
465	ret = PVOP_ALT_CALLEE1(pudval_t, mmu.make_pud, val,
466			       "mov %%rdi, %%rax", ALT_NOT_XEN);
467
468	return (pud_t) { ret };
469}
470
471static inline pudval_t pud_val(pud_t pud)
472{
473	return PVOP_ALT_CALLEE1(pudval_t, mmu.pud_val, pud.pud,
474				"mov %%rdi, %%rax", ALT_NOT_XEN);
475}
476
477static inline void pud_clear(pud_t *pudp)
478{
479	set_pud(pudp, native_make_pud(0));
480}
481
482static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
483{
484	p4dval_t val = native_p4d_val(p4d);
485
486	PVOP_VCALL2(mmu.set_p4d, p4dp, val);
487}
488
489#if CONFIG_PGTABLE_LEVELS >= 5
490
491static inline p4d_t __p4d(p4dval_t val)
492{
493	p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, mmu.make_p4d, val,
494					"mov %%rdi, %%rax", ALT_NOT_XEN);
495
496	return (p4d_t) { ret };
497}
498
499static inline p4dval_t p4d_val(p4d_t p4d)
500{
501	return PVOP_ALT_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d,
502				"mov %%rdi, %%rax", ALT_NOT_XEN);
503}
504
505static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
506{
507	PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
508}
509
510#define set_pgd(pgdp, pgdval) do {					\
511	if (pgtable_l5_enabled())						\
512		__set_pgd(pgdp, pgdval);				\
513	else								\
514		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
515} while (0)
516
517#define pgd_clear(pgdp) do {						\
518	if (pgtable_l5_enabled())					\
519		set_pgd(pgdp, native_make_pgd(0));			\
520} while (0)
521
522#endif  /* CONFIG_PGTABLE_LEVELS == 5 */
523
524static inline void p4d_clear(p4d_t *p4dp)
525{
526	set_p4d(p4dp, native_make_p4d(0));
527}
528
529static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
530{
531	set_pte(ptep, pte);
532}
533
534static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
535			     pte_t *ptep)
536{
537	set_pte(ptep, native_make_pte(0));
538}
539
540static inline void pmd_clear(pmd_t *pmdp)
541{
542	set_pmd(pmdp, native_make_pmd(0));
543}
544
545#define  __HAVE_ARCH_START_CONTEXT_SWITCH
546static inline void arch_start_context_switch(struct task_struct *prev)
547{
548	PVOP_VCALL1(cpu.start_context_switch, prev);
549}
550
551static inline void arch_end_context_switch(struct task_struct *next)
552{
553	PVOP_VCALL1(cpu.end_context_switch, next);
554}
555
556#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
557static inline void arch_enter_lazy_mmu_mode(void)
558{
559	PVOP_VCALL0(mmu.lazy_mode.enter);
560}
561
562static inline void arch_leave_lazy_mmu_mode(void)
563{
564	PVOP_VCALL0(mmu.lazy_mode.leave);
565}
566
567static inline void arch_flush_lazy_mmu_mode(void)
568{
569	PVOP_VCALL0(mmu.lazy_mode.flush);
570}
571
572static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
573				phys_addr_t phys, pgprot_t flags)
574{
575	pv_ops.mmu.set_fixmap(idx, phys, flags);
576}
577#endif
578
579#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
580
581static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
582							u32 val)
583{
584	PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
585}
586
587static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
588{
589	PVOP_ALT_VCALLEE1(lock.queued_spin_unlock, lock,
590			  "movb $0, (%%" _ASM_ARG1 ");",
591			  ALT_NOT(X86_FEATURE_PVUNLOCK));
592}
593
594static __always_inline void pv_wait(u8 *ptr, u8 val)
595{
596	PVOP_VCALL2(lock.wait, ptr, val);
597}
598
599static __always_inline void pv_kick(int cpu)
600{
601	PVOP_VCALL1(lock.kick, cpu);
602}
603
604static __always_inline bool pv_vcpu_is_preempted(long cpu)
605{
606	return PVOP_ALT_CALLEE1(bool, lock.vcpu_is_preempted, cpu,
607				"xor %%" _ASM_AX ", %%" _ASM_AX ";",
608				ALT_NOT(X86_FEATURE_VCPUPREEMPT));
609}
610
611void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
612bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
613
614#endif /* SMP && PARAVIRT_SPINLOCKS */
615
616#ifdef CONFIG_X86_32
617/* save and restore all caller-save registers, except return value */
618#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
619#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"
620#else
621/* save and restore all caller-save registers, except return value */
622#define PV_SAVE_ALL_CALLER_REGS						\
623	"push %rcx;"							\
624	"push %rdx;"							\
625	"push %rsi;"							\
626	"push %rdi;"							\
627	"push %r8;"							\
628	"push %r9;"							\
629	"push %r10;"							\
630	"push %r11;"
631#define PV_RESTORE_ALL_CALLER_REGS					\
632	"pop %r11;"							\
633	"pop %r10;"							\
634	"pop %r9;"							\
635	"pop %r8;"							\
636	"pop %rdi;"							\
637	"pop %rsi;"							\
638	"pop %rdx;"							\
639	"pop %rcx;"
640#endif
641
642/*
643 * Generate a thunk around a function which saves all caller-save
644 * registers except for the return value.  This allows C functions to
645 * be called from assembler code where fewer than normal registers are
646 * available.  It may also help code generation around calls from C
647 * code if the common case doesn't use many registers.
648 *
649 * When a callee is wrapped in a thunk, the caller can assume that all
650 * arg regs and all scratch registers are preserved across the
651 * call. The return value in rax/eax will not be saved, even for void
652 * functions.
653 */
654#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
655#define __PV_CALLEE_SAVE_REGS_THUNK(func, section)			\
656	extern typeof(func) __raw_callee_save_##func;			\
657									\
658	asm(".pushsection " section ", \"ax\";"				\
659	    ".globl " PV_THUNK_NAME(func) ";"				\
660	    ".type " PV_THUNK_NAME(func) ", @function;"			\
661	    ASM_FUNC_ALIGN						\
662	    PV_THUNK_NAME(func) ":"					\
663	    ASM_ENDBR							\
664	    FRAME_BEGIN							\
665	    PV_SAVE_ALL_CALLER_REGS					\
666	    "call " #func ";"						\
667	    PV_RESTORE_ALL_CALLER_REGS					\
668	    FRAME_END							\
669	    ASM_RET							\
670	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";"	\
671	    ".popsection")
672
673#define PV_CALLEE_SAVE_REGS_THUNK(func)			\
674	__PV_CALLEE_SAVE_REGS_THUNK(func, ".text")
675
676/* Get a reference to a callee-save function */
677#define PV_CALLEE_SAVE(func)						\
678	((struct paravirt_callee_save) { __raw_callee_save_##func })
679
680/* Promise that "func" already uses the right calling convention */
681#define __PV_IS_CALLEE_SAVE(func)			\
682	((struct paravirt_callee_save) { func })
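/*
 * Wiring sketch for this version (my_save_fl is hypothetical; real
 * backends generate the thunk and then assign into the global pv_ops
 * structure):
 *
 *	static noinstr unsigned long my_save_fl(void)
 *	{
 *		// return the guest's virtual IF state
 *	}
 *	__PV_CALLEE_SAVE_REGS_THUNK(my_save_fl, ".noinstr.text");
 *
 *	pv_ops.irq.save_fl = PV_CALLEE_SAVE(my_save_fl);
 */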
683
684#ifdef CONFIG_PARAVIRT_XXL
685static __always_inline unsigned long arch_local_save_flags(void)
686{
687	return PVOP_ALT_CALLEE0(unsigned long, irq.save_fl, "pushf; pop %%rax;",
688				ALT_NOT_XEN);
689}
690
691static __always_inline void arch_local_irq_disable(void)
692{
693	PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT_XEN);
694}
695
696static __always_inline void arch_local_irq_enable(void)
697{
698	PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;", ALT_NOT_XEN);
699}
700
701static __always_inline unsigned long arch_local_irq_save(void)
702{
703	unsigned long f;
704
705	f = arch_local_save_flags();
706	arch_local_irq_disable();
707	return f;
708}
709#endif
710
711
712/* Make sure as little as possible of this mess escapes. */
713#undef PARAVIRT_CALL
714#undef __PVOP_CALL
715#undef __PVOP_VCALL
716#undef PVOP_VCALL0
717#undef PVOP_CALL0
718#undef PVOP_VCALL1
719#undef PVOP_CALL1
720#undef PVOP_VCALL2
721#undef PVOP_CALL2
722#undef PVOP_VCALL3
723#undef PVOP_CALL3
724#undef PVOP_VCALL4
725#undef PVOP_CALL4
726
727extern void default_banner(void);
728void native_pv_lock_init(void) __init;
729
730#else  /* __ASSEMBLY__ */
731
732#ifdef CONFIG_X86_64
733#ifdef CONFIG_PARAVIRT_XXL
734#ifdef CONFIG_DEBUG_ENTRY
735
736#define PARA_INDIRECT(addr)	*addr(%rip)
737
738.macro PARA_IRQ_save_fl
739	ANNOTATE_RETPOLINE_SAFE;
740	call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);
741.endm
742
743#define SAVE_FLAGS ALTERNATIVE_2 "PARA_IRQ_save_fl;",			\
744				 "ALT_CALL_INSTR;", ALT_CALL_ALWAYS,	\
745				 "pushf; pop %rax;", ALT_NOT_XEN
746#endif
747#endif /* CONFIG_PARAVIRT_XXL */
748#endif	/* CONFIG_X86_64 */
749
750#endif /* __ASSEMBLY__ */
751#else  /* CONFIG_PARAVIRT */
752# define default_banner x86_init_noop
753
754#ifndef __ASSEMBLY__
755static inline void native_pv_lock_init(void)
756{
757}
758#endif
759#endif /* !CONFIG_PARAVIRT */
760
761#ifndef __ASSEMBLY__
762#ifndef CONFIG_PARAVIRT_XXL
763static inline void paravirt_enter_mmap(struct mm_struct *mm)
764{
765}
766#endif
767
768#ifndef CONFIG_PARAVIRT
769static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
770{
771}
772#endif
773
774#ifndef CONFIG_PARAVIRT_SPINLOCKS
775static inline void paravirt_set_cap(void)
776{
777}
778#endif
779#endif /* __ASSEMBLY__ */
780#endif /* _ASM_X86_PARAVIRT_H */