arch/x86/kernel/paravirt.c (v6.13.7)

// SPDX-License-Identifier: GPL-2.0-or-later
/*  Paravirtualization interfaces
    Copyright (C) 2006 Rusty Russell IBM Corporation


    2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
*/

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/special_insns.h>
#include <asm/tlb.h>
#include <asm/io_bitmap.h>
#include <asm/gsseg.h>

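/*
 * DEFINE_ASM_FUNC() emits a small, directly callable assembly function with
 * the given body, placed in the named section.  The stubs defined this way
 * here and below are used as trivial implementations of paravirt ops.
 */
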
/* stub always returning 0. */
DEFINE_ASM_FUNC(paravirt_ret0, "xor %eax,%eax", .entry.text);

void __init default_banner(void)
{
	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       pv_info.name);
}

#ifdef CONFIG_PARAVIRT_XXL
DEFINE_ASM_FUNC(_paravirt_ident_64, "mov %rdi, %rax", .text);
DEFINE_ASM_FUNC(pv_native_save_fl, "pushf; pop %rax", .noinstr.text);
DEFINE_ASM_FUNC(pv_native_irq_disable, "cli", .noinstr.text);
DEFINE_ASM_FUNC(pv_native_irq_enable, "sti", .noinstr.text);
DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
#endif
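
/*
 * With CONFIG_PARAVIRT_XXL the flag save, interrupt enable/disable and CR2
 * read are real (noinstr) assembly functions so that a hypervisor guest,
 * e.g. Xen PV, can replace them in pv_ops; _paravirt_ident_64 is the 64-bit
 * identity transform used for the page-table value conversions further down.
 */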

DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);

void __init native_pv_lock_init(void)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_enable(&virt_spin_lock_key);
}
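
/*
 * When the CPU reports a hypervisor, enable virt_spin_lock_key so the
 * qspinlock slow path can fall back to a simple test-and-set lock, which
 * copes better with the lock holder's vCPU being preempted.
 */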

static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	tlb_remove_page(tlb, table);
}

struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

static u64 native_steal_clock(int cpu)
{
	return 0;
}

DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);

void paravirt_set_sched_clock(u64 (*func)(void))
{
	static_call_update(pv_sched_clock, func);
}
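
/*
 * Both clocks are static calls: pv_sched_clock defaults to the kernel's
 * native_sched_clock() and pv_steal_clock defaults to reporting no steal
 * time.  Hypervisor guest code (kvmclock, Xen, Hyper-V) installs its own
 * implementations via paravirt_set_sched_clock() and static_call_update().
 */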

/* These are in entry.S */
static struct resource reserve_ioports = {
	.start = 0,
	.end = IO_SPACE_LIMIT,
	.name = "paravirt-ioport",
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware.  This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 */
int paravirt_disable_iospace(void)
{
	return request_resource(&ioport_resource, &reserve_ioports);
}

#ifdef CONFIG_PARAVIRT_XXL
static noinstr void pv_native_write_cr2(unsigned long val)
{
	native_write_cr2(val);
}

static noinstr unsigned long pv_native_get_debugreg(int regno)
{
	return native_get_debugreg(regno);
}

static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
{
	native_set_debugreg(regno, val);
}

noinstr void pv_native_wbinvd(void)
{
	native_wbinvd();
}

static noinstr void pv_native_safe_halt(void)
{
	native_safe_halt();
}
#endif
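
/*
 * The wrappers above turn the native inline helpers into out-of-line,
 * instrumentation-free functions so they can serve as the default pv_ops
 * entries below.
 */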

struct pv_info pv_info = {
	.name = "bare hardware",
#ifdef CONFIG_PARAVIRT_XXL
	.extra_user_64bit_cs = __USER_CS,
#endif
};

/* 64-bit pagetable entries */
#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_64)
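
/*
 * On bare hardware the pte/pmd/pud/pgd value conversions are identity
 * transforms, so they are all wired to _paravirt_ident_64 in its
 * callee-save form.
 */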

struct paravirt_patch_template pv_ops = {
	/* Cpu ops. */
	.cpu.io_delay		= native_io_delay,

#ifdef CONFIG_PARAVIRT_XXL
	.cpu.cpuid		= native_cpuid,
	.cpu.get_debugreg	= pv_native_get_debugreg,
	.cpu.set_debugreg	= pv_native_set_debugreg,
	.cpu.read_cr0		= native_read_cr0,
	.cpu.write_cr0		= native_write_cr0,
	.cpu.write_cr4		= native_write_cr4,
	.cpu.wbinvd		= pv_native_wbinvd,
	.cpu.read_msr		= native_read_msr,
	.cpu.write_msr		= native_write_msr,
	.cpu.read_msr_safe	= native_read_msr_safe,
	.cpu.write_msr_safe	= native_write_msr_safe,
	.cpu.read_pmc		= native_read_pmc,
	.cpu.load_tr_desc	= native_load_tr_desc,
	.cpu.set_ldt		= native_set_ldt,
	.cpu.load_gdt		= native_load_gdt,
	.cpu.load_idt		= native_load_idt,
	.cpu.store_tr		= native_store_tr,
	.cpu.load_tls		= native_load_tls,
	.cpu.load_gs_index	= native_load_gs_index,
	.cpu.write_ldt_entry	= native_write_ldt_entry,
	.cpu.write_gdt_entry	= native_write_gdt_entry,
	.cpu.write_idt_entry	= native_write_idt_entry,

	.cpu.alloc_ldt		= paravirt_nop,
	.cpu.free_ldt		= paravirt_nop,

	.cpu.load_sp0		= native_load_sp0,

#ifdef CONFIG_X86_IOPL_IOPERM
	.cpu.invalidate_io_bitmap	= native_tss_invalidate_io_bitmap,
	.cpu.update_io_bitmap		= native_tss_update_io_bitmap,
#endif

	.cpu.start_context_switch	= paravirt_nop,
	.cpu.end_context_switch		= paravirt_nop,

	/* Irq ops. */
	.irq.save_fl		= __PV_IS_CALLEE_SAVE(pv_native_save_fl),
	.irq.irq_disable	= __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
	.irq.irq_enable		= __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
	.irq.safe_halt		= pv_native_safe_halt,
	.irq.halt		= native_halt,
#endif /* CONFIG_PARAVIRT_XXL */

	/* Mmu ops. */
	.mmu.flush_tlb_user	= native_flush_tlb_local,
	.mmu.flush_tlb_kernel	= native_flush_tlb_global,
	.mmu.flush_tlb_one_user	= native_flush_tlb_one_user,
	.mmu.flush_tlb_multi	= native_flush_tlb_multi,
	.mmu.tlb_remove_table	= native_tlb_remove_table,

	.mmu.exit_mmap		= paravirt_nop,
	.mmu.notify_page_enc_status_changed	= paravirt_nop,

#ifdef CONFIG_PARAVIRT_XXL
	.mmu.read_cr2		= __PV_IS_CALLEE_SAVE(pv_native_read_cr2),
	.mmu.write_cr2		= pv_native_write_cr2,
	.mmu.read_cr3		= __native_read_cr3,
	.mmu.write_cr3		= native_write_cr3,

	.mmu.pgd_alloc		= __paravirt_pgd_alloc,
	.mmu.pgd_free		= paravirt_nop,

	.mmu.alloc_pte		= paravirt_nop,
	.mmu.alloc_pmd		= paravirt_nop,
	.mmu.alloc_pud		= paravirt_nop,
	.mmu.alloc_p4d		= paravirt_nop,
	.mmu.release_pte	= paravirt_nop,
	.mmu.release_pmd	= paravirt_nop,
	.mmu.release_pud	= paravirt_nop,
	.mmu.release_p4d	= paravirt_nop,

	.mmu.set_pte		= native_set_pte,
	.mmu.set_pmd		= native_set_pmd,

	.mmu.ptep_modify_prot_start	= __ptep_modify_prot_start,
	.mmu.ptep_modify_prot_commit	= __ptep_modify_prot_commit,

	.mmu.set_pud		= native_set_pud,

	.mmu.pmd_val		= PTE_IDENT,
	.mmu.make_pmd		= PTE_IDENT,

	.mmu.pud_val		= PTE_IDENT,
	.mmu.make_pud		= PTE_IDENT,

	.mmu.set_p4d		= native_set_p4d,

#if CONFIG_PGTABLE_LEVELS >= 5
	.mmu.p4d_val		= PTE_IDENT,
	.mmu.make_p4d		= PTE_IDENT,

	.mmu.set_pgd		= native_set_pgd,
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */

	.mmu.pte_val		= PTE_IDENT,
	.mmu.pgd_val		= PTE_IDENT,

	.mmu.make_pte		= PTE_IDENT,
	.mmu.make_pgd		= PTE_IDENT,

	.mmu.enter_mmap		= paravirt_nop,

	.mmu.lazy_mode = {
		.enter		= paravirt_nop,
		.leave		= paravirt_nop,
		.flush		= paravirt_nop,
	},

	.mmu.set_fixmap		= native_set_fixmap,
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
	/* Lock ops. */
#ifdef CONFIG_SMP
	.lock.queued_spin_lock_slowpath	= native_queued_spin_lock_slowpath,
	.lock.queued_spin_unlock	=
				PV_CALLEE_SAVE(__native_queued_spin_unlock),
	.lock.wait			= paravirt_nop,
	.lock.kick			= paravirt_nop,
	.lock.vcpu_is_preempted		=
				PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
#endif
};
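
/*
 * pv_ops starts out filled with the native implementations above; guest
 * support code (Xen, KVM, Hyper-V, ...) overwrites individual entries at
 * early boot when it detects its hypervisor.
 */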

#ifdef CONFIG_PARAVIRT_XXL
NOKPROBE_SYMBOL(native_load_idt);
#endif

EXPORT_SYMBOL(pv_ops);
EXPORT_SYMBOL_GPL(pv_info);

arch/x86/kernel/paravirt.c (v6.9.4): apart from the virt_spin_lock_key setup shown below, this version is identical to the v6.13.7 listing above.

DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);

void __init native_pv_lock_init(void)
{
	if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_disable(&virt_spin_lock_key);
}
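
/*
 * In v6.9 the key defaults to true and is disabled on bare metal when
 * paravirt spinlocks are configured; v6.13 inverts this, defaulting to
 * false and enabling the key only when a hypervisor is detected.
 */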
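
/*
 * Illustrative sketch (not part of paravirt.c): how hypervisor guest code
 * typically takes over the native defaults above at early boot.  The myhv_*
 * names are hypothetical; real users are the Xen, KVM, VMware and Hyper-V
 * guest support code, which usually hook in via their struct hypervisor_x86
 * ->init.init_platform callback and use the same static-call and static-key
 * helpers shown here.
 */
static u64 myhv_steal_clock(int cpu)
{
	/* A real guest would read per-CPU steal time from its hypervisor. */
	return 0;
}

static void __init myhv_init_platform(void)
{
	pv_info.name = "myhv";			/* printed by default_banner() */
	pv_ops.cpu.io_delay = paravirt_nop;	/* skip the port 0x80 delay */

	static_call_update(pv_steal_clock, myhv_steal_clock);
	static_key_slow_inc(&paravirt_steal_enabled);
}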