v4.6
/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (i.e., vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/frame.h>

#include "xen-asm.h"
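
For reference, the XEN_vcpu_info_mask and XEN_vcpu_info_pending offsets used
below are asm-offsets for the fields of the shared vcpu_info structure; a
minimal C sketch of the relevant layout (per Xen's public xen/interface/xen.h)
looks like this:

	struct vcpu_info {
		uint8_t evtchn_upcall_pending;	/* nonzero: an event is pending   */
		uint8_t evtchn_upcall_mask;	/* nonzero: event delivery masked */
		/* ... event-channel selector and arch-specific fields follow ... */
	};

Note that the two bytes are adjacent; the cmpw trick in xen_restore_fl_direct
below depends on that.
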
/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with one "and" operation.  If there are pending
 * events, then enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preemption here doesn't matter because that will deal with
	 * any pending interrupts.  The pending check may end up being
	 * run on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	FRAME_END
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)
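
The C pv-op that this direct form shadows does the same unmask-then-check
dance; a minimal sketch, modeled on xen_irq_enable() in arch/x86/xen/irq.c
(the xen_vcpu per-cpu pointer and xen_force_evtchn_callback() are as used in
that file; the _sketch name is ours):

	static void xen_irq_enable_sketch(void)
	{
		struct vcpu_info *vcpu = this_cpu_read(xen_vcpu);

		vcpu->evtchn_upcall_mask = 0;		/* unmask events... */
		barrier();				/* ...before checking pending */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();	/* have Xen deliver them */
	}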


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)
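
The C equivalent is a single store; a sketch under the same assumptions as
above:

	static void xen_irq_disable_sketch(void)
	{
		/* No pending check needed: masking can't unblock anything. */
		this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
	}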

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah		/* %ah = 1 if the mask byte was 0 (enabled) */
	addb %ah, %ah		/* %ah = 2: bit 9 of %eax, i.e. X86_EFLAGS_IF */
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)
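
In C, the value being computed is just the inverted mask placed in the IF
position; a sketch under the same assumptions as above:

	static unsigned long xen_save_fl_sketch(void)
	{
		struct vcpu_info *vcpu = this_cpu_read(xen_vcpu);

		/* Xen's mask and EFLAGS.IF have opposite senses. */
		return vcpu->evtchn_upcall_mask ? 0 : X86_EFLAGS_IF;
	}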


/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
ENTRY(xen_restore_fl_direct)
	FRAME_BEGIN
#ifdef CONFIG_X86_64
	testw $X86_EFLAGS_IF, %di
#else
	testb $X86_EFLAGS_IF>>8, %ah
#endif
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preemption here doesn't matter because that will deal with
	 * any pending interrupts.  The pending check may end up being
	 * run on the wrong CPU, but that doesn't hurt.
	 */

	/*
	 * Check for unmasked and pending: this reads the adjacent
	 * pending and mask bytes as one 16-bit word, so 0x0001 means
	 * pending is set and the mask is clear.
	 */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	FRAME_END
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)
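
The corresponding C sketch, same assumptions as above:

	static void xen_restore_fl_sketch(unsigned long flags)
	{
		struct vcpu_info *vcpu = this_cpu_read(xen_vcpu);

		/* Invert EFLAGS.IF into Xen's mask sense. */
		vcpu->evtchn_upcall_mask = !(flags & X86_EFLAGS_IF);
		barrier();	/* set the mask before checking pending */
		if (!vcpu->evtchn_upcall_mask && vcpu->evtchn_upcall_pending)
			xen_force_evtchn_callback();
	}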


/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.  This can be reached via a call patched
 * into code where any caller-clobbered register may still be live,
 * so everything the hypercall could clobber is saved.
 */
ENTRY(check_events)
	FRAME_BEGIN
#ifdef CONFIG_X86_32
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
#else
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
#endif
	FRAME_END
	ret
ENDPROC(check_events)
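
xen_force_evtchn_callback() itself is tiny: any trip through the hypervisor
causes pending events to be delivered on return to the guest, so a harmless
hypercall suffices. A sketch, per arch/x86/xen/irq.c:

	void xen_force_evtchn_callback(void)
	{
		/* Any hypercall delivers pending events on the way back. */
		(void)HYPERVISOR_xen_version(0, NULL);
	}
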
v3.5.6
/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (i.e., vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>

#include "xen-asm.h"

/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with one "and" operation.  If there are pending
 * events, then enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preemption here doesn't matter because that will deal with
	 * any pending interrupts.  The pending check may end up being
	 * run on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)


/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
ENTRY(xen_restore_fl_direct)
#ifdef CONFIG_X86_64
	testw $X86_EFLAGS_IF, %di
#else
	testb $X86_EFLAGS_IF>>8, %ah
#endif
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preemption here doesn't matter because that will deal with
	 * any pending interrupts.  The pending check may end up being
	 * run on the wrong CPU, but that doesn't hurt.
	 */

	/* check for unmasked and pending */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)


/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
check_events:
#ifdef CONFIG_X86_32
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
#else
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
#endif
	ret