// SPDX-License-Identifier: GPL-2.0
/*
 * Xen event channels (2-level ABI)
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

#include "events_internal.h"

/*
 * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
 * careful to only use bitops which allow for this (e.g.
 * test_bit/find_first_bit and friends but not __ffs) and to pass
 * BITS_PER_EVTCHN_WORD as the bitmask length.
 */
#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
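
/*
 * For example, on a 32-bit ARM guest xen_ulong_t is 64 bits wide while
 * unsigned long is only 32 bits, so BITS_PER_EVTCHN_WORD is 64 even
 * though BITS_PER_LONG is 32.  find_first_bit(BM(&w),
 * BITS_PER_EVTCHN_WORD) copes with that split, whereas __ffs(w) would
 * only ever look at the low 32 bits.
 */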
/*
 * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
 * array. Primarily to avoid long lines (hence the terse name).
 */
#define BM(x) (unsigned long *)(x)
/* Find the first set bit in an evtchn mask */
#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)

#define EVTCHN_MASK_SIZE (EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD)

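/*
 * Per-CPU bitmap of the event channels bound to each CPU.  It is ANDed
 * with the shared pending and mask words in active_evtchns() so that a
 * CPU only handles ports that are actually bound to it.
 */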
static DEFINE_PER_CPU(xen_ulong_t [EVTCHN_MASK_SIZE], cpu_evtchn_mask);

static unsigned evtchn_2l_max_channels(void)
{
	return EVTCHN_2L_NR_CHANNELS;
}

static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
{
	clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
	set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}

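/*
 * The helpers below manipulate the pending and mask bitmaps that live
 * in the shared_info page.  That page is written concurrently by Xen
 * and by other vCPUs, hence the atomic sync_* bitops.
 */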
static void evtchn_2l_clear_pending(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, BM(&s->evtchn_pending[0]));
}

static void evtchn_2l_set_pending(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, BM(&s->evtchn_pending[0]));
}

static bool evtchn_2l_is_pending(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
}

static bool evtchn_2l_test_and_set_mask(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
}

static void evtchn_2l_mask(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, BM(&s->evtchn_mask[0]));
}

static void evtchn_2l_unmask(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();
	int do_hypercall = 0, evtchn_pending = 0;

	BUG_ON(!irqs_disabled());

	if (unlikely((cpu != cpu_from_evtchn(port))))
		do_hypercall = 1;
	else {
		/*
		 * Need to clear the mask before checking pending to
		 * avoid a race with an event becoming pending.
		 *
		 * EVTCHNOP_unmask will only trigger an upcall if the
		 * mask bit was set, so if a hypercall is needed
		 * remask the event.
		 */
		sync_clear_bit(port, BM(&s->evtchn_mask[0]));
		evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));

		if (unlikely(evtchn_pending && xen_hvm_domain())) {
			sync_set_bit(port, BM(&s->evtchn_mask[0]));
			do_hypercall = 1;
		}
	}

	/* Slow path (hypercall) if this is a non-local port or if this is
	 * an hvm domain and an event is pending (hvm domains don't have
	 * their own implementation of irq_enable). */
	if (do_hypercall) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (evtchn_pending &&
		    !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
					   BM(&vcpu_info->evtchn_pending_sel)))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

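/*
 * Scan position at which the previous call to evtchn_2l_handle_events()
 * stopped.  The next call resumes from here so that low-numbered ports
 * cannot starve higher-numbered ones.
 */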
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))
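/*
 * E.g. MASK_LSBS(0xff, 4) == 0xf0: the low four bits are cleared so a
 * subsequent find_first_bit() skips positions that have already been
 * scanned during this pass.
 */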

static inline xen_ulong_t active_evtchns(unsigned int cpu,
					 struct shared_info *sh,
					 unsigned int idx)
{
	return sh->evtchn_pending[idx] &
		per_cpu(cpu_evtchn_mask, cpu)[idx] &
		~sh->evtchn_mask[idx];
}

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq and hand it to generic_handle_irq().
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
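/*
 * With 64-bit event words, for instance, port 130 lives in
 * evtchn_pending[2], bit 2; when it fires, Xen also sets bit 2 of this
 * vCPU's evtchn_pending_sel and raises evtchn_upcall_pending.  That is
 * exactly what the loop below walks: the selector word first, then the
 * per-word pending bits.
 */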
static void evtchn_2l_handle_events(unsigned cpu)
{
	int irq;
	xen_ulong_t pending_words;
	xen_ulong_t pending_bits;
	int start_word_idx, start_bit_idx;
	int word_idx, bit_idx;
	int i;
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

	/* Timer interrupt has highest priority. */
	irq = irq_from_virq(cpu, VIRQ_TIMER);
	if (irq != -1) {
		evtchn_port_t evtchn = evtchn_from_irq(irq);
		word_idx = evtchn / BITS_PER_LONG;
		bit_idx = evtchn % BITS_PER_LONG;
		if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
			generic_handle_irq(irq);
	}

	/*
	 * Master flag must be cleared /before/ clearing
	 * selector flag. xchg_xen_ulong must contain an
	 * appropriate barrier.
	 */
	pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);

	start_word_idx = __this_cpu_read(current_word_idx);
	start_bit_idx = __this_cpu_read(current_bit_idx);

	word_idx = start_word_idx;

	for (i = 0; pending_words != 0; i++) {
		xen_ulong_t words;

		words = MASK_LSBS(pending_words, word_idx);

		/*
		 * If we masked out all events, wrap to beginning.
		 */
		if (words == 0) {
			word_idx = 0;
			bit_idx = 0;
			continue;
		}
		word_idx = EVTCHN_FIRST_BIT(words);

		pending_bits = active_evtchns(cpu, s, word_idx);
		bit_idx = 0; /* usually scan entire word from start */
		/*
		 * We scan the starting word in two parts.
		 *
		 * 1st time: start in the middle, scanning the
		 * upper bits.
		 *
		 * 2nd time: scan the whole word (not just the
		 * parts skipped in the first pass) -- if an
		 * event in the previously scanned bits is
		 * pending again it would just be scanned on
		 * the next loop anyway.
		 */
		if (word_idx == start_word_idx) {
			if (i == 0)
				bit_idx = start_bit_idx;
		}

		do {
			xen_ulong_t bits;
			evtchn_port_t port;

			bits = MASK_LSBS(pending_bits, bit_idx);

			/* If we masked out all events, move on. */
			if (bits == 0)
				break;

			bit_idx = EVTCHN_FIRST_BIT(bits);

			/* Process port. */
			port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
			irq = get_evtchn_to_irq(port);

			if (irq != -1)
				generic_handle_irq(irq);

			bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;

			/* Next caller starts at last processed + 1 */
			__this_cpu_write(current_word_idx,
					 bit_idx ? word_idx :
					 (word_idx+1) % BITS_PER_EVTCHN_WORD);
			__this_cpu_write(current_bit_idx, bit_idx);
		} while (bit_idx != 0);

		/* Scan start_word_idx twice; all others once. */
		if ((word_idx != start_word_idx) || (i != 0))
			pending_words &= ~(1UL << word_idx);

		word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
	}
}

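/*
 * Handler for the per-CPU debug virq (VIRQ_DEBUG): dump every vCPU's
 * upcall state plus the shared pending/mask bitmaps and this CPU's
 * binding mask, then list each pending event along with the reason it
 * is not being delivered.
 */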
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n  ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");
	printk("\nglobal mask:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nglobally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocal cpu%d mask:\n   ", cpu);
	for (i = (EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		xen_ulong_t pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n   " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < EVTCHN_2L_NR_CHANNELS; i++) {
		if (sync_test_bit(i, BM(sh->evtchn_pending))) {
			int word_idx = i / BITS_PER_EVTCHN_WORD;
			printk("  %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       get_evtchn_to_irq(i),
			       sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
			       ? "" : " l2-clear",
			       !sync_test_bit(i, BM(sh->evtchn_mask))
			       ? "" : " globally-masked",
			       sync_test_bit(i, BM(cpu_evtchn))
			       ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

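/*
 * After a suspend/resume cycle every event channel is rebound, so the
 * stale per-CPU binding masks are simply cleared; bind_to_cpu()
 * repopulates them as the channels come back.
 */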
static void evtchn_2l_resume(void)
{
	int i;

	for_each_online_cpu(i)
		memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
				EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
}

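/*
 * Glue into the generic event-channel layer: events_base.c calls these
 * hooks through the evtchn_ops pointer installed by xen_evtchn_2l_init().
 */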
static const struct evtchn_ops evtchn_ops_2l = {
	.max_channels      = evtchn_2l_max_channels,
	.nr_channels       = evtchn_2l_max_channels,
	.bind_to_cpu       = evtchn_2l_bind_to_cpu,
	.clear_pending     = evtchn_2l_clear_pending,
	.set_pending       = evtchn_2l_set_pending,
	.is_pending        = evtchn_2l_is_pending,
	.test_and_set_mask = evtchn_2l_test_and_set_mask,
	.mask              = evtchn_2l_mask,
	.unmask            = evtchn_2l_unmask,
	.handle_events     = evtchn_2l_handle_events,
	.resume            = evtchn_2l_resume,
};

void __init xen_evtchn_2l_init(void)
{
	pr_info("Using 2-level ABI\n");
	evtchn_ops = &evtchn_ops_2l;
}