drivers/xen/events/events_2l.c (Linux v6.13.7)
// SPDX-License-Identifier: GPL-2.0
/*
 * Xen event channels (2-level ABI)
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

#include "events_internal.h"

/*
 * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
 * careful to only use bitops which allow for this (e.g.
 * test_bit/find_first_bit and friends but not __ffs) and to pass
 * BITS_PER_EVTCHN_WORD as the bitmask length.
 */
#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
/*
 * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
 * array. Primarily to avoid long lines (hence the terse name).
 */
#define BM(x) (unsigned long *)(x)
/* Find the first set bit in an evtchn mask */
#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)

#define EVTCHN_MASK_SIZE (EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD)

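/*
 * Example of the 2-level indexing (assuming a 64-bit xen_ulong_t): event
 * channel 130 is bit 130 % 64 = 2 of word 130 / 64 = 2 in the shared
 * evtchn_pending[]/evtchn_mask[] arrays, and bit 2 of the per-vcpu
 * evtchn_pending_sel selector marks that word as needing a scan.
 */

/* Per-cpu bitmap of the event channels currently bound to each CPU. */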
static DEFINE_PER_CPU(xen_ulong_t [EVTCHN_MASK_SIZE], cpu_evtchn_mask);

static unsigned evtchn_2l_max_channels(void)
{
	return EVTCHN_2L_NR_CHANNELS;
}

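/* Drop @evtchn from @cpu's local binding mask when the channel goes away. */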
static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu)
{
	clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}

static void evtchn_2l_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
				  unsigned int old_cpu)
{
	clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, old_cpu)));
	set_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}

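/*
 * The pending and mask state itself lives in the shared info page, which
 * the hypervisor updates concurrently, so it is only touched with the
 * sync_ bitops.
 */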
static void evtchn_2l_clear_pending(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, BM(&s->evtchn_pending[0]));
}

static void evtchn_2l_set_pending(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, BM(&s->evtchn_pending[0]));
}

static bool evtchn_2l_is_pending(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
}

static void evtchn_2l_mask(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, BM(&s->evtchn_mask[0]));
}

static void evtchn_2l_unmask(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();
	int do_hypercall = 0, evtchn_pending = 0;

	BUG_ON(!irqs_disabled());

	smp_wmb();	/* All writes before unmask must be visible. */

	if (unlikely((cpu != cpu_from_evtchn(port))))
		do_hypercall = 1;
	else {
		/*
		 * Need to clear the mask before checking pending to
		 * avoid a race with an event becoming pending.
		 *
		 * EVTCHNOP_unmask will only trigger an upcall if the
		 * mask bit was set, so if a hypercall is needed
		 * remask the event.
		 */
		sync_clear_bit(port, BM(&s->evtchn_mask[0]));
		evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));

		if (unlikely(evtchn_pending && xen_hvm_domain())) {
			sync_set_bit(port, BM(&s->evtchn_mask[0]));
			do_hypercall = 1;
		}
	}

	/* Slow path (hypercall) if this is a non-local port or if this is
	 * an hvm domain and an event is pending (hvm domains don't have
	 * their own implementation of irq_enable). */
	if (do_hypercall) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (evtchn_pending &&
		    !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
					   BM(&vcpu_info->evtchn_pending_sel)))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

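/*
 * Per-cpu scan position: where the previous call to
 * evtchn_2l_handle_events() stopped.  The next scan resumes just after
 * that port, so one busy low-numbered channel cannot starve the rest.
 */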
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))

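/* Example: MASK_LSBS(0b1101, 2) == 0b1100 (bits 0 and 1 cleared). */

/*
 * Events in word idx that are pending, not globally masked, and bound
 * to this CPU.
 */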
static inline xen_ulong_t active_evtchns(unsigned int cpu,
					 struct shared_info *sh,
					 unsigned int idx)
{
	return sh->evtchn_pending[idx] &
		per_cpu(cpu_evtchn_mask, cpu)[idx] &
		~sh->evtchn_mask[idx];
}

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and hand it to handle_irq_for_port() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
{
	int irq;
	xen_ulong_t pending_words;
	xen_ulong_t pending_bits;
	int start_word_idx, start_bit_idx;
	int word_idx, bit_idx;
	int i;
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	evtchn_port_t evtchn;

	/* Timer interrupt has highest priority. */
	irq = irq_evtchn_from_virq(cpu, VIRQ_TIMER, &evtchn);
	if (irq != -1) {
		word_idx = evtchn / BITS_PER_LONG;
		bit_idx = evtchn % BITS_PER_LONG;
		if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
			generic_handle_irq(irq);
	}

	/*
	 * Master flag must be cleared /before/ clearing
	 * selector flag. xchg_xen_ulong must contain an
	 * appropriate barrier.
	 */
	pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);

	start_word_idx = __this_cpu_read(current_word_idx);
	start_bit_idx = __this_cpu_read(current_bit_idx);

	word_idx = start_word_idx;

	for (i = 0; pending_words != 0; i++) {
		xen_ulong_t words;

		words = MASK_LSBS(pending_words, word_idx);

		/*
		 * If we masked out all events, wrap to beginning.
		 */
		if (words == 0) {
			word_idx = 0;
			bit_idx = 0;
			continue;
		}
		word_idx = EVTCHN_FIRST_BIT(words);

		pending_bits = active_evtchns(cpu, s, word_idx);
		bit_idx = 0; /* usually scan entire word from start */
		/*
		 * We scan the starting word in two parts.
		 *
		 * 1st time: start in the middle, scanning the
		 * upper bits.
		 *
		 * 2nd time: scan the whole word (not just the
		 * parts skipped in the first pass) -- if an
		 * event in the previously scanned bits is
		 * pending again it would just be scanned on
		 * the next loop anyway.
		 */
		if (word_idx == start_word_idx) {
			if (i == 0)
				bit_idx = start_bit_idx;
		}

		do {
			xen_ulong_t bits;
			evtchn_port_t port;

			bits = MASK_LSBS(pending_bits, bit_idx);

			/* If we masked out all events, move on. */
			if (bits == 0)
				break;

			bit_idx = EVTCHN_FIRST_BIT(bits);

			/* Process port. */
			port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
			handle_irq_for_port(port, ctrl);

			bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;

			/* Next caller starts at last processed + 1 */
			__this_cpu_write(current_word_idx,
					 bit_idx ? word_idx :
					 (word_idx+1) % BITS_PER_EVTCHN_WORD);
			__this_cpu_write(current_bit_idx, bit_idx);
		} while (bit_idx != 0);

		/* Scan start_l1i twice; all others once. */
		if ((word_idx != start_word_idx) || (i != 0))
			pending_words &= ~(1UL << word_idx);

		word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
	}
}

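/*
 * Debug dump: per-vcpu upcall mask/pending state, the global pending and
 * mask bitmaps, this CPU's local binding mask, and a list of every port
 * that is currently pending together with why it is (or is not) deliverable.
 */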
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n  ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");
	printk("\nglobal mask:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nglobally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocal cpu%d mask:\n   ", cpu);
	for (i = (EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		xen_ulong_t pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n   " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < EVTCHN_2L_NR_CHANNELS; i++) {
		if (sync_test_bit(i, BM(sh->evtchn_pending))) {
			int word_idx = i / BITS_PER_EVTCHN_WORD;
			printk("  %d: event %d -> irq %u%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       irq_from_evtchn(i),
			       sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
			       ? "" : " l2-clear",
			       !sync_test_bit(i, BM(sh->evtchn_mask))
			       ? "" : " globally-masked",
			       sync_test_bit(i, BM(cpu_evtchn))
			       ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

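/* Start from clean local binding masks; channels are rebound after resume. */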
static void evtchn_2l_resume(void)
{
	int i;

	for_each_online_cpu(i)
		memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
				EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
}

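/* A CPU is going offline: forget any channels still recorded as bound to it. */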
static int evtchn_2l_percpu_deinit(unsigned int cpu)
{
	memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *
			EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);

	return 0;
}

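/* evtchn_ops implementation for the 2-level ABI. */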
static const struct evtchn_ops evtchn_ops_2l = {
	.max_channels      = evtchn_2l_max_channels,
	.nr_channels       = evtchn_2l_max_channels,
	.remove            = evtchn_2l_remove,
	.bind_to_cpu       = evtchn_2l_bind_to_cpu,
	.clear_pending     = evtchn_2l_clear_pending,
	.set_pending       = evtchn_2l_set_pending,
	.is_pending        = evtchn_2l_is_pending,
	.mask              = evtchn_2l_mask,
	.unmask            = evtchn_2l_unmask,
	.handle_events     = evtchn_2l_handle_events,
	.resume	           = evtchn_2l_resume,
	.percpu_deinit     = evtchn_2l_percpu_deinit,
};

void __init xen_evtchn_2l_init(void)
{
	pr_info("Using 2-level ABI\n");
	evtchn_ops = &evtchn_ops_2l;
}