Linux kernel source excerpt: arch/sparc/kernel/sun4v_ivec.S (SPARC64 sun4v interrupt
vector handling), shown at two kernel versions for comparison.

First copy below: v6.2
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/* sun4v_ivec.S: Sun4v interrupt vector handling.
  3 *
  4 * Copyright (C) 2006 <davem@davemloft.net>
  5 */
  6
  7#include <asm/cpudata.h>
  8#include <asm/intr_queue.h>
  9#include <asm/pil.h>
 10
 11	.text
 12	.align	32
 13
 14sun4v_cpu_mondo:
	/* CPU mondo (cross-call) trap entry: dequeue one 64-byte entry
	 * from this cpu's CPU mondo queue, bump this cpu's mondo counter,
	 * and jump to the handler PC carried in the entry.  Uses only
	 * the global registers %g1-%g7.
	 */
 15	/* Head offset in %g2, tail offset in %g4.
 16	 * If they are the same, no work.
 17	 */
 18	mov	INTRQ_CPU_MONDO_HEAD, %g2
 19	ldxa	[%g2] ASI_QUEUE, %g2
 20	mov	INTRQ_CPU_MONDO_TAIL, %g4
 21	ldxa	[%g4] ASI_QUEUE, %g4
 22	cmp	%g2, %g4
 23	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
 24	 nop
 25
 26	/* Get &trap_block[smp_processor_id()] into %g4.  */
 27	ldxa	[%g0] ASI_SCRATCHPAD, %g4
 28	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4
 29
 30	/* Get smp_processor_id() into %g3 */
 31	sethi	%hi(trap_block), %g5
 32	or	%g5, %lo(trap_block), %g5
 33	sub	%g4, %g5, %g3
 34	srlx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3	/* byte offset -> array index */
 35
 36	/* Increment cpu_mondo_counter[smp_processor_id()] */
 37	sethi	%hi(cpu_mondo_counter), %g5
 38	or	%g5, %lo(cpu_mondo_counter), %g5
 39	sllx	%g3, 3, %g3			/* index * sizeof(u64) */
 40	add	%g5, %g3, %g5
 41	ldx	[%g5], %g3
 42	add	%g3, 1, %g3
 43	stx	%g3, [%g5]
 44
 45	/* Get CPU mondo queue base phys address into %g7.  */
 46	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
 47
 48	/* Now get the cross-call arguments and handler PC, same
 49	 * layout as sun4u:
 50	 *
 51	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
 52	 *                  high half is context arg to MMU flushes, into %g5
 53	 * 2nd 64-bit word: 64-bit arg, load into %g1
 54	 * 3rd 64-bit word: 64-bit arg, load into %g7
 55	 */
 56	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
 57	add	%g2, 0x8, %g2
 58	srlx	%g3, 32, %g5
 59	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
 60	add	%g2, 0x8, %g2
 61	srl	%g3, 0, %g3			/* zero-extend: low 32 bits are the handler PC */
 62	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7
 63	add	%g2, 0x40 - 0x8 - 0x8, %g2	/* net +0x40: advance past this 64-byte entry */
 64
 65	/* Update queue head pointer.  */
 66	lduw	[%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
 67	and	%g2, %g4, %g2			/* wrap head using the queue size mask */
 68
 69	mov	INTRQ_CPU_MONDO_HEAD, %g4
 70	stxa	%g2, [%g4] ASI_QUEUE
 71	membar	#Sync
 72
 73	jmpl	%g3, %g0
 74	 nop
 75
 76sun4v_cpu_mondo_queue_empty:
 77	retry
 78
 79sun4v_dev_mondo:
	/* Device mondo trap entry: dequeue one 64-byte entry from this
	 * cpu's device mondo queue, link the corresponding bucket onto
	 * the per-cpu irq work list, and raise the device-IRQ softint.
	 */
 80	/* Head offset in %g2, tail offset in %g4.  */
 81	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
 82	ldxa	[%g2] ASI_QUEUE, %g2
 83	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
 84	ldxa	[%g4] ASI_QUEUE, %g4
 85	cmp	%g2, %g4
 86	be,pn	%xcc, sun4v_dev_mondo_queue_empty
 87	 nop
 88
 89	/* Get &trap_block[smp_processor_id()] into %g4.  */
 90	ldxa	[%g0] ASI_SCRATCHPAD, %g4
 91	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4
 92
 93	/* Get DEV mondo queue base phys address into %g5.  */
 94	ldx	[%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
 95
 96	/* Load IVEC into %g3.  */
 97	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
 98	add	%g2, 0x40, %g2			/* advance past this 64-byte entry */
 99
100	/* XXX There can be a full 64-byte block of data here.
101	 * XXX This is how we can get at MSI vector data.
102	 * XXX Currently we do not capture this, but when we do we'll
103	 * XXX need to add a 64-byte storage area in the struct ino_bucket
104	 * XXX or the struct irq_desc.
105	 */
106
107	/* Update queue head pointer, this frees up some registers.  */
108	lduw	[%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
109	and	%g2, %g4, %g2			/* wrap head using the queue size mask */
110
111	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
112	stxa	%g2, [%g4] ASI_QUEUE
113	membar	#Sync
114
115	TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)
116
117	/* For VIRQs, cookie is encoded as ~bucket_phys_addr  */
118	brlz,pt %g3, 1f				/* negative IVEC => VIRQ cookie */
119	 xnor	%g3, %g0, %g4			/* delay slot: %g4 = ~cookie = bucket phys addr */
120
121	/* Get __pa(&ivector_table[IVEC]) into %g4.  */
122	sethi	%hi(ivector_table_pa), %g4
123	ldx	[%g4 + %lo(ivector_table_pa)], %g4
124	sllx	%g3, 4, %g3			/* IVEC * 16-byte table entry */
125	add	%g4, %g3, %g4
126
1271:	ldx	[%g1], %g2
128	stxa	%g2, [%g4] ASI_PHYS_USE_EC	/* push bucket onto irq work list head */
129	stx	%g4, [%g1]
130
131	/* Signal the interrupt by setting (1 << pil) in %softint.  */
132	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint
133
	/* Fall through to the retry below.  */
134sun4v_dev_mondo_queue_empty:
135	retry
136
137sun4v_res_mondo:
	/* Resumable error mondo trap entry: copy the 64-byte queue entry
	 * into the per-cpu kernel buffer, advance the queue head, then
	 * enter C via etrap_irq to log it with sun4v_resum_error().
	 * If the kernel buffer slot is still occupied, the overflow path
	 * drops the backlog and logs via sun4v_resum_overflow() instead.
	 */
138	/* Head offset in %g2, tail offset in %g4.  */
139	mov	INTRQ_RESUM_MONDO_HEAD, %g2
140	ldxa	[%g2] ASI_QUEUE, %g2
141	mov	INTRQ_RESUM_MONDO_TAIL, %g4
142	ldxa	[%g4] ASI_QUEUE, %g4
143	cmp	%g2, %g4
144	be,pn	%xcc, sun4v_res_mondo_queue_empty
145	 nop
146
147	/* Get &trap_block[smp_processor_id()] into %g3.  */
148	ldxa	[%g0] ASI_SCRATCHPAD, %g3
149	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3
150
151	/* Get RES mondo queue base phys address into %g5.  */
152	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5
153
154	/* Get RES kernel buffer base phys address into %g7.  */
155	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7
156
157	/* If the first word is non-zero, queue is full.  */
158	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
159	brnz,pn	%g1, sun4v_res_mondo_queue_full
160	 nop
161
162	lduw	[%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4
163
164	/* Remember this entry's offset in %g1.  */
165	mov	%g2, %g1
166
167	/* Copy 64-byte queue entry into kernel buffer.  */
168	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
169	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
170	add	%g2, 0x08, %g2
171	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
172	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
173	add	%g2, 0x08, %g2
174	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
175	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
176	add	%g2, 0x08, %g2
177	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
178	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
179	add	%g2, 0x08, %g2
180	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
181	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
182	add	%g2, 0x08, %g2
183	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
184	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
185	add	%g2, 0x08, %g2
186	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
187	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
188	add	%g2, 0x08, %g2
189	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
190	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
191	add	%g2, 0x08, %g2
192
193	/* Update queue head pointer.  */
194	and	%g2, %g4, %g2			/* wrap head using the queue size mask */
195
196	mov	INTRQ_RESUM_MONDO_HEAD, %g4
197	stxa	%g2, [%g4] ASI_QUEUE
198	membar	#Sync
199
200	/* Disable interrupts and save register state so we can call
201	 * C code.  The etrap handling will leave %g4 in %l4 for us
202	 * when it's done.
203	 */
204	rdpr	%pil, %g2
205	wrpr	%g0, PIL_NORMAL_MAX, %pil
206	mov	%g1, %g4			/* %g4 = entry offset, handed to C in %l4 */
207	ba,pt	%xcc, etrap_irq
208	 rd	%pc, %g7
209#ifdef CONFIG_TRACE_IRQFLAGS
210	call		trace_hardirqs_off
211	 nop
212#endif
213	/* Log the event.  */
214	add	%sp, PTREGS_OFF, %o0
215	call	sun4v_resum_error
216	 mov	%l4, %o1
217
218	/* Return from trap.  */
219	ba,pt	%xcc, rtrap_irq
220	 nop
221
222sun4v_res_mondo_queue_empty:
223	retry
224
225sun4v_res_mondo_queue_full:
226	/* The queue is full, consolidate our damage by setting
227	 * the head equal to the tail.  We'll just trap again otherwise.
228	 * Call C code to log the event.
229	 */
230	mov	INTRQ_RESUM_MONDO_HEAD, %g2
231	stxa	%g4, [%g2] ASI_QUEUE		/* head = tail: discard the backlog */
232	membar	#Sync
233
234	rdpr	%pil, %g2
235	wrpr	%g0, PIL_NORMAL_MAX, %pil
236	ba,pt	%xcc, etrap_irq
237	 rd	%pc, %g7
238#ifdef CONFIG_TRACE_IRQFLAGS
239	call		trace_hardirqs_off
240	 nop
241#endif
242	call	sun4v_resum_overflow
243	 add	%sp, PTREGS_OFF, %o0
244
245	ba,pt	%xcc, rtrap_irq
246	 nop
247
248sun4v_nonres_mondo:
	/* Non-resumable error mondo trap entry: mirror image of
	 * sun4v_res_mondo, but for the NONRESUM queue/buffer and
	 * logging via sun4v_nonresum_error()/sun4v_nonresum_overflow().
	 */
249	/* Head offset in %g2, tail offset in %g4.  */
250	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
251	ldxa	[%g2] ASI_QUEUE, %g2
252	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
253	ldxa	[%g4] ASI_QUEUE, %g4
254	cmp	%g2, %g4
255	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
256	 nop
257
258	/* Get &trap_block[smp_processor_id()] into %g3.  */
259	ldxa	[%g0] ASI_SCRATCHPAD, %g3
260	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3
261
262	/* Get NONRES mondo queue base phys address into %g5.  */
263	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5
264
265	/* Get NONRES kernel buffer base phys address into %g7.  */
266	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7
267
268	/* If the first word is non-zero, queue is full.  */
269	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
270	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
271	 nop
272
273	lduw	[%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4
274
275	/* Remember this entry's offset in %g1.  */
276	mov	%g2, %g1
277
278	/* Copy 64-byte queue entry into kernel buffer.  */
279	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
280	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
281	add	%g2, 0x08, %g2
282	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
283	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
284	add	%g2, 0x08, %g2
285	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
286	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
287	add	%g2, 0x08, %g2
288	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
289	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
290	add	%g2, 0x08, %g2
291	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
292	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
293	add	%g2, 0x08, %g2
294	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
295	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
296	add	%g2, 0x08, %g2
297	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
298	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
299	add	%g2, 0x08, %g2
300	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
301	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
302	add	%g2, 0x08, %g2
303
304	/* Update queue head pointer.  */
305	and	%g2, %g4, %g2			/* wrap head using the queue size mask */
306
307	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
308	stxa	%g2, [%g4] ASI_QUEUE
309	membar	#Sync
310
311	/* Disable interrupts and save register state so we can call
312	 * C code.  The etrap handling will leave %g4 in %l4 for us
313	 * when it's done.
314	 */
315	rdpr	%pil, %g2
316	wrpr	%g0, PIL_NORMAL_MAX, %pil
317	mov	%g1, %g4			/* %g4 = entry offset, handed to C in %l4 */
318	ba,pt	%xcc, etrap_irq
319	 rd	%pc, %g7
320#ifdef CONFIG_TRACE_IRQFLAGS
321	call		trace_hardirqs_off
322	 nop
323#endif
324	/* Log the event.  */
325	add	%sp, PTREGS_OFF, %o0
326	call	sun4v_nonresum_error
327	 mov	%l4, %o1
328
329	/* Return from trap.  */
330	ba,pt	%xcc, rtrap_irq
331	 nop
332
333sun4v_nonres_mondo_queue_empty:
334	retry
335
336sun4v_nonres_mondo_queue_full:
337	/* The queue is full, consolidate our damage by setting
338	 * the head equal to the tail.  We'll just trap again otherwise.
339	 * Call C code to log the event.
340	 */
341	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
342	stxa	%g4, [%g2] ASI_QUEUE		/* head = tail: discard the backlog */
343	membar	#Sync
344
345	rdpr	%pil, %g2
346	wrpr	%g0, PIL_NORMAL_MAX, %pil
347	ba,pt	%xcc, etrap_irq
348	 rd	%pc, %g7
349#ifdef CONFIG_TRACE_IRQFLAGS
350	call		trace_hardirqs_off
351	 nop
352#endif
353	call	sun4v_nonresum_overflow
354	 add	%sp, PTREGS_OFF, %o0
355
356	ba,pt	%xcc, rtrap_irq
357	 nop
Second copy below: v4.6 — an older revision of the same file. Note the blank gap after
its line 27, where the later cpu_mondo_counter increment (present in the v6.2 copy
above) does not yet exist.
  1/* sun4v_ivec.S: Sun4v interrupt vector handling.
  2 *
  3 * Copyright (C) 2006 <davem@davemloft.net>
  4 */
  5
  6#include <asm/cpudata.h>
  7#include <asm/intr_queue.h>
  8#include <asm/pil.h>
  9
 10	.text
 11	.align	32
 12
 13sun4v_cpu_mondo:
	/* v4.6 revision of the CPU mondo (cross-call) trap entry:
	 * dequeue one 64-byte entry and jump to the handler PC it
	 * carries.  The blank diff lines below mark where the later
	 * cpu_mondo_counter increment was added (not present here).
	 */
 14	/* Head offset in %g2, tail offset in %g4.
 15	 * If they are the same, no work.
 16	 */
 17	mov	INTRQ_CPU_MONDO_HEAD, %g2
 18	ldxa	[%g2] ASI_QUEUE, %g2
 19	mov	INTRQ_CPU_MONDO_TAIL, %g4
 20	ldxa	[%g4] ASI_QUEUE, %g4
 21	cmp	%g2, %g4
 22	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
 23	 nop
 24
 25	/* Get &trap_block[smp_processor_id()] into %g4.  */
 26	ldxa	[%g0] ASI_SCRATCHPAD, %g4
 27	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 28
 29	/* Get CPU mondo queue base phys address into %g7.  */
 30	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
 31
 32	/* Now get the cross-call arguments and handler PC, same
 33	 * layout as sun4u:
 34	 *
 35	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
 36	 *                  high half is context arg to MMU flushes, into %g5
 37	 * 2nd 64-bit word: 64-bit arg, load into %g1
 38	 * 3rd 64-bit word: 64-bit arg, load into %g7
 39	 */
 40	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
 41	add	%g2, 0x8, %g2
 42	srlx	%g3, 32, %g5
 43	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
 44	add	%g2, 0x8, %g2
 45	srl	%g3, 0, %g3			/* zero-extend: low 32 bits are the handler PC */
 46	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7
 47	add	%g2, 0x40 - 0x8 - 0x8, %g2	/* net +0x40: advance past this 64-byte entry */
 48
 49	/* Update queue head pointer.  */
 50	lduw	[%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
 51	and	%g2, %g4, %g2			/* wrap head using the queue size mask */
 52
 53	mov	INTRQ_CPU_MONDO_HEAD, %g4
 54	stxa	%g2, [%g4] ASI_QUEUE
 55	membar	#Sync
 56
 57	jmpl	%g3, %g0
 58	 nop
 59
 60sun4v_cpu_mondo_queue_empty:
 61	retry
 62
 63sun4v_dev_mondo:
	/* v4.6 revision of the device mondo trap entry: dequeue one
	 * 64-byte entry, link the bucket onto the per-cpu irq work
	 * list, and raise the device-IRQ softint.
	 */
 64	/* Head offset in %g2, tail offset in %g4.  */
 65	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
 66	ldxa	[%g2] ASI_QUEUE, %g2
 67	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
 68	ldxa	[%g4] ASI_QUEUE, %g4
 69	cmp	%g2, %g4
 70	be,pn	%xcc, sun4v_dev_mondo_queue_empty
 71	 nop
 72
 73	/* Get &trap_block[smp_processor_id()] into %g4.  */
 74	ldxa	[%g0] ASI_SCRATCHPAD, %g4
 75	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4
 76
 77	/* Get DEV mondo queue base phys address into %g5.  */
 78	ldx	[%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
 79
 80	/* Load IVEC into %g3.  */
 81	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
 82	add	%g2, 0x40, %g2			/* advance past this 64-byte entry */
 83
 84	/* XXX There can be a full 64-byte block of data here.
 85	 * XXX This is how we can get at MSI vector data.
 86	 * XXX Currently we do not capture this, but when we do we'll
 87	 * XXX need to add a 64-byte storage area in the struct ino_bucket
 88	 * XXX or the struct irq_desc.
 89	 */
 90
 91	/* Update queue head pointer, this frees up some registers.  */
 92	lduw	[%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
 93	and	%g2, %g4, %g2			/* wrap head using the queue size mask */
 94
 95	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
 96	stxa	%g2, [%g4] ASI_QUEUE
 97	membar	#Sync
 98
 99	TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)
100
101	/* For VIRQs, cookie is encoded as ~bucket_phys_addr  */
102	brlz,pt %g3, 1f				/* negative IVEC => VIRQ cookie */
103	 xnor	%g3, %g0, %g4			/* delay slot: %g4 = ~cookie = bucket phys addr */
104
105	/* Get __pa(&ivector_table[IVEC]) into %g4.  */
106	sethi	%hi(ivector_table_pa), %g4
107	ldx	[%g4 + %lo(ivector_table_pa)], %g4
108	sllx	%g3, 4, %g3			/* IVEC * 16-byte table entry */
109	add	%g4, %g3, %g4
110
1111:	ldx	[%g1], %g2
112	stxa	%g2, [%g4] ASI_PHYS_USE_EC	/* push bucket onto irq work list head */
113	stx	%g4, [%g1]
114
115	/* Signal the interrupt by setting (1 << pil) in %softint.  */
116	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint
117
	/* Fall through to the retry below.  */
118sun4v_dev_mondo_queue_empty:
119	retry
120
121sun4v_res_mondo:
	/* v4.6 revision of the resumable error mondo trap entry: copy
	 * the 64-byte queue entry into the per-cpu kernel buffer,
	 * advance the head, then call sun4v_resum_error() via etrap_irq.
	 * If the kernel buffer slot is occupied, the overflow path drops
	 * the backlog and logs via sun4v_resum_overflow() instead.
	 */
122	/* Head offset in %g2, tail offset in %g4.  */
123	mov	INTRQ_RESUM_MONDO_HEAD, %g2
124	ldxa	[%g2] ASI_QUEUE, %g2
125	mov	INTRQ_RESUM_MONDO_TAIL, %g4
126	ldxa	[%g4] ASI_QUEUE, %g4
127	cmp	%g2, %g4
128	be,pn	%xcc, sun4v_res_mondo_queue_empty
129	 nop
130
131	/* Get &trap_block[smp_processor_id()] into %g3.  */
132	ldxa	[%g0] ASI_SCRATCHPAD, %g3
133	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3
134
135	/* Get RES mondo queue base phys address into %g5.  */
136	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5
137
138	/* Get RES kernel buffer base phys address into %g7.  */
139	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7
140
141	/* If the first word is non-zero, queue is full.  */
142	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
143	brnz,pn	%g1, sun4v_res_mondo_queue_full
144	 nop
145
146	lduw	[%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4
147
148	/* Remember this entry's offset in %g1.  */
149	mov	%g2, %g1
150
151	/* Copy 64-byte queue entry into kernel buffer.  */
152	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
153	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
154	add	%g2, 0x08, %g2
155	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
156	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
157	add	%g2, 0x08, %g2
158	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
159	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
160	add	%g2, 0x08, %g2
161	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
162	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
163	add	%g2, 0x08, %g2
164	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
165	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
166	add	%g2, 0x08, %g2
167	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
168	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
169	add	%g2, 0x08, %g2
170	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
171	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
172	add	%g2, 0x08, %g2
173	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
174	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
175	add	%g2, 0x08, %g2
176
177	/* Update queue head pointer.  */
178	and	%g2, %g4, %g2			/* wrap head using the queue size mask */
179
180	mov	INTRQ_RESUM_MONDO_HEAD, %g4
181	stxa	%g2, [%g4] ASI_QUEUE
182	membar	#Sync
183
184	/* Disable interrupts and save register state so we can call
185	 * C code.  The etrap handling will leave %g4 in %l4 for us
186	 * when it's done.
187	 */
188	rdpr	%pil, %g2
189	wrpr	%g0, PIL_NORMAL_MAX, %pil
190	mov	%g1, %g4			/* %g4 = entry offset, handed to C in %l4 */
191	ba,pt	%xcc, etrap_irq
192	 rd	%pc, %g7
193#ifdef CONFIG_TRACE_IRQFLAGS
194	call		trace_hardirqs_off
195	 nop
196#endif
197	/* Log the event.  */
198	add	%sp, PTREGS_OFF, %o0
199	call	sun4v_resum_error
200	 mov	%l4, %o1
201
202	/* Return from trap.  */
203	ba,pt	%xcc, rtrap_irq
204	 nop
205
206sun4v_res_mondo_queue_empty:
207	retry
208
209sun4v_res_mondo_queue_full:
210	/* The queue is full, consolidate our damage by setting
211	 * the head equal to the tail.  We'll just trap again otherwise.
212	 * Call C code to log the event.
213	 */
214	mov	INTRQ_RESUM_MONDO_HEAD, %g2
215	stxa	%g4, [%g2] ASI_QUEUE		/* head = tail: discard the backlog */
216	membar	#Sync
217
218	rdpr	%pil, %g2
219	wrpr	%g0, PIL_NORMAL_MAX, %pil
220	ba,pt	%xcc, etrap_irq
221	 rd	%pc, %g7
222#ifdef CONFIG_TRACE_IRQFLAGS
223	call		trace_hardirqs_off
224	 nop
225#endif
226	call	sun4v_resum_overflow
227	 add	%sp, PTREGS_OFF, %o0
228
229	ba,pt	%xcc, rtrap_irq
230	 nop
231
232sun4v_nonres_mondo:
	/* v4.6 revision of the non-resumable error mondo trap entry:
	 * mirror image of sun4v_res_mondo, but for the NONRESUM
	 * queue/buffer and the sun4v_nonresum_* C logging routines.
	 */
233	/* Head offset in %g2, tail offset in %g4.  */
234	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
235	ldxa	[%g2] ASI_QUEUE, %g2
236	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
237	ldxa	[%g4] ASI_QUEUE, %g4
238	cmp	%g2, %g4
239	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
240	 nop
241
242	/* Get &trap_block[smp_processor_id()] into %g3.  */
243	ldxa	[%g0] ASI_SCRATCHPAD, %g3
244	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3
245
246	/* Get NONRES mondo queue base phys address into %g5.  */
247	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5
248
249	/* Get NONRES kernel buffer base phys address into %g7.  */
250	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7
251
252	/* If the first word is non-zero, queue is full.  */
253	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
254	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
255	 nop
256
257	lduw	[%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4
258
259	/* Remember this entry's offset in %g1.  */
260	mov	%g2, %g1
261
262	/* Copy 64-byte queue entry into kernel buffer.  */
263	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
264	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
265	add	%g2, 0x08, %g2
266	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
267	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
268	add	%g2, 0x08, %g2
269	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
270	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
271	add	%g2, 0x08, %g2
272	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
273	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
274	add	%g2, 0x08, %g2
275	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
276	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
277	add	%g2, 0x08, %g2
278	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
279	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
280	add	%g2, 0x08, %g2
281	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
282	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
283	add	%g2, 0x08, %g2
284	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
285	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
286	add	%g2, 0x08, %g2
287
288	/* Update queue head pointer.  */
289	and	%g2, %g4, %g2			/* wrap head using the queue size mask */
290
291	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
292	stxa	%g2, [%g4] ASI_QUEUE
293	membar	#Sync
294
295	/* Disable interrupts and save register state so we can call
296	 * C code.  The etrap handling will leave %g4 in %l4 for us
297	 * when it's done.
298	 */
299	rdpr	%pil, %g2
300	wrpr	%g0, PIL_NORMAL_MAX, %pil
301	mov	%g1, %g4			/* %g4 = entry offset, handed to C in %l4 */
302	ba,pt	%xcc, etrap_irq
303	 rd	%pc, %g7
304#ifdef CONFIG_TRACE_IRQFLAGS
305	call		trace_hardirqs_off
306	 nop
307#endif
308	/* Log the event.  */
309	add	%sp, PTREGS_OFF, %o0
310	call	sun4v_nonresum_error
311	 mov	%l4, %o1
312
313	/* Return from trap.  */
314	ba,pt	%xcc, rtrap_irq
315	 nop
316
317sun4v_nonres_mondo_queue_empty:
318	retry
319
320sun4v_nonres_mondo_queue_full:
321	/* The queue is full, consolidate our damage by setting
322	 * the head equal to the tail.  We'll just trap again otherwise.
323	 * Call C code to log the event.
324	 */
325	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
326	stxa	%g4, [%g2] ASI_QUEUE		/* head = tail: discard the backlog */
327	membar	#Sync
328
329	rdpr	%pil, %g2
330	wrpr	%g0, PIL_NORMAL_MAX, %pil
331	ba,pt	%xcc, etrap_irq
332	 rd	%pc, %g7
333#ifdef CONFIG_TRACE_IRQFLAGS
334	call		trace_hardirqs_off
335	 nop
336#endif
337	call	sun4v_nonresum_overflow
338	 add	%sp, PTREGS_OFF, %o0
339
340	ba,pt	%xcc, rtrap_irq
341	 nop