// SPDX-License-Identifier: GPL-2.0
/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <linux/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));
}

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

static unsigned long hvirq_major __initdata;
static int __init early_hvirq_major(char *p)
{
	int rc = kstrtoul(p, 10, &hvirq_major);

	return rc;
}
early_param("hvirq", early_hvirq_major);
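/* Example: booting with "hvirq=2" makes irq_init_hv() below try to
 * negotiate major version 2 of the HV_GRP_INTR API instead of the
 * default of 3.
 */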

static int hv_irq_version;

/* Major version 2.0 of HV_GRP_INTR added support for the VIRQ cookie
 * based interfaces, but:
 *
 * 1) Several OSs, Solaris and Linux included, use them even when only
 *    negotiating version 1.0 (or failing to negotiate at all).  So the
 *    hypervisor has a workaround that provides the VIRQ interfaces even
 *    when only version 1.0 of the API is in use.
 *
 * 2) Second, and more importantly, with major version 2.0 these VIRQ
 *    interfaces only were actually hooked up for LDC interrupts, even
 *    though the Hypervisor specification clearly stated:
 *
 *	The new interrupt API functions will be available to a guest
 *	when it negotiates version 2.0 in the interrupt API group 0x2. When
 *	a guest negotiates version 2.0, all interrupt sources will only
 *	support using the cookie interface, and any attempt to use the
 *	version 1.0 interrupt APIs numbered 0xa0 to 0xa6 will result in the
 *	ENOTSUPPORTED error being returned.
 *
 *    with an emphasis on "all interrupt sources".
 *
 * To correct this, major version 3.0 was created which does actually
 * support VIRQs for all interrupt sources (not just LDC devices).  So
 * if we want to move completely over the cookie based VIRQs we must
 * negotiate major version 3.0 or later of HV_GRP_INTR.
 */
static bool sun4v_cookie_only_virqs(void)
{
	if (hv_irq_version >= 3)
		return true;
	return false;
}

static void __init irq_init_hv(void)
{
	unsigned long hv_error, major, minor = 0;

	if (tlb_type != hypervisor)
		return;

	if (hvirq_major)
		major = hvirq_major;
	else
		major = 3;

	hv_error = sun4v_hvapi_register(HV_GRP_INTR, major, &minor);
	if (!hv_error)
		hv_irq_version = major;
	else
		hv_irq_version = 1;

	pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n",
		hv_irq_version,
		sun4v_cookie_only_virqs() ? "enabled" : "disabled");
}

/* This function is for the timer interrupt. */
int __init arch_probe_nr_irqs(void)
{
	return 1;
}

#define DEFAULT_NUM_IVECS	(0xfffU)
static unsigned int nr_ivec = DEFAULT_NUM_IVECS;
#define NUM_IVECS	(nr_ivec)

static unsigned int __init size_nr_ivec(void)
{
	if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		/* Athena's devhandle|devino is large. */
		case SUN4V_CHIP_SPARC64X:
			nr_ivec = 0xffff;
			break;
		}
	}
	return nr_ivec;
}

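/* Per-IRQ state.  For cookie based VIRQs the (dev_handle, dev_ino)
 * pair identifies the interrupt source and the embedded bucket is
 * what the cookie handed to the hypervisor points at; for the legacy
 * path the same storage holds the single sysino instead.  iclr/imap
 * are only used on sun4u; sun4v sets them to ~0UL to catch stray
 * accesses.
 */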
struct irq_handler_data {
	union {
		struct {
			unsigned int dev_handle;
			unsigned int dev_ino;
		};
		unsigned long sysino;
	};
	struct ino_bucket bucket;
	unsigned long	iclr;
	unsigned long	imap;
};

static inline unsigned int irq_data_to_handle(struct irq_data *data)
{
	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);

	return ihd->dev_handle;
}

static inline unsigned int irq_data_to_ino(struct irq_data *data)
{
	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);

	return ihd->dev_ino;
}

static inline unsigned long irq_data_to_sysino(struct irq_data *data)
{
	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);

	return ihd->sysino;
}

void irq_free(unsigned int irq)
{
	void *data = irq_get_handler_data(irq);

	kfree(data);
	irq_set_handler_data(irq, NULL);
	irq_free_descs(irq, 1);
}

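/* Allocate one IRQ descriptor; returns 0 on failure.  The
 * dev_handle/dev_ino arguments are not recorded here -- they end up
 * in the handler data set up by the sun4v/sun4u build functions.
 */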
unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
	int irq;

	irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL, NULL);
	if (irq <= 0)
		goto out;

	return irq;
out:
	return 0;
}

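/* The cookie we program into the hypervisor is ~__pa(bucket), so a
 * live cookie always has bit 63 set (e.g. a bucket at physical
 * address 0x800001000 yields cookie 0xfffffff7ffffefff).  Inverting
 * it back recovers the bucket's physical address, from which the
 * virtual IRQ number can be read.
 */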
static unsigned int cookie_exists(u32 devhandle, unsigned int devino)
{
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	unsigned int irq = 0U;

	hv_err = sun4v_vintr_get_cookie(devhandle, devino, &cookie);
	if (hv_err) {
		pr_err("HV get cookie failed hv_err = %ld\n", hv_err);
		goto out;
	}

	if (cookie & ((1UL << 63UL))) {
		cookie = ~cookie;
		bucket = (struct ino_bucket *) __va(cookie);
		irq = bucket->__irq;
	}
out:
	return irq;
}

static unsigned int sysino_exists(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
	struct ino_bucket *bucket;
	unsigned int irq;

	bucket = &ivector_table[sysino];
	irq = bucket_get_irq(__pa(bucket));

	return irq;
}

void ack_bad_irq(unsigned int irq)
{
	pr_crit("BAD IRQ ack %d\n", irq);
}

void irq_install_pre_handler(int irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	pr_warn("IRQ pre handler NOT supported.\n");
}

/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "NMI: ");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
	seq_printf(p, "     Non-maskable interrupts\n");
	return 0;
}

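/* Encode a cpuid in the interrupt dispatch format expected by the
 * bus: a UPA target ID on Starfire and plain UPA systems, a JBUS ID
 * on Jalapeno/Serrano, and a Safari agent/node ID pair on other
 * Cheetah based systems.
 */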
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpumask_equal(&mask, cpu_online_mask)) {
		cpuid = map_to_cpu(irq);
	} else {
		cpumask_t tmp;

		cpumask_and(&tmp, cpu_online_mask, &mask);
		cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(irq, affinity)	\
	real_hard_smp_processor_id()
#endif

static void sun4u_irq_enable(struct irq_data *data)
{
	struct irq_handler_data *handler_data;

	handler_data = irq_data_get_irq_handler_data(data);
	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq,
				       irq_data_get_affinity_mask(data));
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}
}

static int sun4u_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	struct irq_handler_data *handler_data;

	handler_data = irq_data_get_irq_handler_data(data);
	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, mask);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}

	return 0;
}

/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(struct irq_data *data)
{
}

static void sun4u_irq_eoi(struct irq_data *data)
{
	struct irq_handler_data *handler_data;

	handler_data = irq_data_get_irq_handler_data(data);
	if (likely(handler_data))
		upa_writeq(ICLR_IDLE, handler_data->iclr);
}

static void sun4v_irq_enable(struct irq_data *data)
{
	unsigned long cpuid = irq_choose_cpu(data->irq,
					     irq_data_get_affinity_mask(data));
	unsigned int ino = irq_data_to_sysino(data);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static int sun4v_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	unsigned long cpuid = irq_choose_cpu(data->irq, mask);
	unsigned int ino = irq_data_to_sysino(data);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}

static void sun4v_irq_disable(struct irq_data *data)
{
	unsigned int ino = irq_data_to_sysino(data);
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(struct irq_data *data)
{
	unsigned int ino = irq_data_to_sysino(data);
	int err;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_virq_enable(struct irq_data *data)
{
	unsigned long dev_handle = irq_data_to_handle(data);
	unsigned long dev_ino = irq_data_to_ino(data);
	unsigned long cpuid;
	int err;

	cpuid = irq_choose_cpu(data->irq, irq_data_get_affinity_mask(data));

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(struct irq_data *data,
				   const struct cpumask *mask, bool force)
{
	unsigned long dev_handle = irq_data_to_handle(data);
	unsigned long dev_ino = irq_data_to_ino(data);
	unsigned long cpuid;
	int err;

	cpuid = irq_choose_cpu(data->irq, mask);

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}

static void sun4v_virq_disable(struct irq_data *data)
{
	unsigned long dev_handle = irq_data_to_handle(data);
	unsigned long dev_ino = irq_data_to_ino(data);
	int err;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(struct irq_data *data)
{
	unsigned long dev_handle = irq_data_to_handle(data);
	unsigned long dev_ino = irq_data_to_ino(data);
	int err;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
	.name			= "sun4u",
	.irq_enable		= sun4u_irq_enable,
	.irq_disable		= sun4u_irq_disable,
	.irq_eoi		= sun4u_irq_eoi,
	.irq_set_affinity	= sun4u_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_irq = {
	.name			= "sun4v",
	.irq_enable		= sun4v_irq_enable,
	.irq_disable		= sun4v_irq_disable,
	.irq_eoi		= sun4v_irq_eoi,
	.irq_set_affinity	= sun4v_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_virq = {
	.name			= "vsun4v",
	.irq_enable		= sun4v_virq_enable,
	.irq_disable		= sun4v_virq_disable,
	.irq_eoi		= sun4v_virq_eoi,
	.irq_set_affinity	= sun4v_virt_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct irq_handler_data *handler_data;
	struct ino_bucket *bucket;
	unsigned int irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, ino);
		bucket_set_irq(__pa(bucket), irq);
		irq_set_chip_and_handler_name(irq, &sun4u_irq,
					      handle_fasteoi_irq, "IVEC");
	}

	handler_data = irq_get_handler_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	irq_set_handler_data(irq, handler_data);

	handler_data->imap = imap;
	handler_data->iclr = iclr;

out:
	return irq;
}

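/* Common allocation path for both sun4v interrupt flavours: allocate
 * the descriptor and handler data, let the caller-supplied
 * handler_data_init() record either the (devhandle, devino) pair or
 * the computed sysino, then install the chip and flow handler.
 */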
static unsigned int sun4v_build_common(u32 devhandle, unsigned int devino,
		void (*handler_data_init)(struct irq_handler_data *data,
		u32 devhandle, unsigned int devino),
		struct irq_chip *chip)
{
	struct irq_handler_data *data;
	unsigned int irq;

	irq = irq_alloc(devhandle, devino);
	if (!irq)
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		pr_err("IRQ handler data allocation failed.\n");
		irq_free(irq);
		irq = 0;
		goto out;
	}

	irq_set_handler_data(irq, data);
	handler_data_init(data, devhandle, devino);
	irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC");
	data->imap = ~0UL;
	data->iclr = ~0UL;
out:
	return irq;
}

static unsigned long cookie_assign(unsigned int irq, u32 devhandle,
		unsigned int devino)
{
	struct irq_handler_data *ihd = irq_get_handler_data(irq);
	unsigned long hv_error, cookie;

	/* handler_irq needs to find the irq.  The cookie is seen as a
	 * negative value in sun4v_dev_mondo, which marks it as a cookie
	 * rather than an ivector_table delivery.
	 */
	ihd->bucket.__irq = irq;
	cookie = ~__pa(&ihd->bucket);

	hv_error = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_error)
		pr_err("HV vintr set cookie failed = %ld\n", hv_error);

	return hv_error;
}

static void cookie_handler_data(struct irq_handler_data *data,
				u32 devhandle, unsigned int devino)
{
	data->dev_handle = devhandle;
	data->dev_ino = devino;
}

static unsigned int cookie_build_irq(u32 devhandle, unsigned int devino,
				     struct irq_chip *chip)
{
	unsigned long hv_error;
	unsigned int irq;

	irq = sun4v_build_common(devhandle, devino, cookie_handler_data, chip);

	hv_error = cookie_assign(irq, devhandle, devino);
	if (hv_error) {
		irq_free(irq);
		irq = 0;
	}

	return irq;
}

static unsigned int sun4v_build_cookie(u32 devhandle, unsigned int devino)
{
	unsigned int irq;

	irq = cookie_exists(devhandle, devino);
	if (irq)
		goto out;

	irq = cookie_build_irq(devhandle, devino, &sun4v_virq);

out:
	return irq;
}

static void sysino_set_bucket(unsigned int irq)
{
	struct irq_handler_data *ihd = irq_get_handler_data(irq);
	struct ino_bucket *bucket;
	unsigned long sysino;

	sysino = sun4v_devino_to_sysino(ihd->dev_handle, ihd->dev_ino);
	BUG_ON(sysino >= nr_ivec);
	bucket = &ivector_table[sysino];
	bucket_set_irq(__pa(bucket), irq);
}

static void sysino_handler_data(struct irq_handler_data *data,
				u32 devhandle, unsigned int devino)
{
	unsigned long sysino;

	sysino = sun4v_devino_to_sysino(devhandle, devino);
	data->sysino = sysino;
}

static unsigned int sysino_build_irq(u32 devhandle, unsigned int devino,
				     struct irq_chip *chip)
{
	unsigned int irq;

	irq = sun4v_build_common(devhandle, devino, sysino_handler_data, chip);
	if (!irq)
		goto out;

	sysino_set_bucket(irq);
out:
	return irq;
}

static int sun4v_build_sysino(u32 devhandle, unsigned int devino)
{
	int irq;

	irq = sysino_exists(devhandle, devino);
	if (irq)
		goto out;

	irq = sysino_build_irq(devhandle, devino, &sun4v_irq);
out:
	return irq;
}

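/* Entry point for sun4v device interrupts: use the cookie based VIRQ
 * scheme when the negotiated HV API supports it for all interrupt
 * sources (major version 3.0+), otherwise fall back to the legacy
 * sysino scheme.
 */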
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned int irq;

	if (sun4v_cookie_only_virqs())
		irq = sun4v_build_cookie(devhandle, devino);
	else
		irq = sun4v_build_sysino(devhandle, devino);

	return irq;
}

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	int irq;

	irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
	if (!irq)
		goto out;

	/* As in the original sun4v_build_virq(): to make the LDC channel
	 * startup sequence easier, especially wrt. locking, we do not
	 * let request_irq() enable the interrupt.
	 */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);

out:
	return irq;
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

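/* First-level device interrupt dispatch.  The trap-level code chains
 * pending ino_buckets onto a per-cpu worklist linked by physical
 * address; we atomically detach the whole chain with interrupts
 * disabled and then walk it, handing each virtual IRQ to the generic
 * layer.
 */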
void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << pil);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		unsigned long next_pa;
		unsigned int irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		irq = bucket_get_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		generic_handle_irq(irq);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}

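/* Run softirqs on the dedicated per-cpu softirq stack.  The new stack
 * pointer is the top of that stack minus the 192 byte register window
 * save area required by the sparc64 ABI, with the usual STACK_BIAS
 * applied.
 */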
void do_softirq_own_stack(void)
{
	void *orig_sp, *sp = softirq_stack[smp_processor_id()];

	sp += THREAD_SIZE - 192 - STACK_BIAS;

	__asm__ __volatile__("mov %%sp, %0\n\t"
			     "mov %1, %%sp"
			     : "=&r" (orig_sp)
			     : "r" (sp));
	__do_softirq();
	__asm__ __volatile__("mov %0, %%sp"
			     : : "r" (orig_sp));
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data;
		unsigned long flags;

		if (!desc)
			continue;
		data = irq_desc_get_irq_data(desc);
		raw_spin_lock_irqsave(&desc->lock, flags);
		if (desc->action && !irqd_is_per_cpu(data)) {
			if (data->chip->irq_set_affinity)
				data->chip->irq_set_affinity(data,
					irq_data_get_affinity_mask(data),
					false);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (of_node_name_eq(dp, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c, the PROM uses a timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void notrace register_one_mondo(unsigned long paddr, unsigned long type,
				       unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

void notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

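/* The cpu mondo data block handed to the hypervisor must be 64 byte
 * aligned.  kzalloc() does not guarantee that, so over-allocate
 * (127 bytes) and round the returned pointer up to the next 64 byte
 * boundary, which still leaves a full 64 byte block.
 */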
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;
	void *mondo, *p;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);

	/* Make sure mondo block is 64 byte aligned */
	p = kzalloc(127, GFP_KERNEL);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
		prom_halt();
	}
	mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
	tb->cpu_mondo_block_pa = __pa(mondo);

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
		prom_halt();
	}

	tb->cpu_list_pa = __pa(page);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

static void __init irq_ivector_init(void)
{
	unsigned long size, order;
	unsigned int ivecs;

	/* If we are doing cookie only VIRQs then we do not need the ivector
	 * table to process interrupts.
	 */
	if (sun4v_cookie_only_virqs())
		return;

	ivecs = size_nr_ivec();
	size = sizeof(struct ino_bucket) * ivecs;
	order = get_order(size);
	ivector_table = (struct ino_bucket *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);
}

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	irq_init_hv();
	irq_ivector_init();
	map_prom_timers();
	kill_prom_timer();

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries. */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQs pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that the ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_to_desc(0)->action = &timer_irq_action;
}