/*
 * Copyright (C) 2007-2011 Freescale Semiconductor, Inc.
 *
 * Author: Tony Li <tony.li@freescale.com>
 *	   Jason Jin <Jason.jin@freescale.com>
 *
 * The hwirq alloc and free code is reused from sysdev/mpic_msi.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2 of the
 * License.
 *
 */
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <sysdev/fsl_soc.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic.h>
#include <asm/fsl_hcalls.h>

#include "fsl_msi.h"
#include "fsl_pci.h"

#define MSIIR_OFFSET_MASK	0xfffff
#define MSIIR_IBS_SHIFT		0
#define MSIIR_SRS_SHIFT		5
#define MSIIR1_IBS_SHIFT	4
#define MSIIR1_SRS_SHIFT	0
#define MSI_SRS_MASK		0xf
#define MSI_IBS_MASK		0x1f

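/*
 * A hardware MSI number encodes the MSIR register index in the MSIIR
 * "SRS" field and the bit within that register in the "IBS" field;
 * the field positions differ between MSIIR and MSIIR1 banks.
 */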
#define msi_hwirq(msi, msir_index, intr_index) \
		((msir_index) << (msi)->srs_shift | \
		 ((intr_index) << (msi)->ibs_shift))

static LIST_HEAD(msi_head);

struct fsl_msi_feature {
	u32 fsl_pic_ip;
	u32 msiir_offset; /* Offset of MSIIR, relative to start of MSIR bank */
};

struct fsl_msi_cascade_data {
	struct fsl_msi *msi_data;
	int index;
	int virq;
};

static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}

/*
 * We do not actually need to do anything here: the MSIR register was
 * already read (and thereby acked) in the cascade interrupt handler.
 */
static void fsl_msi_end_irq(struct irq_data *d)
{
}

static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p)
{
	struct fsl_msi *msi_data = irqd->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
	int cascade_virq, srs;

	srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK;
	cascade_virq = msi_data->cascade_array[srs]->virq;

	seq_printf(p, " fsl-msi-%d", cascade_virq);
}
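/* irq_chip attached to every MSI virq mapped in fsl_msi_host_map() */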
static struct irq_chip fsl_msi_chip = {
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
	.irq_ack	= fsl_msi_end_irq,
	.irq_print_chip = fsl_msi_print_chip,
};

static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
				irq_hw_number_t hw)
{
	struct fsl_msi *msi_data = h->host_data;
	struct irq_chip *chip = &fsl_msi_chip;

	irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);

	irq_set_chip_data(virq, msi_data);
	irq_set_chip_and_handler(virq, chip, handle_edge_irq);

	return 0;
}

static const struct irq_domain_ops fsl_msi_host_ops = {
	.map = fsl_msi_host_map,
};

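/*
 * Create the hwirq bitmap for this MSI bank.  Every hwirq starts out
 * reserved; the usable ones are released per MSIR register in
 * fsl_msi_setup_hwirq().
 */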
static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
{
	int rc, hwirq;

	rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX,
			      irq_domain_get_of_node(msi_data->irqhost));
	if (rc)
		return rc;

	/*
	 * Reserve all the hwirqs.
	 * The available hwirqs will be released in fsl_msi_setup_hwirq().
	 */
	for (hwirq = 0; hwirq < NR_MSI_IRQS_MAX; hwirq++)
		msi_bitmap_reserve_hwirq(&msi_data->bitmap, hwirq);

	return 0;
}

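/*
 * Undo fsl_setup_msi_irqs(): detach each MSI descriptor from its virq,
 * dispose of the mapping and return the hwirq to the bitmap.
 */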
static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct msi_desc *entry;
	struct fsl_msi *msi_data;
	irq_hw_number_t hwirq;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->irq)
			continue;
		hwirq = virq_to_hw(entry->irq);
		msi_data = irq_get_chip_data(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
	}
}

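/*
 * Build the MSI message for a hwirq: the address is the physical address
 * of MSIIR (taken from the optional "msi-address-64" property, or derived
 * from the controller's IMMR base), and the data is the hwirq itself,
 * byte-swapped on parts with the PIC1 erratum.
 */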
static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
				struct msi_msg *msg,
				struct fsl_msi *fsl_msi_data)
{
	struct fsl_msi *msi_data = fsl_msi_data;
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	u64 address; /* Physical address of the MSIIR */
	int len;
	const __be64 *reg;

	/* If the msi-address-64 property exists, then use it */
	reg = of_get_property(hose->dn, "msi-address-64", &len);
	if (reg && (len == sizeof(u64)))
		address = be64_to_cpup(reg);
	else
		address = fsl_pci_immrbar_base(hose) + msi_data->msiir_offset;

	msg->address_lo = lower_32_bits(address);
	msg->address_hi = upper_32_bits(address);

	/*
	 * MPIC version 2.0 has erratum PIC1, which breaks both MSI and
	 * MSI-X.  Swapping the data is a workaround that lets MSI-X work;
	 * plain MSI is prevented on affected chips in fsl_setup_msi_irqs().
	 */
	if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
		msg->data = __swab32(hwirq);
	else
		msg->data = hwirq;

	pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
		 (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
		 (hwirq >> msi_data->ibs_shift) & MSI_IBS_MASK);
}

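/*
 * Allocate one hwirq per MSI descriptor of the device: pick the first MSI
 * bank (optionally constrained by the PCI node's "fsl,msi" phandle) that
 * has a free hwirq, map it to a virq and program the message into the
 * device.
 */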
static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct device_node *np;
	phandle phandle = 0;
	int rc, hwirq = -ENOMEM;
	unsigned int virq;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct fsl_msi *msi_data;

	if (type == PCI_CAP_ID_MSI) {
		/*
		 * MPIC version 2.0 has erratum PIC1, which prevents plain
		 * MSI from working.  Refuse MSI on boards with this erratum.
		 */
		list_for_each_entry(msi_data, &msi_head, list)
			if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
				return -EINVAL;
	}

	/*
	 * If the PCI node has an fsl,msi property, then we need to use it
	 * to find the specific MSI.
	 */
	np = of_parse_phandle(hose->dn, "fsl,msi", 0);
	if (np) {
		if (of_device_is_compatible(np, "fsl,mpic-msi") ||
		    of_device_is_compatible(np, "fsl,vmpic-msi") ||
		    of_device_is_compatible(np, "fsl,vmpic-msi-v4.3"))
			phandle = np->phandle;
		else {
			dev_err(&pdev->dev,
				"node %pOF has an invalid fsl,msi phandle %u\n",
				hose->dn, np->phandle);
			return -EINVAL;
		}
	}

	for_each_pci_msi_entry(entry, pdev) {
		/*
		 * Loop over all the MSI devices until we find one that has an
		 * available interrupt.
		 */
		list_for_each_entry(msi_data, &msi_head, list) {
			/*
			 * If the PCI node has an fsl,msi property, then we
			 * restrict our search to the corresponding MSI node.
			 * The simplest way is to skip over MSI nodes with the
			 * wrong phandle. Under the Freescale hypervisor, this
			 * has the additional benefit of skipping over MSI
			 * nodes that are not mapped in the PAMU.
			 */
			if (phandle && (phandle != msi_data->phandle))
				continue;

			hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
			if (hwirq >= 0)
				break;
		}

		if (hwirq < 0) {
			rc = hwirq;
			dev_err(&pdev->dev, "could not allocate MSI interrupt\n");
			goto out_free;
		}

		virq = irq_create_mapping(msi_data->irqhost, hwirq);

		if (!virq) {
			dev_err(&pdev->dev, "failed to map hwirq %i\n", hwirq);
			msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
			rc = -ENOSPC;
			goto out_free;
		}
		/* chip_data is msi_data via host->hostdata in host->map() */
		irq_set_msi_desc(virq, entry);

		fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
		pci_write_msi_msg(virq, &msg);
	}
	return 0;

out_free:
	/* The interrupts set up so far are freed by the caller */
	return rc;
}

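/*
 * Cascade handler for one MSIR register: read the register (or fetch it
 * via hypercall on VMPIC), then dispatch every set bit to the virq mapped
 * for the corresponding hwirq.
 */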
static irqreturn_t fsl_msi_cascade(int irq, void *data)
{
	unsigned int cascade_irq;
	struct fsl_msi *msi_data;
	int msir_index = -1;
	u32 msir_value = 0;
	u32 intr_index;
	u32 have_shift = 0;
	struct fsl_msi_cascade_data *cascade_data = data;
	irqreturn_t ret = IRQ_NONE;

	msi_data = cascade_data->msi_data;

	msir_index = cascade_data->index;

	if (msir_index >= NR_MSI_REG_MAX)
		cascade_irq = 0;

	switch (msi_data->feature & FSL_PIC_IP_MASK) {
	case FSL_PIC_IP_MPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs,
			msir_index * 0x10);
		break;
	case FSL_PIC_IP_IPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
		break;
#ifdef CONFIG_EPAPR_PARAVIRT
	case FSL_PIC_IP_VMPIC: {
		unsigned int ret;
		ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
		if (ret) {
			pr_err("fsl-msi: fh_vmpic_get_msir() failed for irq %u (ret=%u)\n",
			       irq, ret);
			msir_value = 0;
		}
		break;
	}
#endif
	}

	while (msir_value) {
		intr_index = ffs(msir_value) - 1;

		cascade_irq = irq_linear_revmap(msi_data->irqhost,
				msi_hwirq(msi_data, msir_index,
					  intr_index + have_shift));
		if (cascade_irq) {
			generic_handle_irq(cascade_irq);
			ret = IRQ_HANDLED;
		}
		have_shift += intr_index + 1;
		msir_value = msir_value >> (intr_index + 1);
	}

	return ret;
}

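/*
 * Release everything set up at probe time: cascade handlers, the hwirq
 * bitmap and the register mapping (unless the bank is hypervisor-managed).
 */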
static int fsl_of_msi_remove(struct platform_device *ofdev)
{
	struct fsl_msi *msi = platform_get_drvdata(ofdev);
	int virq, i;

	if (msi->list.prev != NULL)
		list_del(&msi->list);
	for (i = 0; i < NR_MSI_REG_MAX; i++) {
		if (msi->cascade_array[i]) {
			virq = msi->cascade_array[i]->virq;

			BUG_ON(!virq);

			free_irq(virq, msi->cascade_array[i]);
			kfree(msi->cascade_array[i]);
			irq_dispose_mapping(virq);
		}
	}
	if (msi->bitmap.bitmap)
		msi_bitmap_free(&msi->bitmap);
	if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
		iounmap(msi->msi_regs);
	kfree(msi);

	return 0;
}

static struct lock_class_key fsl_msi_irq_class;
static struct lock_class_key fsl_msi_irq_request_class;

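/*
 * Wire up one MSIR register: map and request its cascade interrupt, then
 * release the corresponding hwirqs so they become available for devices.
 */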
static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
			       int offset, int irq_index)
{
	struct fsl_msi_cascade_data *cascade_data = NULL;
	int virt_msir, i, ret;

	virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
	if (!virt_msir) {
		dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
			__func__, irq_index);
		return 0;
	}

	cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL);
	if (!cascade_data) {
		dev_err(&dev->dev, "No memory for MSI cascade data\n");
		return -ENOMEM;
	}
	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class,
			      &fsl_msi_irq_request_class);
	cascade_data->index = offset;
	cascade_data->msi_data = msi;
	cascade_data->virq = virt_msir;
	msi->cascade_array[irq_index] = cascade_data;

	ret = request_irq(virt_msir, fsl_msi_cascade, IRQF_NO_THREAD,
			  "fsl-msi-cascade", cascade_data);
	if (ret) {
		dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n",
			virt_msir, ret);
		return ret;
	}

	/* Release the hwirqs corresponding to this MSI register */
	for (i = 0; i < IRQS_PER_MSI_REG; i++)
		msi_bitmap_free_hwirqs(&msi->bitmap,
				       msi_hwirq(msi, offset, i), 1);

	return 0;
}

static const struct of_device_id fsl_of_msi_ids[];
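/*
 * Probe one MSI bank: map its registers (unless running under the
 * hypervisor), work out the MSIIR offset and field layout, create the
 * irq domain and hwirq bitmap, set up the MSIR cascades and hook the
 * MSI ops into every PCI controller.
 */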
static int fsl_of_msi_probe(struct platform_device *dev)
{
	const struct of_device_id *match;
	struct fsl_msi *msi;
	struct resource res, msiir;
	int err, i, j, irq_index, count;
	const u32 *p;
	const struct fsl_msi_feature *features;
	int len;
	u32 offset;
	struct pci_controller *phb;

	match = of_match_device(fsl_of_msi_ids, &dev->dev);
	if (!match)
		return -EINVAL;
	features = match->data;

	printk(KERN_DEBUG "Setting up Freescale MSI support\n");

	msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL);
	if (!msi) {
		dev_err(&dev->dev, "No memory for MSI structure\n");
		return -ENOMEM;
	}
	platform_set_drvdata(dev, msi);

	msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
				      NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);

	if (msi->irqhost == NULL) {
		dev_err(&dev->dev, "No memory for MSI irqhost\n");
		err = -ENOMEM;
		goto error_out;
	}

	/*
	 * Under the Freescale hypervisor, the msi nodes don't have a 'reg'
	 * property. Instead, we use hypercalls to access the MSI.
	 */
	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) {
		err = of_address_to_resource(dev->dev.of_node, 0, &res);
		if (err) {
			dev_err(&dev->dev, "invalid resource for node %pOF\n",
				dev->dev.of_node);
			goto error_out;
		}

		msi->msi_regs = ioremap(res.start, resource_size(&res));
		if (!msi->msi_regs) {
			err = -ENOMEM;
			dev_err(&dev->dev, "could not map node %pOF\n",
				dev->dev.of_node);
			goto error_out;
		}
		msi->msiir_offset =
			features->msiir_offset + (res.start & 0xfffff);

		/*
		 * First read the MSIIR/MSIIR1 offset from the device tree;
		 * on failure fall back to the hardcoded MSIIR offset.
		 */
		if (of_address_to_resource(dev->dev.of_node, 1, &msiir))
			msi->msiir_offset = features->msiir_offset +
					    (res.start & MSIIR_OFFSET_MASK);
		else
			msi->msiir_offset = msiir.start & MSIIR_OFFSET_MASK;
	}

	msi->feature = features->fsl_pic_ip;

	/* For erratum PIC1 on MPIC version 2.0 */
	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) == FSL_PIC_IP_MPIC
			&& (fsl_mpic_primary_get_version() == 0x0200))
		msi->feature |= MSI_HW_ERRATA_ENDIAN;

	/*
	 * Remember the phandle, so that we can match with any PCI nodes
	 * that have an "fsl,msi" property.
	 */
	msi->phandle = dev->dev.of_node->phandle;

	err = fsl_msi_init_allocator(msi);
	if (err) {
		dev_err(&dev->dev, "Error allocating MSI bitmap\n");
		goto error_out;
	}

	p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);

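	/*
	 * v4.3 (MSIIR1-style) banks use the MSIIR1 field layout with one
	 * MSIR per cascade interrupt and ignore msi-available-ranges;
	 * older banks may restrict the usable hwirqs via that property.
	 */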
	if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3") ||
	    of_device_is_compatible(dev->dev.of_node, "fsl,vmpic-msi-v4.3")) {
		msi->srs_shift = MSIIR1_SRS_SHIFT;
		msi->ibs_shift = MSIIR1_IBS_SHIFT;
		if (p)
			dev_warn(&dev->dev, "%s: does not support msi-available-ranges property\n",
				 __func__);

		for (irq_index = 0; irq_index < NR_MSI_REG_MSIIR1;
		     irq_index++) {
			err = fsl_msi_setup_hwirq(msi, dev,
						  irq_index, irq_index);
			if (err)
				goto error_out;
		}
	} else {
		static const u32 all_avail[] =
			{ 0, NR_MSI_REG_MSIIR * IRQS_PER_MSI_REG };

		msi->srs_shift = MSIIR_SRS_SHIFT;
		msi->ibs_shift = MSIIR_IBS_SHIFT;

		if (p && len % (2 * sizeof(u32)) != 0) {
			dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
				__func__);
			err = -EINVAL;
			goto error_out;
		}

		if (!p) {
			p = all_avail;
			len = sizeof(all_avail);
		}

		for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
			if (p[i * 2] % IRQS_PER_MSI_REG ||
			    p[i * 2 + 1] % IRQS_PER_MSI_REG) {
				pr_warn("%s: %pOF: msi available range of %u at %u is not IRQ-aligned\n",
					__func__, dev->dev.of_node,
					p[i * 2 + 1], p[i * 2]);
				err = -EINVAL;
				goto error_out;
			}

			offset = p[i * 2] / IRQS_PER_MSI_REG;
			count = p[i * 2 + 1] / IRQS_PER_MSI_REG;

			for (j = 0; j < count; j++, irq_index++) {
				err = fsl_msi_setup_hwirq(msi, dev, offset + j,
							  irq_index);
				if (err)
					goto error_out;
			}
		}
	}

	list_add_tail(&msi->list, &msi_head);

	/*
	 * Apply the MSI ops to all the controllers.
	 * It doesn't hurt to reassign the same ops,
	 * but bail out if we find another MSI driver.
	 */
	list_for_each_entry(phb, &hose_list, list_node) {
		if (!phb->controller_ops.setup_msi_irqs) {
			phb->controller_ops.setup_msi_irqs = fsl_setup_msi_irqs;
			phb->controller_ops.teardown_msi_irqs = fsl_teardown_msi_irqs;
		} else if (phb->controller_ops.setup_msi_irqs != fsl_setup_msi_irqs) {
			dev_err(&dev->dev, "Different MSI driver already installed!\n");
			err = -ENODEV;
			goto error_out;
		}
	}
	return 0;
error_out:
	fsl_of_msi_remove(dev);
	return err;
}

static const struct fsl_msi_feature mpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_MPIC,
	.msiir_offset = 0x140,
};

static const struct fsl_msi_feature ipic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_IPIC,
	.msiir_offset = 0x38,
};

static const struct fsl_msi_feature vmpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_VMPIC,
	.msiir_offset = 0,
};

static const struct of_device_id fsl_of_msi_ids[] = {
	{
		.compatible = "fsl,mpic-msi",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,mpic-msi-v4.3",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,ipic-msi",
		.data = &ipic_msi_feature,
	},
#ifdef CONFIG_EPAPR_PARAVIRT
	{
		.compatible = "fsl,vmpic-msi",
		.data = &vmpic_msi_feature,
	},
	{
		.compatible = "fsl,vmpic-msi-v4.3",
		.data = &vmpic_msi_feature,
	},
#endif
	{}
};

static struct platform_driver fsl_of_msi_driver = {
	.driver = {
		.name = "fsl-msi",
		.of_match_table = fsl_of_msi_ids,
	},
	.probe = fsl_of_msi_probe,
	.remove = fsl_of_msi_remove,
};

static __init int fsl_of_msi_init(void)
{
	return platform_driver_register(&fsl_of_msi_driver);
}

subsys_initcall(fsl_of_msi_init);