// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2011 Freescale Semiconductor, Inc.
 *
 * Author: Tony Li <tony.li@freescale.com>
 *	   Jason Jin <Jason.jin@freescale.com>
 *
 * The hwirq alloc and free code is reused from sysdev/mpic_msi.c
 */
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/seq_file.h>
#include <sysdev/fsl_soc.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic.h>
#include <asm/fsl_hcalls.h>

#include "fsl_msi.h"
#include "fsl_pci.h"

#define MSIIR_OFFSET_MASK	0xfffff
#define MSIIR_IBS_SHIFT		0
#define MSIIR_SRS_SHIFT		5
#define MSIIR1_IBS_SHIFT	4
#define MSIIR1_SRS_SHIFT	0
#define MSI_SRS_MASK		0xf
#define MSI_IBS_MASK		0x1f

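/*
 * A hardware MSI number encodes which MSIR register raised the interrupt
 * (the SRS field) and which bit within that register fired (the IBS field).
 * The field positions differ between the MSIIR and MSIIR1 register layouts,
 * so the shifts are taken from the per-bank fsl_msi data.
 */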
#define msi_hwirq(msi, msir_index, intr_index) \
		((msir_index) << (msi)->srs_shift | \
		 ((intr_index) << (msi)->ibs_shift))

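/* All MSI banks probed so far; searched by fsl_setup_msi_irqs(). */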
static LIST_HEAD(msi_head);

struct fsl_msi_feature {
	u32 fsl_pic_ip;
	u32 msiir_offset; /* Offset of MSIIR, relative to start of MSIR bank */
};

struct fsl_msi_cascade_data {
	struct fsl_msi *msi_data;
	int index;
	int virq;
};

static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}

/*
 * There is nothing to do here: the MSIR register was already read in the
 * cascade handler, so this MSI interrupt has effectively been acked.
 */
static void fsl_msi_end_irq(struct irq_data *d)
{
}

static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p)
{
	struct fsl_msi *msi_data = irqd->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
	int cascade_virq, srs;

	srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK;
	cascade_virq = msi_data->cascade_array[srs]->virq;

	seq_printf(p, " fsl-msi-%d", cascade_virq);
}


static struct irq_chip fsl_msi_chip = {
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
	.irq_ack	= fsl_msi_end_irq,
	.irq_print_chip = fsl_msi_print_chip,
};

static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
				irq_hw_number_t hw)
{
	struct fsl_msi *msi_data = h->host_data;
	struct irq_chip *chip = &fsl_msi_chip;

	irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);

	irq_set_chip_data(virq, msi_data);
	irq_set_chip_and_handler(virq, chip, handle_edge_irq);

	return 0;
}

static const struct irq_domain_ops fsl_msi_host_ops = {
	.map = fsl_msi_host_map,
};

static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
{
	int rc, hwirq;

	rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX,
			      irq_domain_get_of_node(msi_data->irqhost));
	if (rc)
		return rc;

	/*
	 * Reserve all the hwirqs.
	 * The available hwirqs will be released in fsl_msi_setup_hwirq().
	 */
	for (hwirq = 0; hwirq < NR_MSI_IRQS_MAX; hwirq++)
		msi_bitmap_reserve_hwirq(&msi_data->bitmap, hwirq);

	return 0;
}

static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct msi_desc *entry;
	struct fsl_msi *msi_data;
	irq_hw_number_t hwirq;

	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
		hwirq = virq_to_hw(entry->irq);
		msi_data = irq_get_chip_data(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		entry->irq = 0;
		msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
	}
}

static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
				struct msi_msg *msg,
				struct fsl_msi *fsl_msi_data)
{
	struct fsl_msi *msi_data = fsl_msi_data;
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	u64 address; /* Physical address of the MSIIR */
	int len;
	const __be64 *reg;

	/* If the msi-address-64 property exists, then use it */
	reg = of_get_property(hose->dn, "msi-address-64", &len);
	if (reg && (len == sizeof(u64)))
		address = be64_to_cpup(reg);
	else
		address = fsl_pci_immrbar_base(hose) + msi_data->msiir_offset;

	msg->address_lo = lower_32_bits(address);
	msg->address_hi = upper_32_bits(address);

	/*
	 * MPIC version 2.0 has erratum PIC1, which prevents both MSI and
	 * MSI-X from working reliably. Byte-swapping the data is a
	 * workaround that lets MSI-X function properly; it does not help
	 * MSI, so MSI is disabled on affected chips in fsl_setup_msi_irqs().
	 */
	if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
		msg->data = __swab32(hwirq);
	else
		msg->data = hwirq;

	pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
		 (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
		 (hwirq >> msi_data->ibs_shift) & MSI_IBS_MASK);
}

static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct device_node *np;
	phandle phandle = 0;
	int rc, hwirq = -ENOMEM;
	unsigned int virq;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct fsl_msi *msi_data;

	if (type == PCI_CAP_ID_MSI) {
		/*
		 * MPIC version 2.0 has erratum PIC1, so plain MSI cannot
		 * work for now. Check for it and refuse to set up MSI on
		 * boards with this erratum.
		 */
		list_for_each_entry(msi_data, &msi_head, list)
			if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
				return -EINVAL;
	}

	/*
	 * If the PCI node has an fsl,msi property, then we need to use it
	 * to find the specific MSI.
	 */
	np = of_parse_phandle(hose->dn, "fsl,msi", 0);
	if (np) {
		if (of_device_is_compatible(np, "fsl,mpic-msi") ||
		    of_device_is_compatible(np, "fsl,vmpic-msi") ||
		    of_device_is_compatible(np, "fsl,vmpic-msi-v4.3"))
			phandle = np->phandle;
		else {
			dev_err(&pdev->dev,
				"node %pOF has an invalid fsl,msi phandle %u\n",
				hose->dn, np->phandle);
			of_node_put(np);
			return -EINVAL;
		}
		of_node_put(np);
	}

	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
		/*
		 * Loop over all the MSI devices until we find one that has an
		 * available interrupt.
		 */
		list_for_each_entry(msi_data, &msi_head, list) {
			/*
			 * If the PCI node has an fsl,msi property, then we
			 * restrict our search to the corresponding MSI node.
			 * The simplest way is to skip over MSI nodes with the
			 * wrong phandle. Under the Freescale hypervisor, this
			 * has the additional benefit of skipping over MSI
			 * nodes that are not mapped in the PAMU.
			 */
			if (phandle && (phandle != msi_data->phandle))
				continue;

			hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
			if (hwirq >= 0)
				break;
		}

		if (hwirq < 0) {
			rc = hwirq;
			dev_err(&pdev->dev, "could not allocate MSI interrupt\n");
			goto out_free;
		}

		virq = irq_create_mapping(msi_data->irqhost, hwirq);

		if (!virq) {
			dev_err(&pdev->dev, "failed to map hwirq %i\n", hwirq);
			msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
			rc = -ENOSPC;
			goto out_free;
		}
		/* chip_data is msi_data via host->hostdata in host->map() */
		irq_set_msi_desc(virq, entry);

		fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
		pci_write_msi_msg(virq, &msg);
	}
	return 0;

out_free:
	/* Interrupts already set up are freed by the caller of this function */
	return rc;
}

static irqreturn_t fsl_msi_cascade(int irq, void *data)
{
	struct fsl_msi *msi_data;
	int msir_index = -1;
	u32 msir_value = 0;
	u32 intr_index;
	u32 have_shift = 0;
	struct fsl_msi_cascade_data *cascade_data = data;
	irqreturn_t ret = IRQ_NONE;

	msi_data = cascade_data->msi_data;

	msir_index = cascade_data->index;

	switch (msi_data->feature & FSL_PIC_IP_MASK) {
	case FSL_PIC_IP_MPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs,
			msir_index * 0x10);
		break;
	case FSL_PIC_IP_IPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
		break;
#ifdef CONFIG_EPAPR_PARAVIRT
	case FSL_PIC_IP_VMPIC: {
		unsigned int ret;
		ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
		if (ret) {
			pr_err("fsl-msi: fh_vmpic_get_msir() failed for "
				"irq %u (ret=%u)\n", irq, ret);
			msir_value = 0;
		}
		break;
	}
#endif
	}

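	/*
	 * Each set bit in the MSIR value is one pending MSI. Walk the bits
	 * from least to most significant, rebuild the hwirq number from the
	 * MSIR index and bit position, and dispatch it through the MSI
	 * irq domain.
	 */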
	while (msir_value) {
		int err;
		intr_index = ffs(msir_value) - 1;

		err = generic_handle_domain_irq(msi_data->irqhost,
				msi_hwirq(msi_data, msir_index,
					  intr_index + have_shift));
		if (!err)
			ret = IRQ_HANDLED;

		have_shift += intr_index + 1;
		msir_value = msir_value >> (intr_index + 1);
	}

	return ret;
}

static int fsl_of_msi_remove(struct platform_device *ofdev)
{
	struct fsl_msi *msi = platform_get_drvdata(ofdev);
	int virq, i;

	if (msi->list.prev != NULL)
		list_del(&msi->list);
	for (i = 0; i < NR_MSI_REG_MAX; i++) {
		if (msi->cascade_array[i]) {
			virq = msi->cascade_array[i]->virq;

			BUG_ON(!virq);

			free_irq(virq, msi->cascade_array[i]);
			kfree(msi->cascade_array[i]);
			irq_dispose_mapping(virq);
		}
	}
	if (msi->bitmap.bitmap)
		msi_bitmap_free(&msi->bitmap);
	if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
		iounmap(msi->msi_regs);
	kfree(msi);

	return 0;
}

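/*
 * Give the cascade interrupts their own lockdep classes so that lockdep
 * does not conflate their descriptor locks with those of the MSI
 * interrupts they demultiplex, a common pattern for cascaded interrupt
 * setups. (Rationale inferred from the code, not stated in the original
 * sources.)
 */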
static struct lock_class_key fsl_msi_irq_class;
static struct lock_class_key fsl_msi_irq_request_class;

static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
			       int offset, int irq_index)
{
	struct fsl_msi_cascade_data *cascade_data = NULL;
	int virt_msir, i, ret;

	virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
	if (!virt_msir) {
		dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
			__func__, irq_index);
		return 0;
	}

	cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL);
	if (!cascade_data) {
		dev_err(&dev->dev, "No memory for MSI cascade data\n");
		return -ENOMEM;
	}
	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class,
			      &fsl_msi_irq_request_class);
	cascade_data->index = offset;
	cascade_data->msi_data = msi;
	cascade_data->virq = virt_msir;
	msi->cascade_array[irq_index] = cascade_data;

	ret = request_irq(virt_msir, fsl_msi_cascade, IRQF_NO_THREAD,
			  "fsl-msi-cascade", cascade_data);
	if (ret) {
		dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n",
			virt_msir, ret);
		return ret;
	}

	/* Release the hwirqs corresponding to this MSI register */
	for (i = 0; i < IRQS_PER_MSI_REG; i++)
		msi_bitmap_free_hwirqs(&msi->bitmap,
				       msi_hwirq(msi, offset, i), 1);

	return 0;
}

static const struct of_device_id fsl_of_msi_ids[];
static int fsl_of_msi_probe(struct platform_device *dev)
{
	struct fsl_msi *msi;
	struct resource res, msiir;
	int err, i, j, irq_index, count;
	const u32 *p;
	const struct fsl_msi_feature *features;
	int len;
	u32 offset;
	struct pci_controller *phb;

	features = device_get_match_data(&dev->dev);

	printk(KERN_DEBUG "Setting up Freescale MSI support\n");

	msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL);
	if (!msi) {
		dev_err(&dev->dev, "No memory for MSI structure\n");
		return -ENOMEM;
	}
	platform_set_drvdata(dev, msi);

	msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
				      NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);

	if (msi->irqhost == NULL) {
		dev_err(&dev->dev, "No memory for MSI irqhost\n");
		err = -ENOMEM;
		goto error_out;
	}

	/*
	 * Under the Freescale hypervisor, the msi nodes don't have a 'reg'
	 * property. Instead, we use hypercalls to access the MSI.
	 */
	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) {
		err = of_address_to_resource(dev->dev.of_node, 0, &res);
		if (err) {
			dev_err(&dev->dev, "invalid resource for node %pOF\n",
				dev->dev.of_node);
			goto error_out;
		}

		msi->msi_regs = ioremap(res.start, resource_size(&res));
		if (!msi->msi_regs) {
			err = -ENOMEM;
			dev_err(&dev->dev, "could not map node %pOF\n",
				dev->dev.of_node);
			goto error_out;
		}
		msi->msiir_offset =
			features->msiir_offset + (res.start & 0xfffff);

		/*
		 * First read the MSIIR/MSIIR1 offset from the device tree;
		 * on failure, fall back to the hardcoded MSIIR offset.
		 */
		if (of_address_to_resource(dev->dev.of_node, 1, &msiir))
			msi->msiir_offset = features->msiir_offset +
					    (res.start & MSIIR_OFFSET_MASK);
		else
			msi->msiir_offset = msiir.start & MSIIR_OFFSET_MASK;
	}

	msi->feature = features->fsl_pic_ip;

	/* For erratum PIC1 on MPIC version 2.0 */
	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) == FSL_PIC_IP_MPIC
			&& (fsl_mpic_primary_get_version() == 0x0200))
		msi->feature |= MSI_HW_ERRATA_ENDIAN;

	/*
	 * Remember the phandle, so that we can match with any PCI nodes
	 * that have an "fsl,msi" property.
	 */
	msi->phandle = dev->dev.of_node->phandle;

	err = fsl_msi_init_allocator(msi);
	if (err) {
		dev_err(&dev->dev, "Error allocating MSI bitmap\n");
		goto error_out;
	}

	p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);

	if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3") ||
	    of_device_is_compatible(dev->dev.of_node, "fsl,vmpic-msi-v4.3")) {
		msi->srs_shift = MSIIR1_SRS_SHIFT;
		msi->ibs_shift = MSIIR1_IBS_SHIFT;
		if (p)
			dev_warn(&dev->dev, "%s: does not support msi-available-ranges property\n",
				 __func__);

		for (irq_index = 0; irq_index < NR_MSI_REG_MSIIR1;
		     irq_index++) {
			err = fsl_msi_setup_hwirq(msi, dev,
						  irq_index, irq_index);
			if (err)
				goto error_out;
		}
	} else {
		static const u32 all_avail[] =
			{ 0, NR_MSI_REG_MSIIR * IRQS_PER_MSI_REG };

		msi->srs_shift = MSIIR_SRS_SHIFT;
		msi->ibs_shift = MSIIR_IBS_SHIFT;

		if (p && len % (2 * sizeof(u32)) != 0) {
			dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
				__func__);
			err = -EINVAL;
			goto error_out;
		}

		if (!p) {
			p = all_avail;
			len = sizeof(all_avail);
		}

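		/*
		 * msi-available-ranges is a list of <start count> cell pairs,
		 * both expressed in hwirqs. Each pair must be aligned to a
		 * whole MSIR register (IRQS_PER_MSI_REG hwirqs) and is
		 * converted below into an MSIR register offset plus a count
		 * of registers to set up.
		 */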
		for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
			if (p[i * 2] % IRQS_PER_MSI_REG ||
			    p[i * 2 + 1] % IRQS_PER_MSI_REG) {
				pr_warn("%s: %pOF: msi available range of %u at %u is not IRQ-aligned\n",
					__func__, dev->dev.of_node,
					p[i * 2 + 1], p[i * 2]);
				err = -EINVAL;
				goto error_out;
			}

			offset = p[i * 2] / IRQS_PER_MSI_REG;
			count = p[i * 2 + 1] / IRQS_PER_MSI_REG;

			for (j = 0; j < count; j++, irq_index++) {
				err = fsl_msi_setup_hwirq(msi, dev, offset + j,
							  irq_index);
				if (err)
					goto error_out;
			}
		}
	}

	list_add_tail(&msi->list, &msi_head);

	/*
	 * Apply the MSI ops to all the controllers.
	 * It doesn't hurt to reassign the same ops,
	 * but bail out if we find another MSI driver.
	 */
	list_for_each_entry(phb, &hose_list, list_node) {
		if (!phb->controller_ops.setup_msi_irqs) {
			phb->controller_ops.setup_msi_irqs = fsl_setup_msi_irqs;
			phb->controller_ops.teardown_msi_irqs = fsl_teardown_msi_irqs;
		} else if (phb->controller_ops.setup_msi_irqs != fsl_setup_msi_irqs) {
			dev_err(&dev->dev, "Different MSI driver already installed!\n");
			err = -ENODEV;
			goto error_out;
		}
	}
	return 0;

error_out:
	fsl_of_msi_remove(dev);
	return err;
}

static const struct fsl_msi_feature mpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_MPIC,
	.msiir_offset = 0x140,
};

static const struct fsl_msi_feature ipic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_IPIC,
	.msiir_offset = 0x38,
};

static const struct fsl_msi_feature vmpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_VMPIC,
	.msiir_offset = 0,
};

static const struct of_device_id fsl_of_msi_ids[] = {
	{
		.compatible = "fsl,mpic-msi",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,mpic-msi-v4.3",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,ipic-msi",
		.data = &ipic_msi_feature,
	},
#ifdef CONFIG_EPAPR_PARAVIRT
	{
		.compatible = "fsl,vmpic-msi",
		.data = &vmpic_msi_feature,
	},
	{
		.compatible = "fsl,vmpic-msi-v4.3",
		.data = &vmpic_msi_feature,
	},
#endif
	{}
};

static struct platform_driver fsl_of_msi_driver = {
	.driver = {
		.name = "fsl-msi",
		.of_match_table = fsl_of_msi_ids,
	},
	.probe = fsl_of_msi_probe,
	.remove = fsl_of_msi_remove,
};

static __init int fsl_of_msi_init(void)
{
	return platform_driver_register(&fsl_of_msi_driver);
}

subsys_initcall(fsl_of_msi_init);