/*
 * Copyright (C) 2007-2011 Freescale Semiconductor, Inc.
 *
 * Author: Tony Li <tony.li@freescale.com>
 *         Jason Jin <Jason.jin@freescale.com>
 *
 * The hwirq alloc and free code is reused from sysdev/mpic_msi.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2 of the
 * License.
 *
 */
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic.h>
#include <asm/fsl_hcalls.h>

#include "fsl_msi.h"
#include "fsl_pci.h"

#define MSIIR_OFFSET_MASK	0xfffff
#define MSIIR_IBS_SHIFT		0
#define MSIIR_SRS_SHIFT		5
#define MSIIR1_IBS_SHIFT	4
#define MSIIR1_SRS_SHIFT	0
#define MSI_SRS_MASK		0xf
#define MSI_IBS_MASK		0x1f

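/*
 * Encode an MSIR register index (SRS) and a bit index within that
 * register (IBS) into a single MSI hwirq number.  The shift values
 * depend on the MPIC revision and are set in fsl_of_msi_probe().
 */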
#define msi_hwirq(msi, msir_index, intr_index) \
		((msir_index) << (msi)->srs_shift | \
		 ((intr_index) << (msi)->ibs_shift))

static LIST_HEAD(msi_head);

struct fsl_msi_feature {
	u32 fsl_pic_ip;
	u32 msiir_offset; /* Offset of MSIIR, relative to start of MSIR bank */
};

struct fsl_msi_cascade_data {
	struct fsl_msi *msi_data;
	int index;
};

static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}

/*
 * Nothing to do here: the MSIR register was already read in the
 * cascade handler, which effectively acked this MSI interrupt.
 */
static void fsl_msi_end_irq(struct irq_data *d)
{
}

static struct irq_chip fsl_msi_chip = {
	.irq_mask	= mask_msi_irq,
	.irq_unmask	= unmask_msi_irq,
	.irq_ack	= fsl_msi_end_irq,
	.name		= "FSL-MSI",
};

static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	struct fsl_msi *msi_data = h->host_data;
	struct irq_chip *chip = &fsl_msi_chip;

	irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);

	irq_set_chip_data(virq, msi_data);
	irq_set_chip_and_handler(virq, chip, handle_edge_irq);

	return 0;
}

static const struct irq_domain_ops fsl_msi_host_ops = {
	.map = fsl_msi_host_map,
};

static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
{
	int rc, hwirq;

	rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX,
			      msi_data->irqhost->of_node);
	if (rc)
		return rc;

	/*
	 * Reserve all the hwirqs.
	 * The available hwirqs will be released in fsl_msi_setup_hwirq().
	 */
	for (hwirq = 0; hwirq < NR_MSI_IRQS_MAX; hwirq++)
		msi_bitmap_reserve_hwirq(&msi_data->bitmap, hwirq);

	return 0;
}

static int fsl_msi_check_device(struct pci_dev *pdev, int nvec, int type)
{
	if (type == PCI_CAP_ID_MSIX)
		pr_debug("fslmsi: MSI-X untested, trying anyway.\n");

	return 0;
}

static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct msi_desc *entry;
	struct fsl_msi *msi_data;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;
		msi_data = irq_get_chip_data(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		msi_bitmap_free_hwirqs(&msi_data->bitmap,
				       virq_to_hw(entry->irq), 1);
		irq_dispose_mapping(entry->irq);
	}

	return;
}

static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
				struct msi_msg *msg,
				struct fsl_msi *fsl_msi_data)
{
	struct fsl_msi *msi_data = fsl_msi_data;
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	u64 address; /* Physical address of the MSIIR */
	int len;
	const __be64 *reg;

	/* If the msi-address-64 property exists, then use it */
	reg = of_get_property(hose->dn, "msi-address-64", &len);
	if (reg && (len == sizeof(u64)))
		address = be64_to_cpup(reg);
	else
		address = fsl_pci_immrbar_base(hose) + msi_data->msiir_offset;

	msg->address_lo = lower_32_bits(address);
	msg->address_hi = upper_32_bits(address);

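	/*
	 * The message data is simply the hwirq: when the device writes it
	 * to the MSIIR address above, the SRS/IBS fields select which MSIR
	 * register and bit get set, which is what the cascade handler
	 * decodes below.
	 */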
	msg->data = hwirq;

	pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
		 (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
		 (hwirq >> msi_data->ibs_shift) & MSI_IBS_MASK);
}

static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct device_node *np;
	phandle phandle = 0;
	int rc, hwirq = -ENOMEM;
	unsigned int virq;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct fsl_msi *msi_data;

	/*
	 * If the PCI node has an fsl,msi property, then we need to use it
	 * to find the specific MSI.
	 */
	np = of_parse_phandle(hose->dn, "fsl,msi", 0);
	if (np) {
		if (of_device_is_compatible(np, "fsl,mpic-msi") ||
		    of_device_is_compatible(np, "fsl,vmpic-msi"))
			phandle = np->phandle;
		else {
			dev_err(&pdev->dev,
				"node %s has an invalid fsl,msi phandle %u\n",
				hose->dn->full_name, np->phandle);
			return -EINVAL;
		}
	}

	list_for_each_entry(entry, &pdev->msi_list, list) {
		/*
		 * Loop over all the MSI devices until we find one that has an
		 * available interrupt.
		 */
		list_for_each_entry(msi_data, &msi_head, list) {
			/*
			 * If the PCI node has an fsl,msi property, then we
			 * restrict our search to the corresponding MSI node.
			 * The simplest way is to skip over MSI nodes with the
			 * wrong phandle. Under the Freescale hypervisor, this
			 * has the additional benefit of skipping over MSI
			 * nodes that are not mapped in the PAMU.
			 */
			if (phandle && (phandle != msi_data->phandle))
				continue;

			hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
			if (hwirq >= 0)
				break;
		}

		if (hwirq < 0) {
			rc = hwirq;
			dev_err(&pdev->dev, "could not allocate MSI interrupt\n");
			goto out_free;
		}

		virq = irq_create_mapping(msi_data->irqhost, hwirq);

		if (virq == NO_IRQ) {
			dev_err(&pdev->dev, "fail mapping hwirq %i\n", hwirq);
			msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
			rc = -ENOSPC;
			goto out_free;
		}
		/* chip_data is msi_data via host->hostdata in host->map() */
		irq_set_msi_desc(virq, entry);

		fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
		write_msi_msg(virq, &msg);
	}
	return 0;

out_free:
	/* freed by the caller of this function */
	return rc;
}

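/*
 * Chained handler for one MSIR register: read the register, then
 * dispatch every set bit to the corresponding mapped MSI virq.
 */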
static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	unsigned int cascade_irq;
	struct fsl_msi *msi_data;
	int msir_index = -1;
	u32 msir_value = 0;
	u32 intr_index;
	u32 have_shift = 0;
	struct fsl_msi_cascade_data *cascade_data;

	cascade_data = irq_get_handler_data(irq);
	msi_data = cascade_data->msi_data;

	raw_spin_lock(&desc->lock);
	if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
		if (chip->irq_mask_ack)
			chip->irq_mask_ack(idata);
		else {
			chip->irq_mask(idata);
			chip->irq_ack(idata);
		}
	}

	if (unlikely(irqd_irq_inprogress(idata)))
		goto unlock;

	msir_index = cascade_data->index;

	if (msir_index >= NR_MSI_REG_MAX)
		cascade_irq = NO_IRQ;

	irqd_set_chained_irq_inprogress(idata);
	switch (msi_data->feature & FSL_PIC_IP_MASK) {
	case FSL_PIC_IP_MPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs,
					  msir_index * 0x10);
		break;
	case FSL_PIC_IP_IPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
		break;
#ifdef CONFIG_EPAPR_PARAVIRT
	case FSL_PIC_IP_VMPIC: {
		unsigned int ret;
		ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
		if (ret) {
			pr_err("fsl-msi: fh_vmpic_get_msir() failed for "
			       "irq %u (ret=%u)\n", irq, ret);
			msir_value = 0;
		}
		break;
	}
#endif
	}

	while (msir_value) {
		intr_index = ffs(msir_value) - 1;

		cascade_irq = irq_linear_revmap(msi_data->irqhost,
				msi_hwirq(msi_data, msir_index,
					  intr_index + have_shift));
		if (cascade_irq != NO_IRQ)
			generic_handle_irq(cascade_irq);
		have_shift += intr_index + 1;
		msir_value = msir_value >> (intr_index + 1);
	}
	irqd_clr_chained_irq_inprogress(idata);

	switch (msi_data->feature & FSL_PIC_IP_MASK) {
	case FSL_PIC_IP_MPIC:
	case FSL_PIC_IP_VMPIC:
		chip->irq_eoi(idata);
		break;
	case FSL_PIC_IP_IPIC:
		if (!irqd_irq_disabled(idata) && chip->irq_unmask)
			chip->irq_unmask(idata);
		break;
	}
unlock:
	raw_spin_unlock(&desc->lock);
}

static int fsl_of_msi_remove(struct platform_device *ofdev)
{
	struct fsl_msi *msi = platform_get_drvdata(ofdev);
	int virq, i;
	struct fsl_msi_cascade_data *cascade_data;

	if (msi->list.prev != NULL)
		list_del(&msi->list);
	for (i = 0; i < NR_MSI_REG_MAX; i++) {
		virq = msi->msi_virqs[i];
		if (virq != NO_IRQ) {
			cascade_data = irq_get_handler_data(virq);
			kfree(cascade_data);
			irq_dispose_mapping(virq);
		}
	}
	if (msi->bitmap.bitmap)
		msi_bitmap_free(&msi->bitmap);
	if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
		iounmap(msi->msi_regs);
	kfree(msi);

	return 0;
}

static struct lock_class_key fsl_msi_irq_class;

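/*
 * Map the cascade interrupt for one MSIR register, install the chained
 * handler, and release that register's hwirqs so the bitmap allocator
 * can hand them out.
 */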
static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
			       int offset, int irq_index)
{
	struct fsl_msi_cascade_data *cascade_data = NULL;
	int virt_msir, i;

	virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
	if (virt_msir == NO_IRQ) {
		dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
			__func__, irq_index);
		return 0;
	}

	cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL);
	if (!cascade_data) {
		dev_err(&dev->dev, "No memory for MSI cascade data\n");
		return -ENOMEM;
	}
	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class);
	msi->msi_virqs[irq_index] = virt_msir;
	cascade_data->index = offset;
	cascade_data->msi_data = msi;
	irq_set_handler_data(virt_msir, cascade_data);
	irq_set_chained_handler(virt_msir, fsl_msi_cascade);

	/* Release the hwirqs corresponding to this MSI register */
	for (i = 0; i < IRQS_PER_MSI_REG; i++)
		msi_bitmap_free_hwirqs(&msi->bitmap,
				       msi_hwirq(msi, offset, i), 1);

	return 0;
}

static const struct of_device_id fsl_of_msi_ids[];
static int fsl_of_msi_probe(struct platform_device *dev)
{
	const struct of_device_id *match;
	struct fsl_msi *msi;
	struct resource res, msiir;
	int err, i, j, irq_index, count;
	const u32 *p;
	const struct fsl_msi_feature *features;
	int len;
	u32 offset;

	match = of_match_device(fsl_of_msi_ids, &dev->dev);
	if (!match)
		return -EINVAL;
	features = match->data;

	printk(KERN_DEBUG "Setting up Freescale MSI support\n");

	msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL);
	if (!msi) {
		dev_err(&dev->dev, "No memory for MSI structure\n");
		return -ENOMEM;
	}
	platform_set_drvdata(dev, msi);

	msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
				      NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);

	if (msi->irqhost == NULL) {
		dev_err(&dev->dev, "No memory for MSI irqhost\n");
		err = -ENOMEM;
		goto error_out;
	}

	/*
	 * Under the Freescale hypervisor, the msi nodes don't have a 'reg'
	 * property. Instead, we use hypercalls to access the MSI.
	 */
	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) {
		err = of_address_to_resource(dev->dev.of_node, 0, &res);
		if (err) {
			dev_err(&dev->dev, "invalid resource for node %s\n",
				dev->dev.of_node->full_name);
			goto error_out;
		}

		msi->msi_regs = ioremap(res.start, resource_size(&res));
		if (!msi->msi_regs) {
			err = -ENOMEM;
			dev_err(&dev->dev, "could not map node %s\n",
				dev->dev.of_node->full_name);
			goto error_out;
		}
		msi->msiir_offset =
			features->msiir_offset + (res.start & 0xfffff);

		/*
		 * First try to read the MSIIR/MSIIR1 offset from the device
		 * tree; on failure fall back to the hardcoded MSIIR offset.
		 */
		if (of_address_to_resource(dev->dev.of_node, 1, &msiir))
			msi->msiir_offset = features->msiir_offset +
					    (res.start & MSIIR_OFFSET_MASK);
		else
			msi->msiir_offset = msiir.start & MSIIR_OFFSET_MASK;
	}

	msi->feature = features->fsl_pic_ip;

	/*
	 * Remember the phandle, so that we can match with any PCI nodes
	 * that have an "fsl,msi" property.
	 */
	msi->phandle = dev->dev.of_node->phandle;

	err = fsl_msi_init_allocator(msi);
	if (err) {
		dev_err(&dev->dev, "Error allocating MSI bitmap\n");
		goto error_out;
	}

	p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);

	if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3")) {
		msi->srs_shift = MSIIR1_SRS_SHIFT;
		msi->ibs_shift = MSIIR1_IBS_SHIFT;
		if (p)
			dev_warn(&dev->dev, "%s: does not support msi-available-ranges property\n",
				 __func__);

		for (irq_index = 0; irq_index < NR_MSI_REG_MSIIR1;
		     irq_index++) {
			err = fsl_msi_setup_hwirq(msi, dev,
						  irq_index, irq_index);
			if (err)
				goto error_out;
		}
	} else {
		static const u32 all_avail[] =
			{ 0, NR_MSI_REG_MSIIR * IRQS_PER_MSI_REG };

		msi->srs_shift = MSIIR_SRS_SHIFT;
		msi->ibs_shift = MSIIR_IBS_SHIFT;

		if (p && len % (2 * sizeof(u32)) != 0) {
			dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
				__func__);
			err = -EINVAL;
			goto error_out;
		}

		if (!p) {
			p = all_avail;
			len = sizeof(all_avail);
		}

		for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
			if (p[i * 2] % IRQS_PER_MSI_REG ||
			    p[i * 2 + 1] % IRQS_PER_MSI_REG) {
				pr_warn("%s: %s: msi available range of %u at %u is not IRQ-aligned\n",
					__func__, dev->dev.of_node->full_name,
					p[i * 2 + 1], p[i * 2]);
				err = -EINVAL;
				goto error_out;
			}

			offset = p[i * 2] / IRQS_PER_MSI_REG;
			count = p[i * 2 + 1] / IRQS_PER_MSI_REG;

			for (j = 0; j < count; j++, irq_index++) {
				err = fsl_msi_setup_hwirq(msi, dev, offset + j,
							  irq_index);
				if (err)
					goto error_out;
			}
		}
	}

	list_add_tail(&msi->list, &msi_head);

	/* Setting ppc_md.setup_msi_irqs more than once does no harm */
	if (!ppc_md.setup_msi_irqs) {
		ppc_md.setup_msi_irqs = fsl_setup_msi_irqs;
		ppc_md.teardown_msi_irqs = fsl_teardown_msi_irqs;
		ppc_md.msi_check_device = fsl_msi_check_device;
	} else if (ppc_md.setup_msi_irqs != fsl_setup_msi_irqs) {
		dev_err(&dev->dev, "Different MSI driver already installed!\n");
		err = -ENODEV;
		goto error_out;
	}
	return 0;
error_out:
	fsl_of_msi_remove(dev);
	return err;
}

static const struct fsl_msi_feature mpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_MPIC,
	.msiir_offset = 0x140,
};

static const struct fsl_msi_feature ipic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_IPIC,
	.msiir_offset = 0x38,
};

static const struct fsl_msi_feature vmpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_VMPIC,
	.msiir_offset = 0,
};

static const struct of_device_id fsl_of_msi_ids[] = {
	{
		.compatible = "fsl,mpic-msi",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,mpic-msi-v4.3",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,ipic-msi",
		.data = &ipic_msi_feature,
	},
#ifdef CONFIG_EPAPR_PARAVIRT
	{
		.compatible = "fsl,vmpic-msi",
		.data = &vmpic_msi_feature,
	},
#endif
	{}
};

static struct platform_driver fsl_of_msi_driver = {
	.driver = {
		.name = "fsl-msi",
		.owner = THIS_MODULE,
		.of_match_table = fsl_of_msi_ids,
	},
	.probe = fsl_of_msi_probe,
	.remove = fsl_of_msi_remove,
};

static __init int fsl_of_msi_init(void)
{
	return platform_driver_register(&fsl_of_msi_driver);
}

subsys_initcall(fsl_of_msi_init);

/*
 * Copyright (C) 2007-2011 Freescale Semiconductor, Inc.
 *
 * Author: Tony Li <tony.li@freescale.com>
 *         Jason Jin <Jason.jin@freescale.com>
 *
 * The hwirq alloc and free code is reused from sysdev/mpic_msi.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2 of the
 * License.
 *
 */
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <sysdev/fsl_soc.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic.h>
#include <asm/fsl_hcalls.h>

#include "fsl_msi.h"
#include "fsl_pci.h"

#define MSIIR_OFFSET_MASK	0xfffff
#define MSIIR_IBS_SHIFT		0
#define MSIIR_SRS_SHIFT		5
#define MSIIR1_IBS_SHIFT	4
#define MSIIR1_SRS_SHIFT	0
#define MSI_SRS_MASK		0xf
#define MSI_IBS_MASK		0x1f

#define msi_hwirq(msi, msir_index, intr_index) \
		((msir_index) << (msi)->srs_shift | \
		 ((intr_index) << (msi)->ibs_shift))

static LIST_HEAD(msi_head);

struct fsl_msi_feature {
	u32 fsl_pic_ip;
	u32 msiir_offset; /* Offset of MSIIR, relative to start of MSIR bank */
};

struct fsl_msi_cascade_data {
	struct fsl_msi *msi_data;
	int index;
	int virq;
};

static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}

/*
 * Nothing to do here: the MSIR register was already read in the
 * cascade handler, which effectively acked this MSI interrupt.
 */
static void fsl_msi_end_irq(struct irq_data *d)
{
}

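/*
 * Show which MSIR cascade interrupt an MSI is routed through, e.g. in
 * /proc/interrupts output.
 */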
static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p)
{
	struct fsl_msi *msi_data = irqd->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
	int cascade_virq, srs;

	srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK;
	cascade_virq = msi_data->cascade_array[srs]->virq;

	seq_printf(p, " fsl-msi-%d", cascade_virq);
}

static struct irq_chip fsl_msi_chip = {
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
	.irq_ack	= fsl_msi_end_irq,
	.irq_print_chip = fsl_msi_print_chip,
};

static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	struct fsl_msi *msi_data = h->host_data;
	struct irq_chip *chip = &fsl_msi_chip;

	irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);

	irq_set_chip_data(virq, msi_data);
	irq_set_chip_and_handler(virq, chip, handle_edge_irq);

	return 0;
}

static const struct irq_domain_ops fsl_msi_host_ops = {
	.map = fsl_msi_host_map,
};

static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
{
	int rc, hwirq;

	rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX,
			      irq_domain_get_of_node(msi_data->irqhost));
	if (rc)
		return rc;

	/*
	 * Reserve all the hwirqs.
	 * The available hwirqs will be released in fsl_msi_setup_hwirq().
	 */
	for (hwirq = 0; hwirq < NR_MSI_IRQS_MAX; hwirq++)
		msi_bitmap_reserve_hwirq(&msi_data->bitmap, hwirq);

	return 0;
}

static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct msi_desc *entry;
	struct fsl_msi *msi_data;
	irq_hw_number_t hwirq;

	for_each_pci_msi_entry(entry, pdev) {
		if (entry->irq == NO_IRQ)
			continue;
		hwirq = virq_to_hw(entry->irq);
		msi_data = irq_get_chip_data(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
	}

	return;
}

static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
				struct msi_msg *msg,
				struct fsl_msi *fsl_msi_data)
{
	struct fsl_msi *msi_data = fsl_msi_data;
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	u64 address; /* Physical address of the MSIIR */
	int len;
	const __be64 *reg;

	/* If the msi-address-64 property exists, then use it */
	reg = of_get_property(hose->dn, "msi-address-64", &len);
	if (reg && (len == sizeof(u64)))
		address = be64_to_cpup(reg);
	else
		address = fsl_pci_immrbar_base(hose) + msi_data->msiir_offset;

	msg->address_lo = lower_32_bits(address);
	msg->address_hi = upper_32_bits(address);

	/*
	 * MPIC version 2.0 has erratum PIC1, which breaks both MSI and
	 * MSI-X. This workaround restores MSI-X operation only; MSI is
	 * refused on affected chips in fsl_setup_msi_irqs().
	 */
	if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
		msg->data = __swab32(hwirq);
	else
		msg->data = hwirq;

	pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
		 (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
		 (hwirq >> msi_data->ibs_shift) & MSI_IBS_MASK);
}

static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct device_node *np;
	phandle phandle = 0;
	int rc, hwirq = -ENOMEM;
	unsigned int virq;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct fsl_msi *msi_data;

	if (type == PCI_CAP_ID_MSI) {
		/*
		 * MPIC version 2.0 has erratum PIC1, so plain MSI cannot
		 * work. Refuse MSI here on boards with this erratum.
		 */
		list_for_each_entry(msi_data, &msi_head, list)
			if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
				return -EINVAL;
	}

	/*
	 * If the PCI node has an fsl,msi property, then we need to use it
	 * to find the specific MSI.
	 */
	np = of_parse_phandle(hose->dn, "fsl,msi", 0);
	if (np) {
		if (of_device_is_compatible(np, "fsl,mpic-msi") ||
		    of_device_is_compatible(np, "fsl,vmpic-msi") ||
		    of_device_is_compatible(np, "fsl,vmpic-msi-v4.3"))
			phandle = np->phandle;
		else {
			dev_err(&pdev->dev,
				"node %s has an invalid fsl,msi phandle %u\n",
				hose->dn->full_name, np->phandle);
			return -EINVAL;
		}
	}

	for_each_pci_msi_entry(entry, pdev) {
		/*
		 * Loop over all the MSI devices until we find one that has an
		 * available interrupt.
		 */
		list_for_each_entry(msi_data, &msi_head, list) {
			/*
			 * If the PCI node has an fsl,msi property, then we
			 * restrict our search to the corresponding MSI node.
			 * The simplest way is to skip over MSI nodes with the
			 * wrong phandle. Under the Freescale hypervisor, this
			 * has the additional benefit of skipping over MSI
			 * nodes that are not mapped in the PAMU.
			 */
			if (phandle && (phandle != msi_data->phandle))
				continue;

			hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
			if (hwirq >= 0)
				break;
		}

		if (hwirq < 0) {
			rc = hwirq;
			dev_err(&pdev->dev, "could not allocate MSI interrupt\n");
			goto out_free;
		}

		virq = irq_create_mapping(msi_data->irqhost, hwirq);

		if (virq == NO_IRQ) {
			dev_err(&pdev->dev, "fail mapping hwirq %i\n", hwirq);
			msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
			rc = -ENOSPC;
			goto out_free;
		}
		/* chip_data is msi_data via host->hostdata in host->map() */
		irq_set_msi_desc(virq, entry);

		fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
		pci_write_msi_msg(virq, &msg);
	}
	return 0;

out_free:
	/* freed by the caller of this function */
	return rc;
}

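/*
 * Per-MSIR interrupt handler, registered with request_irq() in
 * fsl_msi_setup_hwirq(): read the MSIR register and hand every set bit
 * to the mapped MSI virq. Returns IRQ_HANDLED if at least one MSI was
 * dispatched.
 */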
static irqreturn_t fsl_msi_cascade(int irq, void *data)
{
	unsigned int cascade_irq;
	struct fsl_msi *msi_data;
	int msir_index = -1;
	u32 msir_value = 0;
	u32 intr_index;
	u32 have_shift = 0;
	struct fsl_msi_cascade_data *cascade_data = data;
	irqreturn_t ret = IRQ_NONE;

	msi_data = cascade_data->msi_data;

	msir_index = cascade_data->index;

	if (msir_index >= NR_MSI_REG_MAX)
		cascade_irq = NO_IRQ;

	switch (msi_data->feature & FSL_PIC_IP_MASK) {
	case FSL_PIC_IP_MPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs,
					  msir_index * 0x10);
		break;
	case FSL_PIC_IP_IPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
		break;
#ifdef CONFIG_EPAPR_PARAVIRT
	case FSL_PIC_IP_VMPIC: {
		unsigned int ret;
		ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
		if (ret) {
			pr_err("fsl-msi: fh_vmpic_get_msir() failed for "
			       "irq %u (ret=%u)\n", irq, ret);
			msir_value = 0;
		}
		break;
	}
#endif
	}

	while (msir_value) {
		intr_index = ffs(msir_value) - 1;

		cascade_irq = irq_linear_revmap(msi_data->irqhost,
				msi_hwirq(msi_data, msir_index,
					  intr_index + have_shift));
		if (cascade_irq != NO_IRQ) {
			generic_handle_irq(cascade_irq);
			ret = IRQ_HANDLED;
		}
		have_shift += intr_index + 1;
		msir_value = msir_value >> (intr_index + 1);
	}

	return ret;
}

static int fsl_of_msi_remove(struct platform_device *ofdev)
{
	struct fsl_msi *msi = platform_get_drvdata(ofdev);
	int virq, i;

	if (msi->list.prev != NULL)
		list_del(&msi->list);
	for (i = 0; i < NR_MSI_REG_MAX; i++) {
		if (msi->cascade_array[i]) {
			virq = msi->cascade_array[i]->virq;

			BUG_ON(virq == NO_IRQ);

			free_irq(virq, msi->cascade_array[i]);
			kfree(msi->cascade_array[i]);
			irq_dispose_mapping(virq);
		}
	}
	if (msi->bitmap.bitmap)
		msi_bitmap_free(&msi->bitmap);
	if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
		iounmap(msi->msi_regs);
	kfree(msi);

	return 0;
}

static struct lock_class_key fsl_msi_irq_class;

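/*
 * Map one MSIR cascade interrupt, attach fsl_msi_cascade() to it with
 * request_irq(), and release that register's hwirqs to the bitmap
 * allocator.
 */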
static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
			       int offset, int irq_index)
{
	struct fsl_msi_cascade_data *cascade_data = NULL;
	int virt_msir, i, ret;

	virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
	if (virt_msir == NO_IRQ) {
		dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
			__func__, irq_index);
		return 0;
	}

	cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL);
	if (!cascade_data) {
		dev_err(&dev->dev, "No memory for MSI cascade data\n");
		return -ENOMEM;
	}
	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class);
	cascade_data->index = offset;
	cascade_data->msi_data = msi;
	cascade_data->virq = virt_msir;
	msi->cascade_array[irq_index] = cascade_data;

	ret = request_irq(virt_msir, fsl_msi_cascade, IRQF_NO_THREAD,
			  "fsl-msi-cascade", cascade_data);
	if (ret) {
		dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n",
			virt_msir, ret);
		return ret;
	}

	/* Release the hwirqs corresponding to this MSI register */
	for (i = 0; i < IRQS_PER_MSI_REG; i++)
		msi_bitmap_free_hwirqs(&msi->bitmap,
				       msi_hwirq(msi, offset, i), 1);

	return 0;
}

static const struct of_device_id fsl_of_msi_ids[];
static int fsl_of_msi_probe(struct platform_device *dev)
{
	const struct of_device_id *match;
	struct fsl_msi *msi;
	struct resource res, msiir;
	int err, i, j, irq_index, count;
	const u32 *p;
	const struct fsl_msi_feature *features;
	int len;
	u32 offset;
	struct pci_controller *phb;

	match = of_match_device(fsl_of_msi_ids, &dev->dev);
	if (!match)
		return -EINVAL;
	features = match->data;

	printk(KERN_DEBUG "Setting up Freescale MSI support\n");

	msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL);
	if (!msi) {
		dev_err(&dev->dev, "No memory for MSI structure\n");
		return -ENOMEM;
	}
	platform_set_drvdata(dev, msi);

	msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
				      NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);

	if (msi->irqhost == NULL) {
		dev_err(&dev->dev, "No memory for MSI irqhost\n");
		err = -ENOMEM;
		goto error_out;
	}

	/*
	 * Under the Freescale hypervisor, the msi nodes don't have a 'reg'
	 * property. Instead, we use hypercalls to access the MSI.
	 */
	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) {
		err = of_address_to_resource(dev->dev.of_node, 0, &res);
		if (err) {
			dev_err(&dev->dev, "invalid resource for node %s\n",
				dev->dev.of_node->full_name);
			goto error_out;
		}

		msi->msi_regs = ioremap(res.start, resource_size(&res));
		if (!msi->msi_regs) {
			err = -ENOMEM;
			dev_err(&dev->dev, "could not map node %s\n",
				dev->dev.of_node->full_name);
			goto error_out;
		}
		msi->msiir_offset =
			features->msiir_offset + (res.start & 0xfffff);

		/*
		 * First try to read the MSIIR/MSIIR1 offset from the device
		 * tree; on failure fall back to the hardcoded MSIIR offset.
		 */
		if (of_address_to_resource(dev->dev.of_node, 1, &msiir))
			msi->msiir_offset = features->msiir_offset +
					    (res.start & MSIIR_OFFSET_MASK);
		else
			msi->msiir_offset = msiir.start & MSIIR_OFFSET_MASK;
	}

	msi->feature = features->fsl_pic_ip;

	/* For erratum PIC1 on MPIC version 2.0 */
	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) == FSL_PIC_IP_MPIC
			&& (fsl_mpic_primary_get_version() == 0x0200))
		msi->feature |= MSI_HW_ERRATA_ENDIAN;

	/*
	 * Remember the phandle, so that we can match with any PCI nodes
	 * that have an "fsl,msi" property.
	 */
	msi->phandle = dev->dev.of_node->phandle;

	err = fsl_msi_init_allocator(msi);
	if (err) {
		dev_err(&dev->dev, "Error allocating MSI bitmap\n");
		goto error_out;
	}

	p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);

	if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3") ||
	    of_device_is_compatible(dev->dev.of_node, "fsl,vmpic-msi-v4.3")) {
		msi->srs_shift = MSIIR1_SRS_SHIFT;
		msi->ibs_shift = MSIIR1_IBS_SHIFT;
		if (p)
			dev_warn(&dev->dev, "%s: does not support msi-available-ranges property\n",
				 __func__);

		for (irq_index = 0; irq_index < NR_MSI_REG_MSIIR1;
		     irq_index++) {
			err = fsl_msi_setup_hwirq(msi, dev,
						  irq_index, irq_index);
			if (err)
				goto error_out;
		}
	} else {
		static const u32 all_avail[] =
			{ 0, NR_MSI_REG_MSIIR * IRQS_PER_MSI_REG };

		msi->srs_shift = MSIIR_SRS_SHIFT;
		msi->ibs_shift = MSIIR_IBS_SHIFT;

		if (p && len % (2 * sizeof(u32)) != 0) {
			dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
				__func__);
			err = -EINVAL;
			goto error_out;
		}

		if (!p) {
			p = all_avail;
			len = sizeof(all_avail);
		}

		for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
			if (p[i * 2] % IRQS_PER_MSI_REG ||
			    p[i * 2 + 1] % IRQS_PER_MSI_REG) {
				pr_warn("%s: %s: msi available range of %u at %u is not IRQ-aligned\n",
					__func__, dev->dev.of_node->full_name,
					p[i * 2 + 1], p[i * 2]);
				err = -EINVAL;
				goto error_out;
			}

			offset = p[i * 2] / IRQS_PER_MSI_REG;
			count = p[i * 2 + 1] / IRQS_PER_MSI_REG;

			for (j = 0; j < count; j++, irq_index++) {
				err = fsl_msi_setup_hwirq(msi, dev, offset + j,
							  irq_index);
				if (err)
					goto error_out;
			}
		}
	}

	list_add_tail(&msi->list, &msi_head);

	/*
	 * Apply the MSI ops to all the controllers.
	 * It doesn't hurt to reassign the same ops,
	 * but bail out if we find another MSI driver.
	 */
	list_for_each_entry(phb, &hose_list, list_node) {
		if (!phb->controller_ops.setup_msi_irqs) {
			phb->controller_ops.setup_msi_irqs = fsl_setup_msi_irqs;
			phb->controller_ops.teardown_msi_irqs = fsl_teardown_msi_irqs;
		} else if (phb->controller_ops.setup_msi_irqs != fsl_setup_msi_irqs) {
			dev_err(&dev->dev, "Different MSI driver already installed!\n");
			err = -ENODEV;
			goto error_out;
		}
	}
	return 0;
error_out:
	fsl_of_msi_remove(dev);
	return err;
}

static const struct fsl_msi_feature mpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_MPIC,
	.msiir_offset = 0x140,
};

static const struct fsl_msi_feature ipic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_IPIC,
	.msiir_offset = 0x38,
};

static const struct fsl_msi_feature vmpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_VMPIC,
	.msiir_offset = 0,
};

static const struct of_device_id fsl_of_msi_ids[] = {
	{
		.compatible = "fsl,mpic-msi",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,mpic-msi-v4.3",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,ipic-msi",
		.data = &ipic_msi_feature,
	},
#ifdef CONFIG_EPAPR_PARAVIRT
	{
		.compatible = "fsl,vmpic-msi",
		.data = &vmpic_msi_feature,
	},
	{
		.compatible = "fsl,vmpic-msi-v4.3",
		.data = &vmpic_msi_feature,
	},
#endif
	{}
};

static struct platform_driver fsl_of_msi_driver = {
	.driver = {
		.name = "fsl-msi",
		.of_match_table = fsl_of_msi_ids,
	},
	.probe = fsl_of_msi_probe,
	.remove = fsl_of_msi_remove,
};

static __init int fsl_of_msi_init(void)
{
	return platform_driver_register(&fsl_of_msi_driver);
}

subsys_initcall(fsl_of_msi_init);