/*
 * Copyright (C) 2007-2011 Freescale Semiconductor, Inc.
 *
 * Author: Tony Li <tony.li@freescale.com>
 *	   Jason Jin <Jason.jin@freescale.com>
 *
 * The hwirq allocation and free code is reused from sysdev/mpic_msi.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2 of the
 * License.
 *
 */
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic.h>
#include <asm/fsl_hcalls.h>

#include "fsl_msi.h"
#include "fsl_pci.h"

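/*
 * Every MSI bank probed by this driver is linked onto this list;
 * fsl_setup_msi_irqs() walks it to find a bank with a free hardware
 * interrupt for each MSI descriptor it has to set up.
 */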
LIST_HEAD(msi_head);

struct fsl_msi_feature {
	u32 fsl_pic_ip;
	u32 msiir_offset;	/* Offset of MSIIR, relative to start of MSIR bank */
};

struct fsl_msi_cascade_data {
	struct fsl_msi *msi_data;
	int index;
};

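/*
 * The MSIR registers are 32-bit big-endian; "reg" is a byte offset into
 * the bank, so convert it to a word index before the in_be32() access.
 */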
static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}

/*
 * Nothing to do here: the cascade handler has already read the MSIR
 * register, and that read acknowledges this MSI interrupt.
 */
static void fsl_msi_end_irq(struct irq_data *d)
{
}

static struct irq_chip fsl_msi_chip = {
	.irq_mask	= mask_msi_irq,
	.irq_unmask	= unmask_msi_irq,
	.irq_ack	= fsl_msi_end_irq,
	.name		= "FSL-MSI",
};

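/*
 * irq_domain .map hook, called once per irq_create_mapping().  The new
 * virq is flagged as edge-triggered, gets the edge flow handler, and its
 * chip_data is set to the fsl_msi bank that owns the hardware interrupt.
 */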
static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	struct fsl_msi *msi_data = h->host_data;
	struct irq_chip *chip = &fsl_msi_chip;

	irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);

	irq_set_chip_data(virq, msi_data);
	irq_set_chip_and_handler(virq, chip, handle_edge_irq);

	return 0;
}

static const struct irq_domain_ops fsl_msi_host_ops = {
	.map = fsl_msi_host_map,
};

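/*
 * Create the bitmap that tracks which of the NR_MSI_IRQS hardware
 * interrupts are free.  msi_bitmap_reserve_dt_hwirqs() then reserves any
 * interrupt that the device tree does not list as available.
 */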
static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
{
	int rc;

	rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS,
			      msi_data->irqhost->of_node);
	if (rc)
		return rc;

	rc = msi_bitmap_reserve_dt_hwirqs(&msi_data->bitmap);
	if (rc < 0) {
		msi_bitmap_free(&msi_data->bitmap);
		return rc;
	}

	return 0;
}

static int fsl_msi_check_device(struct pci_dev *pdev, int nvec, int type)
{
	if (type == PCI_CAP_ID_MSIX)
		pr_debug("fslmsi: MSI-X untested, trying anyway.\n");

	return 0;
}

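/*
 * Undo fsl_setup_msi_irqs() for every MSI descriptor of the device:
 * detach the descriptor, return the hardware interrupt to the owning
 * bank's bitmap, and dispose of the virq mapping.
 */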
static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct msi_desc *entry;
	struct fsl_msi *msi_data;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;
		msi_data = irq_get_chip_data(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		msi_bitmap_free_hwirqs(&msi_data->bitmap,
				       virq_to_hw(entry->irq), 1);
		irq_dispose_mapping(entry->irq);
	}

	return;
}

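/*
 * Build the MSI message for one hardware interrupt.  The address is the
 * physical address of the MSIIR register (taken from an explicit
 * "msi-address-64" property when present, otherwise computed from the
 * IMMR base plus the per-PIC MSIIR offset), and the data is the hardware
 * interrupt number, which encodes the target MSIR register (SRS) and bit
 * (IBS).
 */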
static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
				struct msi_msg *msg,
				struct fsl_msi *fsl_msi_data)
{
	struct fsl_msi *msi_data = fsl_msi_data;
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	u64 address;		/* Physical address of the MSIIR */
	int len;
	const u64 *reg;

	/* If the msi-address-64 property exists, then use it */
	reg = of_get_property(hose->dn, "msi-address-64", &len);
	if (reg && (len == sizeof(u64)))
		address = be64_to_cpup(reg);
	else
		address = fsl_pci_immrbar_base(hose) + msi_data->msiir_offset;

	msg->address_lo = lower_32_bits(address);
	msg->address_hi = upper_32_bits(address);

	msg->data = hwirq;

	pr_debug("%s: allocated srs: %d, ibs: %d\n",
		 __func__, hwirq / IRQS_PER_MSI_REG, hwirq % IRQS_PER_MSI_REG);
}

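/*
 * Illustrative only: a device tree fragment of the shape this function
 * expects.  The node names, unit addresses, ranges and interrupt
 * specifiers below are made up; real values are board and SoC specific.
 *
 *	msi1: msi@41600 {
 *		compatible = "fsl,mpic-msi";
 *		reg = <0x41600 0x80>;
 *		msi-available-ranges = <0 0x100>;
 *		interrupts = <0xe0 0 0xe1 0 0xe2 0 0xe3 0
 *			      0xe4 0 0xe5 0 0xe6 0 0xe7 0>;
 *	};
 *
 *	pci@ffe08000 {
 *		...
 *		fsl,msi = <&msi1>;	// optional: bind this host to one MSI bank
 *	};
 *
 * When the PCI controller node carries an "fsl,msi" phandle, only the
 * matching bank is considered below; otherwise every bank on msi_head is
 * tried until one has a free hardware interrupt.
 */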
static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct device_node *np;
	phandle phandle = 0;
	int rc, hwirq = -ENOMEM;
	unsigned int virq;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct fsl_msi *msi_data;

	/*
	 * If the PCI node has an fsl,msi property, then we need to use it
	 * to find the specific MSI.
	 */
	np = of_parse_phandle(hose->dn, "fsl,msi", 0);
	if (np) {
		if (of_device_is_compatible(np, "fsl,mpic-msi") ||
		    of_device_is_compatible(np, "fsl,vmpic-msi"))
			phandle = np->phandle;
		else {
			dev_err(&pdev->dev,
				"node %s has an invalid fsl,msi phandle %u\n",
				hose->dn->full_name, np->phandle);
			return -EINVAL;
		}
	}

	list_for_each_entry(entry, &pdev->msi_list, list) {
		/*
		 * Loop over all the MSI devices until we find one that has an
		 * available interrupt.
		 */
		list_for_each_entry(msi_data, &msi_head, list) {
			/*
			 * If the PCI node has an fsl,msi property, then we
			 * restrict our search to the corresponding MSI node.
			 * The simplest way is to skip over MSI nodes with the
			 * wrong phandle. Under the Freescale hypervisor, this
			 * has the additional benefit of skipping over MSI
			 * nodes that are not mapped in the PAMU.
			 */
			if (phandle && (phandle != msi_data->phandle))
				continue;

			hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
			if (hwirq >= 0)
				break;
		}

		if (hwirq < 0) {
			rc = hwirq;
			dev_err(&pdev->dev, "could not allocate MSI interrupt\n");
			goto out_free;
		}

		virq = irq_create_mapping(msi_data->irqhost, hwirq);

		if (virq == NO_IRQ) {
			dev_err(&pdev->dev, "fail mapping hwirq %i\n", hwirq);
			msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
			rc = -ENOSPC;
			goto out_free;
		}
		/* chip_data is msi_data via host->hostdata in host->map() */
		irq_set_msi_desc(virq, entry);

		fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
		write_msi_msg(virq, &msg);
	}
	return 0;

out_free:
	/* free by the caller of this function */
	return rc;
}

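/*
 * Chained handler for one MSIR register.  Reading MSIR returns (and
 * clears) a bitmask of pending MSIs in this bank; each set bit is
 * translated back to a virq and handled, after which the cascade
 * interrupt itself is EOI'd (MPIC/VMPIC) or unmasked again (IPIC).
 */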
static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	unsigned int cascade_irq;
	struct fsl_msi *msi_data;
	int msir_index = -1;
	u32 msir_value = 0;
	u32 intr_index;
	u32 have_shift = 0;
	struct fsl_msi_cascade_data *cascade_data;
	unsigned int ret;

	cascade_data = irq_get_handler_data(irq);
	msi_data = cascade_data->msi_data;

	raw_spin_lock(&desc->lock);
	if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
		if (chip->irq_mask_ack)
			chip->irq_mask_ack(idata);
		else {
			chip->irq_mask(idata);
			chip->irq_ack(idata);
		}
	}

	if (unlikely(irqd_irq_inprogress(idata)))
		goto unlock;

	msir_index = cascade_data->index;

	if (msir_index >= NR_MSI_REG)
		cascade_irq = NO_IRQ;

	irqd_set_chained_irq_inprogress(idata);
	switch (msi_data->feature & FSL_PIC_IP_MASK) {
	case FSL_PIC_IP_MPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs,
			msir_index * 0x10);
		break;
	case FSL_PIC_IP_IPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
		break;
	case FSL_PIC_IP_VMPIC:
		ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
		if (ret) {
			pr_err("fsl-msi: fh_vmpic_get_msir() failed for "
			       "irq %u (ret=%u)\n", irq, ret);
			msir_value = 0;
		}
		break;
	}

	while (msir_value) {
		intr_index = ffs(msir_value) - 1;

		cascade_irq = irq_linear_revmap(msi_data->irqhost,
				msir_index * IRQS_PER_MSI_REG +
					intr_index + have_shift);
		if (cascade_irq != NO_IRQ)
			generic_handle_irq(cascade_irq);
		have_shift += intr_index + 1;
		msir_value = msir_value >> (intr_index + 1);
	}
	irqd_clr_chained_irq_inprogress(idata);

	switch (msi_data->feature & FSL_PIC_IP_MASK) {
	case FSL_PIC_IP_MPIC:
	case FSL_PIC_IP_VMPIC:
		chip->irq_eoi(idata);
		break;
	case FSL_PIC_IP_IPIC:
		if (!irqd_irq_disabled(idata) && chip->irq_unmask)
			chip->irq_unmask(idata);
		break;
	}
unlock:
	raw_spin_unlock(&desc->lock);
}

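/*
 * Tear down one MSI bank: unlink it from msi_head, free the per-cascade
 * handler data and virq mappings, release the allocation bitmap, and
 * unmap the registers (unless they are accessed via hypercalls).
 */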
static int fsl_of_msi_remove(struct platform_device *ofdev)
{
	struct fsl_msi *msi = platform_get_drvdata(ofdev);
	int virq, i;
	struct fsl_msi_cascade_data *cascade_data;

	if (msi->list.prev != NULL)
		list_del(&msi->list);
	for (i = 0; i < NR_MSI_REG; i++) {
		virq = msi->msi_virqs[i];
		if (virq != NO_IRQ) {
			cascade_data = irq_get_handler_data(virq);
			kfree(cascade_data);
			irq_dispose_mapping(virq);
		}
	}
	if (msi->bitmap.bitmap)
		msi_bitmap_free(&msi->bitmap);
	if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
		iounmap(msi->msi_regs);
	kfree(msi);

	return 0;
}

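/*
 * Map one MSIR cascade interrupt from the device tree and install
 * fsl_msi_cascade() as its chained handler.  "offset" is the MSIR index
 * this cascade serves; "irq_index" selects the interrupt specifier in
 * the node's "interrupts" property.
 */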
static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi,
					 struct platform_device *dev,
					 int offset, int irq_index)
{
	struct fsl_msi_cascade_data *cascade_data = NULL;
	int virt_msir;

	virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
	if (virt_msir == NO_IRQ) {
		dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
			__func__, irq_index);
		return 0;
	}

	cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL);
	if (!cascade_data) {
		dev_err(&dev->dev, "No memory for MSI cascade data\n");
		return -ENOMEM;
	}

	msi->msi_virqs[irq_index] = virt_msir;
	cascade_data->index = offset;
	cascade_data->msi_data = msi;
	irq_set_handler_data(virt_msir, cascade_data);
	irq_set_chained_handler(virt_msir, fsl_msi_cascade);

	return 0;
}

static const struct of_device_id fsl_of_msi_ids[];
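/*
 * Probe one MSI bank: map its registers (or note that it is reached via
 * hypercalls under the Freescale hypervisor), create the irq_domain and
 * allocation bitmap, wire up a cascade handler per advertised MSIR, and
 * register the ppc_md MSI hooks the first time through.
 */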
static int __devinit fsl_of_msi_probe(struct platform_device *dev)
{
	const struct of_device_id *match;
	struct fsl_msi *msi;
	struct resource res;
	int err, i, j, irq_index, count;
	int rc;
	const u32 *p;
	struct fsl_msi_feature *features;
	int len;
	u32 offset;
	static const u32 all_avail[] = { 0, NR_MSI_IRQS };

	match = of_match_device(fsl_of_msi_ids, &dev->dev);
	if (!match)
		return -EINVAL;
	features = match->data;

	printk(KERN_DEBUG "Setting up Freescale MSI support\n");

	msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL);
	if (!msi) {
		dev_err(&dev->dev, "No memory for MSI structure\n");
		return -ENOMEM;
	}
	platform_set_drvdata(dev, msi);

	msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
				      NR_MSI_IRQS, &fsl_msi_host_ops, msi);

	if (msi->irqhost == NULL) {
		dev_err(&dev->dev, "No memory for MSI irqhost\n");
		err = -ENOMEM;
		goto error_out;
	}

	/*
	 * Under the Freescale hypervisor, the msi nodes don't have a 'reg'
	 * property. Instead, we use hypercalls to access the MSI.
	 */
	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) {
		err = of_address_to_resource(dev->dev.of_node, 0, &res);
		if (err) {
			dev_err(&dev->dev, "invalid resource for node %s\n",
				dev->dev.of_node->full_name);
			goto error_out;
		}

		msi->msi_regs = ioremap(res.start, resource_size(&res));
		if (!msi->msi_regs) {
			err = -ENOMEM;
			dev_err(&dev->dev, "could not map node %s\n",
				dev->dev.of_node->full_name);
			goto error_out;
		}
		msi->msiir_offset =
			features->msiir_offset + (res.start & 0xfffff);
	}

	msi->feature = features->fsl_pic_ip;

	/*
	 * Remember the phandle, so that we can match with any PCI nodes
	 * that have an "fsl,msi" property.
	 */
	msi->phandle = dev->dev.of_node->phandle;

	rc = fsl_msi_init_allocator(msi);
	if (rc) {
		dev_err(&dev->dev, "Error allocating MSI bitmap\n");
		goto error_out;
	}

	p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);
	if (p && len % (2 * sizeof(u32)) != 0) {
		dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
			__func__);
		err = -EINVAL;
		goto error_out;
	}

	if (!p) {
		p = all_avail;
		len = sizeof(all_avail);
	}

	for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
		if (p[i * 2] % IRQS_PER_MSI_REG ||
		    p[i * 2 + 1] % IRQS_PER_MSI_REG) {
			printk(KERN_WARNING "%s: %s: msi available range of %u at %u is not IRQ-aligned\n",
			       __func__, dev->dev.of_node->full_name,
			       p[i * 2 + 1], p[i * 2]);
			err = -EINVAL;
			goto error_out;
		}

		offset = p[i * 2] / IRQS_PER_MSI_REG;
		count = p[i * 2 + 1] / IRQS_PER_MSI_REG;

		for (j = 0; j < count; j++, irq_index++) {
			err = fsl_msi_setup_hwirq(msi, dev, offset + j, irq_index);
			if (err)
				goto error_out;
		}
	}

	list_add_tail(&msi->list, &msi_head);

	/*
	 * Setting ppc_md.setup_msi_irqs more than once is harmless, as long
	 * as every MSI bank is handled by this same driver.
	 */
	if (!ppc_md.setup_msi_irqs) {
		ppc_md.setup_msi_irqs = fsl_setup_msi_irqs;
		ppc_md.teardown_msi_irqs = fsl_teardown_msi_irqs;
		ppc_md.msi_check_device = fsl_msi_check_device;
	} else if (ppc_md.setup_msi_irqs != fsl_setup_msi_irqs) {
		dev_err(&dev->dev, "Different MSI driver already installed!\n");
		err = -ENODEV;
		goto error_out;
	}
	return 0;
error_out:
	fsl_of_msi_remove(dev);
	return err;
}

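/*
 * Per-PIC parameters: which interrupt controller block hosts the MSI
 * registers and where MSIIR sits relative to the start of the MSIR bank
 * (0x140 for MPIC, 0x38 for IPIC).  The VMPIC variant has no memory-
 * mapped registers; it is reached through hypercalls instead.
 */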
static const struct fsl_msi_feature mpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_MPIC,
	.msiir_offset = 0x140,
};

static const struct fsl_msi_feature ipic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_IPIC,
	.msiir_offset = 0x38,
};

static const struct fsl_msi_feature vmpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_VMPIC,
	.msiir_offset = 0,
};

static const struct of_device_id fsl_of_msi_ids[] = {
	{
		.compatible = "fsl,mpic-msi",
		.data = (void *)&mpic_msi_feature,
	},
	{
		.compatible = "fsl,ipic-msi",
		.data = (void *)&ipic_msi_feature,
	},
	{
		.compatible = "fsl,vmpic-msi",
		.data = (void *)&vmpic_msi_feature,
	},
	{}
};

static struct platform_driver fsl_of_msi_driver = {
	.driver = {
		.name = "fsl-msi",
		.owner = THIS_MODULE,
		.of_match_table = fsl_of_msi_ids,
	},
	.probe = fsl_of_msi_probe,
	.remove = fsl_of_msi_remove,
};

static __init int fsl_of_msi_init(void)
{
	return platform_driver_register(&fsl_of_msi_driver);
}

subsys_initcall(fsl_of_msi_init);