// SPDX-License-Identifier: GPL-2.0
/* pci_msi.c: Sparc64 MSI support common layer.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include "pci_impl.h"

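/*
 * MSI event queue interrupt handler.  Walks the queue from the current
 * hardware head pointer, dispatching each pending MSI to the Linux irq
 * registered for it, then writes the advanced head back to hardware.
 * Returns IRQ_NONE if any of the queue accessor ops fail.
 */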
static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
{
	struct sparc64_msiq_cookie *msiq_cookie = cookie;
	struct pci_pbm_info *pbm = msiq_cookie->pbm;
	unsigned long msiqid = msiq_cookie->msiqid;
	const struct sparc64_msiq_ops *ops;
	unsigned long orig_head, head;
	int err;

	ops = pbm->msi_ops;

	err = ops->get_head(pbm, msiqid, &head);
	if (unlikely(err < 0))
		goto err_get_head;

	orig_head = head;
	for (;;) {
		unsigned long msi;

		err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
		if (likely(err > 0)) {
			unsigned int irq;

			irq = pbm->msi_irq_table[msi - pbm->msi_first];
			generic_handle_irq(irq);
		}

		if (unlikely(err < 0))
			goto err_dequeue;

		if (err == 0)
			break;
	}
	if (likely(head != orig_head)) {
		err = ops->set_head(pbm, msiqid, head);
		if (unlikely(err < 0))
			goto err_set_head;
	}
	return IRQ_HANDLED;

err_get_head:
	printk(KERN_EMERG "MSI: Get head on msiqid[%lu] gives error %d\n",
	       msiqid, err);
	goto err_out;

err_dequeue:
	printk(KERN_EMERG "MSI: Dequeue head[%lu] from msiqid[%lu] "
	       "gives error %d\n",
	       head, msiqid, err);
	goto err_out;

err_set_head:
	printk(KERN_EMERG "MSI: Set head[%lu] on msiqid[%lu] "
	       "gives error %d\n",
	       head, msiqid, err);
	goto err_out;

err_out:
	return IRQ_NONE;
}

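/*
 * Round-robin selection of an MSI event queue.  A per-PBM rotor,
 * protected by a single global lock, spreads newly allocated MSIs
 * across the PBM's event queues.
 */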
static u32 pick_msiq(struct pci_pbm_info *pbm)
{
	static DEFINE_SPINLOCK(rotor_lock);
	unsigned long flags;
	u32 ret, rotor;

	spin_lock_irqsave(&rotor_lock, flags);

	rotor = pbm->msiq_rotor;
	ret = pbm->msiq_first + rotor;

	if (++rotor >= pbm->msiq_num)
		rotor = 0;
	pbm->msiq_rotor = rotor;

	spin_unlock_irqrestore(&rotor_lock, flags);

	return ret;
}

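/*
 * MSI number allocation: find the first clear bit in the PBM's MSI
 * bitmap and return the corresponding MSI number, or -ENOENT if all
 * MSIs are in use.  free_msi() clears the bit again.
 */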
static int alloc_msi(struct pci_pbm_info *pbm)
{
	int i;

	for (i = 0; i < pbm->msi_num; i++) {
		if (!test_and_set_bit(i, pbm->msi_bitmap))
			return i + pbm->msi_first;
	}

	return -ENOENT;
}

static void free_msi(struct pci_pbm_info *pbm, int msi_num)
{
	msi_num -= pbm->msi_first;
	clear_bit(msi_num, pbm->msi_bitmap);
}

static struct irq_chip msi_irq = {
	.name		= "PCI-MSI",
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
	.irq_enable	= pci_msi_unmask_irq,
	.irq_disable	= pci_msi_mask_irq,
	/* XXX affinity XXX */
};

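/*
 * Set up one MSI for a PCI device: allocate a virtual irq and an MSI
 * number, pick an event queue, program the hardware mapping via
 * ops->msi_setup(), then compose and write the MSI message (address
 * and data) for the device.
 */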
static int sparc64_setup_msi_irq(unsigned int *irq_p,
				 struct pci_dev *pdev,
				 struct msi_desc *entry)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	const struct sparc64_msiq_ops *ops = pbm->msi_ops;
	struct msi_msg msg;
	int msi, err;
	u32 msiqid;

	*irq_p = irq_alloc(0, 0);
	err = -ENOMEM;
	if (!*irq_p)
		goto out_err;

	irq_set_chip_and_handler_name(*irq_p, &msi_irq, handle_simple_irq,
				      "MSI");

	err = alloc_msi(pbm);
	if (unlikely(err < 0))
		goto out_irq_free;

	msi = err;

	msiqid = pick_msiq(pbm);

	err = ops->msi_setup(pbm, msiqid, msi,
			     (entry->msi_attrib.is_64 ? 1 : 0));
	if (err)
		goto out_msi_free;

	pbm->msi_irq_table[msi - pbm->msi_first] = *irq_p;

	if (entry->msi_attrib.is_64) {
		msg.address_hi = pbm->msi64_start >> 32;
		msg.address_lo = pbm->msi64_start & 0xffffffff;
	} else {
		msg.address_hi = 0;
		msg.address_lo = pbm->msi32_start;
	}
	msg.data = msi;

	irq_set_msi_desc(*irq_p, entry);
	pci_write_msi_msg(*irq_p, &msg);

	return 0;

out_msi_free:
	free_msi(pbm, msi);

out_irq_free:
	irq_set_chip(*irq_p, NULL);
	irq_free(*irq_p);
	*irq_p = 0;

out_err:
	return err;
}

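/*
 * Tear down one MSI: find the MSI number backing the irq in the
 * per-PBM table, undo the hardware mapping via ops->msi_teardown(),
 * and release both the MSI number and the virtual irq.
 */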
static void sparc64_teardown_msi_irq(unsigned int irq,
				     struct pci_dev *pdev)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	const struct sparc64_msiq_ops *ops = pbm->msi_ops;
	unsigned int msi_num;
	int i, err;

	for (i = 0; i < pbm->msi_num; i++) {
		if (pbm->msi_irq_table[i] == irq)
			break;
	}
	if (i >= pbm->msi_num) {
		printk(KERN_ERR "%s: teardown: No MSI for irq %u\n",
		       pbm->name, irq);
		return;
	}

	msi_num = pbm->msi_first + i;
	pbm->msi_irq_table[i] = ~0U;

	err = ops->msi_teardown(pbm, msi_num);
	if (err) {
		printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, "
		       "irq %u, gives error %d\n",
		       pbm->name, msi_num, irq, err);
		return;
	}

	free_msi(pbm, msi_num);

	irq_set_chip(irq, NULL);
	irq_free(irq);
}

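/*
 * Allocate the MSI allocation bitmap, rounding the number of MSIs up
 * to a whole number of unsigned longs.
 */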
static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
{
	unsigned long size, bits_per_ulong;

	bits_per_ulong = sizeof(unsigned long) * 8;
	size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
	size /= 8;
	BUG_ON(size % sizeof(unsigned long));

	pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
	if (!pbm->msi_bitmap)
		return -ENOMEM;

	return 0;
}

static void msi_bitmap_free(struct pci_pbm_info *pbm)
{
	kfree(pbm->msi_bitmap);
	pbm->msi_bitmap = NULL;
}

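/*
 * Allocate the per-queue interrupt cookies (one per MSI event queue,
 * recording the PBM and queue id for the handler) and the table that
 * maps MSI numbers to virtual irqs.
 */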
static int msi_table_alloc(struct pci_pbm_info *pbm)
{
	int size, i;

	size = pbm->msiq_num * sizeof(struct sparc64_msiq_cookie);
	pbm->msiq_irq_cookies = kzalloc(size, GFP_KERNEL);
	if (!pbm->msiq_irq_cookies)
		return -ENOMEM;

	for (i = 0; i < pbm->msiq_num; i++) {
		struct sparc64_msiq_cookie *p;

		p = &pbm->msiq_irq_cookies[i];
		p->pbm = pbm;
		p->msiqid = pbm->msiq_first + i;
	}

	size = pbm->msi_num * sizeof(unsigned int);
	pbm->msi_irq_table = kzalloc(size, GFP_KERNEL);
	if (!pbm->msi_irq_table) {
		kfree(pbm->msiq_irq_cookies);
		pbm->msiq_irq_cookies = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void msi_table_free(struct pci_pbm_info *pbm)
{
	kfree(pbm->msiq_irq_cookies);
	pbm->msiq_irq_cookies = NULL;

	kfree(pbm->msi_irq_table);
	pbm->msi_irq_table = NULL;
}

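/*
 * Build and register the interrupt for one MSI event queue, binding
 * its affinity to the PBM's NUMA node when one is known.
 */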
static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
				 const struct sparc64_msiq_ops *ops,
				 unsigned long msiqid,
				 unsigned long devino)
{
	int irq = ops->msiq_build_irq(pbm, msiqid, devino);
	int err, nid;

	if (irq < 0)
		return irq;

	nid = pbm->numa_node;
	if (nid != -1) {
		cpumask_t numa_mask;

		cpumask_copy(&numa_mask, cpumask_of_node(nid));
		irq_set_affinity(irq, &numa_mask);
	}
	err = request_irq(irq, sparc64_msiq_interrupt, 0,
			  "MSIQ",
			  &pbm->msiq_irq_cookies[msiqid - pbm->msiq_first]);
	if (err)
		return err;

	return 0;
}

static int sparc64_bringup_msi_queues(struct pci_pbm_info *pbm,
				      const struct sparc64_msiq_ops *ops)
{
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = i + pbm->msiq_first;
		unsigned long devino = i + pbm->msiq_first_devino;
		int err;

		err = bringup_one_msi_queue(pbm, ops, msiqid, devino);
		if (err)
			return err;
	}

	return 0;
}

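/*
 * Probe the MSI properties of a PCI bus module from the device tree
 * ("#msi-eqs", "msi-eq-size", "msi-eq-to-devino", "#msi", "msi-ranges",
 * "msi-data-mask", "msix-data-width", "msi-address-ranges"), allocate
 * the bookkeeping structures, bring up the event queues, and hook the
 * PBM's MSI setup/teardown methods.  Any failure disables MSI support
 * for this PBM.
 */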
void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
			  const struct sparc64_msiq_ops *ops)
{
	const u32 *val;
	int len;

	val = of_get_property(pbm->op->dev.of_node, "#msi-eqs", &len);
	if (!val || len != 4)
		goto no_msi;
	pbm->msiq_num = *val;
	if (pbm->msiq_num) {
		const struct msiq_prop {
			u32 first_msiq;
			u32 num_msiq;
			u32 first_devino;
		} *mqp;
		const struct msi_range_prop {
			u32 first_msi;
			u32 num_msi;
		} *mrng;
		const struct addr_range_prop {
			u32 msi32_high;
			u32 msi32_low;
			u32 msi32_len;
			u32 msi64_high;
			u32 msi64_low;
			u32 msi64_len;
		} *arng;

		val = of_get_property(pbm->op->dev.of_node, "msi-eq-size", &len);
		if (!val || len != 4)
			goto no_msi;

		pbm->msiq_ent_count = *val;

		mqp = of_get_property(pbm->op->dev.of_node,
				      "msi-eq-to-devino", &len);
		if (!mqp)
			mqp = of_get_property(pbm->op->dev.of_node,
					      "msi-eq-devino", &len);
		if (!mqp || len != sizeof(struct msiq_prop))
			goto no_msi;

		pbm->msiq_first = mqp->first_msiq;
		pbm->msiq_first_devino = mqp->first_devino;

		val = of_get_property(pbm->op->dev.of_node, "#msi", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_num = *val;

		mrng = of_get_property(pbm->op->dev.of_node, "msi-ranges", &len);
		if (!mrng || len != sizeof(struct msi_range_prop))
			goto no_msi;
		pbm->msi_first = mrng->first_msi;

		val = of_get_property(pbm->op->dev.of_node, "msi-data-mask", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_data_mask = *val;

		val = of_get_property(pbm->op->dev.of_node, "msix-data-width", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msix_data_width = *val;

		arng = of_get_property(pbm->op->dev.of_node, "msi-address-ranges",
				       &len);
		if (!arng || len != sizeof(struct addr_range_prop))
			goto no_msi;
		pbm->msi32_start = ((u64)arng->msi32_high << 32) |
			(u64) arng->msi32_low;
		pbm->msi64_start = ((u64)arng->msi64_high << 32) |
			(u64) arng->msi64_low;
		pbm->msi32_len = arng->msi32_len;
		pbm->msi64_len = arng->msi64_len;

		if (msi_bitmap_alloc(pbm))
			goto no_msi;

		if (msi_table_alloc(pbm)) {
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		if (ops->msiq_alloc(pbm)) {
			msi_table_free(pbm);
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		if (sparc64_bringup_msi_queues(pbm, ops)) {
			ops->msiq_free(pbm);
			msi_table_free(pbm);
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
		       "devino[0x%x]\n",
		       pbm->name,
		       pbm->msiq_first, pbm->msiq_num,
		       pbm->msiq_ent_count,
		       pbm->msiq_first_devino);
		printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
		       "width[%u]\n",
		       pbm->name,
		       pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
		       pbm->msix_data_width);
		printk(KERN_INFO "%s: MSI addr32[0x%llx:0x%x] "
		       "addr64[0x%llx:0x%x]\n",
		       pbm->name,
		       pbm->msi32_start, pbm->msi32_len,
		       pbm->msi64_start, pbm->msi64_len);
		printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n",
		       pbm->name,
		       __pa(pbm->msi_queues));

		pbm->msi_ops = ops;
		pbm->setup_msi_irq = sparc64_setup_msi_irq;
		pbm->teardown_msi_irq = sparc64_teardown_msi_irq;
	}
	return;

no_msi:
	pbm->msiq_num = 0;
	printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
}