1/*
2 * Freescale MPC85xx/MPC86xx RapidIO RMU support
3 *
4 * Copyright 2009 Sysgo AG
5 * Thomas Moll <thomas.moll@sysgo.com>
6 * - fixed maintenance access routines, check for aligned access
7 *
8 * Copyright 2009 Integrated Device Technology, Inc.
9 * Alex Bounine <alexandre.bounine@idt.com>
10 * - Added Port-Write message handling
11 * - Added Machine Check exception handling
12 *
13 * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
14 * Zhang Wei <wei.zhang@freescale.com>
15 * Lian Minghuan-B31939 <Minghuan.Lian@freescale.com>
16 * Liu Gang <Gang.Liu@freescale.com>
17 *
18 * Copyright 2005 MontaVista Software, Inc.
19 * Matt Porter <mporter@kernel.crashing.org>
20 *
21 * This program is free software; you can redistribute it and/or modify it
22 * under the terms of the GNU General Public License as published by the
23 * Free Software Foundation; either version 2 of the License, or (at your
24 * option) any later version.
25 */
26
27#include <linux/types.h>
28#include <linux/dma-mapping.h>
29#include <linux/interrupt.h>
30#include <linux/of_irq.h>
31#include <linux/of_platform.h>
32#include <linux/slab.h>
33
34#include "fsl_rio.h"
35
36#define GET_RMM_HANDLE(mport) \
37 (((struct rio_priv *)(mport->priv))->rmm_handle)
38
/* RapidIO IRQ numbers, read from the OF device tree */
40#define IRQ_RIO_PW(m) (((struct fsl_rio_pw *)(m))->pwirq)
41#define IRQ_RIO_BELL(m) (((struct fsl_rio_dbell *)(m))->bellirq)
42#define IRQ_RIO_TX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->txirq)
43#define IRQ_RIO_RX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->rxirq)
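/*
 * Illustrative use of the helpers above -- this mirrors what
 * fsl_open_outb_mbox() below actually does:
 *
 *	rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
 *			 "msg_tx", (void *)mport);
 */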
44
45#define RIO_MIN_TX_RING_SIZE 2
46#define RIO_MAX_TX_RING_SIZE 2048
47#define RIO_MIN_RX_RING_SIZE 2
48#define RIO_MAX_RX_RING_SIZE 2048
49
50#define RIO_IPWMR_SEN 0x00100000
51#define RIO_IPWMR_QFIE 0x00000100
52#define RIO_IPWMR_EIE 0x00000020
53#define RIO_IPWMR_CQ 0x00000002
54#define RIO_IPWMR_PWE 0x00000001
55
56#define RIO_IPWSR_QF 0x00100000
57#define RIO_IPWSR_TE 0x00000080
58#define RIO_IPWSR_QFI 0x00000010
59#define RIO_IPWSR_PWD 0x00000008
60#define RIO_IPWSR_PWB 0x00000004
61
62#define RIO_EPWISR 0x10010
63/* EPWISR Error match value */
64#define RIO_EPWISR_PINT1 0x80000000
65#define RIO_EPWISR_PINT2 0x40000000
66#define RIO_EPWISR_MU 0x00000002
67#define RIO_EPWISR_PW 0x00000001
68
69#define IPWSR_CLEAR 0x98
70#define OMSR_CLEAR 0x1cb3
71#define IMSR_CLEAR 0x491
72#define IDSR_CLEAR 0x91
73#define ODSR_CLEAR 0x1c00
74#define LTLEECSR_ENABLE_ALL 0xFFC000FC
75#define RIO_LTLEECSR 0x060c
76
77#define RIO_IM0SR 0x64
78#define RIO_IM1SR 0x164
79#define RIO_OM0SR 0x4
80#define RIO_OM1SR 0x104
81
82#define RIO_DBELL_WIN_SIZE 0x1000
83
84#define RIO_MSG_OMR_MUI 0x00000002
85#define RIO_MSG_OSR_TE 0x00000080
86#define RIO_MSG_OSR_QOI 0x00000020
87#define RIO_MSG_OSR_QFI 0x00000010
88#define RIO_MSG_OSR_MUB 0x00000004
89#define RIO_MSG_OSR_EOMI 0x00000002
90#define RIO_MSG_OSR_QEI 0x00000001
91
92#define RIO_MSG_IMR_MI 0x00000002
93#define RIO_MSG_ISR_TE 0x00000080
94#define RIO_MSG_ISR_QFI 0x00000010
95#define RIO_MSG_ISR_DIQI 0x00000001
96
97#define RIO_MSG_DESC_SIZE 32
98#define RIO_MSG_BUFFER_SIZE 4096
99
100#define DOORBELL_DMR_DI 0x00000002
101#define DOORBELL_DSR_TE 0x00000080
102#define DOORBELL_DSR_QFI 0x00000010
103#define DOORBELL_DSR_DIQI 0x00000001
104
#define DOORBELL_MESSAGE_SIZE	0x08

/* Serializes the doorbell register sequence in fsl_rio_doorbell_send() */
static DEFINE_SPINLOCK(fsl_rio_doorbell_lock);
106
107struct rio_msg_regs {
108 u32 omr;
109 u32 osr;
110 u32 pad1;
111 u32 odqdpar;
112 u32 pad2;
113 u32 osar;
114 u32 odpr;
115 u32 odatr;
116 u32 odcr;
117 u32 pad3;
118 u32 odqepar;
119 u32 pad4[13];
120 u32 imr;
121 u32 isr;
122 u32 pad5;
123 u32 ifqdpar;
124 u32 pad6;
125 u32 ifqepar;
126};
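/*
 * The register block above is not mapped separately; fsl_rio_setup_rmu()
 * at the end of this file points msg_regs into the shared rmu_regs_win
 * mapping, roughly:
 *
 *	msg_start = of_read_number(msg_addr, aw);
 *	rmu->msg_regs = (struct rio_msg_regs *)(rmu_regs_win + (u32)msg_start);
 */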
127
128struct rio_dbell_regs {
129 u32 odmr;
130 u32 odsr;
131 u32 pad1[4];
132 u32 oddpr;
133 u32 oddatr;
134 u32 pad2[3];
135 u32 odretcr;
136 u32 pad3[12];
137 u32 dmr;
138 u32 dsr;
139 u32 pad4;
140 u32 dqdpar;
141 u32 pad5;
142 u32 dqepar;
143};
144
145struct rio_pw_regs {
146 u32 pwmr;
147 u32 pwsr;
148 u32 epwqbar;
149 u32 pwqbar;
150};
151
152
153struct rio_tx_desc {
154 u32 pad1;
155 u32 saddr;
156 u32 dport;
157 u32 dattr;
158 u32 pad2;
159 u32 pad3;
160 u32 dwcnt;
161 u32 pad4;
162};
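/*
 * One 32-byte (RIO_MSG_DESC_SIZE) outbound descriptor is used per message.
 * fsl_add_outb_message() below fills dport with the destination ID and
 * mailbox, dattr with the priority/interrupt attributes, dwcnt with the
 * power-of-two rounded transfer size, and saddr with the snoop-enabled
 * source buffer address.
 */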
163
164struct rio_msg_tx_ring {
165 void *virt;
166 dma_addr_t phys;
167 void *virt_buffer[RIO_MAX_TX_RING_SIZE];
168 dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
169 int tx_slot;
170 int size;
171 void *dev_id;
172};
173
174struct rio_msg_rx_ring {
175 void *virt;
176 dma_addr_t phys;
177 void *virt_buffer[RIO_MAX_RX_RING_SIZE];
178 int rx_slot;
179 int size;
180 void *dev_id;
181};
182
183struct fsl_rmu {
184 struct rio_msg_regs __iomem *msg_regs;
185 struct rio_msg_tx_ring msg_tx_ring;
186 struct rio_msg_rx_ring msg_rx_ring;
187 int txirq;
188 int rxirq;
189};
190
191struct rio_dbell_msg {
192 u16 pad1;
193 u16 tid;
194 u16 sid;
195 u16 info;
196};
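/*
 * Inbound doorbells arrive in the 8-byte (DOORBELL_MESSAGE_SIZE) layout
 * above; fsl_rio_dbell_handler() locates the current entry by masking the
 * dequeue pointer into the 4KB doorbell ring:
 *
 *	dmsg = dbell_ring.virt + (in_be32(&dbell_regs->dqdpar) & 0xfff);
 */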
197
198/**
199 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
200 * @irq: Linux interrupt number
201 * @dev_instance: Pointer to interrupt-specific data
202 *
 * Handles outbound message interrupts. Executes a registered outbound
204 * mailbox event handler and acks the interrupt occurrence.
205 */
206static irqreturn_t
207fsl_rio_tx_handler(int irq, void *dev_instance)
208{
209 int osr;
210 struct rio_mport *port = (struct rio_mport *)dev_instance;
211 struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
212
213 osr = in_be32(&rmu->msg_regs->osr);
214
215 if (osr & RIO_MSG_OSR_TE) {
216 pr_info("RIO: outbound message transmission error\n");
217 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE);
218 goto out;
219 }
220
221 if (osr & RIO_MSG_OSR_QOI) {
222 pr_info("RIO: outbound message queue overflow\n");
223 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI);
224 goto out;
225 }
226
227 if (osr & RIO_MSG_OSR_EOMI) {
228 u32 dqp = in_be32(&rmu->msg_regs->odqdpar);
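		/* Descriptors are RIO_MSG_DESC_SIZE (32) bytes, hence >> 5 */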
229 int slot = (dqp - rmu->msg_tx_ring.phys) >> 5;
230 if (port->outb_msg[0].mcback != NULL) {
231 port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id,
232 -1,
233 slot);
234 }
235 /* Ack the end-of-message interrupt */
236 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI);
237 }
238
239out:
240 return IRQ_HANDLED;
241}
242
243/**
244 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
245 * @irq: Linux interrupt number
246 * @dev_instance: Pointer to interrupt-specific data
247 *
248 * Handles inbound message interrupts. Executes a registered inbound
249 * mailbox event handler and acks the interrupt occurrence.
250 */
251static irqreturn_t
252fsl_rio_rx_handler(int irq, void *dev_instance)
253{
254 int isr;
255 struct rio_mport *port = (struct rio_mport *)dev_instance;
256 struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
257
258 isr = in_be32(&rmu->msg_regs->isr);
259
260 if (isr & RIO_MSG_ISR_TE) {
261 pr_info("RIO: inbound message reception error\n");
262 out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE);
263 goto out;
264 }
265
266 /* XXX Need to check/dispatch until queue empty */
267 if (isr & RIO_MSG_ISR_DIQI) {
268 /*
269 * Can receive messages for any mailbox/letter to that
270 * mailbox destination. So, make the callback with an
271 * unknown/invalid mailbox number argument.
272 */
273 if (port->inb_msg[0].mcback != NULL)
274 port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id,
275 -1,
276 -1);
277
278 /* Ack the queueing interrupt */
279 out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI);
280 }
281
282out:
283 return IRQ_HANDLED;
284}
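/*
 * Client-side sketch (illustrative, not part of this file): a driver that
 * wants these inbound-message callbacks registers a mailbox through the
 * RapidIO core, e.g.
 *
 *	rio_request_inb_mbox(mport, dev_id, 0, ring_entries, my_inb_callback);
 *
 * my_inb_callback() is then invoked from fsl_rio_rx_handler() above with -1
 * for both the mailbox and slot arguments.
 */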
285
286/**
287 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
288 * @irq: Linux interrupt number
289 * @dev_instance: Pointer to interrupt-specific data
290 *
291 * Handles doorbell interrupts. Parses a list of registered
292 * doorbell event handlers and executes a matching event handler.
293 */
294static irqreturn_t
295fsl_rio_dbell_handler(int irq, void *dev_instance)
296{
297 int dsr;
298 struct fsl_rio_dbell *fsl_dbell = (struct fsl_rio_dbell *)dev_instance;
299 int i;
300
301 dsr = in_be32(&fsl_dbell->dbell_regs->dsr);
302
303 if (dsr & DOORBELL_DSR_TE) {
304 pr_info("RIO: doorbell reception error\n");
305 out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_TE);
306 goto out;
307 }
308
309 if (dsr & DOORBELL_DSR_QFI) {
310 pr_info("RIO: doorbell queue full\n");
311 out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_QFI);
312 }
313
314 /* XXX Need to check/dispatch until queue empty */
315 if (dsr & DOORBELL_DSR_DIQI) {
316 struct rio_dbell_msg *dmsg =
317 fsl_dbell->dbell_ring.virt +
318 (in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff);
319 struct rio_dbell *dbell;
320 int found = 0;
321
322 pr_debug
323 ("RIO: processing doorbell,"
324 " sid %2.2x tid %2.2x info %4.4x\n",
325 dmsg->sid, dmsg->tid, dmsg->info);
326
327 for (i = 0; i < MAX_PORT_NUM; i++) {
328 if (fsl_dbell->mport[i]) {
329 list_for_each_entry(dbell,
330 &fsl_dbell->mport[i]->dbells, node) {
331 if ((dbell->res->start
332 <= dmsg->info)
333 && (dbell->res->end
334 >= dmsg->info)) {
335 found = 1;
336 break;
337 }
338 }
339 if (found && dbell->dinb) {
340 dbell->dinb(fsl_dbell->mport[i],
341 dbell->dev_id, dmsg->sid,
342 dmsg->tid,
343 dmsg->info);
344 break;
345 }
346 }
347 }
348
349 if (!found) {
350 pr_debug
351 ("RIO: spurious doorbell,"
352 " sid %2.2x tid %2.2x info %4.4x\n",
353 dmsg->sid, dmsg->tid,
354 dmsg->info);
355 }
356 setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI);
357 out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI);
358 }
359
360out:
361 return IRQ_HANDLED;
362}
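/*
 * Client sketch (illustrative, not part of this file): the ranges matched
 * above come from drivers that register an inbound doorbell window via the
 * RapidIO core, e.g.
 *
 *	rio_request_inb_dbell(mport, dev_id, 0x0000, 0x00ff, my_dbell_callback);
 *
 * my_dbell_callback(mport, dev_id, sid, tid, info) then runs for any
 * doorbell whose info field falls inside [0x0000, 0x00ff].
 */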
363
364void msg_unit_error_handler(void)
365{
366
367 /*XXX: Error recovery is not implemented, we just clear errors */
368 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
369
370 out_be32((u32 *)(rmu_regs_win + RIO_IM0SR), IMSR_CLEAR);
371 out_be32((u32 *)(rmu_regs_win + RIO_IM1SR), IMSR_CLEAR);
372 out_be32((u32 *)(rmu_regs_win + RIO_OM0SR), OMSR_CLEAR);
373 out_be32((u32 *)(rmu_regs_win + RIO_OM1SR), OMSR_CLEAR);
374
375 out_be32(&dbell->dbell_regs->odsr, ODSR_CLEAR);
376 out_be32(&dbell->dbell_regs->dsr, IDSR_CLEAR);
377
378 out_be32(&pw->pw_regs->pwsr, IPWSR_CLEAR);
379}
380
381/**
382 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
383 * @irq: Linux interrupt number
384 * @dev_instance: Pointer to interrupt-specific data
385 *
386 * Handles port write interrupts. Parses a list of registered
387 * port write event handlers and executes a matching event handler.
388 */
389static irqreturn_t
390fsl_rio_port_write_handler(int irq, void *dev_instance)
391{
392 u32 ipwmr, ipwsr;
393 struct fsl_rio_pw *pw = (struct fsl_rio_pw *)dev_instance;
394 u32 epwisr, tmp;
395
396 epwisr = in_be32(rio_regs_win + RIO_EPWISR);
397 if (!(epwisr & RIO_EPWISR_PW))
398 goto pw_done;
399
400 ipwmr = in_be32(&pw->pw_regs->pwmr);
401 ipwsr = in_be32(&pw->pw_regs->pwsr);
402
403#ifdef DEBUG_PW
404 pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
405 if (ipwsr & RIO_IPWSR_QF)
406 pr_debug(" QF");
407 if (ipwsr & RIO_IPWSR_TE)
408 pr_debug(" TE");
409 if (ipwsr & RIO_IPWSR_QFI)
410 pr_debug(" QFI");
411 if (ipwsr & RIO_IPWSR_PWD)
412 pr_debug(" PWD");
413 if (ipwsr & RIO_IPWSR_PWB)
414 pr_debug(" PWB");
415 pr_debug(" )\n");
416#endif
417 /* Schedule deferred processing if PW was received */
418 if (ipwsr & RIO_IPWSR_QFI) {
419 /* Save PW message (if there is room in FIFO),
420 * otherwise discard it.
421 */
422 if (kfifo_avail(&pw->pw_fifo) >= RIO_PW_MSG_SIZE) {
423 pw->port_write_msg.msg_count++;
424 kfifo_in(&pw->pw_fifo, pw->port_write_msg.virt,
425 RIO_PW_MSG_SIZE);
426 } else {
427 pw->port_write_msg.discard_count++;
428 pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
429 pw->port_write_msg.discard_count);
430 }
431 /* Clear interrupt and issue Clear Queue command. This allows
432 * another port-write to be received.
433 */
434 out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_QFI);
435 out_be32(&pw->pw_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
436
437 schedule_work(&pw->pw_work);
438 }
439
440 if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
441 pw->port_write_msg.err_count++;
442 pr_debug("RIO: Port-Write Transaction Err (%d)\n",
443 pw->port_write_msg.err_count);
444 /* Clear Transaction Error: port-write controller should be
445 * disabled when clearing this error
446 */
447 out_be32(&pw->pw_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
448 out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_TE);
449 out_be32(&pw->pw_regs->pwmr, ipwmr);
450 }
451
452 if (ipwsr & RIO_IPWSR_PWD) {
453 pw->port_write_msg.discard_count++;
454 pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
455 pw->port_write_msg.discard_count);
456 out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_PWD);
457 }
458
459pw_done:
460 if (epwisr & RIO_EPWISR_PINT1) {
461 tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
462 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
463 fsl_rio_port_error_handler(0);
464 }
465
466 if (epwisr & RIO_EPWISR_PINT2) {
467 tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
468 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
469 fsl_rio_port_error_handler(1);
470 }
471
472 if (epwisr & RIO_EPWISR_MU) {
473 tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
474 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
475 msg_unit_error_handler();
476 }
477
478 return IRQ_HANDLED;
479}
480
481static void fsl_pw_dpc(struct work_struct *work)
482{
483 struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work);
484 u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)];
485
486 /*
487 * Process port-write messages
488 */
489 while (kfifo_out_spinlocked(&pw->pw_fifo, (unsigned char *)msg_buffer,
490 RIO_PW_MSG_SIZE, &pw->pw_fifo_lock)) {
491 /* Process one message */
492#ifdef DEBUG_PW
493 {
494 u32 i;
495 pr_debug("%s : Port-Write Message:", __func__);
496 for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
497 if ((i%4) == 0)
498 pr_debug("\n0x%02x: 0x%08x", i*4,
499 msg_buffer[i]);
500 else
501 pr_debug(" 0x%08x", msg_buffer[i]);
502 }
503 pr_debug("\n");
504 }
505#endif
506 /* Pass the port-write message to RIO core for processing */
507 rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
508 }
509}
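/*
 * rio_inb_pwrite_handler() forwards each message to handlers that devices
 * registered with the RapidIO core; a typical (illustrative) registration
 * from an enumeration or error-management driver looks like
 *
 *	rio_request_inb_pwrite(rdev, my_pwrite_callback);
 */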
510
511/**
 * fsl_rio_pw_enable - enable/disable port-write interface
513 * @mport: Master port implementing the port write unit
514 * @enable: 1=enable; 0=disable port-write message handling
515 */
516int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
517{
518 u32 rval;
519
520 rval = in_be32(&pw->pw_regs->pwmr);
521
522 if (enable)
523 rval |= RIO_IPWMR_PWE;
524 else
525 rval &= ~RIO_IPWMR_PWE;
526
527 out_be32(&pw->pw_regs->pwmr, rval);
528
529 return 0;
530}
531
532/**
533 * fsl_rio_port_write_init - MPC85xx port write interface init
 * @pw: fsl_rio_pw structure for the port write unit
535 *
536 * Initializes port write unit hardware and DMA buffer
537 * ring. Called from fsl_rio_setup(). Returns %0 on success
538 * or %-ENOMEM on failure.
539 */
540
541int fsl_rio_port_write_init(struct fsl_rio_pw *pw)
542{
543 int rc = 0;
544
545 /* Following configurations require a disabled port write controller */
546 out_be32(&pw->pw_regs->pwmr,
547 in_be32(&pw->pw_regs->pwmr) & ~RIO_IPWMR_PWE);
548
549 /* Initialize port write */
550 pw->port_write_msg.virt = dma_alloc_coherent(pw->dev,
551 RIO_PW_MSG_SIZE,
552 &pw->port_write_msg.phys, GFP_KERNEL);
553 if (!pw->port_write_msg.virt) {
		pr_err("RIO: unable to allocate port write queue\n");
555 return -ENOMEM;
556 }
557
558 pw->port_write_msg.err_count = 0;
559 pw->port_write_msg.discard_count = 0;
560
561 /* Point dequeue/enqueue pointers at first entry */
562 out_be32(&pw->pw_regs->epwqbar, 0);
563 out_be32(&pw->pw_regs->pwqbar, (u32) pw->port_write_msg.phys);
564
565 pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
566 in_be32(&pw->pw_regs->epwqbar),
567 in_be32(&pw->pw_regs->pwqbar));
568
569 /* Clear interrupt status IPWSR */
570 out_be32(&pw->pw_regs->pwsr,
571 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
572
	/* Configure port write controller for snooping, enable all
	 * reporting, and clear the queue full condition */
575 out_be32(&pw->pw_regs->pwmr,
576 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
577
578
579 /* Hook up port-write handler */
580 rc = request_irq(IRQ_RIO_PW(pw), fsl_rio_port_write_handler,
581 IRQF_SHARED, "port-write", (void *)pw);
582 if (rc < 0) {
		pr_err("MPC85xx RIO: unable to request port-write irq");
584 goto err_out;
585 }
586 /* Enable Error Interrupt */
587 out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
588
589 INIT_WORK(&pw->pw_work, fsl_pw_dpc);
590 spin_lock_init(&pw->pw_fifo_lock);
591 if (kfifo_alloc(&pw->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
592 pr_err("FIFO allocation failed\n");
593 rc = -ENOMEM;
594 goto err_out_irq;
595 }
596
597 pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
598 in_be32(&pw->pw_regs->pwmr),
599 in_be32(&pw->pw_regs->pwsr));
600
601 return rc;
602
603err_out_irq:
604 free_irq(IRQ_RIO_PW(pw), (void *)pw);
605err_out:
606 dma_free_coherent(pw->dev, RIO_PW_MSG_SIZE,
607 pw->port_write_msg.virt,
608 pw->port_write_msg.phys);
609 return rc;
610}
611
612/**
613 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
614 * @mport: RapidIO master port info
615 * @index: ID of RapidIO interface
616 * @destid: Destination ID of target device
617 * @data: 16-bit info field of RapidIO doorbell message
618 *
619 * Sends a MPC85xx doorbell message. Returns %0 on success or
620 * %-EINVAL on failure.
621 */
622int fsl_rio_doorbell_send(struct rio_mport *mport,
623 int index, u16 destid, u16 data)
{
	unsigned long flags;

	pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
		 index, destid, data);

	spin_lock_irqsave(&fsl_rio_doorbell_lock, flags);

	/* In serial RapidIO silicon, such as the MPC8548 and MPC8641,
	 * the register sequence below is required.
	 */
	out_be32(&dbell->dbell_regs->odmr, 0x00000000);
	out_be32(&dbell->dbell_regs->odretcr, 0x00000004);
	out_be32(&dbell->dbell_regs->oddpr, destid << 16);
	out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
	out_be32(&dbell->dbell_regs->odmr, 0x00000001);

	spin_unlock_irqrestore(&fsl_rio_doorbell_lock, flags);

	return 0;
}
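/*
 * Illustrative caller (assumption -- the call normally arrives through the
 * mport operations installed by fsl_rio.c): a driver rings a remote
 * device's doorbell with
 *
 *	rio_send_doorbell(rdev, info);
 *
 * which ends up here with @destid taken from the target device.
 */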
639
640/**
641 * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
642 * @mport: Master port with outbound message queue
643 * @rdev: Target of outbound message
644 * @mbox: Outbound mailbox
645 * @buffer: Message to add to outbound queue
646 * @len: Length of message
647 *
648 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
649 * %0 on success or %-EINVAL on failure.
650 */
651int
652fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
653 void *buffer, size_t len)
654{
655 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
656 u32 omr;
657 struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt
658 + rmu->msg_tx_ring.tx_slot;
659 int ret = 0;
660
661 pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
662 "%p len %8.8zx\n", rdev->destid, mbox, buffer, len);
663 if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
664 ret = -EINVAL;
665 goto out;
666 }
667
668 /* Copy and clear rest of buffer */
669 memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer,
670 len);
671 if (len < (RIO_MAX_MSG_SIZE - 4))
672 memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot]
673 + len, 0, RIO_MAX_MSG_SIZE - len);
674
675 /* Set mbox field for message, and set destid */
676 desc->dport = (rdev->destid << 16) | (mbox & 0x3);
677
678 /* Enable EOMI interrupt and priority */
679 desc->dattr = 0x28000000 | ((mport->index) << 20);
680
681 /* Set transfer size aligned to next power of 2 (in double words) */
682 desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);
683
684 /* Set snooping and source buffer address */
685 desc->saddr = 0x00000004
686 | rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot];
687
688 /* Increment enqueue pointer */
689 omr = in_be32(&rmu->msg_regs->omr);
690 out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI);
691
692 /* Go to next descriptor */
693 if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size)
694 rmu->msg_tx_ring.tx_slot = 0;
695
696out:
697 return ret;
698}
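/*
 * Worked example of the dwcnt rounding above (hypothetical lengths): for
 * len == 100, get_bitmask_order(100) == 7, so dwcnt is programmed as 128;
 * a 256-byte message is already a power of two and dwcnt stays 256.
 */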
699
700/**
701 * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
702 * @mport: Master port implementing the outbound message unit
703 * @dev_id: Device specific pointer to pass on event
704 * @mbox: Mailbox to open
705 * @entries: Number of entries in the outbound mailbox ring
706 *
 * Initializes the buffer ring, requests the outbound message interrupt,
708 * and enables the outbound message unit. Returns %0 on success and
709 * %-EINVAL or %-ENOMEM on failure.
710 */
711int
712fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
713{
714 int i, j, rc = 0;
715 struct rio_priv *priv = mport->priv;
716 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
717
718 if ((entries < RIO_MIN_TX_RING_SIZE) ||
719 (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
720 rc = -EINVAL;
721 goto out;
722 }
723
724 /* Initialize shadow copy ring */
725 rmu->msg_tx_ring.dev_id = dev_id;
726 rmu->msg_tx_ring.size = entries;
727
728 for (i = 0; i < rmu->msg_tx_ring.size; i++) {
729 rmu->msg_tx_ring.virt_buffer[i] =
730 dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
731 &rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
732 if (!rmu->msg_tx_ring.virt_buffer[i]) {
733 rc = -ENOMEM;
734 for (j = 0; j < rmu->msg_tx_ring.size; j++)
735 if (rmu->msg_tx_ring.virt_buffer[j])
736 dma_free_coherent(priv->dev,
737 RIO_MSG_BUFFER_SIZE,
738 rmu->msg_tx_ring.
739 virt_buffer[j],
740 rmu->msg_tx_ring.
741 phys_buffer[j]);
742 goto out;
743 }
744 }
745
746 /* Initialize outbound message descriptor ring */
747 rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
748 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
749 &rmu->msg_tx_ring.phys, GFP_KERNEL);
750 if (!rmu->msg_tx_ring.virt) {
751 rc = -ENOMEM;
752 goto out_dma;
753 }
754 memset(rmu->msg_tx_ring.virt, 0,
755 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
756 rmu->msg_tx_ring.tx_slot = 0;
757
758 /* Point dequeue/enqueue pointers at first entry in ring */
759 out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys);
760 out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys);
761
762 /* Configure for snooping */
763 out_be32(&rmu->msg_regs->osar, 0x00000004);
764
765 /* Clear interrupt status */
766 out_be32(&rmu->msg_regs->osr, 0x000000b3);
767
768 /* Hook up outbound message handler */
769 rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
770 "msg_tx", (void *)mport);
771 if (rc < 0)
772 goto out_irq;
773
774 /*
775 * Configure outbound message unit
776 * Snooping
777 * Interrupts (all enabled, except QEIE)
778 * Chaining mode
779 * Disable
780 */
781 out_be32(&rmu->msg_regs->omr, 0x00100220);
782
783 /* Set number of entries */
784 out_be32(&rmu->msg_regs->omr,
785 in_be32(&rmu->msg_regs->omr) |
786 ((get_bitmask_order(entries) - 2) << 12));
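	/* Worked example: entries == 32 gives get_bitmask_order(32) - 2 == 4,
	 * i.e. the value 4 is written into the OMR size field above.
	 */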
787
788 /* Now enable the unit */
789 out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1);
790
791out:
792 return rc;
793
794out_irq:
795 dma_free_coherent(priv->dev,
796 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
797 rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
798
799out_dma:
800 for (i = 0; i < rmu->msg_tx_ring.size; i++)
801 dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
802 rmu->msg_tx_ring.virt_buffer[i],
803 rmu->msg_tx_ring.phys_buffer[i]);
804
805 return rc;
806}
807
808/**
809 * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
810 * @mport: Master port implementing the outbound message unit
811 * @mbox: Mailbox to close
812 *
813 * Disables the outbound message unit, free all buffers, and
814 * frees the outbound message interrupt.
815 */
816void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
817{
818 struct rio_priv *priv = mport->priv;
819 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
820
	/* Disable outbound message unit */
822 out_be32(&rmu->msg_regs->omr, 0);
823
824 /* Free ring */
825 dma_free_coherent(priv->dev,
826 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
827 rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
828
829 /* Free interrupt */
830 free_irq(IRQ_RIO_TX(mport), (void *)mport);
831}
832
833/**
834 * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
835 * @mport: Master port implementing the inbound message unit
836 * @dev_id: Device specific pointer to pass on event
837 * @mbox: Mailbox to open
838 * @entries: Number of entries in the inbound mailbox ring
839 *
 * Initializes the buffer ring, requests the inbound message interrupt,
841 * and enables the inbound message unit. Returns %0 on success
842 * and %-EINVAL or %-ENOMEM on failure.
843 */
844int
845fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
846{
847 int i, rc = 0;
848 struct rio_priv *priv = mport->priv;
849 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
850
851 if ((entries < RIO_MIN_RX_RING_SIZE) ||
852 (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
853 rc = -EINVAL;
854 goto out;
855 }
856
857 /* Initialize client buffer ring */
858 rmu->msg_rx_ring.dev_id = dev_id;
859 rmu->msg_rx_ring.size = entries;
860 rmu->msg_rx_ring.rx_slot = 0;
861 for (i = 0; i < rmu->msg_rx_ring.size; i++)
862 rmu->msg_rx_ring.virt_buffer[i] = NULL;
863
864 /* Initialize inbound message ring */
865 rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
866 rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
867 &rmu->msg_rx_ring.phys, GFP_KERNEL);
868 if (!rmu->msg_rx_ring.virt) {
869 rc = -ENOMEM;
870 goto out;
871 }
872
873 /* Point dequeue/enqueue pointers at first entry in ring */
874 out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys);
875 out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys);
876
877 /* Clear interrupt status */
878 out_be32(&rmu->msg_regs->isr, 0x00000091);
879
880 /* Hook up inbound message handler */
881 rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
882 "msg_rx", (void *)mport);
883 if (rc < 0) {
		dma_free_coherent(priv->dev,
			rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
			rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
887 goto out;
888 }
889
890 /*
891 * Configure inbound message unit:
892 * Snooping
893 * 4KB max message size
894 * Unmask all interrupt sources
895 * Disable
896 */
897 out_be32(&rmu->msg_regs->imr, 0x001b0060);
898
899 /* Set number of queue entries */
900 setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);
901
902 /* Now enable the unit */
903 setbits32(&rmu->msg_regs->imr, 0x1);
904
905out:
906 return rc;
907}
908
909/**
910 * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
911 * @mport: Master port implementing the inbound message unit
912 * @mbox: Mailbox to close
913 *
914 * Disables the inbound message unit, free all buffers, and
915 * frees the inbound message interrupt.
916 */
917void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
918{
919 struct rio_priv *priv = mport->priv;
920 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
921
922 /* Disable inbound message unit */
923 out_be32(&rmu->msg_regs->imr, 0);
924
925 /* Free ring */
926 dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
927 rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
928
929 /* Free interrupt */
930 free_irq(IRQ_RIO_RX(mport), (void *)mport);
931}
932
933/**
934 * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
935 * @mport: Master port implementing the inbound message unit
936 * @mbox: Inbound mailbox number
937 * @buf: Buffer to add to inbound queue
938 *
939 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
940 * %0 on success or %-EINVAL on failure.
941 */
942int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
943{
944 int rc = 0;
945 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
946
947 pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
948 rmu->msg_rx_ring.rx_slot);
949
950 if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) {
951 printk(KERN_ERR
952 "RIO: error adding inbound buffer %d, buffer exists\n",
953 rmu->msg_rx_ring.rx_slot);
954 rc = -EINVAL;
955 goto out;
956 }
957
958 rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf;
959 if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size)
960 rmu->msg_rx_ring.rx_slot = 0;
961
962out:
963 return rc;
964}
965
966/**
967 * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
968 * @mport: Master port implementing the inbound message unit
969 * @mbox: Inbound mailbox number
970 *
971 * Gets the next available inbound message from the inbound message queue.
972 * A pointer to the message is returned on success or NULL on failure.
973 */
974void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
975{
976 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
977 u32 phys_buf;
978 void *virt_buf;
979 void *buf = NULL;
980 int buf_idx;
981
982 phys_buf = in_be32(&rmu->msg_regs->ifqdpar);
983
984 /* If no more messages, then bail out */
985 if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
986 goto out2;
987
988 virt_buf = rmu->msg_rx_ring.virt + (phys_buf
989 - rmu->msg_rx_ring.phys);
990 buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
991 buf = rmu->msg_rx_ring.virt_buffer[buf_idx];
992
993 if (!buf) {
994 printk(KERN_ERR
995 "RIO: inbound message copy failed, no buffers\n");
996 goto out1;
997 }
998
	/* Copy max message size; the caller must have allocated that much */
1000 memcpy(buf, virt_buf, RIO_MAX_MSG_SIZE);
1001
1002 /* Clear the available buffer */
1003 rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;
1004
1005out1:
1006 setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI);
1007
1008out2:
1009 return buf;
1010}
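/*
 * Putting the inbound mailbox pieces together -- an illustrative client
 * flow (buffer names are hypothetical; the rio_* calls are the generic
 * RapidIO core API that lands in the fsl_* routines above):
 *
 *	rio_request_inb_mbox(mport, priv, 0, RING_ENTRIES, my_inb_callback);
 *	for (i = 0; i < RING_ENTRIES; i++)
 *		rio_add_inb_buffer(mport, 0, my_buffers[i]);
 *
 *	// later, from my_inb_callback():
 *	while ((msg = rio_get_inb_message(mport, 0)) != NULL) {
 *		consume(msg);
 *		rio_add_inb_buffer(mport, 0, msg);
 *	}
 */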
1011
1012/**
1013 * fsl_rio_doorbell_init - MPC85xx doorbell interface init
 * @dbell: fsl_rio_dbell structure for the inbound doorbell unit
1015 *
1016 * Initializes doorbell unit hardware and inbound DMA buffer
1017 * ring. Called from fsl_rio_setup(). Returns %0 on success
1018 * or %-ENOMEM on failure.
1019 */
1020int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell)
1021{
1022 int rc = 0;
1023
1024 /* Initialize inbound doorbells */
1025 dbell->dbell_ring.virt = dma_alloc_coherent(dbell->dev, 512 *
1026 DOORBELL_MESSAGE_SIZE, &dbell->dbell_ring.phys, GFP_KERNEL);
1027 if (!dbell->dbell_ring.virt) {
		printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n");
1029 rc = -ENOMEM;
1030 goto out;
1031 }
1032
1033 /* Point dequeue/enqueue pointers at first entry in ring */
1034 out_be32(&dbell->dbell_regs->dqdpar, (u32) dbell->dbell_ring.phys);
1035 out_be32(&dbell->dbell_regs->dqepar, (u32) dbell->dbell_ring.phys);
1036
1037 /* Clear interrupt status */
1038 out_be32(&dbell->dbell_regs->dsr, 0x00000091);
1039
1040 /* Hook up doorbell handler */
1041 rc = request_irq(IRQ_RIO_BELL(dbell), fsl_rio_dbell_handler, 0,
1042 "dbell_rx", (void *)dbell);
1043 if (rc < 0) {
1044 dma_free_coherent(dbell->dev, 512 * DOORBELL_MESSAGE_SIZE,
1045 dbell->dbell_ring.virt, dbell->dbell_ring.phys);
1046 printk(KERN_ERR
1047 "MPC85xx RIO: unable to request inbound doorbell irq");
1048 goto out;
1049 }
1050
1051 /* Configure doorbells for snooping, 512 entries, and enable */
1052 out_be32(&dbell->dbell_regs->dmr, 0x00108161);
1053
1054out:
1055 return rc;
1056}
1057
1058int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
1059{
1060 struct rio_priv *priv;
1061 struct fsl_rmu *rmu;
1062 u64 msg_start;
1063 const u32 *msg_addr;
1064 int mlen;
1065 int aw;
1066
1067 if (!mport || !mport->priv)
1068 return -EINVAL;
1069
1070 priv = mport->priv;
1071
1072 if (!node) {
1073 dev_warn(priv->dev, "Can't get %s property 'fsl,rmu'\n",
1074 priv->dev->of_node->full_name);
1075 return -EINVAL;
1076 }
1077
1078 rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL);
1079 if (!rmu)
1080 return -ENOMEM;
1081
1082 aw = of_n_addr_cells(node);
1083 msg_addr = of_get_property(node, "reg", &mlen);
1084 if (!msg_addr) {
1085 pr_err("%s: unable to find 'reg' property of message-unit\n",
1086 node->full_name);
1087 kfree(rmu);
1088 return -ENOMEM;
1089 }
1090 msg_start = of_read_number(msg_addr, aw);
1091
1092 rmu->msg_regs = (struct rio_msg_regs *)
1093 (rmu_regs_win + (u32)msg_start);
1094
1095 rmu->txirq = irq_of_parse_and_map(node, 0);
1096 rmu->rxirq = irq_of_parse_and_map(node, 1);
1097 printk(KERN_INFO "%s: txirq: %d, rxirq %d\n",
1098 node->full_name, rmu->txirq, rmu->rxirq);
1099
1100 priv->rmm_handle = rmu;
1101
1102 rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
1103 rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
1104 rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
1105
1106 return 0;
1107}
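/*
 * Caller sketch (assumption -- the real probe logic lives in fsl_rio.c):
 * this routine is invoked once per message-unit child node of the RMU
 * block, roughly
 *
 *	for_each_child_of_node(rmu_node, np)
 *		if (of_device_is_compatible(np, "fsl,srio-msg-unit"))
 *			fsl_rio_setup_rmu(mport, np);
 */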
1/*
2 * Freescale MPC85xx/MPC86xx RapidIO RMU support
3 *
4 * Copyright 2009 Sysgo AG
5 * Thomas Moll <thomas.moll@sysgo.com>
6 * - fixed maintenance access routines, check for aligned access
7 *
8 * Copyright 2009 Integrated Device Technology, Inc.
9 * Alex Bounine <alexandre.bounine@idt.com>
10 * - Added Port-Write message handling
11 * - Added Machine Check exception handling
12 *
13 * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
14 * Zhang Wei <wei.zhang@freescale.com>
15 * Lian Minghuan-B31939 <Minghuan.Lian@freescale.com>
16 * Liu Gang <Gang.Liu@freescale.com>
17 *
18 * Copyright 2005 MontaVista Software, Inc.
19 * Matt Porter <mporter@kernel.crashing.org>
20 *
21 * This program is free software; you can redistribute it and/or modify it
22 * under the terms of the GNU General Public License as published by the
23 * Free Software Foundation; either version 2 of the License, or (at your
24 * option) any later version.
25 */
26
27#include <linux/types.h>
28#include <linux/dma-mapping.h>
29#include <linux/interrupt.h>
30#include <linux/of_irq.h>
31#include <linux/of_platform.h>
32#include <linux/slab.h>
33
34#include "fsl_rio.h"
35
36#define GET_RMM_HANDLE(mport) \
37 (((struct rio_priv *)(mport->priv))->rmm_handle)
38
39/* RapidIO definition irq, which read from OF-tree */
40#define IRQ_RIO_PW(m) (((struct fsl_rio_pw *)(m))->pwirq)
41#define IRQ_RIO_BELL(m) (((struct fsl_rio_dbell *)(m))->bellirq)
42#define IRQ_RIO_TX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->txirq)
43#define IRQ_RIO_RX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->rxirq)
44
45#define RIO_MIN_TX_RING_SIZE 2
46#define RIO_MAX_TX_RING_SIZE 2048
47#define RIO_MIN_RX_RING_SIZE 2
48#define RIO_MAX_RX_RING_SIZE 2048
49
50#define RIO_IPWMR_SEN 0x00100000
51#define RIO_IPWMR_QFIE 0x00000100
52#define RIO_IPWMR_EIE 0x00000020
53#define RIO_IPWMR_CQ 0x00000002
54#define RIO_IPWMR_PWE 0x00000001
55
56#define RIO_IPWSR_QF 0x00100000
57#define RIO_IPWSR_TE 0x00000080
58#define RIO_IPWSR_QFI 0x00000010
59#define RIO_IPWSR_PWD 0x00000008
60#define RIO_IPWSR_PWB 0x00000004
61
62#define RIO_EPWISR 0x10010
63/* EPWISR Error match value */
64#define RIO_EPWISR_PINT1 0x80000000
65#define RIO_EPWISR_PINT2 0x40000000
66#define RIO_EPWISR_MU 0x00000002
67#define RIO_EPWISR_PW 0x00000001
68
69#define IPWSR_CLEAR 0x98
70#define OMSR_CLEAR 0x1cb3
71#define IMSR_CLEAR 0x491
72#define IDSR_CLEAR 0x91
73#define ODSR_CLEAR 0x1c00
74#define LTLEECSR_ENABLE_ALL 0xFFC000FC
75#define RIO_LTLEECSR 0x060c
76
77#define RIO_IM0SR 0x64
78#define RIO_IM1SR 0x164
79#define RIO_OM0SR 0x4
80#define RIO_OM1SR 0x104
81
82#define RIO_DBELL_WIN_SIZE 0x1000
83
84#define RIO_MSG_OMR_MUI 0x00000002
85#define RIO_MSG_OSR_TE 0x00000080
86#define RIO_MSG_OSR_QOI 0x00000020
87#define RIO_MSG_OSR_QFI 0x00000010
88#define RIO_MSG_OSR_MUB 0x00000004
89#define RIO_MSG_OSR_EOMI 0x00000002
90#define RIO_MSG_OSR_QEI 0x00000001
91
92#define RIO_MSG_IMR_MI 0x00000002
93#define RIO_MSG_ISR_TE 0x00000080
94#define RIO_MSG_ISR_QFI 0x00000010
95#define RIO_MSG_ISR_DIQI 0x00000001
96
97#define RIO_MSG_DESC_SIZE 32
98#define RIO_MSG_BUFFER_SIZE 4096
99
100#define DOORBELL_DMR_DI 0x00000002
101#define DOORBELL_DSR_TE 0x00000080
102#define DOORBELL_DSR_QFI 0x00000010
103#define DOORBELL_DSR_DIQI 0x00000001
104
105#define DOORBELL_MESSAGE_SIZE 0x08
106
107static DEFINE_SPINLOCK(fsl_rio_doorbell_lock);
108
109struct rio_msg_regs {
110 u32 omr;
111 u32 osr;
112 u32 pad1;
113 u32 odqdpar;
114 u32 pad2;
115 u32 osar;
116 u32 odpr;
117 u32 odatr;
118 u32 odcr;
119 u32 pad3;
120 u32 odqepar;
121 u32 pad4[13];
122 u32 imr;
123 u32 isr;
124 u32 pad5;
125 u32 ifqdpar;
126 u32 pad6;
127 u32 ifqepar;
128};
129
130struct rio_dbell_regs {
131 u32 odmr;
132 u32 odsr;
133 u32 pad1[4];
134 u32 oddpr;
135 u32 oddatr;
136 u32 pad2[3];
137 u32 odretcr;
138 u32 pad3[12];
139 u32 dmr;
140 u32 dsr;
141 u32 pad4;
142 u32 dqdpar;
143 u32 pad5;
144 u32 dqepar;
145};
146
147struct rio_pw_regs {
148 u32 pwmr;
149 u32 pwsr;
150 u32 epwqbar;
151 u32 pwqbar;
152};
153
154
155struct rio_tx_desc {
156 u32 pad1;
157 u32 saddr;
158 u32 dport;
159 u32 dattr;
160 u32 pad2;
161 u32 pad3;
162 u32 dwcnt;
163 u32 pad4;
164};
165
166struct rio_msg_tx_ring {
167 void *virt;
168 dma_addr_t phys;
169 void *virt_buffer[RIO_MAX_TX_RING_SIZE];
170 dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
171 int tx_slot;
172 int size;
173 void *dev_id;
174};
175
176struct rio_msg_rx_ring {
177 void *virt;
178 dma_addr_t phys;
179 void *virt_buffer[RIO_MAX_RX_RING_SIZE];
180 int rx_slot;
181 int size;
182 void *dev_id;
183};
184
185struct fsl_rmu {
186 struct rio_msg_regs __iomem *msg_regs;
187 struct rio_msg_tx_ring msg_tx_ring;
188 struct rio_msg_rx_ring msg_rx_ring;
189 int txirq;
190 int rxirq;
191};
192
193struct rio_dbell_msg {
194 u16 pad1;
195 u16 tid;
196 u16 sid;
197 u16 info;
198};
199
200/**
201 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
202 * @irq: Linux interrupt number
203 * @dev_instance: Pointer to interrupt-specific data
204 *
205 * Handles outbound message interrupts. Executes a register outbound
206 * mailbox event handler and acks the interrupt occurrence.
207 */
208static irqreturn_t
209fsl_rio_tx_handler(int irq, void *dev_instance)
210{
211 int osr;
212 struct rio_mport *port = (struct rio_mport *)dev_instance;
213 struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
214
215 osr = in_be32(&rmu->msg_regs->osr);
216
217 if (osr & RIO_MSG_OSR_TE) {
218 pr_info("RIO: outbound message transmission error\n");
219 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE);
220 goto out;
221 }
222
223 if (osr & RIO_MSG_OSR_QOI) {
224 pr_info("RIO: outbound message queue overflow\n");
225 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI);
226 goto out;
227 }
228
229 if (osr & RIO_MSG_OSR_EOMI) {
230 u32 dqp = in_be32(&rmu->msg_regs->odqdpar);
231 int slot = (dqp - rmu->msg_tx_ring.phys) >> 5;
232 if (port->outb_msg[0].mcback != NULL) {
233 port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id,
234 -1,
235 slot);
236 }
237 /* Ack the end-of-message interrupt */
238 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI);
239 }
240
241out:
242 return IRQ_HANDLED;
243}
244
245/**
246 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
247 * @irq: Linux interrupt number
248 * @dev_instance: Pointer to interrupt-specific data
249 *
250 * Handles inbound message interrupts. Executes a registered inbound
251 * mailbox event handler and acks the interrupt occurrence.
252 */
253static irqreturn_t
254fsl_rio_rx_handler(int irq, void *dev_instance)
255{
256 int isr;
257 struct rio_mport *port = (struct rio_mport *)dev_instance;
258 struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
259
260 isr = in_be32(&rmu->msg_regs->isr);
261
262 if (isr & RIO_MSG_ISR_TE) {
263 pr_info("RIO: inbound message reception error\n");
264 out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE);
265 goto out;
266 }
267
268 /* XXX Need to check/dispatch until queue empty */
269 if (isr & RIO_MSG_ISR_DIQI) {
270 /*
271 * Can receive messages for any mailbox/letter to that
272 * mailbox destination. So, make the callback with an
273 * unknown/invalid mailbox number argument.
274 */
275 if (port->inb_msg[0].mcback != NULL)
276 port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id,
277 -1,
278 -1);
279
280 /* Ack the queueing interrupt */
281 out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI);
282 }
283
284out:
285 return IRQ_HANDLED;
286}
287
288/**
289 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
290 * @irq: Linux interrupt number
291 * @dev_instance: Pointer to interrupt-specific data
292 *
293 * Handles doorbell interrupts. Parses a list of registered
294 * doorbell event handlers and executes a matching event handler.
295 */
296static irqreturn_t
297fsl_rio_dbell_handler(int irq, void *dev_instance)
298{
299 int dsr;
300 struct fsl_rio_dbell *fsl_dbell = (struct fsl_rio_dbell *)dev_instance;
301 int i;
302
303 dsr = in_be32(&fsl_dbell->dbell_regs->dsr);
304
305 if (dsr & DOORBELL_DSR_TE) {
306 pr_info("RIO: doorbell reception error\n");
307 out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_TE);
308 goto out;
309 }
310
311 if (dsr & DOORBELL_DSR_QFI) {
312 pr_info("RIO: doorbell queue full\n");
313 out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_QFI);
314 }
315
316 /* XXX Need to check/dispatch until queue empty */
317 if (dsr & DOORBELL_DSR_DIQI) {
318 struct rio_dbell_msg *dmsg =
319 fsl_dbell->dbell_ring.virt +
320 (in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff);
321 struct rio_dbell *dbell;
322 int found = 0;
323
324 pr_debug
325 ("RIO: processing doorbell,"
326 " sid %2.2x tid %2.2x info %4.4x\n",
327 dmsg->sid, dmsg->tid, dmsg->info);
328
329 for (i = 0; i < MAX_PORT_NUM; i++) {
330 if (fsl_dbell->mport[i]) {
331 list_for_each_entry(dbell,
332 &fsl_dbell->mport[i]->dbells, node) {
333 if ((dbell->res->start
334 <= dmsg->info)
335 && (dbell->res->end
336 >= dmsg->info)) {
337 found = 1;
338 break;
339 }
340 }
341 if (found && dbell->dinb) {
342 dbell->dinb(fsl_dbell->mport[i],
343 dbell->dev_id, dmsg->sid,
344 dmsg->tid,
345 dmsg->info);
346 break;
347 }
348 }
349 }
350
351 if (!found) {
352 pr_debug
353 ("RIO: spurious doorbell,"
354 " sid %2.2x tid %2.2x info %4.4x\n",
355 dmsg->sid, dmsg->tid,
356 dmsg->info);
357 }
358 setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI);
359 out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI);
360 }
361
362out:
363 return IRQ_HANDLED;
364}
365
366void msg_unit_error_handler(void)
367{
368
369 /*XXX: Error recovery is not implemented, we just clear errors */
370 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
371
372 out_be32((u32 *)(rmu_regs_win + RIO_IM0SR), IMSR_CLEAR);
373 out_be32((u32 *)(rmu_regs_win + RIO_IM1SR), IMSR_CLEAR);
374 out_be32((u32 *)(rmu_regs_win + RIO_OM0SR), OMSR_CLEAR);
375 out_be32((u32 *)(rmu_regs_win + RIO_OM1SR), OMSR_CLEAR);
376
377 out_be32(&dbell->dbell_regs->odsr, ODSR_CLEAR);
378 out_be32(&dbell->dbell_regs->dsr, IDSR_CLEAR);
379
380 out_be32(&pw->pw_regs->pwsr, IPWSR_CLEAR);
381}
382
383/**
384 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
385 * @irq: Linux interrupt number
386 * @dev_instance: Pointer to interrupt-specific data
387 *
388 * Handles port write interrupts. Parses a list of registered
389 * port write event handlers and executes a matching event handler.
390 */
391static irqreturn_t
392fsl_rio_port_write_handler(int irq, void *dev_instance)
393{
394 u32 ipwmr, ipwsr;
395 struct fsl_rio_pw *pw = (struct fsl_rio_pw *)dev_instance;
396 u32 epwisr, tmp;
397
398 epwisr = in_be32(rio_regs_win + RIO_EPWISR);
399 if (!(epwisr & RIO_EPWISR_PW))
400 goto pw_done;
401
402 ipwmr = in_be32(&pw->pw_regs->pwmr);
403 ipwsr = in_be32(&pw->pw_regs->pwsr);
404
405#ifdef DEBUG_PW
406 pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
407 if (ipwsr & RIO_IPWSR_QF)
408 pr_debug(" QF");
409 if (ipwsr & RIO_IPWSR_TE)
410 pr_debug(" TE");
411 if (ipwsr & RIO_IPWSR_QFI)
412 pr_debug(" QFI");
413 if (ipwsr & RIO_IPWSR_PWD)
414 pr_debug(" PWD");
415 if (ipwsr & RIO_IPWSR_PWB)
416 pr_debug(" PWB");
417 pr_debug(" )\n");
418#endif
419 /* Schedule deferred processing if PW was received */
420 if (ipwsr & RIO_IPWSR_QFI) {
421 /* Save PW message (if there is room in FIFO),
422 * otherwise discard it.
423 */
424 if (kfifo_avail(&pw->pw_fifo) >= RIO_PW_MSG_SIZE) {
425 pw->port_write_msg.msg_count++;
426 kfifo_in(&pw->pw_fifo, pw->port_write_msg.virt,
427 RIO_PW_MSG_SIZE);
428 } else {
429 pw->port_write_msg.discard_count++;
430 pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
431 pw->port_write_msg.discard_count);
432 }
433 /* Clear interrupt and issue Clear Queue command. This allows
434 * another port-write to be received.
435 */
436 out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_QFI);
437 out_be32(&pw->pw_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
438
439 schedule_work(&pw->pw_work);
440 }
441
442 if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
443 pw->port_write_msg.err_count++;
444 pr_debug("RIO: Port-Write Transaction Err (%d)\n",
445 pw->port_write_msg.err_count);
446 /* Clear Transaction Error: port-write controller should be
447 * disabled when clearing this error
448 */
449 out_be32(&pw->pw_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
450 out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_TE);
451 out_be32(&pw->pw_regs->pwmr, ipwmr);
452 }
453
454 if (ipwsr & RIO_IPWSR_PWD) {
455 pw->port_write_msg.discard_count++;
456 pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
457 pw->port_write_msg.discard_count);
458 out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_PWD);
459 }
460
461pw_done:
462 if (epwisr & RIO_EPWISR_PINT1) {
463 tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
464 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
465 fsl_rio_port_error_handler(0);
466 }
467
468 if (epwisr & RIO_EPWISR_PINT2) {
469 tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
470 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
471 fsl_rio_port_error_handler(1);
472 }
473
474 if (epwisr & RIO_EPWISR_MU) {
475 tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
476 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
477 msg_unit_error_handler();
478 }
479
480 return IRQ_HANDLED;
481}
482
483static void fsl_pw_dpc(struct work_struct *work)
484{
485 struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work);
486 union rio_pw_msg msg_buffer;
487 int i;
488
489 /*
490 * Process port-write messages
491 */
492 while (kfifo_out_spinlocked(&pw->pw_fifo, (unsigned char *)&msg_buffer,
493 RIO_PW_MSG_SIZE, &pw->pw_fifo_lock)) {
494#ifdef DEBUG_PW
495 {
496 u32 i;
497 pr_debug("%s : Port-Write Message:", __func__);
498 for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
499 if ((i%4) == 0)
500 pr_debug("\n0x%02x: 0x%08x", i*4,
501 msg_buffer.raw[i]);
502 else
503 pr_debug(" 0x%08x", msg_buffer.raw[i]);
504 }
505 pr_debug("\n");
506 }
507#endif
508 /* Pass the port-write message to RIO core for processing */
509 for (i = 0; i < MAX_PORT_NUM; i++) {
510 if (pw->mport[i])
511 rio_inb_pwrite_handler(pw->mport[i],
512 &msg_buffer);
513 }
514 }
515}
516
517/**
518 * fsl_rio_pw_enable - enable/disable port-write interface init
519 * @mport: Master port implementing the port write unit
520 * @enable: 1=enable; 0=disable port-write message handling
521 */
522int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
523{
524 u32 rval;
525
526 rval = in_be32(&pw->pw_regs->pwmr);
527
528 if (enable)
529 rval |= RIO_IPWMR_PWE;
530 else
531 rval &= ~RIO_IPWMR_PWE;
532
533 out_be32(&pw->pw_regs->pwmr, rval);
534
535 return 0;
536}
537
538/**
539 * fsl_rio_port_write_init - MPC85xx port write interface init
540 * @mport: Master port implementing the port write unit
541 *
542 * Initializes port write unit hardware and DMA buffer
543 * ring. Called from fsl_rio_setup(). Returns %0 on success
544 * or %-ENOMEM on failure.
545 */
546
547int fsl_rio_port_write_init(struct fsl_rio_pw *pw)
548{
549 int rc = 0;
550
551 /* Following configurations require a disabled port write controller */
552 out_be32(&pw->pw_regs->pwmr,
553 in_be32(&pw->pw_regs->pwmr) & ~RIO_IPWMR_PWE);
554
555 /* Initialize port write */
556 pw->port_write_msg.virt = dma_alloc_coherent(pw->dev,
557 RIO_PW_MSG_SIZE,
558 &pw->port_write_msg.phys, GFP_KERNEL);
559 if (!pw->port_write_msg.virt) {
560 pr_err("RIO: unable allocate port write queue\n");
561 return -ENOMEM;
562 }
563
564 pw->port_write_msg.err_count = 0;
565 pw->port_write_msg.discard_count = 0;
566
567 /* Point dequeue/enqueue pointers at first entry */
568 out_be32(&pw->pw_regs->epwqbar, 0);
569 out_be32(&pw->pw_regs->pwqbar, (u32) pw->port_write_msg.phys);
570
571 pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
572 in_be32(&pw->pw_regs->epwqbar),
573 in_be32(&pw->pw_regs->pwqbar));
574
575 /* Clear interrupt status IPWSR */
576 out_be32(&pw->pw_regs->pwsr,
577 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
578
579 /* Configure port write controller for snooping enable all reporting,
580 clear queue full */
581 out_be32(&pw->pw_regs->pwmr,
582 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
583
584
585 /* Hook up port-write handler */
586 rc = request_irq(IRQ_RIO_PW(pw), fsl_rio_port_write_handler,
587 IRQF_SHARED, "port-write", (void *)pw);
588 if (rc < 0) {
589 pr_err("MPC85xx RIO: unable to request inbound doorbell irq");
590 goto err_out;
591 }
592 /* Enable Error Interrupt */
593 out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
594
595 INIT_WORK(&pw->pw_work, fsl_pw_dpc);
596 spin_lock_init(&pw->pw_fifo_lock);
597 if (kfifo_alloc(&pw->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
598 pr_err("FIFO allocation failed\n");
599 rc = -ENOMEM;
600 goto err_out_irq;
601 }
602
603 pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
604 in_be32(&pw->pw_regs->pwmr),
605 in_be32(&pw->pw_regs->pwsr));
606
607 return rc;
608
609err_out_irq:
610 free_irq(IRQ_RIO_PW(pw), (void *)pw);
611err_out:
612 dma_free_coherent(pw->dev, RIO_PW_MSG_SIZE,
613 pw->port_write_msg.virt,
614 pw->port_write_msg.phys);
615 return rc;
616}
617
618/**
619 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
620 * @mport: RapidIO master port info
621 * @index: ID of RapidIO interface
622 * @destid: Destination ID of target device
623 * @data: 16-bit info field of RapidIO doorbell message
624 *
625 * Sends a MPC85xx doorbell message. Returns %0 on success or
626 * %-EINVAL on failure.
627 */
628int fsl_rio_doorbell_send(struct rio_mport *mport,
629 int index, u16 destid, u16 data)
630{
631 unsigned long flags;
632
633 pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
634 index, destid, data);
635
636 spin_lock_irqsave(&fsl_rio_doorbell_lock, flags);
637
638 /* In the serial version silicons, such as MPC8548, MPC8641,
639 * below operations is must be.
640 */
641 out_be32(&dbell->dbell_regs->odmr, 0x00000000);
642 out_be32(&dbell->dbell_regs->odretcr, 0x00000004);
643 out_be32(&dbell->dbell_regs->oddpr, destid << 16);
644 out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
645 out_be32(&dbell->dbell_regs->odmr, 0x00000001);
646
647 spin_unlock_irqrestore(&fsl_rio_doorbell_lock, flags);
648
649 return 0;
650}
651
652/**
653 * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
654 * @mport: Master port with outbound message queue
655 * @rdev: Target of outbound message
656 * @mbox: Outbound mailbox
657 * @buffer: Message to add to outbound queue
658 * @len: Length of message
659 *
660 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
661 * %0 on success or %-EINVAL on failure.
662 */
663int
664fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
665 void *buffer, size_t len)
666{
667 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
668 u32 omr;
669 struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt
670 + rmu->msg_tx_ring.tx_slot;
671 int ret = 0;
672
673 pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
674 "%p len %8.8zx\n", rdev->destid, mbox, buffer, len);
675 if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
676 ret = -EINVAL;
677 goto out;
678 }
679
680 /* Copy and clear rest of buffer */
681 memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer,
682 len);
683 if (len < (RIO_MAX_MSG_SIZE - 4))
684 memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot]
685 + len, 0, RIO_MAX_MSG_SIZE - len);
686
687 /* Set mbox field for message, and set destid */
688 desc->dport = (rdev->destid << 16) | (mbox & 0x3);
689
690 /* Enable EOMI interrupt and priority */
691 desc->dattr = 0x28000000 | ((mport->index) << 20);
692
693 /* Set transfer size aligned to next power of 2 (in double words) */
694 desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);
695
696 /* Set snooping and source buffer address */
697 desc->saddr = 0x00000004
698 | rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot];
699
700 /* Increment enqueue pointer */
701 omr = in_be32(&rmu->msg_regs->omr);
702 out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI);
703
704 /* Go to next descriptor */
705 if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size)
706 rmu->msg_tx_ring.tx_slot = 0;
707
708out:
709 return ret;
710}
711
712/**
713 * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
714 * @mport: Master port implementing the outbound message unit
715 * @dev_id: Device specific pointer to pass on event
716 * @mbox: Mailbox to open
717 * @entries: Number of entries in the outbound mailbox ring
718 *
719 * Initializes buffer ring, request the outbound message interrupt,
720 * and enables the outbound message unit. Returns %0 on success and
721 * %-EINVAL or %-ENOMEM on failure.
722 */
723int
724fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
725{
726 int i, j, rc = 0;
727 struct rio_priv *priv = mport->priv;
728 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
729
730 if ((entries < RIO_MIN_TX_RING_SIZE) ||
731 (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
732 rc = -EINVAL;
733 goto out;
734 }
735
736 /* Initialize shadow copy ring */
737 rmu->msg_tx_ring.dev_id = dev_id;
738 rmu->msg_tx_ring.size = entries;
739
740 for (i = 0; i < rmu->msg_tx_ring.size; i++) {
741 rmu->msg_tx_ring.virt_buffer[i] =
742 dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
743 &rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
744 if (!rmu->msg_tx_ring.virt_buffer[i]) {
745 rc = -ENOMEM;
746 for (j = 0; j < rmu->msg_tx_ring.size; j++)
747 if (rmu->msg_tx_ring.virt_buffer[j])
748 dma_free_coherent(priv->dev,
749 RIO_MSG_BUFFER_SIZE,
750 rmu->msg_tx_ring.
751 virt_buffer[j],
752 rmu->msg_tx_ring.
753 phys_buffer[j]);
754 goto out;
755 }
756 }
757
758 /* Initialize outbound message descriptor ring */
759 rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
760 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
761 &rmu->msg_tx_ring.phys, GFP_KERNEL);
762 if (!rmu->msg_tx_ring.virt) {
763 rc = -ENOMEM;
764 goto out_dma;
765 }
766 memset(rmu->msg_tx_ring.virt, 0,
767 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
768 rmu->msg_tx_ring.tx_slot = 0;
769
770 /* Point dequeue/enqueue pointers at first entry in ring */
771 out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys);
772 out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys);
773
774 /* Configure for snooping */
775 out_be32(&rmu->msg_regs->osar, 0x00000004);
776
777 /* Clear interrupt status */
778 out_be32(&rmu->msg_regs->osr, 0x000000b3);
779
780 /* Hook up outbound message handler */
781 rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
782 "msg_tx", (void *)mport);
783 if (rc < 0)
784 goto out_irq;
785
786 /*
787 * Configure outbound message unit
788 * Snooping
789 * Interrupts (all enabled, except QEIE)
790 * Chaining mode
791 * Disable
792 */
793 out_be32(&rmu->msg_regs->omr, 0x00100220);
794
795 /* Set number of entries */
796 out_be32(&rmu->msg_regs->omr,
797 in_be32(&rmu->msg_regs->omr) |
798 ((get_bitmask_order(entries) - 2) << 12));
799
800 /* Now enable the unit */
801 out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1);
802
803out:
804 return rc;
805
806out_irq:
807 dma_free_coherent(priv->dev,
808 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
809 rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
810
811out_dma:
812 for (i = 0; i < rmu->msg_tx_ring.size; i++)
813 dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
814 rmu->msg_tx_ring.virt_buffer[i],
815 rmu->msg_tx_ring.phys_buffer[i]);
816
817 return rc;
818}
819
820/**
821 * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
822 * @mport: Master port implementing the outbound message unit
823 * @mbox: Mailbox to close
824 *
825 * Disables the outbound message unit, free all buffers, and
826 * frees the outbound message interrupt.
827 */
828void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
829{
830 struct rio_priv *priv = mport->priv;
831 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
832
833 /* Disable inbound message unit */
834 out_be32(&rmu->msg_regs->omr, 0);
835
836 /* Free ring */
837 dma_free_coherent(priv->dev,
838 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
839 rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
840
841 /* Free interrupt */
842 free_irq(IRQ_RIO_TX(mport), (void *)mport);
843}

/**
 * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 *
 * Initializes the buffer ring, requests the inbound message interrupt,
 * and enables the inbound message unit. Returns %0 on success
 * and %-EINVAL or %-ENOMEM on failure.
 */
int
fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, rc = 0;
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	if ((entries < RIO_MIN_RX_RING_SIZE) ||
	    (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize client buffer ring */
	rmu->msg_rx_ring.dev_id = dev_id;
	rmu->msg_rx_ring.size = entries;
	rmu->msg_rx_ring.rx_slot = 0;
	for (i = 0; i < rmu->msg_rx_ring.size; i++)
		rmu->msg_rx_ring.virt_buffer[i] = NULL;

	/* Initialize inbound message ring */
	rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
				rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
				&rmu->msg_rx_ring.phys, GFP_KERNEL);
	if (!rmu->msg_rx_ring.virt) {
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys);
	out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys);

	/* Clear interrupt status */
	out_be32(&rmu->msg_regs->isr, 0x00000091);

	/* Hook up inbound message handler */
	rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
			 "msg_rx", (void *)mport);
	if (rc < 0) {
		dma_free_coherent(priv->dev,
			rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
			rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
		goto out;
	}

	/*
	 * Configure inbound message unit:
	 *      Snooping
	 *      4KB max message size
	 *      Unmask all interrupt sources
	 *      Disable
	 */
	out_be32(&rmu->msg_regs->imr, 0x001b0060);

	/* Set number of queue entries */
	setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);

	/* Now enable the unit */
	setbits32(&rmu->msg_regs->imr, 0x1);

out:
	return rc;
}
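
/*
 * Illustrative sketch (not built): opening the inbound mailbox through the
 * RapidIO core, which ends up in fsl_open_inb_mbox() above, and later
 * releasing it with rio_release_inb_mbox(), which ends up in
 * fsl_close_inb_mbox().  The callback name and ring size are assumptions
 * for the example.
 */
#if 0
static void example_inb_event(struct rio_mport *mport, void *dev_id,
			      int mbox, int slot)
{
	/* Typically schedules work that drains rio_get_inb_message(). */
}

static int example_open_inb(struct rio_mport *mport, void *dev_id)
{
	/* 128-entry ring on mailbox 0; must be a power of two, as checked above */
	return rio_request_inb_mbox(mport, dev_id, 0, 128, example_inb_event);
}
#endif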

/**
 * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the inbound message unit, frees the inbound message ring,
 * and frees the inbound message interrupt.
 */
void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	/* Disable inbound message unit */
	out_be32(&rmu->msg_regs->imr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
		rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_RX(mport), (void *)mport);
}

/**
 * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 *
 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
	int rc = 0;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
		rmu->msg_rx_ring.rx_slot);

	if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) {
		printk(KERN_ERR
			"RIO: error adding inbound buffer %d, buffer exists\n",
			rmu->msg_rx_ring.rx_slot);
		rc = -EINVAL;
		goto out;
	}

	rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf;
	if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size)
		rmu->msg_rx_ring.rx_slot = 0;

out:
	return rc;
}
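
/*
 * Illustrative sketch (not built): after opening the inbound mailbox, a
 * client pre-posts one receive buffer per ring slot; each buffer must be
 * at least RIO_MAX_MSG_SIZE bytes because fsl_get_inb_message() copies a
 * full maximum-size message into it.  Names are assumptions for the example.
 */
#if 0
static int example_post_rx_buffers(struct rio_mport *mport, int entries)
{
	int i, rc;

	for (i = 0; i < entries; i++) {
		void *buf = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		rc = rio_add_inb_buffer(mport, 0, buf);	/* mailbox 0 */
		if (rc) {
			kfree(buf);
			return rc;
		}
	}
	return 0;
}
#endif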

/**
 * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 *
 * Gets the next available inbound message from the inbound message queue.
 * A pointer to the message is returned on success or NULL on failure.
 */
void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
	u32 phys_buf;
	void *virt_buf;
	void *buf = NULL;
	int buf_idx;

	phys_buf = in_be32(&rmu->msg_regs->ifqdpar);

	/* If no more messages, then bail out */
	if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
		goto out2;

	virt_buf = rmu->msg_rx_ring.virt + (phys_buf
					- rmu->msg_rx_ring.phys);
	buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
	buf = rmu->msg_rx_ring.virt_buffer[buf_idx];

	if (!buf) {
		printk(KERN_ERR
			"RIO: inbound message copy failed, no buffers\n");
		goto out1;
	}

	/* Copy max message size, caller is expected to allocate that big */
	memcpy(buf, virt_buf, RIO_MAX_MSG_SIZE);

	/* Clear the available buffer */
	rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;

out1:
	setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI);

out2:
	return buf;
}
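
/*
 * Illustrative sketch (not built): draining the inbound queue from the
 * mailbox callback.  rio_get_inb_message() returns the client buffer that
 * fsl_get_inb_message() copied the payload into, or NULL once the queue is
 * empty; the buffer is then re-posted.  Names are assumptions for the
 * example.
 */
#if 0
static void example_drain_inb(struct rio_mport *mport, int mbox)
{
	void *buf;

	while ((buf = rio_get_inb_message(mport, mbox)) != NULL) {
		/* ... process up to RIO_MAX_MSG_SIZE bytes in buf ... */
		rio_add_inb_buffer(mport, mbox, buf);	/* recycle the buffer */
	}
}
#endif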

/**
 * fsl_rio_doorbell_init - MPC85xx doorbell interface init
 * @dbell: Inbound doorbell unit to initialize
 *
 * Initializes doorbell unit hardware and inbound DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or a negative error code on failure.
 */
int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell)
{
	int rc = 0;

	/* Initialize inbound doorbells */
	dbell->dbell_ring.virt = dma_alloc_coherent(dbell->dev, 512 *
		DOORBELL_MESSAGE_SIZE, &dbell->dbell_ring.phys, GFP_KERNEL);
	if (!dbell->dbell_ring.virt) {
		printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n");
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&dbell->dbell_regs->dqdpar, (u32) dbell->dbell_ring.phys);
	out_be32(&dbell->dbell_regs->dqepar, (u32) dbell->dbell_ring.phys);

	/* Clear interrupt status */
	out_be32(&dbell->dbell_regs->dsr, 0x00000091);

	/* Hook up doorbell handler */
	rc = request_irq(IRQ_RIO_BELL(dbell), fsl_rio_dbell_handler, 0,
			 "dbell_rx", (void *)dbell);
	if (rc < 0) {
		dma_free_coherent(dbell->dev, 512 * DOORBELL_MESSAGE_SIZE,
			dbell->dbell_ring.virt, dbell->dbell_ring.phys);
		printk(KERN_ERR
			"MPC85xx RIO: unable to request inbound doorbell irq\n");
		goto out;
	}

	/* Configure doorbells for snooping, 512 entries, and enable */
	out_be32(&dbell->dbell_regs->dmr, 0x00108161);

out:
	return rc;
}
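
/*
 * Illustrative sketch (not built): a client registers for a range of
 * inbound doorbell values through the RapidIO core; matching doorbells
 * received by the unit initialized above are delivered to the callback
 * from fsl_rio_dbell_handler().  The doorbell range and names below are
 * assumptions for the example.
 */
#if 0
static void example_dbell_event(struct rio_mport *mport, void *dev_id,
				u16 src, u16 dst, u16 info)
{
	/* info carries the 16-bit doorbell payload sent by the remote end */
}

static int example_request_dbell(struct rio_mport *mport, void *dev_id)
{
	return rio_request_inb_dbell(mport, dev_id, 0x0000, 0x000f,
				     example_dbell_event);
}
#endif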

int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
{
	struct rio_priv *priv;
	struct fsl_rmu *rmu;
	u64 msg_start;
	const u32 *msg_addr;
	int mlen;
	int aw;

	if (!mport || !mport->priv)
		return -EINVAL;

	priv = mport->priv;

	if (!node) {
		dev_warn(priv->dev, "Can't get %pOF property 'fsl,rmu'\n",
			priv->dev->of_node);
		return -EINVAL;
	}

	rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL);
	if (!rmu)
		return -ENOMEM;

	aw = of_n_addr_cells(node);
	msg_addr = of_get_property(node, "reg", &mlen);
	if (!msg_addr) {
		pr_err("%pOF: unable to find 'reg' property of message-unit\n",
			node);
		kfree(rmu);
		return -EINVAL;
	}
	msg_start = of_read_number(msg_addr, aw);

	rmu->msg_regs = (struct rio_msg_regs *)
			(rmu_regs_win + (u32)msg_start);

	rmu->txirq = irq_of_parse_and_map(node, 0);
	rmu->rxirq = irq_of_parse_and_map(node, 1);
	printk(KERN_INFO "%pOF: txirq: %d, rxirq %d\n",
		node, rmu->txirq, rmu->rxirq);

	priv->rmm_handle = rmu;

	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);

	return 0;
}