1/*
2 * Freescale MPC85xx/MPC86xx RapidIO RMU support
3 *
4 * Copyright 2009 Sysgo AG
5 * Thomas Moll <thomas.moll@sysgo.com>
6 * - fixed maintenance access routines, check for aligned access
7 *
8 * Copyright 2009 Integrated Device Technology, Inc.
9 * Alex Bounine <alexandre.bounine@idt.com>
10 * - Added Port-Write message handling
11 * - Added Machine Check exception handling
12 *
13 * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
14 * Zhang Wei <wei.zhang@freescale.com>
15 * Lian Minghuan-B31939 <Minghuan.Lian@freescale.com>
16 * Liu Gang <Gang.Liu@freescale.com>
17 *
18 * Copyright 2005 MontaVista Software, Inc.
19 * Matt Porter <mporter@kernel.crashing.org>
20 *
21 * This program is free software; you can redistribute it and/or modify it
22 * under the terms of the GNU General Public License as published by the
23 * Free Software Foundation; either version 2 of the License, or (at your
24 * option) any later version.
25 */
26
27#include <linux/types.h>
28#include <linux/dma-mapping.h>
29#include <linux/interrupt.h>
30#include <linux/of_irq.h>
31#include <linux/of_platform.h>
32#include <linux/slab.h>
33
34#include "fsl_rio.h"
35
36#define GET_RMM_HANDLE(mport) \
37 (((struct rio_priv *)(mport->priv))->rmm_handle)
38
39/* RapidIO IRQ definitions, read from the OF device tree */
40#define IRQ_RIO_PW(m) (((struct fsl_rio_pw *)(m))->pwirq)
41#define IRQ_RIO_BELL(m) (((struct fsl_rio_dbell *)(m))->bellirq)
42#define IRQ_RIO_TX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->txirq)
43#define IRQ_RIO_RX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->rxirq)
44
45#define RIO_MIN_TX_RING_SIZE 2
46#define RIO_MAX_TX_RING_SIZE 2048
47#define RIO_MIN_RX_RING_SIZE 2
48#define RIO_MAX_RX_RING_SIZE 2048
49
50#define RIO_IPWMR_SEN 0x00100000
51#define RIO_IPWMR_QFIE 0x00000100
52#define RIO_IPWMR_EIE 0x00000020
53#define RIO_IPWMR_CQ 0x00000002
54#define RIO_IPWMR_PWE 0x00000001
55
56#define RIO_IPWSR_QF 0x00100000
57#define RIO_IPWSR_TE 0x00000080
58#define RIO_IPWSR_QFI 0x00000010
59#define RIO_IPWSR_PWD 0x00000008
60#define RIO_IPWSR_PWB 0x00000004
61
62#define RIO_EPWISR 0x10010
63/* EPWISR Error match value */
64#define RIO_EPWISR_PINT1 0x80000000
65#define RIO_EPWISR_PINT2 0x40000000
66#define RIO_EPWISR_MU 0x00000002
67#define RIO_EPWISR_PW 0x00000001
68
69#define IPWSR_CLEAR 0x98
70#define OMSR_CLEAR 0x1cb3
71#define IMSR_CLEAR 0x491
72#define IDSR_CLEAR 0x91
73#define ODSR_CLEAR 0x1c00
74#define LTLEECSR_ENABLE_ALL 0xFFC000FC
75#define RIO_LTLEECSR 0x060c
76
77#define RIO_IM0SR 0x64
78#define RIO_IM1SR 0x164
79#define RIO_OM0SR 0x4
80#define RIO_OM1SR 0x104
81
82#define RIO_DBELL_WIN_SIZE 0x1000
83
84#define RIO_MSG_OMR_MUI 0x00000002
85#define RIO_MSG_OSR_TE 0x00000080
86#define RIO_MSG_OSR_QOI 0x00000020
87#define RIO_MSG_OSR_QFI 0x00000010
88#define RIO_MSG_OSR_MUB 0x00000004
89#define RIO_MSG_OSR_EOMI 0x00000002
90#define RIO_MSG_OSR_QEI 0x00000001
91
92#define RIO_MSG_IMR_MI 0x00000002
93#define RIO_MSG_ISR_TE 0x00000080
94#define RIO_MSG_ISR_QFI 0x00000010
95#define RIO_MSG_ISR_DIQI 0x00000001
96
97#define RIO_MSG_DESC_SIZE 32
98#define RIO_MSG_BUFFER_SIZE 4096
99
100#define DOORBELL_DMR_DI 0x00000002
101#define DOORBELL_DSR_TE 0x00000080
102#define DOORBELL_DSR_QFI 0x00000010
103#define DOORBELL_DSR_DIQI 0x00000001
104
105#define DOORBELL_MESSAGE_SIZE 0x08
106
static DEFINE_SPINLOCK(fsl_rio_doorbell_lock);

107struct rio_msg_regs {
108 u32 omr;
109 u32 osr;
110 u32 pad1;
111 u32 odqdpar;
112 u32 pad2;
113 u32 osar;
114 u32 odpr;
115 u32 odatr;
116 u32 odcr;
117 u32 pad3;
118 u32 odqepar;
119 u32 pad4[13];
120 u32 imr;
121 u32 isr;
122 u32 pad5;
123 u32 ifqdpar;
124 u32 pad6;
125 u32 ifqepar;
126};
127
128struct rio_dbell_regs {
129 u32 odmr;
130 u32 odsr;
131 u32 pad1[4];
132 u32 oddpr;
133 u32 oddatr;
134 u32 pad2[3];
135 u32 odretcr;
136 u32 pad3[12];
137 u32 dmr;
138 u32 dsr;
139 u32 pad4;
140 u32 dqdpar;
141 u32 pad5;
142 u32 dqepar;
143};
144
145struct rio_pw_regs {
146 u32 pwmr;
147 u32 pwsr;
148 u32 epwqbar;
149 u32 pwqbar;
150};
151
152
153struct rio_tx_desc {
154 u32 pad1;
155 u32 saddr;
156 u32 dport;
157 u32 dattr;
158 u32 pad2;
159 u32 pad3;
160 u32 dwcnt;
161 u32 pad4;
162};
163
164struct rio_msg_tx_ring {
165 void *virt;
166 dma_addr_t phys;
167 void *virt_buffer[RIO_MAX_TX_RING_SIZE];
168 dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
169 int tx_slot;
170 int size;
171 void *dev_id;
172};
173
174struct rio_msg_rx_ring {
175 void *virt;
176 dma_addr_t phys;
177 void *virt_buffer[RIO_MAX_RX_RING_SIZE];
178 int rx_slot;
179 int size;
180 void *dev_id;
181};
182
183struct fsl_rmu {
184 struct rio_msg_regs __iomem *msg_regs;
185 struct rio_msg_tx_ring msg_tx_ring;
186 struct rio_msg_rx_ring msg_rx_ring;
187 int txirq;
188 int rxirq;
189};
190
191struct rio_dbell_msg {
192 u16 pad1;
193 u16 tid;
194 u16 sid;
195 u16 info;
196};
197
198/**
199 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
200 * @irq: Linux interrupt number
201 * @dev_instance: Pointer to interrupt-specific data
202 *
203 * Handles outbound message interrupts. Executes a registered outbound
204 * mailbox event handler and acks the interrupt occurrence.
205 */
206static irqreturn_t
207fsl_rio_tx_handler(int irq, void *dev_instance)
208{
209 int osr;
210 struct rio_mport *port = (struct rio_mport *)dev_instance;
211 struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
212
213 osr = in_be32(&rmu->msg_regs->osr);
214
215 if (osr & RIO_MSG_OSR_TE) {
216 pr_info("RIO: outbound message transmission error\n");
217 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE);
218 goto out;
219 }
220
221 if (osr & RIO_MSG_OSR_QOI) {
222 pr_info("RIO: outbound message queue overflow\n");
223 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI);
224 goto out;
225 }
226
227 if (osr & RIO_MSG_OSR_EOMI) {
228 u32 dqp = in_be32(&rmu->msg_regs->odqdpar);
229 int slot = (dqp - rmu->msg_tx_ring.phys) >> 5;
230 if (port->outb_msg[0].mcback != NULL) {
231 port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id,
232 -1,
233 slot);
234 }
235 /* Ack the end-of-message interrupt */
236 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI);
237 }
238
239out:
240 return IRQ_HANDLED;
241}
242
243/**
244 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
245 * @irq: Linux interrupt number
246 * @dev_instance: Pointer to interrupt-specific data
247 *
248 * Handles inbound message interrupts. Executes a registered inbound
249 * mailbox event handler and acks the interrupt occurrence.
250 */
251static irqreturn_t
252fsl_rio_rx_handler(int irq, void *dev_instance)
253{
254 int isr;
255 struct rio_mport *port = (struct rio_mport *)dev_instance;
256 struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
257
258 isr = in_be32(&rmu->msg_regs->isr);
259
260 if (isr & RIO_MSG_ISR_TE) {
261 pr_info("RIO: inbound message reception error\n");
262 out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE);
263 goto out;
264 }
265
266 /* XXX Need to check/dispatch until queue empty */
267 if (isr & RIO_MSG_ISR_DIQI) {
268 /*
269 * Can receive messages for any mailbox/letter to that
270 * mailbox destination. So, make the callback with an
271 * unknown/invalid mailbox number argument.
272 */
273 if (port->inb_msg[0].mcback != NULL)
274 port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id,
275 -1,
276 -1);
277
278 /* Ack the queueing interrupt */
279 out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI);
280 }
281
282out:
283 return IRQ_HANDLED;
284}
285
286/**
287 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
288 * @irq: Linux interrupt number
289 * @dev_instance: Pointer to interrupt-specific data
290 *
291 * Handles doorbell interrupts. Parses a list of registered
292 * doorbell event handlers and executes a matching event handler.
293 */
294static irqreturn_t
295fsl_rio_dbell_handler(int irq, void *dev_instance)
296{
297 int dsr;
298 struct fsl_rio_dbell *fsl_dbell = (struct fsl_rio_dbell *)dev_instance;
299 int i;
300
301 dsr = in_be32(&fsl_dbell->dbell_regs->dsr);
302
303 if (dsr & DOORBELL_DSR_TE) {
304 pr_info("RIO: doorbell reception error\n");
305 out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_TE);
306 goto out;
307 }
308
309 if (dsr & DOORBELL_DSR_QFI) {
310 pr_info("RIO: doorbell queue full\n");
311 out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_QFI);
312 }
313
314 /* XXX Need to check/dispatch until queue empty */
315 if (dsr & DOORBELL_DSR_DIQI) {
316 struct rio_dbell_msg *dmsg =
317 fsl_dbell->dbell_ring.virt +
318 (in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff);
319 struct rio_dbell *dbell;
320 int found = 0;
321
322 pr_debug
323 ("RIO: processing doorbell,"
324 " sid %2.2x tid %2.2x info %4.4x\n",
325 dmsg->sid, dmsg->tid, dmsg->info);
326
327 for (i = 0; i < MAX_PORT_NUM; i++) {
328 if (fsl_dbell->mport[i]) {
329 list_for_each_entry(dbell,
330 &fsl_dbell->mport[i]->dbells, node) {
331 if ((dbell->res->start
332 <= dmsg->info)
333 && (dbell->res->end
334 >= dmsg->info)) {
335 found = 1;
336 break;
337 }
338 }
339 if (found && dbell->dinb) {
340 dbell->dinb(fsl_dbell->mport[i],
341 dbell->dev_id, dmsg->sid,
342 dmsg->tid,
343 dmsg->info);
344 break;
345 }
346 }
347 }
348
349 if (!found) {
350 pr_debug
351 ("RIO: spurious doorbell,"
352 " sid %2.2x tid %2.2x info %4.4x\n",
353 dmsg->sid, dmsg->tid,
354 dmsg->info);
355 }
356 setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI);
357 out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI);
358 }
359
360out:
361 return IRQ_HANDLED;
362}
363
364void msg_unit_error_handler(void)
365{
366
367 /* XXX: Error recovery is not implemented, we just clear errors */
368 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
369
370 out_be32((u32 *)(rmu_regs_win + RIO_IM0SR), IMSR_CLEAR);
371 out_be32((u32 *)(rmu_regs_win + RIO_IM1SR), IMSR_CLEAR);
372 out_be32((u32 *)(rmu_regs_win + RIO_OM0SR), OMSR_CLEAR);
373 out_be32((u32 *)(rmu_regs_win + RIO_OM1SR), OMSR_CLEAR);
374
375 out_be32(&dbell->dbell_regs->odsr, ODSR_CLEAR);
376 out_be32(&dbell->dbell_regs->dsr, IDSR_CLEAR);
377
378 out_be32(&pw->pw_regs->pwsr, IPWSR_CLEAR);
379}
380
381/**
382 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
383 * @irq: Linux interrupt number
384 * @dev_instance: Pointer to interrupt-specific data
385 *
386 * Handles port write interrupts. Parses a list of registered
387 * port write event handlers and executes a matching event handler.
388 */
389static irqreturn_t
390fsl_rio_port_write_handler(int irq, void *dev_instance)
391{
392 u32 ipwmr, ipwsr;
393 struct fsl_rio_pw *pw = (struct fsl_rio_pw *)dev_instance;
394 u32 epwisr, tmp;
395
396 epwisr = in_be32(rio_regs_win + RIO_EPWISR);
397 if (!(epwisr & RIO_EPWISR_PW))
398 goto pw_done;
399
400 ipwmr = in_be32(&pw->pw_regs->pwmr);
401 ipwsr = in_be32(&pw->pw_regs->pwsr);
402
403#ifdef DEBUG_PW
404 pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
405 if (ipwsr & RIO_IPWSR_QF)
406 pr_debug(" QF");
407 if (ipwsr & RIO_IPWSR_TE)
408 pr_debug(" TE");
409 if (ipwsr & RIO_IPWSR_QFI)
410 pr_debug(" QFI");
411 if (ipwsr & RIO_IPWSR_PWD)
412 pr_debug(" PWD");
413 if (ipwsr & RIO_IPWSR_PWB)
414 pr_debug(" PWB");
415 pr_debug(" )\n");
416#endif
417 /* Schedule deferred processing if PW was received */
418 if (ipwsr & RIO_IPWSR_QFI) {
419 /* Save PW message (if there is room in FIFO),
420 * otherwise discard it.
421 */
422 if (kfifo_avail(&pw->pw_fifo) >= RIO_PW_MSG_SIZE) {
423 pw->port_write_msg.msg_count++;
424 kfifo_in(&pw->pw_fifo, pw->port_write_msg.virt,
425 RIO_PW_MSG_SIZE);
426 } else {
427 pw->port_write_msg.discard_count++;
428 pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
429 pw->port_write_msg.discard_count);
430 }
431 /* Clear interrupt and issue Clear Queue command. This allows
432 * another port-write to be received.
433 */
434 out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_QFI);
435 out_be32(&pw->pw_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
436
437 schedule_work(&pw->pw_work);
438 }
439
440 if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
441 pw->port_write_msg.err_count++;
442 pr_debug("RIO: Port-Write Transaction Err (%d)\n",
443 pw->port_write_msg.err_count);
444 /* Clear Transaction Error: port-write controller should be
445 * disabled when clearing this error
446 */
447 out_be32(&pw->pw_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
448 out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_TE);
449 out_be32(&pw->pw_regs->pwmr, ipwmr);
450 }
451
452 if (ipwsr & RIO_IPWSR_PWD) {
453 pw->port_write_msg.discard_count++;
454 pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
455 pw->port_write_msg.discard_count);
456 out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_PWD);
457 }
458
459pw_done:
460 if (epwisr & RIO_EPWISR_PINT1) {
461 tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
462 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
463 fsl_rio_port_error_handler(0);
464 }
465
466 if (epwisr & RIO_EPWISR_PINT2) {
467 tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
468 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
469 fsl_rio_port_error_handler(1);
470 }
471
472 if (epwisr & RIO_EPWISR_MU) {
473 tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
474 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
475 msg_unit_error_handler();
476 }
477
478 return IRQ_HANDLED;
479}
480
481static void fsl_pw_dpc(struct work_struct *work)
482{
483 struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work);
484 union rio_pw_msg msg_buffer;
485 int i;
486
487 /*
488 * Process port-write messages
489 */
490 while (kfifo_out_spinlocked(&pw->pw_fifo, (unsigned char *)&msg_buffer,
491 RIO_PW_MSG_SIZE, &pw->pw_fifo_lock)) {
492#ifdef DEBUG_PW
493 {
494 u32 i;
495 pr_debug("%s : Port-Write Message:", __func__);
496 for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
497 if ((i%4) == 0)
498 pr_debug("\n0x%02x: 0x%08x", i*4,
499 msg_buffer.raw[i]);
500 else
501 pr_debug(" 0x%08x", msg_buffer.raw[i]);
502 }
503 pr_debug("\n");
504 }
505#endif
506 /* Pass the port-write message to RIO core for processing */
507 for (i = 0; i < MAX_PORT_NUM; i++) {
508 if (pw->mport[i])
509 rio_inb_pwrite_handler(pw->mport[i],
510 &msg_buffer);
511 }
512 }
513}
514
515/**
516 * fsl_rio_pw_enable - enable/disable port-write message handling
517 * @mport: Master port implementing the port write unit
518 * @enable: 1=enable; 0=disable port-write message handling
519 */
520int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
521{
522 u32 rval;
523
524 rval = in_be32(&pw->pw_regs->pwmr);
525
526 if (enable)
527 rval |= RIO_IPWMR_PWE;
528 else
529 rval &= ~RIO_IPWMR_PWE;
530
531 out_be32(&pw->pw_regs->pwmr, rval);
532
533 return 0;
534}
535
536/**
537 * fsl_rio_port_write_init - MPC85xx port write interface init
538 * @pw: fsl_rio_pw structure for the port write unit
539 *
540 * Initializes port write unit hardware and DMA buffer
541 * ring. Called from fsl_rio_setup(). Returns %0 on success
542 * or %-ENOMEM on failure.
543 */
545int fsl_rio_port_write_init(struct fsl_rio_pw *pw)
546{
547 int rc = 0;
548
549 /* Following configurations require a disabled port write controller */
550 out_be32(&pw->pw_regs->pwmr,
551 in_be32(&pw->pw_regs->pwmr) & ~RIO_IPWMR_PWE);
552
553 /* Initialize port write */
554 pw->port_write_msg.virt = dma_alloc_coherent(pw->dev,
555 RIO_PW_MSG_SIZE,
556 &pw->port_write_msg.phys, GFP_KERNEL);
557 if (!pw->port_write_msg.virt) {
558 pr_err("RIO: unable to allocate port write queue\n");
559 return -ENOMEM;
560 }
561
562 pw->port_write_msg.err_count = 0;
563 pw->port_write_msg.discard_count = 0;
564
565 /* Point dequeue/enqueue pointers at first entry */
566 out_be32(&pw->pw_regs->epwqbar, 0);
567 out_be32(&pw->pw_regs->pwqbar, (u32) pw->port_write_msg.phys);
568
569 pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
570 in_be32(&pw->pw_regs->epwqbar),
571 in_be32(&pw->pw_regs->pwqbar));
572
573 /* Clear interrupt status IPWSR */
574 out_be32(&pw->pw_regs->pwsr,
575 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
576
577 /* Configure port write controller: enable snooping, queue-full and
578 error interrupts, and issue clear queue */
579 out_be32(&pw->pw_regs->pwmr,
580 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
581
582
583 /* Hook up port-write handler */
584 rc = request_irq(IRQ_RIO_PW(pw), fsl_rio_port_write_handler,
585 IRQF_SHARED, "port-write", (void *)pw);
586 if (rc < 0) {
587 pr_err("MPC85xx RIO: unable to request port-write irq");
588 goto err_out;
589 }
590 /* Enable Error Interrupt */
591 out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
592
593 INIT_WORK(&pw->pw_work, fsl_pw_dpc);
594 spin_lock_init(&pw->pw_fifo_lock);
595 if (kfifo_alloc(&pw->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
596 pr_err("FIFO allocation failed\n");
597 rc = -ENOMEM;
598 goto err_out_irq;
599 }
600
601 pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
602 in_be32(&pw->pw_regs->pwmr),
603 in_be32(&pw->pw_regs->pwsr));
604
605 return rc;
606
607err_out_irq:
608 free_irq(IRQ_RIO_PW(pw), (void *)pw);
609err_out:
610 dma_free_coherent(pw->dev, RIO_PW_MSG_SIZE,
611 pw->port_write_msg.virt,
612 pw->port_write_msg.phys);
613 return rc;
614}
615
616/**
617 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
618 * @mport: RapidIO master port info
619 * @index: ID of RapidIO interface
620 * @destid: Destination ID of target device
621 * @data: 16-bit info field of RapidIO doorbell message
622 *
623 * Sends a MPC85xx doorbell message. Returns %0 on success or
624 * %-EINVAL on failure.
625 */
int fsl_rio_doorbell_send(struct rio_mport *mport,
				int index, u16 destid, u16 data)
{
	unsigned long flags;

	pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
		 index, destid, data);

	spin_lock_irqsave(&fsl_rio_doorbell_lock, flags);

	/* On serial RapidIO silicon, such as the MPC8548 and MPC8641,
	 * the following sequence of operations is required.
	 */
	out_be32(&dbell->dbell_regs->odmr, 0x00000000);
	out_be32(&dbell->dbell_regs->odretcr, 0x00000004);
	out_be32(&dbell->dbell_regs->oddpr, destid << 16);
	out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
	out_be32(&dbell->dbell_regs->odmr, 0x00000001);

	spin_unlock_irqrestore(&fsl_rio_doorbell_lock, flags);

	return 0;
}
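
/*
 * Illustrative sketch (not part of this driver): a client driver would
 * normally reach fsl_rio_doorbell_send() through the generic RapidIO
 * doorbell helpers declared in <linux/rio_drv.h>, which dispatch to the
 * mport's dsend op. The example_* names and the 0x1000-0x10ff info range
 * below are hypothetical.
 */
#if 0
static void example_dbell_cb(struct rio_mport *mport, void *dev_id,
		u16 src, u16 dst, u16 info)
{
	pr_info("doorbell from destid %4.4x: info %4.4x\n", src, info);
}

static int example_doorbell_usage(struct rio_dev *rdev)
{
	int rc;

	/* Catch inbound doorbells carrying info values 0x1000..0x10ff */
	rc = rio_request_inb_dbell(rdev->net->hport, NULL, 0x1000, 0x10ff,
			example_dbell_cb);
	if (rc)
		return rc;

	/* Ring the remote device; ends up in fsl_rio_doorbell_send() */
	return rio_send_doorbell(rdev, 0x1000);
}
#endif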
643
644/**
645 * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
646 * @mport: Master port with outbound message queue
647 * @rdev: Target of outbound message
648 * @mbox: Outbound mailbox
649 * @buffer: Message to add to outbound queue
650 * @len: Length of message
651 *
652 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
653 * %0 on success or %-EINVAL on failure.
654 */
655int
656fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
657 void *buffer, size_t len)
658{
659 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
660 u32 omr;
661 struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt
662 + rmu->msg_tx_ring.tx_slot;
663 int ret = 0;
664
665 pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
666 "%p len %8.8zx\n", rdev->destid, mbox, buffer, len);
667 if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
668 ret = -EINVAL;
669 goto out;
670 }
671
672 /* Copy and clear rest of buffer */
673 memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer,
674 len);
675 if (len < (RIO_MAX_MSG_SIZE - 4))
676 memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot]
677 + len, 0, RIO_MAX_MSG_SIZE - len);
678
679 /* Set mbox field for message, and set destid */
680 desc->dport = (rdev->destid << 16) | (mbox & 0x3);
681
682 /* Enable EOMI interrupt and priority */
683 desc->dattr = 0x28000000 | ((mport->index) << 20);
684
685 /* Set transfer size aligned to next power of 2 (in double words) */
686 desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);
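	/*
	 * Example of the rounding above: an 80-byte message is not a power
	 * of two, so get_bitmask_order(80) = 7 and dwcnt becomes 128.
	 */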
687
688 /* Set snooping and source buffer address */
689 desc->saddr = 0x00000004
690 | rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot];
691
692 /* Increment enqueue pointer */
693 omr = in_be32(&rmu->msg_regs->omr);
694 out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI);
695
696 /* Go to next descriptor */
697 if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size)
698 rmu->msg_tx_ring.tx_slot = 0;
699
700out:
701 return ret;
702}
703
704/**
705 * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
706 * @mport: Master port implementing the outbound message unit
707 * @dev_id: Device specific pointer to pass on event
708 * @mbox: Mailbox to open
709 * @entries: Number of entries in the outbound mailbox ring
710 *
711 * Initializes the buffer ring, requests the outbound message interrupt,
712 * and enables the outbound message unit. Returns %0 on success and
713 * %-EINVAL or %-ENOMEM on failure.
714 */
715int
716fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
717{
718 int i, j, rc = 0;
719 struct rio_priv *priv = mport->priv;
720 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
721
722 if ((entries < RIO_MIN_TX_RING_SIZE) ||
723 (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
724 rc = -EINVAL;
725 goto out;
726 }
727
728 /* Initialize shadow copy ring */
729 rmu->msg_tx_ring.dev_id = dev_id;
730 rmu->msg_tx_ring.size = entries;
731
732 for (i = 0; i < rmu->msg_tx_ring.size; i++) {
733 rmu->msg_tx_ring.virt_buffer[i] =
734 dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
735 &rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
736 if (!rmu->msg_tx_ring.virt_buffer[i]) {
737 rc = -ENOMEM;
738 for (j = 0; j < rmu->msg_tx_ring.size; j++)
739 if (rmu->msg_tx_ring.virt_buffer[j])
740 dma_free_coherent(priv->dev,
741 RIO_MSG_BUFFER_SIZE,
742 rmu->msg_tx_ring.
743 virt_buffer[j],
744 rmu->msg_tx_ring.
745 phys_buffer[j]);
746 goto out;
747 }
748 }
749
750 /* Initialize outbound message descriptor ring */
751 rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
752 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
753 &rmu->msg_tx_ring.phys, GFP_KERNEL);
754 if (!rmu->msg_tx_ring.virt) {
755 rc = -ENOMEM;
756 goto out_dma;
757 }
758 memset(rmu->msg_tx_ring.virt, 0,
759 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
760 rmu->msg_tx_ring.tx_slot = 0;
761
762 /* Point dequeue/enqueue pointers at first entry in ring */
763 out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys);
764 out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys);
765
766 /* Configure for snooping */
767 out_be32(&rmu->msg_regs->osar, 0x00000004);
768
769 /* Clear interrupt status */
770 out_be32(&rmu->msg_regs->osr, 0x000000b3);
771
772 /* Hook up outbound message handler */
773 rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
774 "msg_tx", (void *)mport);
775 if (rc < 0)
776 goto out_irq;
777
778 /*
779 * Configure outbound message unit
780 * Snooping
781 * Interrupts (all enabled, except QEIE)
782 * Chaining mode
783 * Disable
784 */
785 out_be32(&rmu->msg_regs->omr, 0x00100220);
786
787 /* Set number of entries */
788 out_be32(&rmu->msg_regs->omr,
789 in_be32(&rmu->msg_regs->omr) |
790 ((get_bitmask_order(entries) - 2) << 12));
791
792 /* Now enable the unit */
793 out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1);
794
795out:
796 return rc;
797
798out_irq:
799 dma_free_coherent(priv->dev,
800 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
801 rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
802
803out_dma:
804 for (i = 0; i < rmu->msg_tx_ring.size; i++)
805 dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
806 rmu->msg_tx_ring.virt_buffer[i],
807 rmu->msg_tx_ring.phys_buffer[i]);
808
809 return rc;
810}
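
/*
 * Illustrative sketch (not part of this driver): fsl_open_outb_mbox() and
 * fsl_add_outb_message() are normally reached through the generic mailbox
 * helpers from <linux/rio_drv.h>. The example_* names, the 32-entry ring
 * and the payload below are hypothetical.
 */
#if 0
static void example_tx_done(struct rio_mport *mport, void *dev_id,
		int mbox, int slot)
{
	/* Called from fsl_rio_tx_handler() when descriptor @slot completes */
}

static int example_outb_usage(struct rio_dev *rdev, void *payload, size_t len)
{
	struct rio_mport *mport = rdev->net->hport;
	int rc;

	/* 32-entry ring (power of two); ends up in fsl_open_outb_mbox() */
	rc = rio_request_outb_mbox(mport, NULL, 0, 32, example_tx_done);
	if (rc)
		return rc;

	/* Queue one message on mailbox 0; ends up in fsl_add_outb_message() */
	return rio_add_outb_message(mport, rdev, 0, payload, len);
}
#endif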
811
812/**
813 * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
814 * @mport: Master port implementing the outbound message unit
815 * @mbox: Mailbox to close
816 *
817 * Disables the outbound message unit, frees all buffers, and
818 * frees the outbound message interrupt.
819 */
820void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
821{
822 struct rio_priv *priv = mport->priv;
823 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
824
825 /* Disable outbound message unit */
826 out_be32(&rmu->msg_regs->omr, 0);
827
828 /* Free ring */
829 dma_free_coherent(priv->dev,
830 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
831 rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
832
833 /* Free interrupt */
834 free_irq(IRQ_RIO_TX(mport), (void *)mport);
835}
836
837/**
838 * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
839 * @mport: Master port implementing the inbound message unit
840 * @dev_id: Device specific pointer to pass on event
841 * @mbox: Mailbox to open
842 * @entries: Number of entries in the inbound mailbox ring
843 *
844 * Initializes the buffer ring, requests the inbound message interrupt,
845 * and enables the inbound message unit. Returns %0 on success
846 * and %-EINVAL or %-ENOMEM on failure.
847 */
848int
849fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
850{
851 int i, rc = 0;
852 struct rio_priv *priv = mport->priv;
853 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
854
855 if ((entries < RIO_MIN_RX_RING_SIZE) ||
856 (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
857 rc = -EINVAL;
858 goto out;
859 }
860
861 /* Initialize client buffer ring */
862 rmu->msg_rx_ring.dev_id = dev_id;
863 rmu->msg_rx_ring.size = entries;
864 rmu->msg_rx_ring.rx_slot = 0;
865 for (i = 0; i < rmu->msg_rx_ring.size; i++)
866 rmu->msg_rx_ring.virt_buffer[i] = NULL;
867
868 /* Initialize inbound message ring */
869 rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
870 rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
871 &rmu->msg_rx_ring.phys, GFP_KERNEL);
872 if (!rmu->msg_rx_ring.virt) {
873 rc = -ENOMEM;
874 goto out;
875 }
876
877 /* Point dequeue/enqueue pointers at first entry in ring */
878 out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys);
879 out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys);
880
881 /* Clear interrupt status */
882 out_be32(&rmu->msg_regs->isr, 0x00000091);
883
884 /* Hook up inbound message handler */
885 rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
886 "msg_rx", (void *)mport);
887 if (rc < 0) {
888 dma_free_coherent(priv->dev,
889 rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
890 rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
891 goto out;
892 }
893
894 /*
895 * Configure inbound message unit:
896 * Snooping
897 * 4KB max message size
898 * Unmask all interrupt sources
899 * Disable
900 */
901 out_be32(&rmu->msg_regs->imr, 0x001b0060);
902
903 /* Set number of queue entries */
904 setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);
905
906 /* Now enable the unit */
907 setbits32(&rmu->msg_regs->imr, 0x1);
908
909out:
910 return rc;
911}
912
913/**
914 * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
915 * @mport: Master port implementing the inbound message unit
916 * @mbox: Mailbox to close
917 *
918 * Disables the inbound message unit, frees all buffers, and
919 * frees the inbound message interrupt.
920 */
921void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
922{
923 struct rio_priv *priv = mport->priv;
924 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
925
926 /* Disable inbound message unit */
927 out_be32(&rmu->msg_regs->imr, 0);
928
929 /* Free ring */
930 dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
931 rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
932
933 /* Free interrupt */
934 free_irq(IRQ_RIO_RX(mport), (void *)mport);
935}
936
937/**
938 * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
939 * @mport: Master port implementing the inbound message unit
940 * @mbox: Inbound mailbox number
941 * @buf: Buffer to add to inbound queue
942 *
943 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
944 * %0 on success or %-EINVAL on failure.
945 */
946int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
947{
948 int rc = 0;
949 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
950
951 pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
952 rmu->msg_rx_ring.rx_slot);
953
954 if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) {
955 printk(KERN_ERR
956 "RIO: error adding inbound buffer %d, buffer exists\n",
957 rmu->msg_rx_ring.rx_slot);
958 rc = -EINVAL;
959 goto out;
960 }
961
962 rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf;
963 if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size)
964 rmu->msg_rx_ring.rx_slot = 0;
965
966out:
967 return rc;
968}
969
970/**
971 * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
972 * @mport: Master port implementing the inbound message unit
973 * @mbox: Inbound mailbox number
974 *
975 * Gets the next available inbound message from the inbound message queue.
976 * A pointer to the message is returned on success or NULL on failure.
977 */
978void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
979{
980 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
981 u32 phys_buf;
982 void *virt_buf;
983 void *buf = NULL;
984 int buf_idx;
985
986 phys_buf = in_be32(&rmu->msg_regs->ifqdpar);
987
988 /* If no more messages, then bail out */
989 if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
990 goto out2;
991
992 virt_buf = rmu->msg_rx_ring.virt + (phys_buf
993 - rmu->msg_rx_ring.phys);
994 buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
995 buf = rmu->msg_rx_ring.virt_buffer[buf_idx];
996
997 if (!buf) {
998 printk(KERN_ERR
999 "RIO: inbound message copy failed, no buffers\n");
1000 goto out1;
1001 }
1002
1003 /* Copy max message size, caller is expected to allocate that big */
1004 memcpy(buf, virt_buf, RIO_MAX_MSG_SIZE);
1005
1006 /* Clear the available buffer */
1007 rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;
1008
1009out1:
1010 setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI);
1011
1012out2:
1013 return buf;
1014}
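
/*
 * Illustrative sketch (not part of this driver): the inbound mailbox is
 * normally driven through the generic helpers in <linux/rio_drv.h>; the
 * callback runs from fsl_rio_rx_handler() and drains the queue with
 * rio_get_inb_message(). The example_* names and sizes are hypothetical.
 */
#if 0
static void example_rx_cb(struct rio_mport *mport, void *dev_id,
		int mbox, int slot)
{
	void *msg;

	/* fsl_rio_rx_handler() passes mbox = -1, so use the known mailbox 0 */
	while ((msg = rio_get_inb_message(mport, 0)) != NULL)
		rio_add_inb_buffer(mport, 0, msg);	/* consume, then recycle */
}

static int example_inb_usage(struct rio_mport *mport, void **bufs, int n)
{
	int i, rc;

	rc = rio_request_inb_mbox(mport, NULL, 0, 32, example_rx_cb);
	if (rc)
		return rc;

	/* Prime the ring with client buffers of at least RIO_MAX_MSG_SIZE */
	for (i = 0; i < n; i++)
		rio_add_inb_buffer(mport, 0, bufs[i]);
	return 0;
}
#endif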
1015
1016/**
1017 * fsl_rio_doorbell_init - MPC85xx doorbell interface init
1018 * @dbell: fsl_rio_dbell structure for the inbound doorbell unit
1019 *
1020 * Initializes doorbell unit hardware and inbound DMA buffer
1021 * ring. Called from fsl_rio_setup(). Returns %0 on success
1022 * or %-ENOMEM on failure.
1023 */
1024int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell)
1025{
1026 int rc = 0;
1027
1028 /* Initialize inbound doorbells */
1029 dbell->dbell_ring.virt = dma_alloc_coherent(dbell->dev, 512 *
1030 DOORBELL_MESSAGE_SIZE, &dbell->dbell_ring.phys, GFP_KERNEL);
1031 if (!dbell->dbell_ring.virt) {
1032 printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n");
1033 rc = -ENOMEM;
1034 goto out;
1035 }
1036
1037 /* Point dequeue/enqueue pointers at first entry in ring */
1038 out_be32(&dbell->dbell_regs->dqdpar, (u32) dbell->dbell_ring.phys);
1039 out_be32(&dbell->dbell_regs->dqepar, (u32) dbell->dbell_ring.phys);
1040
1041 /* Clear interrupt status */
1042 out_be32(&dbell->dbell_regs->dsr, 0x00000091);
1043
1044 /* Hook up doorbell handler */
1045 rc = request_irq(IRQ_RIO_BELL(dbell), fsl_rio_dbell_handler, 0,
1046 "dbell_rx", (void *)dbell);
1047 if (rc < 0) {
1048 dma_free_coherent(dbell->dev, 512 * DOORBELL_MESSAGE_SIZE,
1049 dbell->dbell_ring.virt, dbell->dbell_ring.phys);
1050 printk(KERN_ERR
1051 "MPC85xx RIO: unable to request inbound doorbell irq");
1052 goto out;
1053 }
1054
1055 /* Configure doorbells for snooping, 512 entries, and enable */
1056 out_be32(&dbell->dbell_regs->dmr, 0x00108161);
1057
1058out:
1059 return rc;
1060}
1061
1062int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
1063{
1064 struct rio_priv *priv;
1065 struct fsl_rmu *rmu;
1066 u64 msg_start;
1067 const u32 *msg_addr;
1068 int mlen;
1069 int aw;
1070
1071 if (!mport || !mport->priv)
1072 return -EINVAL;
1073
1074 priv = mport->priv;
1075
1076 if (!node) {
1077 dev_warn(priv->dev, "Can't get %s property 'fsl,rmu'\n",
1078 priv->dev->of_node->full_name);
1079 return -EINVAL;
1080 }
1081
1082 rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL);
1083 if (!rmu)
1084 return -ENOMEM;
1085
1086 aw = of_n_addr_cells(node);
1087 msg_addr = of_get_property(node, "reg", &mlen);
1088 if (!msg_addr) {
1089 pr_err("%s: unable to find 'reg' property of message-unit\n",
1090 node->full_name);
1091 kfree(rmu);
1092 return -ENOMEM;
1093 }
1094 msg_start = of_read_number(msg_addr, aw);
1095
1096 rmu->msg_regs = (struct rio_msg_regs *)
1097 (rmu_regs_win + (u32)msg_start);
1098
1099 rmu->txirq = irq_of_parse_and_map(node, 0);
1100 rmu->rxirq = irq_of_parse_and_map(node, 1);
1101 printk(KERN_INFO "%s: txirq: %d, rxirq %d\n",
1102 node->full_name, rmu->txirq, rmu->rxirq);
1103
1104 priv->rmm_handle = rmu;
1105
1106 rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
1107 rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
1108 rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
1109
1110 return 0;
1111}
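
/*
 * Illustrative sketch (not part of this file): fsl_rio.c wires the entry
 * points above into the mport's struct rio_ops, roughly as below. Field
 * names follow include/linux/rio.h; the actual table lives in fsl_rio.c.
 */
#if 0
static struct rio_ops example_fsl_rio_ops = {
	.dsend			= fsl_rio_doorbell_send,
	.pwenable		= fsl_rio_pw_enable,
	.open_outb_mbox		= fsl_open_outb_mbox,
	.close_outb_mbox	= fsl_close_outb_mbox,
	.open_inb_mbox		= fsl_open_inb_mbox,
	.close_inb_mbox		= fsl_close_inb_mbox,
	.add_outb_message	= fsl_add_outb_message,
	.add_inb_buffer		= fsl_add_inb_buffer,
	.get_inb_message	= fsl_get_inb_message,
};
#endif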
1/*
2 * Freescale MPC85xx/MPC86xx RapidIO RMU support
3 *
4 * Copyright 2009 Sysgo AG
5 * Thomas Moll <thomas.moll@sysgo.com>
6 * - fixed maintenance access routines, check for aligned access
7 *
8 * Copyright 2009 Integrated Device Technology, Inc.
9 * Alex Bounine <alexandre.bounine@idt.com>
10 * - Added Port-Write message handling
11 * - Added Machine Check exception handling
12 *
13 * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
14 * Zhang Wei <wei.zhang@freescale.com>
15 * Lian Minghuan-B31939 <Minghuan.Lian@freescale.com>
16 * Liu Gang <Gang.Liu@freescale.com>
17 *
18 * Copyright 2005 MontaVista Software, Inc.
19 * Matt Porter <mporter@kernel.crashing.org>
20 *
21 * This program is free software; you can redistribute it and/or modify it
22 * under the terms of the GNU General Public License as published by the
23 * Free Software Foundation; either version 2 of the License, or (at your
24 * option) any later version.
25 */
26
27#include <linux/types.h>
28#include <linux/dma-mapping.h>
29#include <linux/interrupt.h>
30#include <linux/of_irq.h>
31#include <linux/of_platform.h>
32#include <linux/slab.h>
33
34#include "fsl_rio.h"
35
36#define GET_RMM_HANDLE(mport) \
37 (((struct rio_priv *)(mport->priv))->rmm_handle)
38
39/* RapidIO definition irq, which read from OF-tree */
40#define IRQ_RIO_PW(m) (((struct fsl_rio_pw *)(m))->pwirq)
41#define IRQ_RIO_BELL(m) (((struct fsl_rio_dbell *)(m))->bellirq)
42#define IRQ_RIO_TX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->txirq)
43#define IRQ_RIO_RX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->rxirq)
44
45#define RIO_MIN_TX_RING_SIZE 2
46#define RIO_MAX_TX_RING_SIZE 2048
47#define RIO_MIN_RX_RING_SIZE 2
48#define RIO_MAX_RX_RING_SIZE 2048
49
50#define RIO_IPWMR_SEN 0x00100000
51#define RIO_IPWMR_QFIE 0x00000100
52#define RIO_IPWMR_EIE 0x00000020
53#define RIO_IPWMR_CQ 0x00000002
54#define RIO_IPWMR_PWE 0x00000001
55
56#define RIO_IPWSR_QF 0x00100000
57#define RIO_IPWSR_TE 0x00000080
58#define RIO_IPWSR_QFI 0x00000010
59#define RIO_IPWSR_PWD 0x00000008
60#define RIO_IPWSR_PWB 0x00000004
61
62#define RIO_EPWISR 0x10010
63/* EPWISR Error match value */
64#define RIO_EPWISR_PINT1 0x80000000
65#define RIO_EPWISR_PINT2 0x40000000
66#define RIO_EPWISR_MU 0x00000002
67#define RIO_EPWISR_PW 0x00000001
68
69#define IPWSR_CLEAR 0x98
70#define OMSR_CLEAR 0x1cb3
71#define IMSR_CLEAR 0x491
72#define IDSR_CLEAR 0x91
73#define ODSR_CLEAR 0x1c00
74#define LTLEECSR_ENABLE_ALL 0xFFC000FC
75#define RIO_LTLEECSR 0x060c
76
77#define RIO_IM0SR 0x64
78#define RIO_IM1SR 0x164
79#define RIO_OM0SR 0x4
80#define RIO_OM1SR 0x104
81
82#define RIO_DBELL_WIN_SIZE 0x1000
83
84#define RIO_MSG_OMR_MUI 0x00000002
85#define RIO_MSG_OSR_TE 0x00000080
86#define RIO_MSG_OSR_QOI 0x00000020
87#define RIO_MSG_OSR_QFI 0x00000010
88#define RIO_MSG_OSR_MUB 0x00000004
89#define RIO_MSG_OSR_EOMI 0x00000002
90#define RIO_MSG_OSR_QEI 0x00000001
91
92#define RIO_MSG_IMR_MI 0x00000002
93#define RIO_MSG_ISR_TE 0x00000080
94#define RIO_MSG_ISR_QFI 0x00000010
95#define RIO_MSG_ISR_DIQI 0x00000001
96
97#define RIO_MSG_DESC_SIZE 32
98#define RIO_MSG_BUFFER_SIZE 4096
99
100#define DOORBELL_DMR_DI 0x00000002
101#define DOORBELL_DSR_TE 0x00000080
102#define DOORBELL_DSR_QFI 0x00000010
103#define DOORBELL_DSR_DIQI 0x00000001
104
105#define DOORBELL_MESSAGE_SIZE 0x08
106
107static DEFINE_SPINLOCK(fsl_rio_doorbell_lock);
108
109struct rio_msg_regs {
110 u32 omr;
111 u32 osr;
112 u32 pad1;
113 u32 odqdpar;
114 u32 pad2;
115 u32 osar;
116 u32 odpr;
117 u32 odatr;
118 u32 odcr;
119 u32 pad3;
120 u32 odqepar;
121 u32 pad4[13];
122 u32 imr;
123 u32 isr;
124 u32 pad5;
125 u32 ifqdpar;
126 u32 pad6;
127 u32 ifqepar;
128};
129
130struct rio_dbell_regs {
131 u32 odmr;
132 u32 odsr;
133 u32 pad1[4];
134 u32 oddpr;
135 u32 oddatr;
136 u32 pad2[3];
137 u32 odretcr;
138 u32 pad3[12];
139 u32 dmr;
140 u32 dsr;
141 u32 pad4;
142 u32 dqdpar;
143 u32 pad5;
144 u32 dqepar;
145};
146
147struct rio_pw_regs {
148 u32 pwmr;
149 u32 pwsr;
150 u32 epwqbar;
151 u32 pwqbar;
152};
153
154
155struct rio_tx_desc {
156 u32 pad1;
157 u32 saddr;
158 u32 dport;
159 u32 dattr;
160 u32 pad2;
161 u32 pad3;
162 u32 dwcnt;
163 u32 pad4;
164};
165
166struct rio_msg_tx_ring {
167 void *virt;
168 dma_addr_t phys;
169 void *virt_buffer[RIO_MAX_TX_RING_SIZE];
170 dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
171 int tx_slot;
172 int size;
173 void *dev_id;
174};
175
176struct rio_msg_rx_ring {
177 void *virt;
178 dma_addr_t phys;
179 void *virt_buffer[RIO_MAX_RX_RING_SIZE];
180 int rx_slot;
181 int size;
182 void *dev_id;
183};
184
185struct fsl_rmu {
186 struct rio_msg_regs __iomem *msg_regs;
187 struct rio_msg_tx_ring msg_tx_ring;
188 struct rio_msg_rx_ring msg_rx_ring;
189 int txirq;
190 int rxirq;
191};
192
193struct rio_dbell_msg {
194 u16 pad1;
195 u16 tid;
196 u16 sid;
197 u16 info;
198};
199
200/**
201 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
202 * @irq: Linux interrupt number
203 * @dev_instance: Pointer to interrupt-specific data
204 *
205 * Handles outbound message interrupts. Executes a register outbound
206 * mailbox event handler and acks the interrupt occurrence.
207 */
208static irqreturn_t
209fsl_rio_tx_handler(int irq, void *dev_instance)
210{
211 int osr;
212 struct rio_mport *port = (struct rio_mport *)dev_instance;
213 struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
214
215 osr = in_be32(&rmu->msg_regs->osr);
216
217 if (osr & RIO_MSG_OSR_TE) {
218 pr_info("RIO: outbound message transmission error\n");
219 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE);
220 goto out;
221 }
222
223 if (osr & RIO_MSG_OSR_QOI) {
224 pr_info("RIO: outbound message queue overflow\n");
225 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI);
226 goto out;
227 }
228
229 if (osr & RIO_MSG_OSR_EOMI) {
230 u32 dqp = in_be32(&rmu->msg_regs->odqdpar);
231 int slot = (dqp - rmu->msg_tx_ring.phys) >> 5;
232 if (port->outb_msg[0].mcback != NULL) {
233 port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id,
234 -1,
235 slot);
236 }
237 /* Ack the end-of-message interrupt */
238 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI);
239 }
240
241out:
242 return IRQ_HANDLED;
243}
244
245/**
246 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
247 * @irq: Linux interrupt number
248 * @dev_instance: Pointer to interrupt-specific data
249 *
250 * Handles inbound message interrupts. Executes a registered inbound
251 * mailbox event handler and acks the interrupt occurrence.
252 */
253static irqreturn_t
254fsl_rio_rx_handler(int irq, void *dev_instance)
255{
256 int isr;
257 struct rio_mport *port = (struct rio_mport *)dev_instance;
258 struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
259
260 isr = in_be32(&rmu->msg_regs->isr);
261
262 if (isr & RIO_MSG_ISR_TE) {
263 pr_info("RIO: inbound message reception error\n");
264 out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE);
265 goto out;
266 }
267
268 /* XXX Need to check/dispatch until queue empty */
269 if (isr & RIO_MSG_ISR_DIQI) {
270 /*
271 * Can receive messages for any mailbox/letter to that
272 * mailbox destination. So, make the callback with an
273 * unknown/invalid mailbox number argument.
274 */
275 if (port->inb_msg[0].mcback != NULL)
276 port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id,
277 -1,
278 -1);
279
280 /* Ack the queueing interrupt */
281 out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI);
282 }
283
284out:
285 return IRQ_HANDLED;
286}
287
288/**
289 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
290 * @irq: Linux interrupt number
291 * @dev_instance: Pointer to interrupt-specific data
292 *
293 * Handles doorbell interrupts. Parses a list of registered
294 * doorbell event handlers and executes a matching event handler.
295 */
296static irqreturn_t
297fsl_rio_dbell_handler(int irq, void *dev_instance)
298{
299 int dsr;
300 struct fsl_rio_dbell *fsl_dbell = (struct fsl_rio_dbell *)dev_instance;
301 int i;
302
303 dsr = in_be32(&fsl_dbell->dbell_regs->dsr);
304
305 if (dsr & DOORBELL_DSR_TE) {
306 pr_info("RIO: doorbell reception error\n");
307 out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_TE);
308 goto out;
309 }
310
311 if (dsr & DOORBELL_DSR_QFI) {
312 pr_info("RIO: doorbell queue full\n");
313 out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_QFI);
314 }
315
316 /* XXX Need to check/dispatch until queue empty */
317 if (dsr & DOORBELL_DSR_DIQI) {
318 struct rio_dbell_msg *dmsg =
319 fsl_dbell->dbell_ring.virt +
320 (in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff);
321 struct rio_dbell *dbell;
322 int found = 0;
323
324 pr_debug
325 ("RIO: processing doorbell,"
326 " sid %2.2x tid %2.2x info %4.4x\n",
327 dmsg->sid, dmsg->tid, dmsg->info);
328
329 for (i = 0; i < MAX_PORT_NUM; i++) {
330 if (fsl_dbell->mport[i]) {
331 list_for_each_entry(dbell,
332 &fsl_dbell->mport[i]->dbells, node) {
333 if ((dbell->res->start
334 <= dmsg->info)
335 && (dbell->res->end
336 >= dmsg->info)) {
337 found = 1;
338 break;
339 }
340 }
341 if (found && dbell->dinb) {
342 dbell->dinb(fsl_dbell->mport[i],
343 dbell->dev_id, dmsg->sid,
344 dmsg->tid,
345 dmsg->info);
346 break;
347 }
348 }
349 }
350
351 if (!found) {
352 pr_debug
353 ("RIO: spurious doorbell,"
354 " sid %2.2x tid %2.2x info %4.4x\n",
355 dmsg->sid, dmsg->tid,
356 dmsg->info);
357 }
358 setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI);
359 out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI);
360 }
361
362out:
363 return IRQ_HANDLED;
364}
365
366void msg_unit_error_handler(void)
367{
368
369 /*XXX: Error recovery is not implemented, we just clear errors */
370 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
371
372 out_be32((u32 *)(rmu_regs_win + RIO_IM0SR), IMSR_CLEAR);
373 out_be32((u32 *)(rmu_regs_win + RIO_IM1SR), IMSR_CLEAR);
374 out_be32((u32 *)(rmu_regs_win + RIO_OM0SR), OMSR_CLEAR);
375 out_be32((u32 *)(rmu_regs_win + RIO_OM1SR), OMSR_CLEAR);
376
377 out_be32(&dbell->dbell_regs->odsr, ODSR_CLEAR);
378 out_be32(&dbell->dbell_regs->dsr, IDSR_CLEAR);
379
380 out_be32(&pw->pw_regs->pwsr, IPWSR_CLEAR);
381}
382
383/**
384 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
385 * @irq: Linux interrupt number
386 * @dev_instance: Pointer to interrupt-specific data
387 *
388 * Handles port write interrupts. Parses a list of registered
389 * port write event handlers and executes a matching event handler.
390 */
391static irqreturn_t
392fsl_rio_port_write_handler(int irq, void *dev_instance)
393{
394 u32 ipwmr, ipwsr;
395 struct fsl_rio_pw *pw = (struct fsl_rio_pw *)dev_instance;
396 u32 epwisr, tmp;
397
398 epwisr = in_be32(rio_regs_win + RIO_EPWISR);
399 if (!(epwisr & RIO_EPWISR_PW))
400 goto pw_done;
401
402 ipwmr = in_be32(&pw->pw_regs->pwmr);
403 ipwsr = in_be32(&pw->pw_regs->pwsr);
404
405#ifdef DEBUG_PW
406 pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
407 if (ipwsr & RIO_IPWSR_QF)
408 pr_debug(" QF");
409 if (ipwsr & RIO_IPWSR_TE)
410 pr_debug(" TE");
411 if (ipwsr & RIO_IPWSR_QFI)
412 pr_debug(" QFI");
413 if (ipwsr & RIO_IPWSR_PWD)
414 pr_debug(" PWD");
415 if (ipwsr & RIO_IPWSR_PWB)
416 pr_debug(" PWB");
417 pr_debug(" )\n");
418#endif
419 /* Schedule deferred processing if PW was received */
420 if (ipwsr & RIO_IPWSR_QFI) {
421 /* Save PW message (if there is room in FIFO),
422 * otherwise discard it.
423 */
424 if (kfifo_avail(&pw->pw_fifo) >= RIO_PW_MSG_SIZE) {
425 pw->port_write_msg.msg_count++;
426 kfifo_in(&pw->pw_fifo, pw->port_write_msg.virt,
427 RIO_PW_MSG_SIZE);
428 } else {
429 pw->port_write_msg.discard_count++;
430 pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
431 pw->port_write_msg.discard_count);
432 }
433 /* Clear interrupt and issue Clear Queue command. This allows
434 * another port-write to be received.
435 */
436 out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_QFI);
437 out_be32(&pw->pw_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
438
439 schedule_work(&pw->pw_work);
440 }
441
442 if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
443 pw->port_write_msg.err_count++;
444 pr_debug("RIO: Port-Write Transaction Err (%d)\n",
445 pw->port_write_msg.err_count);
446 /* Clear Transaction Error: port-write controller should be
447 * disabled when clearing this error
448 */
449 out_be32(&pw->pw_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
450 out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_TE);
451 out_be32(&pw->pw_regs->pwmr, ipwmr);
452 }
453
454 if (ipwsr & RIO_IPWSR_PWD) {
455 pw->port_write_msg.discard_count++;
456 pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
457 pw->port_write_msg.discard_count);
458 out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_PWD);
459 }
460
461pw_done:
462 if (epwisr & RIO_EPWISR_PINT1) {
463 tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
464 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
465 fsl_rio_port_error_handler(0);
466 }
467
468 if (epwisr & RIO_EPWISR_PINT2) {
469 tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
470 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
471 fsl_rio_port_error_handler(1);
472 }
473
474 if (epwisr & RIO_EPWISR_MU) {
475 tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
476 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
477 msg_unit_error_handler();
478 }
479
480 return IRQ_HANDLED;
481}
482
483static void fsl_pw_dpc(struct work_struct *work)
484{
485 struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work);
486 union rio_pw_msg msg_buffer;
487 int i;
488
489 /*
490 * Process port-write messages
491 */
492 while (kfifo_out_spinlocked(&pw->pw_fifo, (unsigned char *)&msg_buffer,
493 RIO_PW_MSG_SIZE, &pw->pw_fifo_lock)) {
494#ifdef DEBUG_PW
495 {
496 u32 i;
497 pr_debug("%s : Port-Write Message:", __func__);
498 for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
499 if ((i%4) == 0)
500 pr_debug("\n0x%02x: 0x%08x", i*4,
501 msg_buffer.raw[i]);
502 else
503 pr_debug(" 0x%08x", msg_buffer.raw[i]);
504 }
505 pr_debug("\n");
506 }
507#endif
508 /* Pass the port-write message to RIO core for processing */
509 for (i = 0; i < MAX_PORT_NUM; i++) {
510 if (pw->mport[i])
511 rio_inb_pwrite_handler(pw->mport[i],
512 &msg_buffer);
513 }
514 }
515}
516
517/**
518 * fsl_rio_pw_enable - enable/disable port-write interface init
519 * @mport: Master port implementing the port write unit
520 * @enable: 1=enable; 0=disable port-write message handling
521 */
522int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
523{
524 u32 rval;
525
526 rval = in_be32(&pw->pw_regs->pwmr);
527
528 if (enable)
529 rval |= RIO_IPWMR_PWE;
530 else
531 rval &= ~RIO_IPWMR_PWE;
532
533 out_be32(&pw->pw_regs->pwmr, rval);
534
535 return 0;
536}
537
538/**
539 * fsl_rio_port_write_init - MPC85xx port write interface init
540 * @mport: Master port implementing the port write unit
541 *
542 * Initializes port write unit hardware and DMA buffer
543 * ring. Called from fsl_rio_setup(). Returns %0 on success
544 * or %-ENOMEM on failure.
545 */
546
547int fsl_rio_port_write_init(struct fsl_rio_pw *pw)
548{
549 int rc = 0;
550
551 /* Following configurations require a disabled port write controller */
552 out_be32(&pw->pw_regs->pwmr,
553 in_be32(&pw->pw_regs->pwmr) & ~RIO_IPWMR_PWE);
554
555 /* Initialize port write */
556 pw->port_write_msg.virt = dma_alloc_coherent(pw->dev,
557 RIO_PW_MSG_SIZE,
558 &pw->port_write_msg.phys, GFP_KERNEL);
559 if (!pw->port_write_msg.virt) {
560 pr_err("RIO: unable allocate port write queue\n");
561 return -ENOMEM;
562 }
563
564 pw->port_write_msg.err_count = 0;
565 pw->port_write_msg.discard_count = 0;
566
567 /* Point dequeue/enqueue pointers at first entry */
568 out_be32(&pw->pw_regs->epwqbar, 0);
569 out_be32(&pw->pw_regs->pwqbar, (u32) pw->port_write_msg.phys);
570
571 pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
572 in_be32(&pw->pw_regs->epwqbar),
573 in_be32(&pw->pw_regs->pwqbar));
574
575 /* Clear interrupt status IPWSR */
576 out_be32(&pw->pw_regs->pwsr,
577 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
578
579 /* Configure port write controller for snooping enable all reporting,
580 clear queue full */
581 out_be32(&pw->pw_regs->pwmr,
582 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
583
584
585 /* Hook up port-write handler */
586 rc = request_irq(IRQ_RIO_PW(pw), fsl_rio_port_write_handler,
587 IRQF_SHARED, "port-write", (void *)pw);
588 if (rc < 0) {
589 pr_err("MPC85xx RIO: unable to request inbound doorbell irq");
590 goto err_out;
591 }
592 /* Enable Error Interrupt */
593 out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
594
595 INIT_WORK(&pw->pw_work, fsl_pw_dpc);
596 spin_lock_init(&pw->pw_fifo_lock);
597 if (kfifo_alloc(&pw->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
598 pr_err("FIFO allocation failed\n");
599 rc = -ENOMEM;
600 goto err_out_irq;
601 }
602
603 pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
604 in_be32(&pw->pw_regs->pwmr),
605 in_be32(&pw->pw_regs->pwsr));
606
607 return rc;
608
609err_out_irq:
610 free_irq(IRQ_RIO_PW(pw), (void *)pw);
611err_out:
612 dma_free_coherent(pw->dev, RIO_PW_MSG_SIZE,
613 pw->port_write_msg.virt,
614 pw->port_write_msg.phys);
615 return rc;
616}
617
618/**
619 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
620 * @mport: RapidIO master port info
621 * @index: ID of RapidIO interface
622 * @destid: Destination ID of target device
623 * @data: 16-bit info field of RapidIO doorbell message
624 *
625 * Sends a MPC85xx doorbell message. Returns %0 on success or
626 * %-EINVAL on failure.
627 */
628int fsl_rio_doorbell_send(struct rio_mport *mport,
629 int index, u16 destid, u16 data)
630{
631 unsigned long flags;
632
633 pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
634 index, destid, data);
635
636 spin_lock_irqsave(&fsl_rio_doorbell_lock, flags);
637
638 /* In the serial version silicons, such as MPC8548, MPC8641,
639 * below operations is must be.
640 */
641 out_be32(&dbell->dbell_regs->odmr, 0x00000000);
642 out_be32(&dbell->dbell_regs->odretcr, 0x00000004);
643 out_be32(&dbell->dbell_regs->oddpr, destid << 16);
644 out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
645 out_be32(&dbell->dbell_regs->odmr, 0x00000001);
646
647 spin_unlock_irqrestore(&fsl_rio_doorbell_lock, flags);
648
649 return 0;
650}
651
652/**
653 * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
654 * @mport: Master port with outbound message queue
655 * @rdev: Target of outbound message
656 * @mbox: Outbound mailbox
657 * @buffer: Message to add to outbound queue
658 * @len: Length of message
659 *
660 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
661 * %0 on success or %-EINVAL on failure.
662 */
663int
664fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
665 void *buffer, size_t len)
666{
667 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
668 u32 omr;
669 struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt
670 + rmu->msg_tx_ring.tx_slot;
671 int ret = 0;
672
673 pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
674 "%p len %8.8zx\n", rdev->destid, mbox, buffer, len);
675 if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
676 ret = -EINVAL;
677 goto out;
678 }
679
680 /* Copy and clear rest of buffer */
681 memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer,
682 len);
683 if (len < (RIO_MAX_MSG_SIZE - 4))
684 memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot]
685 + len, 0, RIO_MAX_MSG_SIZE - len);
686
687 /* Set mbox field for message, and set destid */
688 desc->dport = (rdev->destid << 16) | (mbox & 0x3);
689
690 /* Enable EOMI interrupt and priority */
691 desc->dattr = 0x28000000 | ((mport->index) << 20);
692
693 /* Set transfer size aligned to next power of 2 (in double words) */
694 desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);
695
696 /* Set snooping and source buffer address */
697 desc->saddr = 0x00000004
698 | rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot];
699
700 /* Increment enqueue pointer */
701 omr = in_be32(&rmu->msg_regs->omr);
702 out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI);
703
704 /* Go to next descriptor */
705 if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size)
706 rmu->msg_tx_ring.tx_slot = 0;
707
708out:
709 return ret;
710}
711
712/**
713 * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
714 * @mport: Master port implementing the outbound message unit
715 * @dev_id: Device specific pointer to pass on event
716 * @mbox: Mailbox to open
717 * @entries: Number of entries in the outbound mailbox ring
718 *
719 * Initializes buffer ring, request the outbound message interrupt,
720 * and enables the outbound message unit. Returns %0 on success and
721 * %-EINVAL or %-ENOMEM on failure.
722 */
723int
724fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
725{
726 int i, j, rc = 0;
727 struct rio_priv *priv = mport->priv;
728 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
729
730 if ((entries < RIO_MIN_TX_RING_SIZE) ||
731 (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
732 rc = -EINVAL;
733 goto out;
734 }
735
736 /* Initialize shadow copy ring */
737 rmu->msg_tx_ring.dev_id = dev_id;
738 rmu->msg_tx_ring.size = entries;
739
740 for (i = 0; i < rmu->msg_tx_ring.size; i++) {
741 rmu->msg_tx_ring.virt_buffer[i] =
742 dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
743 &rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
744 if (!rmu->msg_tx_ring.virt_buffer[i]) {
745 rc = -ENOMEM;
746 for (j = 0; j < rmu->msg_tx_ring.size; j++)
747 if (rmu->msg_tx_ring.virt_buffer[j])
748 dma_free_coherent(priv->dev,
749 RIO_MSG_BUFFER_SIZE,
750 rmu->msg_tx_ring.
751 virt_buffer[j],
752 rmu->msg_tx_ring.
753 phys_buffer[j]);
754 goto out;
755 }
756 }
757
758 /* Initialize outbound message descriptor ring */
759 rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
760 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
761 &rmu->msg_tx_ring.phys, GFP_KERNEL);
762 if (!rmu->msg_tx_ring.virt) {
763 rc = -ENOMEM;
764 goto out_dma;
765 }
766 memset(rmu->msg_tx_ring.virt, 0,
767 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
768 rmu->msg_tx_ring.tx_slot = 0;
769
770 /* Point dequeue/enqueue pointers at first entry in ring */
771 out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys);
772 out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys);
773
774 /* Configure for snooping */
775 out_be32(&rmu->msg_regs->osar, 0x00000004);
776
777 /* Clear interrupt status */
778 out_be32(&rmu->msg_regs->osr, 0x000000b3);
779
780 /* Hook up outbound message handler */
781 rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
782 "msg_tx", (void *)mport);
783 if (rc < 0)
784 goto out_irq;
785
786 /*
787 * Configure outbound message unit
788 * Snooping
789 * Interrupts (all enabled, except QEIE)
790 * Chaining mode
791 * Disable
792 */
793 out_be32(&rmu->msg_regs->omr, 0x00100220);
794
795 /* Set number of entries */
796 out_be32(&rmu->msg_regs->omr,
797 in_be32(&rmu->msg_regs->omr) |
798 ((get_bitmask_order(entries) - 2) << 12));
799
800 /* Now enable the unit */
801 out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1);
802
803out:
804 return rc;
805
806out_irq:
807 dma_free_coherent(priv->dev,
808 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
809 rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
810
811out_dma:
812 for (i = 0; i < rmu->msg_tx_ring.size; i++)
813 dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
814 rmu->msg_tx_ring.virt_buffer[i],
815 rmu->msg_tx_ring.phys_buffer[i]);
816
817 return rc;
818}
819
820/**
821 * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
822 * @mport: Master port implementing the outbound message unit
823 * @mbox: Mailbox to close
824 *
825 * Disables the outbound message unit, free all buffers, and
826 * frees the outbound message interrupt.
827 */
828void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
829{
830 struct rio_priv *priv = mport->priv;
831 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
832
833 /* Disable inbound message unit */
834 out_be32(&rmu->msg_regs->omr, 0);
835
836 /* Free ring */
837 dma_free_coherent(priv->dev,
838 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
839 rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
840
841 /* Free interrupt */
842 free_irq(IRQ_RIO_TX(mport), (void *)mport);
843}

/**
 * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 *
 * Initializes the buffer ring, requests the inbound message interrupt,
 * and enables the inbound message unit. Returns %0 on success
 * and %-EINVAL or %-ENOMEM on failure.
 */
int
fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, rc = 0;
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	if ((entries < RIO_MIN_RX_RING_SIZE) ||
	    (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize client buffer ring */
	rmu->msg_rx_ring.dev_id = dev_id;
	rmu->msg_rx_ring.size = entries;
	rmu->msg_rx_ring.rx_slot = 0;
	for (i = 0; i < rmu->msg_rx_ring.size; i++)
		rmu->msg_rx_ring.virt_buffer[i] = NULL;

	/* Initialize inbound message ring */
	rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
		rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
		&rmu->msg_rx_ring.phys, GFP_KERNEL);
	if (!rmu->msg_rx_ring.virt) {
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys);
	out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys);

	/* Clear interrupt status */
	out_be32(&rmu->msg_regs->isr, 0x00000091);

	/* Hook up inbound message handler */
	rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
		"msg_rx", (void *)mport);
	if (rc < 0) {
		dma_free_coherent(priv->dev,
			rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
			rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
		goto out;
	}

	/*
	 * Configure inbound message unit:
	 *	Snooping
	 *	4KB max message size
	 *	Unmask all interrupt sources
	 *	Disable
	 */
	out_be32(&rmu->msg_regs->imr, 0x001b0060);

	/* Set number of queue entries */
	setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);

	/* Now enable the unit */
	setbits32(&rmu->msg_regs->imr, 0x1);

out:
	return rc;
}
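
/*
 * Illustrative only: the inbound side is likewise driven through the
 * generic API. A minimal sketch, assuming rio_drv.h, a hypothetical
 * receive callback my_rx_cb(), and client-owned buffers of at least
 * RIO_MAX_MSG_SIZE bytes each:
 *
 *	rc = rio_request_inb_mbox(mport, my_priv, 0, 32, my_rx_cb);
 *	for (i = 0; !rc && i < 32; i++)
 *		rc = rio_add_inb_buffer(mport, 0, my_rx_buf[i]);
 *
 * rio_request_inb_mbox() lands in fsl_open_inb_mbox(), and each
 * rio_add_inb_buffer() call is serviced by fsl_add_inb_buffer() below.
 */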

/**
 * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the inbound message unit, frees the message ring, and
 * frees the inbound message interrupt.
 */
void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	/* Disable inbound message unit */
	out_be32(&rmu->msg_regs->imr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
		rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_RX(mport), (void *)mport);
}

/**
 * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 *
 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
	int rc = 0;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
		rmu->msg_rx_ring.rx_slot);

	if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) {
		printk(KERN_ERR
			"RIO: error adding inbound buffer %d, buffer exists\n",
			rmu->msg_rx_ring.rx_slot);
		rc = -EINVAL;
		goto out;
	}

	rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf;
	if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size)
		rmu->msg_rx_ring.rx_slot = 0;

out:
	return rc;
}

/**
 * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 *
 * Gets the next available inbound message from the inbound message queue.
 * A pointer to the message is returned on success or NULL on failure.
 */
void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
	u32 phys_buf;
	void *virt_buf;
	void *buf = NULL;
	int buf_idx;

	phys_buf = in_be32(&rmu->msg_regs->ifqdpar);

	/* If no more messages, then bail out */
	if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
		goto out2;

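	/*
	 * The offset of the hardware dequeue pointer within the DMA ring
	 * selects both the message slot to copy from and the index of the
	 * matching client buffer handed in by fsl_add_inb_buffer().
	 */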
	virt_buf = rmu->msg_rx_ring.virt + (phys_buf - rmu->msg_rx_ring.phys);
	buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
	buf = rmu->msg_rx_ring.virt_buffer[buf_idx];

	if (!buf) {
		printk(KERN_ERR
			"RIO: inbound message copy failed, no buffers\n");
		goto out1;
	}

	/*
	 * Copy the full RIO_MAX_MSG_SIZE slot; the caller is expected to
	 * have allocated a buffer at least that large.
	 */
	memcpy(buf, virt_buf, RIO_MAX_MSG_SIZE);

	/* Clear the available buffer */
	rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;

out1:
	setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI);

out2:
	return buf;
}
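
/*
 * Illustrative only: from a client's inbound-mailbox callback (the
 * 'minb' handler registered with rio_request_inb_mbox()), messages are
 * typically drained and buffers recycled like this. A sketch, assuming
 * hypothetical my_rx_cb() and process() routines:
 *
 *	static void my_rx_cb(struct rio_mport *mport, void *dev_id,
 *			     int mbox, int slot)
 *	{
 *		void *msg;
 *
 *		while ((msg = rio_get_inb_message(mport, mbox))) {
 *			process(msg);
 *			rio_add_inb_buffer(mport, mbox, msg);
 *		}
 *	}
 *
 * rio_get_inb_message() resolves to fsl_get_inb_message() above, which
 * copies the payload out of the DMA ring before returning it.
 */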

/**
 * fsl_rio_doorbell_init - MPC85xx doorbell interface init
 * @dbell: Doorbell unit descriptor holding the inbound doorbell
 *         registers, ring state and IRQ
 *
 * Initializes doorbell unit hardware and inbound DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or %-ENOMEM on failure.
 */
int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell)
{
	int rc = 0;

	/* Initialize inbound doorbells */
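	/* 512 entries of DOORBELL_MESSAGE_SIZE (8) bytes each: a 4 KB ring */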
	dbell->dbell_ring.virt = dma_alloc_coherent(dbell->dev, 512 *
		DOORBELL_MESSAGE_SIZE, &dbell->dbell_ring.phys, GFP_KERNEL);
	if (!dbell->dbell_ring.virt) {
		printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n");
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&dbell->dbell_regs->dqdpar, (u32) dbell->dbell_ring.phys);
	out_be32(&dbell->dbell_regs->dqepar, (u32) dbell->dbell_ring.phys);

	/* Clear interrupt status */
	out_be32(&dbell->dbell_regs->dsr, 0x00000091);

	/* Hook up doorbell handler */
	rc = request_irq(IRQ_RIO_BELL(dbell), fsl_rio_dbell_handler, 0,
		"dbell_rx", (void *)dbell);
	if (rc < 0) {
		dma_free_coherent(dbell->dev, 512 * DOORBELL_MESSAGE_SIZE,
			dbell->dbell_ring.virt, dbell->dbell_ring.phys);
		printk(KERN_ERR
			"MPC85xx RIO: unable to request inbound doorbell irq\n");
		goto out;
	}

	/* Configure doorbells for snooping, 512 entries, and enable */
	out_be32(&dbell->dbell_regs->dmr, 0x00108161);

out:
	return rc;
}
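
/*
 * Illustrative only: clients consume inbound doorbells through the
 * generic API rather than this init routine. A minimal sketch, assuming
 * rio_drv.h and a hypothetical handler my_dbell_cb():
 *
 *	rc = rio_request_inb_dbell(mport, my_priv, 0x0000, 0x00ff,
 *				   my_dbell_cb);
 *
 * my_dbell_cb(mport, dev_id, src, dst, info) is then called from the
 * doorbell interrupt path for every inbound doorbell whose 16-bit info
 * field falls within the 0x0000-0x00ff range registered above.
 */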

int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
{
	struct rio_priv *priv;
	struct fsl_rmu *rmu;
	u64 msg_start;
	const u32 *msg_addr;
	int mlen;
	int aw;

	if (!mport || !mport->priv)
		return -EINVAL;

	priv = mport->priv;

	if (!node) {
		dev_warn(priv->dev, "Can't get %pOF property 'fsl,rmu'\n",
			priv->dev->of_node);
		return -EINVAL;
	}

	rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL);
	if (!rmu)
		return -ENOMEM;

	aw = of_n_addr_cells(node);
	msg_addr = of_get_property(node, "reg", &mlen);
	if (!msg_addr) {
		pr_err("%pOF: unable to find 'reg' property of message-unit\n",
			node);
		kfree(rmu);
		return -ENOMEM;
	}
	msg_start = of_read_number(msg_addr, aw);

	rmu->msg_regs = (struct rio_msg_regs *)
		(rmu_regs_win + (u32)msg_start);

	rmu->txirq = irq_of_parse_and_map(node, 0);
	rmu->rxirq = irq_of_parse_and_map(node, 1);
	printk(KERN_INFO "%pOF: txirq: %d, rxirq %d\n",
		node, rmu->txirq, rmu->rxirq);

	priv->rmm_handle = rmu;

	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);

	return 0;
}
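
/*
 * Illustrative device-tree fragment (property values are placeholders,
 * not taken from any particular board) for the message-unit node parsed
 * above: the first 'reg' cell is the unit's offset, added to
 * rmu_regs_win to locate its registers, and the two interrupt
 * specifiers become txirq and rxirq respectively.
 *
 *	message-unit@0 {
 *		compatible = "fsl,srio-msg-unit";
 *		reg = <0x0 0x100>;
 *		interrupts = <60 2 0 0 61 2 0 0>;
 *	};
 */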