/*
 * Freescale MPC85xx/MPC86xx RapidIO RMU support
 *
 * Copyright 2009 Sysgo AG
 * Thomas Moll <thomas.moll@sysgo.com>
 * - fixed maintenance access routines, check for aligned access
 *
 * Copyright 2009 Integrated Device Technology, Inc.
 * Alex Bounine <alexandre.bounine@idt.com>
 * - Added Port-Write message handling
 * - Added Machine Check exception handling
 *
 * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
 * Zhang Wei <wei.zhang@freescale.com>
 * Lian Minghuan-B31939 <Minghuan.Lian@freescale.com>
 * Liu Gang <Gang.Liu@freescale.com>
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>

#include "fsl_rio.h"

#define GET_RMM_HANDLE(mport) \
	(((struct rio_priv *)(mport->priv))->rmm_handle)

/* RapidIO IRQ definitions, read from the OF device tree */
#define IRQ_RIO_PW(m) (((struct fsl_rio_pw *)(m))->pwirq)
#define IRQ_RIO_BELL(m) (((struct fsl_rio_dbell *)(m))->bellirq)
#define IRQ_RIO_TX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->txirq)
#define IRQ_RIO_RX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->rxirq)

#define RIO_MIN_TX_RING_SIZE 2
#define RIO_MAX_TX_RING_SIZE 2048
#define RIO_MIN_RX_RING_SIZE 2
#define RIO_MAX_RX_RING_SIZE 2048

#define RIO_IPWMR_SEN 0x00100000
#define RIO_IPWMR_QFIE 0x00000100
#define RIO_IPWMR_EIE 0x00000020
#define RIO_IPWMR_CQ 0x00000002
#define RIO_IPWMR_PWE 0x00000001

#define RIO_IPWSR_QF 0x00100000
#define RIO_IPWSR_TE 0x00000080
#define RIO_IPWSR_QFI 0x00000010
#define RIO_IPWSR_PWD 0x00000008
#define RIO_IPWSR_PWB 0x00000004

#define RIO_EPWISR 0x10010
/* EPWISR Error match value */
#define RIO_EPWISR_PINT1 0x80000000
#define RIO_EPWISR_PINT2 0x40000000
#define RIO_EPWISR_MU 0x00000002
#define RIO_EPWISR_PW 0x00000001

#define IPWSR_CLEAR 0x98
#define OMSR_CLEAR 0x1cb3
#define IMSR_CLEAR 0x491
#define IDSR_CLEAR 0x91
#define ODSR_CLEAR 0x1c00
#define LTLEECSR_ENABLE_ALL 0xFFC000FC
#define RIO_LTLEECSR 0x060c

#define RIO_IM0SR 0x64
#define RIO_IM1SR 0x164
#define RIO_OM0SR 0x4
#define RIO_OM1SR 0x104

#define RIO_DBELL_WIN_SIZE 0x1000

#define RIO_MSG_OMR_MUI 0x00000002
#define RIO_MSG_OSR_TE 0x00000080
#define RIO_MSG_OSR_QOI 0x00000020
#define RIO_MSG_OSR_QFI 0x00000010
#define RIO_MSG_OSR_MUB 0x00000004
#define RIO_MSG_OSR_EOMI 0x00000002
#define RIO_MSG_OSR_QEI 0x00000001

#define RIO_MSG_IMR_MI 0x00000002
#define RIO_MSG_ISR_TE 0x00000080
#define RIO_MSG_ISR_QFI 0x00000010
#define RIO_MSG_ISR_DIQI 0x00000001

#define RIO_MSG_DESC_SIZE 32
#define RIO_MSG_BUFFER_SIZE 4096

#define DOORBELL_DMR_DI 0x00000002
#define DOORBELL_DSR_TE 0x00000080
#define DOORBELL_DSR_QFI 0x00000010
#define DOORBELL_DSR_DIQI 0x00000001

#define DOORBELL_MESSAGE_SIZE 0x08

static DEFINE_SPINLOCK(fsl_rio_doorbell_lock);

struct rio_msg_regs {
	u32 omr;
	u32 osr;
	u32 pad1;
	u32 odqdpar;
	u32 pad2;
	u32 osar;
	u32 odpr;
	u32 odatr;
	u32 odcr;
	u32 pad3;
	u32 odqepar;
	u32 pad4[13];
	u32 imr;
	u32 isr;
	u32 pad5;
	u32 ifqdpar;
	u32 pad6;
	u32 ifqepar;
};

struct rio_dbell_regs {
	u32 odmr;
	u32 odsr;
	u32 pad1[4];
	u32 oddpr;
	u32 oddatr;
	u32 pad2[3];
	u32 odretcr;
	u32 pad3[12];
	u32 dmr;
	u32 dsr;
	u32 pad4;
	u32 dqdpar;
	u32 pad5;
	u32 dqepar;
};

struct rio_pw_regs {
	u32 pwmr;
	u32 pwsr;
	u32 epwqbar;
	u32 pwqbar;
};


struct rio_tx_desc {
	u32 pad1;
	u32 saddr;
	u32 dport;
	u32 dattr;
	u32 pad2;
	u32 pad3;
	u32 dwcnt;
	u32 pad4;
};

struct rio_msg_tx_ring {
	void *virt;
	dma_addr_t phys;
	void *virt_buffer[RIO_MAX_TX_RING_SIZE];
	dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
	int tx_slot;
	int size;
	void *dev_id;
};

struct rio_msg_rx_ring {
	void *virt;
	dma_addr_t phys;
	void *virt_buffer[RIO_MAX_RX_RING_SIZE];
	int rx_slot;
	int size;
	void *dev_id;
};

struct fsl_rmu {
	struct rio_msg_regs __iomem *msg_regs;
	struct rio_msg_tx_ring msg_tx_ring;
	struct rio_msg_rx_ring msg_rx_ring;
	int txirq;
	int rxirq;
};

struct rio_dbell_msg {
	u16 pad1;
	u16 tid;
	u16 sid;
	u16 info;
};
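
/*
 * Each entry in the inbound doorbell ring is DOORBELL_MESSAGE_SIZE (8) bytes
 * and is assumed to follow this layout: sid/tid carry the source and target
 * device IDs and info carries the 16-bit doorbell payload, as consumed by
 * fsl_rio_dbell_handler() below.
 */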

/**
 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles outbound message interrupts. Executes a registered outbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_tx_handler(int irq, void *dev_instance)
{
	int osr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(port);

	osr = in_be32(&rmu->msg_regs->osr);

	if (osr & RIO_MSG_OSR_TE) {
		pr_info("RIO: outbound message transmission error\n");
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE);
		goto out;
	}

	if (osr & RIO_MSG_OSR_QOI) {
		pr_info("RIO: outbound message queue overflow\n");
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI);
		goto out;
	}

	if (osr & RIO_MSG_OSR_EOMI) {
		u32 dqp = in_be32(&rmu->msg_regs->odqdpar);
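		/* The dequeue pointer holds a bus address inside the
		 * descriptor ring; shifting the offset right by 5 turns it
		 * into a slot index, since each descriptor is
		 * RIO_MSG_DESC_SIZE (32) bytes.
		 */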
		int slot = (dqp - rmu->msg_tx_ring.phys) >> 5;
		if (port->outb_msg[0].mcback != NULL) {
			port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id,
					-1,
					slot);
		}
		/* Ack the end-of-message interrupt */
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI);
	}

out:
	return IRQ_HANDLED;
}

/**
 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles inbound message interrupts. Executes a registered inbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_rx_handler(int irq, void *dev_instance)
{
	int isr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(port);

	isr = in_be32(&rmu->msg_regs->isr);

	if (isr & RIO_MSG_ISR_TE) {
		pr_info("RIO: inbound message reception error\n");
		out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE);
		goto out;
	}

	/* XXX Need to check/dispatch until queue empty */
	if (isr & RIO_MSG_ISR_DIQI) {
		/*
		 * Can receive messages for any mailbox/letter to that
		 * mailbox destination. So, make the callback with an
		 * unknown/invalid mailbox number argument.
		 */
		if (port->inb_msg[0].mcback != NULL)
			port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id,
				-1,
				-1);

		/* Ack the queueing interrupt */
		out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI);
	}

out:
	return IRQ_HANDLED;
}

/**
 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles doorbell interrupts. Parses a list of registered
 * doorbell event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_dbell_handler(int irq, void *dev_instance)
{
	int dsr;
	struct fsl_rio_dbell *fsl_dbell = (struct fsl_rio_dbell *)dev_instance;
	int i;

	dsr = in_be32(&fsl_dbell->dbell_regs->dsr);

	if (dsr & DOORBELL_DSR_TE) {
		pr_info("RIO: doorbell reception error\n");
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_TE);
		goto out;
	}

	if (dsr & DOORBELL_DSR_QFI) {
		pr_info("RIO: doorbell queue full\n");
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_QFI);
	}

	/* XXX Need to check/dispatch until queue empty */
	if (dsr & DOORBELL_DSR_DIQI) {
		struct rio_dbell_msg *dmsg =
			fsl_dbell->dbell_ring.virt +
			(in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff);
		struct rio_dbell *dbell;
		int found = 0;

		pr_debug
			("RIO: processing doorbell,"
			" sid %2.2x tid %2.2x info %4.4x\n",
			dmsg->sid, dmsg->tid, dmsg->info);

		for (i = 0; i < MAX_PORT_NUM; i++) {
			if (fsl_dbell->mport[i]) {
				list_for_each_entry(dbell,
					&fsl_dbell->mport[i]->dbells, node) {
					if ((dbell->res->start
						<= dmsg->info)
						&& (dbell->res->end
						>= dmsg->info)) {
						found = 1;
						break;
					}
				}
				if (found && dbell->dinb) {
					dbell->dinb(fsl_dbell->mport[i],
						dbell->dev_id, dmsg->sid,
						dmsg->tid,
						dmsg->info);
					break;
				}
			}
		}

		if (!found) {
			pr_debug
				("RIO: spurious doorbell,"
				" sid %2.2x tid %2.2x info %4.4x\n",
				dmsg->sid, dmsg->tid,
				dmsg->info);
		}
		setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI);
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI);
	}

out:
	return IRQ_HANDLED;
}

void msg_unit_error_handler(void)
{

	/* XXX: Error recovery is not implemented, we just clear errors */
	out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);

	out_be32((u32 *)(rmu_regs_win + RIO_IM0SR), IMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_IM1SR), IMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_OM0SR), OMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_OM1SR), OMSR_CLEAR);

	out_be32(&dbell->dbell_regs->odsr, ODSR_CLEAR);
	out_be32(&dbell->dbell_regs->dsr, IDSR_CLEAR);

	out_be32(&pw->pw_regs->pwsr, IPWSR_CLEAR);
}

/**
 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles port write interrupts. Parses a list of registered
 * port write event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_port_write_handler(int irq, void *dev_instance)
{
	u32 ipwmr, ipwsr;
	struct fsl_rio_pw *pw = (struct fsl_rio_pw *)dev_instance;
	u32 epwisr, tmp;

	epwisr = in_be32(rio_regs_win + RIO_EPWISR);
	if (!(epwisr & RIO_EPWISR_PW))
		goto pw_done;

	ipwmr = in_be32(&pw->pw_regs->pwmr);
	ipwsr = in_be32(&pw->pw_regs->pwsr);

#ifdef DEBUG_PW
	pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
	if (ipwsr & RIO_IPWSR_QF)
		pr_debug(" QF");
	if (ipwsr & RIO_IPWSR_TE)
		pr_debug(" TE");
	if (ipwsr & RIO_IPWSR_QFI)
		pr_debug(" QFI");
	if (ipwsr & RIO_IPWSR_PWD)
		pr_debug(" PWD");
	if (ipwsr & RIO_IPWSR_PWB)
		pr_debug(" PWB");
	pr_debug(" )\n");
#endif
	/* Schedule deferred processing if PW was received */
	if (ipwsr & RIO_IPWSR_QFI) {
		/* Save PW message (if there is room in FIFO),
		 * otherwise discard it.
		 */
		if (kfifo_avail(&pw->pw_fifo) >= RIO_PW_MSG_SIZE) {
			pw->port_write_msg.msg_count++;
			kfifo_in(&pw->pw_fifo, pw->port_write_msg.virt,
				 RIO_PW_MSG_SIZE);
		} else {
			pw->port_write_msg.discard_count++;
			pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
				 pw->port_write_msg.discard_count);
		}
		/* Clear interrupt and issue Clear Queue command. This allows
		 * another port-write to be received.
		 */
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_QFI);
		out_be32(&pw->pw_regs->pwmr, ipwmr | RIO_IPWMR_CQ);

		schedule_work(&pw->pw_work);
	}

	if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
		pw->port_write_msg.err_count++;
		pr_debug("RIO: Port-Write Transaction Err (%d)\n",
			 pw->port_write_msg.err_count);
		/* Clear Transaction Error: port-write controller should be
		 * disabled when clearing this error
		 */
		out_be32(&pw->pw_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_TE);
		out_be32(&pw->pw_regs->pwmr, ipwmr);
	}

	if (ipwsr & RIO_IPWSR_PWD) {
		pw->port_write_msg.discard_count++;
		pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
			 pw->port_write_msg.discard_count);
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_PWD);
	}

pw_done:
	if (epwisr & RIO_EPWISR_PINT1) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		fsl_rio_port_error_handler(0);
	}

	if (epwisr & RIO_EPWISR_PINT2) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		fsl_rio_port_error_handler(1);
	}

	if (epwisr & RIO_EPWISR_MU) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		msg_unit_error_handler();
	}

	return IRQ_HANDLED;
}

static void fsl_pw_dpc(struct work_struct *work)
{
	struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work);
	union rio_pw_msg msg_buffer;
	int i;

	/*
	 * Process port-write messages
	 */
	while (kfifo_out_spinlocked(&pw->pw_fifo, (unsigned char *)&msg_buffer,
			 RIO_PW_MSG_SIZE, &pw->pw_fifo_lock)) {
#ifdef DEBUG_PW
		{
			u32 i;
			pr_debug("%s : Port-Write Message:", __func__);
			for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
				if ((i%4) == 0)
					pr_debug("\n0x%02x: 0x%08x", i*4,
						msg_buffer.raw[i]);
				else
					pr_debug(" 0x%08x", msg_buffer.raw[i]);
			}
			pr_debug("\n");
		}
#endif
		/* Pass the port-write message to RIO core for processing */
		for (i = 0; i < MAX_PORT_NUM; i++) {
			if (pw->mport[i])
				rio_inb_pwrite_handler(pw->mport[i],
						       &msg_buffer);
		}
	}
}

/**
 * fsl_rio_pw_enable - enable/disable the port-write interface
 * @mport: Master port implementing the port write unit
 * @enable: 1=enable; 0=disable port-write message handling
 */
int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
{
	u32 rval;

	rval = in_be32(&pw->pw_regs->pwmr);

	if (enable)
		rval |= RIO_IPWMR_PWE;
	else
		rval &= ~RIO_IPWMR_PWE;

	out_be32(&pw->pw_regs->pwmr, rval);

	return 0;
}

/**
 * fsl_rio_port_write_init - MPC85xx port write interface init
 * @pw: Port-write unit state (struct fsl_rio_pw)
 *
 * Initializes port write unit hardware and DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or %-ENOMEM on failure.
 */

int fsl_rio_port_write_init(struct fsl_rio_pw *pw)
{
	int rc = 0;

	/* Following configurations require a disabled port write controller */
	out_be32(&pw->pw_regs->pwmr,
		 in_be32(&pw->pw_regs->pwmr) & ~RIO_IPWMR_PWE);

	/* Initialize port write */
	pw->port_write_msg.virt = dma_alloc_coherent(pw->dev,
			RIO_PW_MSG_SIZE,
			&pw->port_write_msg.phys, GFP_KERNEL);
	if (!pw->port_write_msg.virt) {
560 pr_err("RIO: unable allocate port write queue\n");
		return -ENOMEM;
	}

	pw->port_write_msg.err_count = 0;
	pw->port_write_msg.discard_count = 0;

	/* Point dequeue/enqueue pointers at first entry */
	out_be32(&pw->pw_regs->epwqbar, 0);
	out_be32(&pw->pw_regs->pwqbar, (u32) pw->port_write_msg.phys);

	pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
		 in_be32(&pw->pw_regs->epwqbar),
		 in_be32(&pw->pw_regs->pwqbar));

	/* Clear interrupt status IPWSR */
	out_be32(&pw->pw_regs->pwsr,
		 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));

	/* Configure the port-write controller: enable snooping, enable all
	 * reporting, and clear the queue-full condition.
	 */
	out_be32(&pw->pw_regs->pwmr,
		 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);


	/* Hook up port-write handler */
	rc = request_irq(IRQ_RIO_PW(pw), fsl_rio_port_write_handler,
			IRQF_SHARED, "port-write", (void *)pw);
	if (rc < 0) {
589 pr_err("MPC85xx RIO: unable to request inbound doorbell irq");
		goto err_out;
	}
	/* Enable Error Interrupt */
	out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);

	INIT_WORK(&pw->pw_work, fsl_pw_dpc);
	spin_lock_init(&pw->pw_fifo_lock);
	if (kfifo_alloc(&pw->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
		pr_err("FIFO allocation failed\n");
		rc = -ENOMEM;
		goto err_out_irq;
	}

	pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
		 in_be32(&pw->pw_regs->pwmr),
		 in_be32(&pw->pw_regs->pwsr));

	return rc;

err_out_irq:
	free_irq(IRQ_RIO_PW(pw), (void *)pw);
err_out:
	dma_free_coherent(pw->dev, RIO_PW_MSG_SIZE,
		pw->port_write_msg.virt,
		pw->port_write_msg.phys);
	return rc;
}

/**
 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of target device
 * @data: 16-bit info field of RapidIO doorbell message
 *
 * Sends a MPC85xx doorbell message. Returns %0 on success or
 * %-EINVAL on failure.
 */
int fsl_rio_doorbell_send(struct rio_mport *mport,
				int index, u16 destid, u16 data)
{
	unsigned long flags;

	pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
		 index, destid, data);

	spin_lock_irqsave(&fsl_rio_doorbell_lock, flags);

	/* On serial RapidIO silicon such as the MPC8548 and MPC8641, the
	 * following sequence of register writes is required.
	 */
	out_be32(&dbell->dbell_regs->odmr, 0x00000000);
	out_be32(&dbell->dbell_regs->odretcr, 0x00000004);
	out_be32(&dbell->dbell_regs->oddpr, destid << 16);
	out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
	out_be32(&dbell->dbell_regs->odmr, 0x00000001);

	spin_unlock_irqrestore(&fsl_rio_doorbell_lock, flags);

	return 0;
}
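
/*
 * Usage sketch (assumed calling path): drivers normally reach this routine
 * through the generic RapidIO doorbell API, e.g. rio_send_doorbell(rdev, info),
 * which dispatches to the master port's doorbell-send operation rather than
 * calling this function directly.
 */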

/**
 * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
 * @mport: Master port with outbound message queue
 * @rdev: Target of outbound message
 * @mbox: Outbound mailbox
 * @buffer: Message to add to outbound queue
 * @len: Length of message
 *
 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int
fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
			void *buffer, size_t len)
{
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
	u32 omr;
	struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt
					+ rmu->msg_tx_ring.tx_slot;
	int ret = 0;

	pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
		 "%p len %8.8zx\n", rdev->destid, mbox, buffer, len);
	if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy and clear rest of buffer */
	memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer,
			len);
	if (len < (RIO_MAX_MSG_SIZE - 4))
		memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot]
				+ len, 0, RIO_MAX_MSG_SIZE - len);

	/* Set mbox field for message, and set destid */
	desc->dport = (rdev->destid << 16) | (mbox & 0x3);

	/* Enable EOMI interrupt and priority */
	desc->dattr = 0x28000000 | ((mport->index) << 20);

	/* Set transfer size aligned to next power of 2 (in double words) */
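	/* For example, a 100-byte message is rounded up to a 128-byte
	 * transfer; lengths that are already a power of 2 are used as-is.
	 */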
	desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);

	/* Set snooping and source buffer address */
	desc->saddr = 0x00000004
		| rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot];

	/* Increment enqueue pointer */
	omr = in_be32(&rmu->msg_regs->omr);
	out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI);

	/* Go to next descriptor */
	if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size)
		rmu->msg_tx_ring.tx_slot = 0;

out:
	return ret;
}
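
/*
 * Usage sketch (assumed calling path): once the mailbox has been opened via
 * the RapidIO core (e.g. rio_request_outb_mbox()), drivers queue messages
 * through the core's outbound-message API (e.g. rio_add_outb_message()),
 * which ends up invoking this routine for FSL master ports.
 */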

/**
 * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the outbound mailbox ring
 *
 * Initializes the buffer ring, requests the outbound message interrupt,
 * and enables the outbound message unit. Returns %0 on success and
 * %-EINVAL or %-ENOMEM on failure.
 */
int
fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, j, rc = 0;
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	if ((entries < RIO_MIN_TX_RING_SIZE) ||
	    (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize shadow copy ring */
	rmu->msg_tx_ring.dev_id = dev_id;
	rmu->msg_tx_ring.size = entries;

	for (i = 0; i < rmu->msg_tx_ring.size; i++) {
		rmu->msg_tx_ring.virt_buffer[i] =
			dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
				&rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
		if (!rmu->msg_tx_ring.virt_buffer[i]) {
			rc = -ENOMEM;
			for (j = 0; j < rmu->msg_tx_ring.size; j++)
				if (rmu->msg_tx_ring.virt_buffer[j])
					dma_free_coherent(priv->dev,
						RIO_MSG_BUFFER_SIZE,
						rmu->msg_tx_ring.
						virt_buffer[j],
						rmu->msg_tx_ring.
						phys_buffer[j]);
			goto out;
		}
	}

	/* Initialize outbound message descriptor ring */
	rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
				rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
				&rmu->msg_tx_ring.phys, GFP_KERNEL);
	if (!rmu->msg_tx_ring.virt) {
		rc = -ENOMEM;
		goto out_dma;
	}
	memset(rmu->msg_tx_ring.virt, 0,
		rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
	rmu->msg_tx_ring.tx_slot = 0;

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys);
	out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys);

	/* Configure for snooping */
	out_be32(&rmu->msg_regs->osar, 0x00000004);

	/* Clear interrupt status */
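	/* (0xb3 = RIO_MSG_OSR_TE | RIO_MSG_OSR_QOI | RIO_MSG_OSR_QFI |
	 *  RIO_MSG_OSR_EOMI | RIO_MSG_OSR_QEI, per the bit definitions above.)
	 */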
	out_be32(&rmu->msg_regs->osr, 0x000000b3);

	/* Hook up outbound message handler */
	rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
			 "msg_tx", (void *)mport);
	if (rc < 0)
		goto out_irq;

	/*
	 * Configure outbound message unit
	 *      Snooping
	 *      Interrupts (all enabled, except QEIE)
	 *      Chaining mode
	 *      Disable
	 */
	out_be32(&rmu->msg_regs->omr, 0x00100220);

	/* Set number of entries */
	out_be32(&rmu->msg_regs->omr,
		 in_be32(&rmu->msg_regs->omr) |
		 ((get_bitmask_order(entries) - 2) << 12));

	/* Now enable the unit */
	out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1);

out:
	return rc;

out_irq:
	dma_free_coherent(priv->dev,
		rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
		rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);

out_dma:
	for (i = 0; i < rmu->msg_tx_ring.size; i++)
		dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
			rmu->msg_tx_ring.virt_buffer[i],
			rmu->msg_tx_ring.phys_buffer[i]);

	return rc;
}

/**
 * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the outbound message unit, frees all buffers, and
 * frees the outbound message interrupt.
 */
void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	/* Disable outbound message unit */
	out_be32(&rmu->msg_regs->omr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev,
		rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
		rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_TX(mport), (void *)mport);
}

/**
 * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 *
 * Initializes the buffer ring, requests the inbound message interrupt,
 * and enables the inbound message unit. Returns %0 on success
 * and %-EINVAL or %-ENOMEM on failure.
 */
int
fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, rc = 0;
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	if ((entries < RIO_MIN_RX_RING_SIZE) ||
	    (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize client buffer ring */
	rmu->msg_rx_ring.dev_id = dev_id;
	rmu->msg_rx_ring.size = entries;
	rmu->msg_rx_ring.rx_slot = 0;
	for (i = 0; i < rmu->msg_rx_ring.size; i++)
		rmu->msg_rx_ring.virt_buffer[i] = NULL;

	/* Initialize inbound message ring */
	rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
				rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
				&rmu->msg_rx_ring.phys, GFP_KERNEL);
	if (!rmu->msg_rx_ring.virt) {
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys);
	out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys);

	/* Clear interrupt status */
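	/* (0x91 = RIO_MSG_ISR_TE | RIO_MSG_ISR_QFI | RIO_MSG_ISR_DIQI,
	 *  per the bit definitions above.)
	 */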
	out_be32(&rmu->msg_regs->isr, 0x00000091);

	/* Hook up inbound message handler */
	rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
			 "msg_rx", (void *)mport);
	if (rc < 0) {
		dma_free_coherent(priv->dev,
			rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
			rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
		goto out;
	}

	/*
	 * Configure inbound message unit:
	 *      Snooping
	 *      4KB max message size
	 *      Unmask all interrupt sources
	 *      Disable
	 */
	out_be32(&rmu->msg_regs->imr, 0x001b0060);

	/* Set number of queue entries */
	setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);

	/* Now enable the unit */
	setbits32(&rmu->msg_regs->imr, 0x1);

out:
	return rc;
}

/**
 * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the inbound message unit, frees all buffers, and
 * frees the inbound message interrupt.
 */
void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	/* Disable inbound message unit */
	out_be32(&rmu->msg_regs->imr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
		rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_RX(mport), (void *)mport);
}

/**
 * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 *
 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
	int rc = 0;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
		 rmu->msg_rx_ring.rx_slot);

	if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) {
		printk(KERN_ERR
			"RIO: error adding inbound buffer %d, buffer exists\n",
			rmu->msg_rx_ring.rx_slot);
		rc = -EINVAL;
		goto out;
	}

	rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf;
	if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size)
		rmu->msg_rx_ring.rx_slot = 0;

out:
	return rc;
}

/**
 * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 *
 * Gets the next available inbound message from the inbound message queue.
 * A pointer to the message is returned on success or NULL on failure.
 */
void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
	u32 phys_buf;
	void *virt_buf;
	void *buf = NULL;
	int buf_idx;

	phys_buf = in_be32(&rmu->msg_regs->ifqdpar);

	/* If no more messages, then bail out */
	if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
		goto out2;

	virt_buf = rmu->msg_rx_ring.virt + (phys_buf
					- rmu->msg_rx_ring.phys);
	buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
	buf = rmu->msg_rx_ring.virt_buffer[buf_idx];

	if (!buf) {
		printk(KERN_ERR
			"RIO: inbound message copy failed, no buffers\n");
		goto out1;
	}

	/* Copy the maximum message size; the caller is expected to have
	 * allocated a buffer that large.
	 */
	memcpy(buf, virt_buf, RIO_MAX_MSG_SIZE);

	/* Clear the available buffer */
	rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;

out1:
	setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI);

out2:
	return buf;
}

/**
 * fsl_rio_doorbell_init - MPC85xx doorbell interface init
 * @dbell: Inbound doorbell unit state (struct fsl_rio_dbell)
 *
 * Initializes doorbell unit hardware and inbound DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or %-ENOMEM on failure.
 */
int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell)
{
	int rc = 0;

	/* Initialize inbound doorbells */
	dbell->dbell_ring.virt = dma_alloc_coherent(dbell->dev, 512 *
		DOORBELL_MESSAGE_SIZE, &dbell->dbell_ring.phys, GFP_KERNEL);
	if (!dbell->dbell_ring.virt) {
1040 printk(KERN_ERR "RIO: unable allocate inbound doorbell ring\n");
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&dbell->dbell_regs->dqdpar, (u32) dbell->dbell_ring.phys);
	out_be32(&dbell->dbell_regs->dqepar, (u32) dbell->dbell_ring.phys);

	/* Clear interrupt status */
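	/* (0x91 = DOORBELL_DSR_TE | DOORBELL_DSR_QFI | DOORBELL_DSR_DIQI,
	 *  per the bit definitions above.)
	 */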
	out_be32(&dbell->dbell_regs->dsr, 0x00000091);

	/* Hook up doorbell handler */
	rc = request_irq(IRQ_RIO_BELL(dbell), fsl_rio_dbell_handler, 0,
			 "dbell_rx", (void *)dbell);
	if (rc < 0) {
		dma_free_coherent(dbell->dev, 512 * DOORBELL_MESSAGE_SIZE,
			 dbell->dbell_ring.virt, dbell->dbell_ring.phys);
		printk(KERN_ERR
			"MPC85xx RIO: unable to request inbound doorbell irq");
		goto out;
	}

	/* Configure doorbells for snooping, 512 entries, and enable */
	out_be32(&dbell->dbell_regs->dmr, 0x00108161);

out:
	return rc;
}

int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
{
	struct rio_priv *priv;
	struct fsl_rmu *rmu;
	u64 msg_start;
	const u32 *msg_addr;
	int mlen;
	int aw;

	if (!mport || !mport->priv)
		return -EINVAL;

	priv = mport->priv;

	if (!node) {
		dev_warn(priv->dev, "Can't get %pOF property 'fsl,rmu'\n",
			priv->dev->of_node);
		return -EINVAL;
	}

	rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL);
	if (!rmu)
		return -ENOMEM;

	aw = of_n_addr_cells(node);
	msg_addr = of_get_property(node, "reg", &mlen);
	if (!msg_addr) {
		pr_err("%pOF: unable to find 'reg' property of message-unit\n",
			node);
		kfree(rmu);
		return -ENOMEM;
	}
	msg_start = of_read_number(msg_addr, aw);

	rmu->msg_regs = (struct rio_msg_regs *)
			(rmu_regs_win + (u32)msg_start);

	rmu->txirq = irq_of_parse_and_map(node, 0);
	rmu->rxirq = irq_of_parse_and_map(node, 1);
	printk(KERN_INFO "%pOF: txirq: %d, rxirq %d\n",
		node, rmu->txirq, rmu->rxirq);

	priv->rmm_handle = rmu;

	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);

	return 0;
}
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Freescale MPC85xx/MPC86xx RapidIO RMU support
4 *
5 * Copyright 2009 Sysgo AG
6 * Thomas Moll <thomas.moll@sysgo.com>
7 * - fixed maintenance access routines, check for aligned access
8 *
9 * Copyright 2009 Integrated Device Technology, Inc.
10 * Alex Bounine <alexandre.bounine@idt.com>
11 * - Added Port-Write message handling
12 * - Added Machine Check exception handling
13 *
14 * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
15 * Zhang Wei <wei.zhang@freescale.com>
16 * Lian Minghuan-B31939 <Minghuan.Lian@freescale.com>
17 * Liu Gang <Gang.Liu@freescale.com>
18 *
19 * Copyright 2005 MontaVista Software, Inc.
20 * Matt Porter <mporter@kernel.crashing.org>
21 */
22
23#include <linux/types.h>
24#include <linux/dma-mapping.h>
25#include <linux/interrupt.h>
26#include <linux/of_irq.h>
27#include <linux/of_platform.h>
28#include <linux/slab.h>
29
30#include "fsl_rio.h"
31
32#define GET_RMM_HANDLE(mport) \
33 (((struct rio_priv *)(mport->priv))->rmm_handle)
34
35/* RapidIO definition irq, which read from OF-tree */
36#define IRQ_RIO_PW(m) (((struct fsl_rio_pw *)(m))->pwirq)
37#define IRQ_RIO_BELL(m) (((struct fsl_rio_dbell *)(m))->bellirq)
38#define IRQ_RIO_TX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->txirq)
39#define IRQ_RIO_RX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->rxirq)
40
41#define RIO_MIN_TX_RING_SIZE 2
42#define RIO_MAX_TX_RING_SIZE 2048
43#define RIO_MIN_RX_RING_SIZE 2
44#define RIO_MAX_RX_RING_SIZE 2048
45
46#define RIO_IPWMR_SEN 0x00100000
47#define RIO_IPWMR_QFIE 0x00000100
48#define RIO_IPWMR_EIE 0x00000020
49#define RIO_IPWMR_CQ 0x00000002
50#define RIO_IPWMR_PWE 0x00000001
51
52#define RIO_IPWSR_QF 0x00100000
53#define RIO_IPWSR_TE 0x00000080
54#define RIO_IPWSR_QFI 0x00000010
55#define RIO_IPWSR_PWD 0x00000008
56#define RIO_IPWSR_PWB 0x00000004
57
58#define RIO_EPWISR 0x10010
59/* EPWISR Error match value */
60#define RIO_EPWISR_PINT1 0x80000000
61#define RIO_EPWISR_PINT2 0x40000000
62#define RIO_EPWISR_MU 0x00000002
63#define RIO_EPWISR_PW 0x00000001
64
65#define IPWSR_CLEAR 0x98
66#define OMSR_CLEAR 0x1cb3
67#define IMSR_CLEAR 0x491
68#define IDSR_CLEAR 0x91
69#define ODSR_CLEAR 0x1c00
70#define LTLEECSR_ENABLE_ALL 0xFFC000FC
71#define RIO_LTLEECSR 0x060c
72
73#define RIO_IM0SR 0x64
74#define RIO_IM1SR 0x164
75#define RIO_OM0SR 0x4
76#define RIO_OM1SR 0x104
77
78#define RIO_DBELL_WIN_SIZE 0x1000
79
80#define RIO_MSG_OMR_MUI 0x00000002
81#define RIO_MSG_OSR_TE 0x00000080
82#define RIO_MSG_OSR_QOI 0x00000020
83#define RIO_MSG_OSR_QFI 0x00000010
84#define RIO_MSG_OSR_MUB 0x00000004
85#define RIO_MSG_OSR_EOMI 0x00000002
86#define RIO_MSG_OSR_QEI 0x00000001
87
88#define RIO_MSG_IMR_MI 0x00000002
89#define RIO_MSG_ISR_TE 0x00000080
90#define RIO_MSG_ISR_QFI 0x00000010
91#define RIO_MSG_ISR_DIQI 0x00000001
92
93#define RIO_MSG_DESC_SIZE 32
94#define RIO_MSG_BUFFER_SIZE 4096
95
96#define DOORBELL_DMR_DI 0x00000002
97#define DOORBELL_DSR_TE 0x00000080
98#define DOORBELL_DSR_QFI 0x00000010
99#define DOORBELL_DSR_DIQI 0x00000001
100
101#define DOORBELL_MESSAGE_SIZE 0x08
102
103static DEFINE_SPINLOCK(fsl_rio_doorbell_lock);
104
105struct rio_msg_regs {
106 u32 omr;
107 u32 osr;
108 u32 pad1;
109 u32 odqdpar;
110 u32 pad2;
111 u32 osar;
112 u32 odpr;
113 u32 odatr;
114 u32 odcr;
115 u32 pad3;
116 u32 odqepar;
117 u32 pad4[13];
118 u32 imr;
119 u32 isr;
120 u32 pad5;
121 u32 ifqdpar;
122 u32 pad6;
123 u32 ifqepar;
124};
125
126struct rio_dbell_regs {
127 u32 odmr;
128 u32 odsr;
129 u32 pad1[4];
130 u32 oddpr;
131 u32 oddatr;
132 u32 pad2[3];
133 u32 odretcr;
134 u32 pad3[12];
135 u32 dmr;
136 u32 dsr;
137 u32 pad4;
138 u32 dqdpar;
139 u32 pad5;
140 u32 dqepar;
141};
142
143struct rio_pw_regs {
144 u32 pwmr;
145 u32 pwsr;
146 u32 epwqbar;
147 u32 pwqbar;
148};
149
150
151struct rio_tx_desc {
152 u32 pad1;
153 u32 saddr;
154 u32 dport;
155 u32 dattr;
156 u32 pad2;
157 u32 pad3;
158 u32 dwcnt;
159 u32 pad4;
160};
161
162struct rio_msg_tx_ring {
163 void *virt;
164 dma_addr_t phys;
165 void *virt_buffer[RIO_MAX_TX_RING_SIZE];
166 dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
167 int tx_slot;
168 int size;
169 void *dev_id;
170};
171
172struct rio_msg_rx_ring {
173 void *virt;
174 dma_addr_t phys;
175 void *virt_buffer[RIO_MAX_RX_RING_SIZE];
176 int rx_slot;
177 int size;
178 void *dev_id;
179};
180
181struct fsl_rmu {
182 struct rio_msg_regs __iomem *msg_regs;
183 struct rio_msg_tx_ring msg_tx_ring;
184 struct rio_msg_rx_ring msg_rx_ring;
185 int txirq;
186 int rxirq;
187};
188
189struct rio_dbell_msg {
190 u16 pad1;
191 u16 tid;
192 u16 sid;
193 u16 info;
194};
195
196/**
197 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
198 * @irq: Linux interrupt number
199 * @dev_instance: Pointer to interrupt-specific data
200 *
201 * Handles outbound message interrupts. Executes a register outbound
202 * mailbox event handler and acks the interrupt occurrence.
203 */
204static irqreturn_t
205fsl_rio_tx_handler(int irq, void *dev_instance)
206{
207 int osr;
208 struct rio_mport *port = (struct rio_mport *)dev_instance;
209 struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
210
211 osr = in_be32(&rmu->msg_regs->osr);
212
213 if (osr & RIO_MSG_OSR_TE) {
214 pr_info("RIO: outbound message transmission error\n");
215 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE);
216 goto out;
217 }
218
219 if (osr & RIO_MSG_OSR_QOI) {
220 pr_info("RIO: outbound message queue overflow\n");
221 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI);
222 goto out;
223 }
224
225 if (osr & RIO_MSG_OSR_EOMI) {
226 u32 dqp = in_be32(&rmu->msg_regs->odqdpar);
227 int slot = (dqp - rmu->msg_tx_ring.phys) >> 5;
228 if (port->outb_msg[0].mcback != NULL) {
229 port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id,
230 -1,
231 slot);
232 }
233 /* Ack the end-of-message interrupt */
234 out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI);
235 }
236
237out:
238 return IRQ_HANDLED;
239}
240
241/**
242 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
243 * @irq: Linux interrupt number
244 * @dev_instance: Pointer to interrupt-specific data
245 *
246 * Handles inbound message interrupts. Executes a registered inbound
247 * mailbox event handler and acks the interrupt occurrence.
248 */
249static irqreturn_t
250fsl_rio_rx_handler(int irq, void *dev_instance)
251{
252 int isr;
253 struct rio_mport *port = (struct rio_mport *)dev_instance;
254 struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
255
256 isr = in_be32(&rmu->msg_regs->isr);
257
258 if (isr & RIO_MSG_ISR_TE) {
259 pr_info("RIO: inbound message reception error\n");
260 out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE);
261 goto out;
262 }
263
264 /* XXX Need to check/dispatch until queue empty */
265 if (isr & RIO_MSG_ISR_DIQI) {
266 /*
267 * Can receive messages for any mailbox/letter to that
268 * mailbox destination. So, make the callback with an
269 * unknown/invalid mailbox number argument.
270 */
271 if (port->inb_msg[0].mcback != NULL)
272 port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id,
273 -1,
274 -1);
275
276 /* Ack the queueing interrupt */
277 out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI);
278 }
279
280out:
281 return IRQ_HANDLED;
282}
283
284/**
285 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
286 * @irq: Linux interrupt number
287 * @dev_instance: Pointer to interrupt-specific data
288 *
289 * Handles doorbell interrupts. Parses a list of registered
290 * doorbell event handlers and executes a matching event handler.
291 */
292static irqreturn_t
293fsl_rio_dbell_handler(int irq, void *dev_instance)
294{
295 int dsr;
296 struct fsl_rio_dbell *fsl_dbell = (struct fsl_rio_dbell *)dev_instance;
297 int i;
298
299 dsr = in_be32(&fsl_dbell->dbell_regs->dsr);
300
301 if (dsr & DOORBELL_DSR_TE) {
302 pr_info("RIO: doorbell reception error\n");
303 out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_TE);
304 goto out;
305 }
306
307 if (dsr & DOORBELL_DSR_QFI) {
308 pr_info("RIO: doorbell queue full\n");
309 out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_QFI);
310 }
311
312 /* XXX Need to check/dispatch until queue empty */
313 if (dsr & DOORBELL_DSR_DIQI) {
314 struct rio_dbell_msg *dmsg =
315 fsl_dbell->dbell_ring.virt +
316 (in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff);
317 struct rio_dbell *dbell;
318 int found = 0;
319
320 pr_debug
321 ("RIO: processing doorbell,"
322 " sid %2.2x tid %2.2x info %4.4x\n",
323 dmsg->sid, dmsg->tid, dmsg->info);
324
325 for (i = 0; i < MAX_PORT_NUM; i++) {
326 if (fsl_dbell->mport[i]) {
327 list_for_each_entry(dbell,
328 &fsl_dbell->mport[i]->dbells, node) {
329 if ((dbell->res->start
330 <= dmsg->info)
331 && (dbell->res->end
332 >= dmsg->info)) {
333 found = 1;
334 break;
335 }
336 }
337 if (found && dbell->dinb) {
338 dbell->dinb(fsl_dbell->mport[i],
339 dbell->dev_id, dmsg->sid,
340 dmsg->tid,
341 dmsg->info);
342 break;
343 }
344 }
345 }
346
347 if (!found) {
348 pr_debug
349 ("RIO: spurious doorbell,"
350 " sid %2.2x tid %2.2x info %4.4x\n",
351 dmsg->sid, dmsg->tid,
352 dmsg->info);
353 }
354 setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI);
355 out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI);
356 }
357
358out:
359 return IRQ_HANDLED;
360}
361
362void msg_unit_error_handler(void)
363{
364
365 /*XXX: Error recovery is not implemented, we just clear errors */
366 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
367
368 out_be32((u32 *)(rmu_regs_win + RIO_IM0SR), IMSR_CLEAR);
369 out_be32((u32 *)(rmu_regs_win + RIO_IM1SR), IMSR_CLEAR);
370 out_be32((u32 *)(rmu_regs_win + RIO_OM0SR), OMSR_CLEAR);
371 out_be32((u32 *)(rmu_regs_win + RIO_OM1SR), OMSR_CLEAR);
372
373 out_be32(&dbell->dbell_regs->odsr, ODSR_CLEAR);
374 out_be32(&dbell->dbell_regs->dsr, IDSR_CLEAR);
375
376 out_be32(&pw->pw_regs->pwsr, IPWSR_CLEAR);
377}
378
379/**
380 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
381 * @irq: Linux interrupt number
382 * @dev_instance: Pointer to interrupt-specific data
383 *
384 * Handles port write interrupts. Parses a list of registered
385 * port write event handlers and executes a matching event handler.
386 */
387static irqreturn_t
388fsl_rio_port_write_handler(int irq, void *dev_instance)
389{
390 u32 ipwmr, ipwsr;
391 struct fsl_rio_pw *pw = (struct fsl_rio_pw *)dev_instance;
392 u32 epwisr, tmp;
393
394 epwisr = in_be32(rio_regs_win + RIO_EPWISR);
395 if (!(epwisr & RIO_EPWISR_PW))
396 goto pw_done;
397
398 ipwmr = in_be32(&pw->pw_regs->pwmr);
399 ipwsr = in_be32(&pw->pw_regs->pwsr);
400
401#ifdef DEBUG_PW
402 pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
403 if (ipwsr & RIO_IPWSR_QF)
404 pr_debug(" QF");
405 if (ipwsr & RIO_IPWSR_TE)
406 pr_debug(" TE");
407 if (ipwsr & RIO_IPWSR_QFI)
408 pr_debug(" QFI");
409 if (ipwsr & RIO_IPWSR_PWD)
410 pr_debug(" PWD");
411 if (ipwsr & RIO_IPWSR_PWB)
412 pr_debug(" PWB");
413 pr_debug(" )\n");
414#endif
415 /* Schedule deferred processing if PW was received */
416 if (ipwsr & RIO_IPWSR_QFI) {
417 /* Save PW message (if there is room in FIFO),
418 * otherwise discard it.
419 */
420 if (kfifo_avail(&pw->pw_fifo) >= RIO_PW_MSG_SIZE) {
421 pw->port_write_msg.msg_count++;
422 kfifo_in(&pw->pw_fifo, pw->port_write_msg.virt,
423 RIO_PW_MSG_SIZE);
424 } else {
425 pw->port_write_msg.discard_count++;
426 pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
427 pw->port_write_msg.discard_count);
428 }
429 /* Clear interrupt and issue Clear Queue command. This allows
430 * another port-write to be received.
431 */
432 out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_QFI);
433 out_be32(&pw->pw_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
434
435 schedule_work(&pw->pw_work);
436 }
437
438 if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
439 pw->port_write_msg.err_count++;
440 pr_debug("RIO: Port-Write Transaction Err (%d)\n",
441 pw->port_write_msg.err_count);
442 /* Clear Transaction Error: port-write controller should be
443 * disabled when clearing this error
444 */
445 out_be32(&pw->pw_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
446 out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_TE);
447 out_be32(&pw->pw_regs->pwmr, ipwmr);
448 }
449
450 if (ipwsr & RIO_IPWSR_PWD) {
451 pw->port_write_msg.discard_count++;
452 pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
453 pw->port_write_msg.discard_count);
454 out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_PWD);
455 }
456
457pw_done:
458 if (epwisr & RIO_EPWISR_PINT1) {
459 tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
460 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
461 fsl_rio_port_error_handler(0);
462 }
463
464 if (epwisr & RIO_EPWISR_PINT2) {
465 tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
466 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
467 fsl_rio_port_error_handler(1);
468 }
469
470 if (epwisr & RIO_EPWISR_MU) {
471 tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
472 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
473 msg_unit_error_handler();
474 }
475
476 return IRQ_HANDLED;
477}
478
479static void fsl_pw_dpc(struct work_struct *work)
480{
481 struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work);
482 union rio_pw_msg msg_buffer;
483 int i;
484
485 /*
486 * Process port-write messages
487 */
488 while (kfifo_out_spinlocked(&pw->pw_fifo, (unsigned char *)&msg_buffer,
489 RIO_PW_MSG_SIZE, &pw->pw_fifo_lock)) {
490#ifdef DEBUG_PW
491 {
492 u32 i;
493 pr_debug("%s : Port-Write Message:", __func__);
494 for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
495 if ((i%4) == 0)
496 pr_debug("\n0x%02x: 0x%08x", i*4,
497 msg_buffer.raw[i]);
498 else
499 pr_debug(" 0x%08x", msg_buffer.raw[i]);
500 }
501 pr_debug("\n");
502 }
503#endif
504 /* Pass the port-write message to RIO core for processing */
505 for (i = 0; i < MAX_PORT_NUM; i++) {
506 if (pw->mport[i])
507 rio_inb_pwrite_handler(pw->mport[i],
508 &msg_buffer);
509 }
510 }
511}
512
513/**
514 * fsl_rio_pw_enable - enable/disable port-write interface init
515 * @mport: Master port implementing the port write unit
516 * @enable: 1=enable; 0=disable port-write message handling
517 */
518int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
519{
520 u32 rval;
521
522 rval = in_be32(&pw->pw_regs->pwmr);
523
524 if (enable)
525 rval |= RIO_IPWMR_PWE;
526 else
527 rval &= ~RIO_IPWMR_PWE;
528
529 out_be32(&pw->pw_regs->pwmr, rval);
530
531 return 0;
532}
533
534/**
535 * fsl_rio_port_write_init - MPC85xx port write interface init
536 * @mport: Master port implementing the port write unit
537 *
538 * Initializes port write unit hardware and DMA buffer
539 * ring. Called from fsl_rio_setup(). Returns %0 on success
540 * or %-ENOMEM on failure.
541 */
542
543int fsl_rio_port_write_init(struct fsl_rio_pw *pw)
544{
545 int rc = 0;
546
547 /* Following configurations require a disabled port write controller */
548 out_be32(&pw->pw_regs->pwmr,
549 in_be32(&pw->pw_regs->pwmr) & ~RIO_IPWMR_PWE);
550
551 /* Initialize port write */
552 pw->port_write_msg.virt = dma_alloc_coherent(pw->dev,
553 RIO_PW_MSG_SIZE,
554 &pw->port_write_msg.phys, GFP_KERNEL);
555 if (!pw->port_write_msg.virt) {
556 pr_err("RIO: unable allocate port write queue\n");
557 return -ENOMEM;
558 }
559
560 pw->port_write_msg.err_count = 0;
561 pw->port_write_msg.discard_count = 0;
562
563 /* Point dequeue/enqueue pointers at first entry */
564 out_be32(&pw->pw_regs->epwqbar, 0);
565 out_be32(&pw->pw_regs->pwqbar, (u32) pw->port_write_msg.phys);
566
567 pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
568 in_be32(&pw->pw_regs->epwqbar),
569 in_be32(&pw->pw_regs->pwqbar));
570
571 /* Clear interrupt status IPWSR */
572 out_be32(&pw->pw_regs->pwsr,
573 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
574
575 /* Configure port write controller for snooping enable all reporting,
576 clear queue full */
577 out_be32(&pw->pw_regs->pwmr,
578 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
579
580
581 /* Hook up port-write handler */
582 rc = request_irq(IRQ_RIO_PW(pw), fsl_rio_port_write_handler,
583 IRQF_SHARED, "port-write", (void *)pw);
584 if (rc < 0) {
585 pr_err("MPC85xx RIO: unable to request inbound doorbell irq");
586 goto err_out;
587 }
588 /* Enable Error Interrupt */
589 out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
590
591 INIT_WORK(&pw->pw_work, fsl_pw_dpc);
592 spin_lock_init(&pw->pw_fifo_lock);
593 if (kfifo_alloc(&pw->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
594 pr_err("FIFO allocation failed\n");
595 rc = -ENOMEM;
596 goto err_out_irq;
597 }
598
599 pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
600 in_be32(&pw->pw_regs->pwmr),
601 in_be32(&pw->pw_regs->pwsr));
602
603 return rc;
604
605err_out_irq:
606 free_irq(IRQ_RIO_PW(pw), (void *)pw);
607err_out:
608 dma_free_coherent(pw->dev, RIO_PW_MSG_SIZE,
609 pw->port_write_msg.virt,
610 pw->port_write_msg.phys);
611 return rc;
612}
613
614/**
615 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
616 * @mport: RapidIO master port info
617 * @index: ID of RapidIO interface
618 * @destid: Destination ID of target device
619 * @data: 16-bit info field of RapidIO doorbell message
620 *
621 * Sends a MPC85xx doorbell message. Returns %0 on success or
622 * %-EINVAL on failure.
623 */
624int fsl_rio_doorbell_send(struct rio_mport *mport,
625 int index, u16 destid, u16 data)
626{
627 unsigned long flags;
628
629 pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
630 index, destid, data);
631
632 spin_lock_irqsave(&fsl_rio_doorbell_lock, flags);
633
634 /* In the serial version silicons, such as MPC8548, MPC8641,
635 * below operations is must be.
636 */
637 out_be32(&dbell->dbell_regs->odmr, 0x00000000);
638 out_be32(&dbell->dbell_regs->odretcr, 0x00000004);
639 out_be32(&dbell->dbell_regs->oddpr, destid << 16);
640 out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
641 out_be32(&dbell->dbell_regs->odmr, 0x00000001);
642
643 spin_unlock_irqrestore(&fsl_rio_doorbell_lock, flags);
644
645 return 0;
646}
647
648/**
649 * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
650 * @mport: Master port with outbound message queue
651 * @rdev: Target of outbound message
652 * @mbox: Outbound mailbox
653 * @buffer: Message to add to outbound queue
654 * @len: Length of message
655 *
656 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
657 * %0 on success or %-EINVAL on failure.
658 */
659int
660fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
661 void *buffer, size_t len)
662{
663 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
664 u32 omr;
665 struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt
666 + rmu->msg_tx_ring.tx_slot;
667 int ret = 0;
668
669 pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
670 "%p len %8.8zx\n", rdev->destid, mbox, buffer, len);
671 if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
672 ret = -EINVAL;
673 goto out;
674 }
675
676 /* Copy and clear rest of buffer */
677 memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer,
678 len);
679 if (len < (RIO_MAX_MSG_SIZE - 4))
680 memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot]
681 + len, 0, RIO_MAX_MSG_SIZE - len);
682
683 /* Set mbox field for message, and set destid */
684 desc->dport = (rdev->destid << 16) | (mbox & 0x3);
685
686 /* Enable EOMI interrupt and priority */
687 desc->dattr = 0x28000000 | ((mport->index) << 20);
688
689 /* Set transfer size aligned to next power of 2 (in double words) */
690 desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);
691
692 /* Set snooping and source buffer address */
693 desc->saddr = 0x00000004
694 | rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot];
695
696 /* Increment enqueue pointer */
697 omr = in_be32(&rmu->msg_regs->omr);
698 out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI);
699
700 /* Go to next descriptor */
701 if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size)
702 rmu->msg_tx_ring.tx_slot = 0;
703
704out:
705 return ret;
706}
707
708/**
709 * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
710 * @mport: Master port implementing the outbound message unit
711 * @dev_id: Device specific pointer to pass on event
712 * @mbox: Mailbox to open
713 * @entries: Number of entries in the outbound mailbox ring
714 *
715 * Initializes buffer ring, request the outbound message interrupt,
716 * and enables the outbound message unit. Returns %0 on success and
717 * %-EINVAL or %-ENOMEM on failure.
718 */
719int
720fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
721{
722 int i, j, rc = 0;
723 struct rio_priv *priv = mport->priv;
724 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
725
726 if ((entries < RIO_MIN_TX_RING_SIZE) ||
727 (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
728 rc = -EINVAL;
729 goto out;
730 }
731
732 /* Initialize shadow copy ring */
733 rmu->msg_tx_ring.dev_id = dev_id;
734 rmu->msg_tx_ring.size = entries;
735
736 for (i = 0; i < rmu->msg_tx_ring.size; i++) {
737 rmu->msg_tx_ring.virt_buffer[i] =
738 dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
739 &rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
740 if (!rmu->msg_tx_ring.virt_buffer[i]) {
741 rc = -ENOMEM;
742 for (j = 0; j < rmu->msg_tx_ring.size; j++)
743 if (rmu->msg_tx_ring.virt_buffer[j])
744 dma_free_coherent(priv->dev,
745 RIO_MSG_BUFFER_SIZE,
746 rmu->msg_tx_ring.
747 virt_buffer[j],
748 rmu->msg_tx_ring.
749 phys_buffer[j]);
750 goto out;
751 }
752 }
753
754 /* Initialize outbound message descriptor ring */
755 rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
756 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
757 &rmu->msg_tx_ring.phys,
758 GFP_KERNEL);
759 if (!rmu->msg_tx_ring.virt) {
760 rc = -ENOMEM;
761 goto out_dma;
762 }
763 rmu->msg_tx_ring.tx_slot = 0;
764
765 /* Point dequeue/enqueue pointers at first entry in ring */
766 out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys);
767 out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys);
768
769 /* Configure for snooping */
770 out_be32(&rmu->msg_regs->osar, 0x00000004);
771
772 /* Clear interrupt status */
773 out_be32(&rmu->msg_regs->osr, 0x000000b3);
774
775 /* Hook up outbound message handler */
776 rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
777 "msg_tx", (void *)mport);
778 if (rc < 0)
779 goto out_irq;
780
781 /*
782 * Configure outbound message unit
783 * Snooping
784 * Interrupts (all enabled, except QEIE)
785 * Chaining mode
786 * Disable
787 */
788 out_be32(&rmu->msg_regs->omr, 0x00100220);
789
790 /* Set number of entries */
791 out_be32(&rmu->msg_regs->omr,
792 in_be32(&rmu->msg_regs->omr) |
793 ((get_bitmask_order(entries) - 2) << 12));
794
795 /* Now enable the unit */
796 out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1);
797
798out:
799 return rc;
800
801out_irq:
802 dma_free_coherent(priv->dev,
803 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
804 rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
805
806out_dma:
807 for (i = 0; i < rmu->msg_tx_ring.size; i++)
808 dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
809 rmu->msg_tx_ring.virt_buffer[i],
810 rmu->msg_tx_ring.phys_buffer[i]);
811
812 return rc;
813}
814
815/**
816 * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
817 * @mport: Master port implementing the outbound message unit
818 * @mbox: Mailbox to close
819 *
820 * Disables the outbound message unit, free all buffers, and
821 * frees the outbound message interrupt.
822 */
823void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
824{
825 struct rio_priv *priv = mport->priv;
826 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
827
828 /* Disable inbound message unit */
829 out_be32(&rmu->msg_regs->omr, 0);
830
831 /* Free ring */
832 dma_free_coherent(priv->dev,
833 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
834 rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
835
836 /* Free interrupt */
837 free_irq(IRQ_RIO_TX(mport), (void *)mport);
838}

/**
 * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 *
 * Initializes the buffer ring, requests the inbound message interrupt,
 * and enables the inbound message unit. Returns %0 on success
 * and %-EINVAL or %-ENOMEM on failure.
 */
int
fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, rc = 0;
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	if ((entries < RIO_MIN_RX_RING_SIZE) ||
	    (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize client buffer ring */
	rmu->msg_rx_ring.dev_id = dev_id;
	rmu->msg_rx_ring.size = entries;
	rmu->msg_rx_ring.rx_slot = 0;
	for (i = 0; i < rmu->msg_rx_ring.size; i++)
		rmu->msg_rx_ring.virt_buffer[i] = NULL;

	/* Initialize inbound message ring */
	rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
				rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
				&rmu->msg_rx_ring.phys, GFP_KERNEL);
	if (!rmu->msg_rx_ring.virt) {
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys);
	out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys);

	/* Clear interrupt status */
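	/*
	 * 0x91 appears to match RIO_MSG_ISR_TE | RIO_MSG_ISR_QFI |
	 * RIO_MSG_ISR_DIQI, clearing any stale error/queue status.
	 */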
	out_be32(&rmu->msg_regs->isr, 0x00000091);

	/* Hook up inbound message handler */
	rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
			 "msg_rx", (void *)mport);
	if (rc < 0) {
		dma_free_coherent(priv->dev,
			rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
			rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
		goto out;
	}

	/*
	 * Configure inbound message unit:
	 *      Snooping
	 *      4KB max message size
	 *      Unmask all interrupt sources
	 *      Disable
	 */
	out_be32(&rmu->msg_regs->imr, 0x001b0060);

	/* Set number of queue entries */
	setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);
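	/* Same log2(entries) - 1 encoding as the outbound queue size above. */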

	/* Now enable the unit */
	setbits32(&rmu->msg_regs->imr, 0x1);

out:
	return rc;
}

/**
 * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the inbound message unit, frees the inbound frame ring,
 * and releases the inbound message interrupt.
 */
void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	/* Disable inbound message unit */
	out_be32(&rmu->msg_regs->imr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
		rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_RX(mport), (void *)mport);
}

/**
 * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 *
 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
	int rc = 0;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
		 rmu->msg_rx_ring.rx_slot);

	if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) {
		printk(KERN_ERR
			"RIO: error adding inbound buffer %d, buffer exists\n",
			rmu->msg_rx_ring.rx_slot);
		rc = -EINVAL;
		goto out;
	}

	rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf;
	if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size)
		rmu->msg_rx_ring.rx_slot = 0;

out:
	return rc;
}

/**
 * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 *
 * Gets the next available inbound message from the inbound message queue.
 * A pointer to the message is returned on success or NULL on failure.
 */
void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
	u32 phys_buf;
	void *virt_buf;
	void *buf = NULL;
	int buf_idx;

	phys_buf = in_be32(&rmu->msg_regs->ifqdpar);

	/* If no more messages, then bail out */
	if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
		goto out2;

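	/*
	 * The hardware dequeue pointer is a physical address inside the
	 * frame ring; its offset from the ring base gives both the frame's
	 * virtual address and the index of the matching client buffer.
	 */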
	virt_buf = rmu->msg_rx_ring.virt + (phys_buf - rmu->msg_rx_ring.phys);
	buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
	buf = rmu->msg_rx_ring.virt_buffer[buf_idx];

	if (!buf) {
		printk(KERN_ERR
			"RIO: inbound message copy failed, no buffers\n");
		goto out1;
	}

	/* Copy max message size, caller is expected to allocate that big */
	memcpy(buf, virt_buf, RIO_MAX_MSG_SIZE);

	/* Clear the available buffer */
	rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;

out1:
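	/*
	 * The MI bit requests that the message unit advance its inbound
	 * frame dequeue pointer, releasing the just-consumed ring slot.
	 */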
	setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI);

out2:
	return buf;
}
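
/*
 * Usage sketch (hypothetical client code; variable names are illustrative):
 * the generic RapidIO core reaches the handlers above through the mport
 * operations, so a mailbox client typically does something like
 *
 *	rio_request_inb_mbox(mport, dev_id, 0, ring_size, rx_callback);
 *	for (i = 0; i < ring_size; i++)
 *		rio_add_inb_buffer(mport, 0, rx_bufs[i]);
 *
 * and then, inside rx_callback() for each completed slot:
 *
 *	msg = rio_get_inb_message(mport, 0);
 *	rio_add_inb_buffer(mport, 0, msg);
 */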

/**
 * fsl_rio_doorbell_init - MPC85xx doorbell interface init
 * @dbell: Inbound doorbell unit to initialize
 *
 * Initializes doorbell unit hardware and inbound DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or a negative error code on failure.
 */
int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell)
{
	int rc = 0;

	/* Initialize inbound doorbells */
	dbell->dbell_ring.virt = dma_alloc_coherent(dbell->dev, 512 *
		DOORBELL_MESSAGE_SIZE, &dbell->dbell_ring.phys, GFP_KERNEL);
	if (!dbell->dbell_ring.virt) {
		printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n");
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&dbell->dbell_regs->dqdpar, (u32) dbell->dbell_ring.phys);
	out_be32(&dbell->dbell_regs->dqepar, (u32) dbell->dbell_ring.phys);

	/* Clear interrupt status */
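	/*
	 * 0x91 looks like DOORBELL_DSR_TE | DOORBELL_DSR_QFI |
	 * DOORBELL_DSR_DIQI, clearing any stale doorbell status.
	 */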
	out_be32(&dbell->dbell_regs->dsr, 0x00000091);

	/* Hook up doorbell handler */
	rc = request_irq(IRQ_RIO_BELL(dbell), fsl_rio_dbell_handler, 0,
			 "dbell_rx", (void *)dbell);
	if (rc < 0) {
		dma_free_coherent(dbell->dev, 512 * DOORBELL_MESSAGE_SIZE,
			dbell->dbell_ring.virt, dbell->dbell_ring.phys);
		printk(KERN_ERR
			"MPC85xx RIO: unable to request inbound doorbell irq\n");
		goto out;
	}

	/* Configure doorbells for snooping, 512 entries, and enable */
	out_be32(&dbell->dbell_regs->dmr, 0x00108161);

out:
	return rc;
}

int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
{
	struct rio_priv *priv;
	struct fsl_rmu *rmu;
	u64 msg_start;
	const u32 *msg_addr;
	int mlen;
	int aw;

	if (!mport || !mport->priv)
		return -EINVAL;

	priv = mport->priv;

	if (!node) {
		dev_warn(priv->dev, "Can't get %pOF property 'fsl,rmu'\n",
			priv->dev->of_node);
		return -EINVAL;
	}

	rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL);
	if (!rmu)
		return -ENOMEM;

	aw = of_n_addr_cells(node);
	msg_addr = of_get_property(node, "reg", &mlen);
	if (!msg_addr) {
		pr_err("%pOF: unable to find 'reg' property of message-unit\n",
			node);
		kfree(rmu);
		return -ENOMEM;
	}
	msg_start = of_read_number(msg_addr, aw);

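	/*
	 * rmu_regs_win is assumed to be the ioremapped base of the RMU
	 * register block, set up elsewhere during controller probe; the
	 * 'reg' offset of this message unit is added to it below.
	 */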
	rmu->msg_regs = (struct rio_msg_regs *)
			(rmu_regs_win + (u32)msg_start);

	rmu->txirq = irq_of_parse_and_map(node, 0);
	rmu->rxirq = irq_of_parse_and_map(node, 1);
	printk(KERN_INFO "%pOF: txirq: %d, rxirq: %d\n",
		node, rmu->txirq, rmu->rxirq);

	priv->rmm_handle = rmu;

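	/*
	 * Advertise to the RapidIO core the full 16-bit doorbell info range
	 * and a single inbound and a single outbound mailbox (index 0).
	 */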
	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);

	return 0;
}