// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAN bus driver for the standalone (and as generic as possible) MSCAN controller.
 *
 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
 *                         Varma Electronics Oy
 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
 * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/io.h>

#include "mscan.h"

static const struct can_bittiming_const mscan_bittiming_const = {
	.name = "mscan",
	.tseg1_min = 4,
	.tseg1_max = 16,
	.tseg2_min = 2,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 64,
	.brp_inc = 1,
};

struct mscan_state {
	u8 mode;
	u8 canrier;
	u8 cantier;
};

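/*
 * Map the two-bit receiver/transmitter status fields of CANRFLG
 * (see MSCAN_STATE_RX()/MSCAN_STATE_TX()) to the generic CAN states.
 */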
static enum can_state state_map[] = {
	CAN_STATE_ERROR_ACTIVE,
	CAN_STATE_ERROR_WARNING,
	CAN_STATE_ERROR_PASSIVE,
	CAN_STATE_BUS_OFF
};

static int mscan_set_mode(struct net_device *dev, u8 mode)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	int ret = 0;
	int i;
	u8 canctl1;

	if (mode != MSCAN_NORMAL_MODE) {
		if (priv->tx_active) {
			/* Abort transfers before going to sleep */
			out_8(&regs->cantarq, priv->tx_active);
			/* Suppress TX done interrupts */
			out_8(&regs->cantier, 0);
		}

		canctl1 = in_8(&regs->canctl1);
		if ((mode & MSCAN_SLPRQ) && !(canctl1 & MSCAN_SLPAK)) {
			setbits8(&regs->canctl0, MSCAN_SLPRQ);
			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
				if (in_8(&regs->canctl1) & MSCAN_SLPAK)
					break;
				udelay(100);
			}
			/*
			 * The MSCAN controller will fail to enter sleep mode
			 * while there is irregular activity on the bus, e.g.
			 * a node that keeps retransmitting. This behavior is
			 * undocumented and seems to differ between the MSCAN
			 * built into the MPC5200B and the one in the MPC5200.
			 * We proceed anyway in that case, since otherwise
			 * SLPRQ would stay set and the controller would get
			 * stuck. NOTE: INITRQ or CSWAI will abort any transmit
			 * actions that are still active at once.
			 */
			if (i >= MSCAN_SET_MODE_RETRIES)
				netdev_dbg(dev,
					   "device failed to enter sleep mode. We proceed anyhow.\n");
			else
				priv->can.state = CAN_STATE_SLEEPING;
		}

		if ((mode & MSCAN_INITRQ) && !(canctl1 & MSCAN_INITAK)) {
			setbits8(&regs->canctl0, MSCAN_INITRQ);
			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
				if (in_8(&regs->canctl1) & MSCAN_INITAK)
					break;
			}
			if (i >= MSCAN_SET_MODE_RETRIES)
				ret = -ENODEV;
		}
		if (!ret)
			priv->can.state = CAN_STATE_STOPPED;

		if (mode & MSCAN_CSWAI)
			setbits8(&regs->canctl0, MSCAN_CSWAI);

	} else {
		canctl1 = in_8(&regs->canctl1);
		if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) {
			clrbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
				canctl1 = in_8(&regs->canctl1);
				if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK)))
					break;
			}
			if (i >= MSCAN_SET_MODE_RETRIES)
				ret = -ENODEV;
			else
				priv->can.state = CAN_STATE_ERROR_ACTIVE;
		}
	}
	return ret;
}

static int mscan_start(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	u8 canrflg;
	int err;

	out_8(&regs->canrier, 0);

	INIT_LIST_HEAD(&priv->tx_head);
	priv->prev_buf_id = 0;
	priv->cur_pri = 0;
	priv->tx_active = 0;
	priv->shadow_canrier = 0;
	priv->flags = 0;

	if (priv->type == MSCAN_TYPE_MPC5121) {
		/* Clear pending bus-off condition */
		if (in_8(&regs->canmisc) & MSCAN_BOHOLD)
			out_8(&regs->canmisc, MSCAN_BOHOLD);
	}

	err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
	if (err)
		return err;

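	/* Derive the initial CAN state from the current RX/TX status flags. */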
	canrflg = in_8(&regs->canrflg);
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	priv->can.state = state_map[max(MSCAN_STATE_RX(canrflg),
					MSCAN_STATE_TX(canrflg))];
	out_8(&regs->cantier, 0);

	/* Enable receive interrupts. */
	out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);

	return 0;
}

static int mscan_restart(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);

	if (priv->type == MSCAN_TYPE_MPC5121) {
		struct mscan_regs __iomem *regs = priv->reg_base;

		priv->can.state = CAN_STATE_ERROR_ACTIVE;
		WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
		     "bus-off state expected\n");
		out_8(&regs->canmisc, MSCAN_BOHOLD);
		/* Re-enable receive interrupts. */
		out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
	} else {
		if (priv->can.state <= CAN_STATE_BUS_OFF)
			mscan_set_mode(dev, MSCAN_INIT_MODE);
		return mscan_start(dev);
	}

	return 0;
}

static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct can_frame *frame = (struct can_frame *)skb->data;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	int i, rtr, buf_id;
	u32 can_id;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	out_8(&regs->cantier, 0);

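	/* Build the mask of idle TX buffers; pick the lowest free one. */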
	i = ~priv->tx_active & MSCAN_TXE;
	buf_id = ffs(i) - 1;
	switch (hweight8(i)) {
	case 0:
		netif_stop_queue(dev);
		netdev_err(dev, "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	case 1:
		/*
		 * If buf_id < 3, the current frame will be sent out of
		 * order, since buffers with a lower id have a higher
		 * priority (hell..)
		 */
		netif_stop_queue(dev);
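		/* fall through */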
	case 2:
		if (buf_id < priv->prev_buf_id) {
			priv->cur_pri++;
			if (priv->cur_pri == 0xff) {
				set_bit(F_TX_WAIT_ALL, &priv->flags);
				netif_stop_queue(dev);
			}
		}
		set_bit(F_TX_PROGRESS, &priv->flags);
		break;
	}
	priv->prev_buf_id = buf_id;
	out_8(&regs->cantbsel, i);

	rtr = frame->can_id & CAN_RTR_FLAG;

	/* RTR is always the lowest bit of interest, then IDs follow */
	if (frame->can_id & CAN_EFF_FLAG) {
		can_id = (frame->can_id & CAN_EFF_MASK)
			 << (MSCAN_EFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_EFF_RTR_SHIFT;
		out_be16(&regs->tx.idr3_2, can_id);

		can_id >>= 16;
		/* EFF_FLAGS are between the IDs :( */
		can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0)
			 | MSCAN_EFF_FLAGS;
	} else {
		can_id = (frame->can_id & CAN_SFF_MASK)
			 << (MSCAN_SFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_SFF_RTR_SHIFT;
	}
	out_be16(&regs->tx.idr1_0, can_id);

	if (!rtr) {
		void __iomem *data = &regs->tx.dsr1_0;
		u16 *payload = (u16 *)frame->data;

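		/* Data registers are interleaved with reserved bytes, hence the stride. */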
		for (i = 0; i < frame->can_dlc / 2; i++) {
			out_be16(data, *payload++);
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
		/* write remaining byte if necessary */
		if (frame->can_dlc & 1)
			out_8(data, frame->data[frame->can_dlc - 1]);
	}

	out_8(&regs->tx.dlr, frame->can_dlc);
	out_8(&regs->tx.tbpr, priv->cur_pri);

	/* Start transmission. */
	out_8(&regs->cantflg, 1 << buf_id);

	if (!test_bit(F_TX_PROGRESS, &priv->flags))
		netif_trans_update(dev);

	list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);

	can_put_echo_skb(skb, dev, buf_id);

	/* Enable interrupt. */
	priv->tx_active |= 1 << buf_id;
	out_8(&regs->cantier, priv->tx_active);

	return NETDEV_TX_OK;
}

static enum can_state get_new_state(struct net_device *dev, u8 canrflg)
{
	struct mscan_priv *priv = netdev_priv(dev);

	if (unlikely(canrflg & MSCAN_CSCIF))
		return state_map[max(MSCAN_STATE_RX(canrflg),
				     MSCAN_STATE_TX(canrflg))];

	return priv->can.state;
}

static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	u32 can_id;
	int i;

	can_id = in_be16(&regs->rx.idr1_0);
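	/* Bit 3 (IDE flag) marks extended 29-bit frames. */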
	if (can_id & (1 << 3)) {
		frame->can_id = CAN_EFF_FLAG;
		can_id = ((can_id << 16) | in_be16(&regs->rx.idr3_2));
		can_id = ((can_id & 0xffe00000) |
			  ((can_id & 0x7ffff) << 2)) >> 2;
	} else {
		can_id >>= 4;
		frame->can_id = 0;
	}

	frame->can_id |= can_id >> 1;
	if (can_id & 1)
		frame->can_id |= CAN_RTR_FLAG;

	frame->can_dlc = get_can_dlc(in_8(&regs->rx.dlr) & 0xf);

	if (!(frame->can_id & CAN_RTR_FLAG)) {
		void __iomem *data = &regs->rx.dsr1_0;
		u16 *payload = (u16 *)frame->data;

		for (i = 0; i < frame->can_dlc / 2; i++) {
			*payload++ = in_be16(data);
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
		/* read remaining byte if necessary */
		if (frame->can_dlc & 1)
			frame->data[frame->can_dlc - 1] = in_8(data);
	}

	out_8(&regs->canrflg, MSCAN_RXF);
}

static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
				u8 canrflg)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	enum can_state new_state;

	netdev_dbg(dev, "error interrupt (canrflg=%#x)\n", canrflg);
	frame->can_id = CAN_ERR_FLAG;

	if (canrflg & MSCAN_OVRIF) {
		frame->can_id |= CAN_ERR_CRTL;
		frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		stats->rx_over_errors++;
		stats->rx_errors++;
	} else {
		frame->data[1] = 0;
	}

	new_state = get_new_state(dev, canrflg);
	if (new_state != priv->can.state) {
		can_change_state(dev, frame,
				 state_map[MSCAN_STATE_TX(canrflg)],
				 state_map[MSCAN_STATE_RX(canrflg)]);

		if (priv->can.state == CAN_STATE_BUS_OFF) {
			/*
			 * The MSCAN on the MPC5200 recovers from bus-off
			 * automatically. To prevent that, we stop the chip
			 * with a light-weight stop (we are in IRQ context).
			 */
			if (priv->type != MSCAN_TYPE_MPC5121) {
				out_8(&regs->cantier, 0);
				out_8(&regs->canrier, 0);
				setbits8(&regs->canctl0,
					 MSCAN_SLPRQ | MSCAN_INITRQ);
			}
			can_bus_off(dev);
		}
	}
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	frame->can_dlc = CAN_ERR_DLC;
	out_8(&regs->canrflg, MSCAN_ERR_IF);
}

static int mscan_rx_poll(struct napi_struct *napi, int quota)
{
	struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi);
	struct net_device *dev = napi->dev;
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	int work_done = 0;
	struct sk_buff *skb;
	struct can_frame *frame;
	u8 canrflg;

	while (work_done < quota) {
		canrflg = in_8(&regs->canrflg);
		if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
			break;

		skb = alloc_can_skb(dev, &frame);
		if (!skb) {
			if (printk_ratelimit())
				netdev_notice(dev, "packet dropped\n");
			stats->rx_dropped++;
			out_8(&regs->canrflg, canrflg);
			continue;
		}

		if (canrflg & MSCAN_RXF)
			mscan_get_rx_frame(dev, frame);
		else if (canrflg & MSCAN_ERR_IF)
			mscan_get_err_frame(dev, frame, canrflg);

		stats->rx_packets++;
		stats->rx_bytes += frame->can_dlc;
		work_done++;
		netif_receive_skb(skb);
	}

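	/* Done: complete NAPI and restore the IRQ mask saved by the ISR. */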
	if (work_done < quota) {
		if (likely(napi_complete_done(&priv->napi, work_done))) {
			clear_bit(F_RX_PROGRESS, &priv->flags);
			if (priv->can.state < CAN_STATE_BUS_OFF)
				out_8(&regs->canrier, priv->shadow_canrier);
		}
	}
	return work_done;
}

static irqreturn_t mscan_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	u8 cantier, cantflg, canrflg;
	irqreturn_t ret = IRQ_NONE;

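	/* Completed TX buffers: interrupt enabled and TXE flag set. */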
	cantier = in_8(&regs->cantier) & MSCAN_TXE;
	cantflg = in_8(&regs->cantflg) & cantier;

	if (cantier && cantflg) {
		struct list_head *tmp, *pos;

		list_for_each_safe(pos, tmp, &priv->tx_head) {
			struct tx_queue_entry *entry =
			    list_entry(pos, struct tx_queue_entry, list);
			u8 mask = entry->mask;

			if (!(cantflg & mask))
				continue;

			out_8(&regs->cantbsel, mask);
			stats->tx_bytes += in_8(&regs->tx.dlr);
			stats->tx_packets++;
			can_get_echo_skb(dev, entry->id);
			priv->tx_active &= ~mask;
			list_del(pos);
		}

		if (list_empty(&priv->tx_head)) {
			clear_bit(F_TX_WAIT_ALL, &priv->flags);
			clear_bit(F_TX_PROGRESS, &priv->flags);
			priv->cur_pri = 0;
		} else {
			netif_trans_update(dev);
		}

		if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
			netif_wake_queue(dev);

		out_8(&regs->cantier, priv->tx_active);
		ret = IRQ_HANDLED;
	}

	canrflg = in_8(&regs->canrflg);
	if ((canrflg & ~MSCAN_STAT_MSK) &&
	    !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) {
		if (canrflg & ~MSCAN_STAT_MSK) {
			priv->shadow_canrier = in_8(&regs->canrier);
			out_8(&regs->canrier, 0);
			napi_schedule(&priv->napi);
			ret = IRQ_HANDLED;
		} else {
			clear_bit(F_RX_PROGRESS, &priv->flags);
		}
	}
	return ret;
}

484
485static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
486{
487 int ret = 0;
488
489 switch (mode) {
490 case CAN_MODE_START:
491 ret = mscan_restart(dev);
492 if (ret)
493 break;
494 if (netif_queue_stopped(dev))
495 netif_wake_queue(dev);
496 break;
497
498 default:
499 ret = -EOPNOTSUPP;
500 break;
501 }
502 return ret;
503}
504
static int mscan_do_set_bittiming(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct can_bittiming *bt = &priv->can.bittiming;
	u8 btr0, btr1;

	btr0 = BTR0_SET_BRP(bt->brp) | BTR0_SET_SJW(bt->sjw);
	btr1 = (BTR1_SET_TSEG1(bt->prop_seg + bt->phase_seg1) |
		BTR1_SET_TSEG2(bt->phase_seg2) |
		BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES));

	netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);

	out_8(&regs->canbtr0, btr0);
	out_8(&regs->canbtr1, btr1);

	return 0;
}

static int mscan_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	bec->txerr = in_8(&regs->cantxerr);
	bec->rxerr = in_8(&regs->canrxerr);

	return 0;
}

static int mscan_open(struct net_device *dev)
{
	int ret;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	if (priv->clk_ipg) {
		ret = clk_prepare_enable(priv->clk_ipg);
		if (ret)
			goto exit_retcode;
	}
	if (priv->clk_can) {
		ret = clk_prepare_enable(priv->clk_can);
		if (ret)
			goto exit_dis_ipg_clock;
	}

	/* common open */
	ret = open_candev(dev);
	if (ret)
		goto exit_dis_can_clock;

	napi_enable(&priv->napi);

	ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev);
	if (ret < 0) {
		netdev_err(dev, "failed to attach interrupt\n");
		goto exit_napi_disable;
	}

	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		setbits8(&regs->canctl1, MSCAN_LISTEN);
	else
		clrbits8(&regs->canctl1, MSCAN_LISTEN);

	ret = mscan_start(dev);
	if (ret)
		goto exit_free_irq;

	netif_start_queue(dev);

	return 0;

exit_free_irq:
	free_irq(dev->irq, dev);
exit_napi_disable:
	napi_disable(&priv->napi);
	close_candev(dev);
exit_dis_can_clock:
	if (priv->clk_can)
		clk_disable_unprepare(priv->clk_can);
exit_dis_ipg_clock:
	if (priv->clk_ipg)
		clk_disable_unprepare(priv->clk_ipg);
exit_retcode:
	return ret;
}

static int mscan_close(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

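	/* Disable all interrupts and put the controller back into init mode. */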
	out_8(&regs->cantier, 0);
	out_8(&regs->canrier, 0);
	mscan_set_mode(dev, MSCAN_INIT_MODE);
	close_candev(dev);
	free_irq(dev->irq, dev);

	if (priv->clk_can)
		clk_disable_unprepare(priv->clk_can);
	if (priv->clk_ipg)
		clk_disable_unprepare(priv->clk_ipg);

	return 0;
}

static const struct net_device_ops mscan_netdev_ops = {
	.ndo_open = mscan_open,
	.ndo_stop = mscan_close,
	.ndo_start_xmit = mscan_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

int register_mscandev(struct net_device *dev, int mscan_clksrc)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	u8 ctl1;

	ctl1 = in_8(&regs->canctl1);
	if (mscan_clksrc)
		ctl1 |= MSCAN_CLKSRC;
	else
		ctl1 &= ~MSCAN_CLKSRC;

	if (priv->type == MSCAN_TYPE_MPC5121) {
		priv->can.do_get_berr_counter = mscan_get_berr_counter;
		ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */
	}

	ctl1 |= MSCAN_CANE;
	out_8(&regs->canctl1, ctl1);
	udelay(100);

	/* acceptance mask/acceptance code (accept everything) */
	out_be16(&regs->canidar1_0, 0);
	out_be16(&regs->canidar3_2, 0);
	out_be16(&regs->canidar5_4, 0);
	out_be16(&regs->canidar7_6, 0);

	out_be16(&regs->canidmr1_0, 0xffff);
	out_be16(&regs->canidmr3_2, 0xffff);
	out_be16(&regs->canidmr5_4, 0xffff);
	out_be16(&regs->canidmr7_6, 0xffff);
	/* Two 32 bit Acceptance Filters */
	out_8(&regs->canidac, MSCAN_AF_32BIT);

	mscan_set_mode(dev, MSCAN_INIT_MODE);

	return register_candev(dev);
}

void unregister_mscandev(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	mscan_set_mode(dev, MSCAN_INIT_MODE);
	clrbits8(&regs->canctl1, MSCAN_CANE);
	unregister_candev(dev);
}

struct net_device *alloc_mscandev(void)
{
	struct net_device *dev;
	struct mscan_priv *priv;
	int i;

	dev = alloc_candev(sizeof(struct mscan_priv), MSCAN_ECHO_SKB_MAX);
	if (!dev)
		return NULL;
	priv = netdev_priv(dev);

	dev->netdev_ops = &mscan_netdev_ops;

	dev->flags |= IFF_ECHO; /* we support local echo */

	netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8);

	priv->can.bittiming_const = &mscan_bittiming_const;
	priv->can.do_set_bittiming = mscan_do_set_bittiming;
	priv->can.do_set_mode = mscan_do_set_mode;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
				       CAN_CTRLMODE_LISTENONLY;

	for (i = 0; i < TX_QUEUE_SIZE; i++) {
		priv->tx_queue[i].id = i;
		priv->tx_queue[i].mask = 1 << i;
	}

	return dev;
}

MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN port driver for MSCAN-based chips");