// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAN bus driver for the MSCAN controller (kept as generic as possible).
 *
 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
 *                         Varma Electronics Oy
 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
 * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/io.h>

#include "mscan.h"

static const struct can_bittiming_const mscan_bittiming_const = {
	.name = "mscan",
	.tseg1_min = 4,
	.tseg1_max = 16,
	.tseg2_min = 2,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 64,
	.brp_inc = 1,
};

static enum can_state state_map[] = {
	CAN_STATE_ERROR_ACTIVE,
	CAN_STATE_ERROR_WARNING,
	CAN_STATE_ERROR_PASSIVE,
	CAN_STATE_BUS_OFF
};

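/*
 * Request a controller mode change (sleep, init and/or CSWAI for power-down,
 * or back to normal mode) and busy-wait until the hardware acknowledges it.
 */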
static int mscan_set_mode(struct net_device *dev, u8 mode)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	int ret = 0;
	int i;
	u8 canctl1;

	if (mode != MSCAN_NORMAL_MODE) {
		if (priv->tx_active) {
			/* Abort transfers before going to sleep */
			out_8(&regs->cantarq, priv->tx_active);
			/* Suppress TX done interrupts */
			out_8(&regs->cantier, 0);
		}

		canctl1 = in_8(&regs->canctl1);
		if ((mode & MSCAN_SLPRQ) && !(canctl1 & MSCAN_SLPAK)) {
			setbits8(&regs->canctl0, MSCAN_SLPRQ);
			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
				if (in_8(&regs->canctl1) & MSCAN_SLPAK)
					break;
				udelay(100);
			}
			/*
			 * The MSCAN controller fails to enter sleep mode
			 * while there is irregular activity on the bus,
			 * e.g. somebody keeps retransmitting. This behavior
			 * is undocumented and seems to differ between the
			 * MSCAN built into the MPC5200B and the MPC5200. We
			 * proceed in that case, since otherwise SLPRQ would
			 * stay set and the controller would get stuck.
			 * NOTE: INITRQ or CSWAI will abort any remaining
			 * active transmissions at once.
			 */
			if (i >= MSCAN_SET_MODE_RETRIES)
				netdev_dbg(dev,
					   "device failed to enter sleep mode. We proceed anyhow.\n");
			else
				priv->can.state = CAN_STATE_SLEEPING;
		}

		if ((mode & MSCAN_INITRQ) && !(canctl1 & MSCAN_INITAK)) {
			setbits8(&regs->canctl0, MSCAN_INITRQ);
			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
				if (in_8(&regs->canctl1) & MSCAN_INITAK)
					break;
			}
			if (i >= MSCAN_SET_MODE_RETRIES)
				ret = -ENODEV;
		}
		if (!ret)
			priv->can.state = CAN_STATE_STOPPED;

		if (mode & MSCAN_CSWAI)
			setbits8(&regs->canctl0, MSCAN_CSWAI);

	} else {
		canctl1 = in_8(&regs->canctl1);
		if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) {
			clrbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
				canctl1 = in_8(&regs->canctl1);
				if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK)))
					break;
			}
			if (i >= MSCAN_SET_MODE_RETRIES)
				ret = -ENODEV;
			else
				priv->can.state = CAN_STATE_ERROR_ACTIVE;
		}
	}
	return ret;
}

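/*
 * Reset the TX bookkeeping, clear a pending MPC5121 bus-off hold, switch the
 * controller to normal mode and (re-)enable the receive interrupts.
 */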
static int mscan_start(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	u8 canrflg;
	int err;

	out_8(&regs->canrier, 0);

	INIT_LIST_HEAD(&priv->tx_head);
	priv->prev_buf_id = 0;
	priv->cur_pri = 0;
	priv->tx_active = 0;
	priv->shadow_canrier = 0;
	priv->flags = 0;

	if (priv->type == MSCAN_TYPE_MPC5121) {
		/* Clear pending bus-off condition */
		if (in_8(&regs->canmisc) & MSCAN_BOHOLD)
			out_8(&regs->canmisc, MSCAN_BOHOLD);
	}

	err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
	if (err)
		return err;

	canrflg = in_8(&regs->canrflg);
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	priv->can.state = state_map[max(MSCAN_STATE_RX(canrflg),
					MSCAN_STATE_TX(canrflg))];
	out_8(&regs->cantier, 0);

	/* Enable receive interrupts. */
	out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);

	return 0;
}

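/*
 * Restart the controller after bus-off. The MPC5121 (with BORM set) only
 * needs its bus-off hold released; other MSCAN versions are put back into
 * init mode and fully restarted via mscan_start().
 */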
static int mscan_restart(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);

	if (priv->type == MSCAN_TYPE_MPC5121) {
		struct mscan_regs __iomem *regs = priv->reg_base;

		priv->can.state = CAN_STATE_ERROR_ACTIVE;
		WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
		     "bus-off state expected\n");
		out_8(&regs->canmisc, MSCAN_BOHOLD);
		/* Re-enable receive interrupts. */
		out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
	} else {
		if (priv->can.state <= CAN_STATE_BUS_OFF)
			mscan_set_mode(dev, MSCAN_INIT_MODE);
		return mscan_start(dev);
	}

	return 0;
}

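/*
 * Queue a CAN frame for transmission. A free TX buffer is picked from the
 * tx_active bitmap; since lower buffer ids win local arbitration, the local
 * priority (cur_pri) is bumped whenever a lower id is reused, and the queue
 * is stopped when the buffers run out or the priority counter is about to
 * wrap.
 */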
static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct can_frame *frame = (struct can_frame *)skb->data;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	int i, rtr, buf_id;
	u32 can_id;

	if (can_dev_dropped_skb(dev, skb))
		return NETDEV_TX_OK;

	out_8(&regs->cantier, 0);

	i = ~priv->tx_active & MSCAN_TXE;
	buf_id = ffs(i) - 1;
	switch (hweight8(i)) {
	case 0:
		netif_stop_queue(dev);
		netdev_err(dev, "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	case 1:
		/*
		 * If buf_id < 3, the current frame will be sent out of
		 * order, since buffers with lower ids have higher priority
		 * (hell..)
		 */
		netif_stop_queue(dev);
		fallthrough;
	case 2:
		if (buf_id < priv->prev_buf_id) {
			priv->cur_pri++;
			if (priv->cur_pri == 0xff) {
				set_bit(F_TX_WAIT_ALL, &priv->flags);
				netif_stop_queue(dev);
			}
		}
		set_bit(F_TX_PROGRESS, &priv->flags);
		break;
	}
	priv->prev_buf_id = buf_id;
	out_8(&regs->cantbsel, i);

	rtr = frame->can_id & CAN_RTR_FLAG;

	/* RTR is always the lowest bit of interest, then IDs follow */
	if (frame->can_id & CAN_EFF_FLAG) {
		can_id = (frame->can_id & CAN_EFF_MASK)
			 << (MSCAN_EFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_EFF_RTR_SHIFT;
		out_be16(&regs->tx.idr3_2, can_id);

		can_id >>= 16;
		/* EFF_FLAGS are between the IDs :( */
		can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0)
			 | MSCAN_EFF_FLAGS;
	} else {
		can_id = (frame->can_id & CAN_SFF_MASK)
			 << (MSCAN_SFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_SFF_RTR_SHIFT;
	}
	out_be16(&regs->tx.idr1_0, can_id);

	if (!rtr) {
		void __iomem *data = &regs->tx.dsr1_0;
		u16 *payload = (u16 *)frame->data;

		for (i = 0; i < frame->len / 2; i++) {
			out_be16(data, *payload++);
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
		/* write remaining byte if necessary */
		if (frame->len & 1)
			out_8(data, frame->data[frame->len - 1]);
	}

	out_8(&regs->tx.dlr, frame->len);
	out_8(&regs->tx.tbpr, priv->cur_pri);

	/* Start transmission. */
	out_8(&regs->cantflg, 1 << buf_id);

	if (!test_bit(F_TX_PROGRESS, &priv->flags))
		netif_trans_update(dev);

	list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);

	can_put_echo_skb(skb, dev, buf_id, 0);

	/* Enable interrupt. */
	priv->tx_active |= 1 << buf_id;
	out_8(&regs->cantier, priv->tx_active);

	return NETDEV_TX_OK;
}

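/* Derive the new CAN state from CANRFLG on a status change interrupt. */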
static enum can_state get_new_state(struct net_device *dev, u8 canrflg)
{
	struct mscan_priv *priv = netdev_priv(dev);

	if (unlikely(canrflg & MSCAN_CSCIF))
		return state_map[max(MSCAN_STATE_RX(canrflg),
				     MSCAN_STATE_TX(canrflg))];

	return priv->can.state;
}

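/*
 * Read one received frame from the RX buffer registers: decode the
 * standard/extended id, RTR flag, DLC and data, then release the buffer by
 * acknowledging RXF.
 */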
static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	u32 can_id;
	int i;

	can_id = in_be16(&regs->rx.idr1_0);
	if (can_id & (1 << 3)) {
		frame->can_id = CAN_EFF_FLAG;
		can_id = ((can_id << 16) | in_be16(&regs->rx.idr3_2));
		can_id = ((can_id & 0xffe00000) |
			  ((can_id & 0x7ffff) << 2)) >> 2;
	} else {
		can_id >>= 4;
		frame->can_id = 0;
	}

	frame->can_id |= can_id >> 1;
	if (can_id & 1)
		frame->can_id |= CAN_RTR_FLAG;

	frame->len = can_cc_dlc2len(in_8(&regs->rx.dlr) & 0xf);

	if (!(frame->can_id & CAN_RTR_FLAG)) {
		void __iomem *data = &regs->rx.dsr1_0;
		u16 *payload = (u16 *)frame->data;

		for (i = 0; i < frame->len / 2; i++) {
			*payload++ = in_be16(data);
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
		/* read remaining byte if necessary */
		if (frame->len & 1)
			frame->data[frame->len - 1] = in_8(data);
	}

	out_8(&regs->canrflg, MSCAN_RXF);
}

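/*
 * Build an error frame from CANRFLG: report RX overflows, propagate state
 * changes via can_change_state() and, on non-MPC5121 parts, stop the chip
 * when it goes bus-off so that it does not recover on its own.
 */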
static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
				u8 canrflg)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	enum can_state new_state;

	netdev_dbg(dev, "error interrupt (canrflg=%#x)\n", canrflg);
	frame->can_id = CAN_ERR_FLAG;

	if (canrflg & MSCAN_OVRIF) {
		frame->can_id |= CAN_ERR_CRTL;
		frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		stats->rx_over_errors++;
		stats->rx_errors++;
	} else {
		frame->data[1] = 0;
	}

	new_state = get_new_state(dev, canrflg);
	if (new_state != priv->can.state) {
		can_change_state(dev, frame,
				 state_map[MSCAN_STATE_TX(canrflg)],
				 state_map[MSCAN_STATE_RX(canrflg)]);

		if (priv->can.state == CAN_STATE_BUS_OFF) {
			/*
			 * The MSCAN on the MPC5200 recovers from bus-off
			 * automatically. To avoid that, we stop the chip by
			 * doing a light-weight stop (we are in irq context).
			 */
			if (priv->type != MSCAN_TYPE_MPC5121) {
				out_8(&regs->cantier, 0);
				out_8(&regs->canrier, 0);
				setbits8(&regs->canctl0,
					 MSCAN_SLPRQ | MSCAN_INITRQ);
			}
			can_bus_off(dev);
		}
	}
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	frame->len = CAN_ERR_DLC;
	out_8(&regs->canrflg, MSCAN_ERR_IF);
}

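/*
 * NAPI poll handler: drain received data and error frames from CANRFLG up to
 * the given quota. When the work is complete, RX interrupts are restored from
 * the shadow copy saved in the ISR.
 */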
static int mscan_rx_poll(struct napi_struct *napi, int quota)
{
	struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi);
	struct net_device *dev = napi->dev;
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	int work_done = 0;
	struct sk_buff *skb;
	struct can_frame *frame;
	u8 canrflg;

	while (work_done < quota) {
		canrflg = in_8(&regs->canrflg);
		if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
			break;

		skb = alloc_can_skb(dev, &frame);
		if (!skb) {
			if (printk_ratelimit())
				netdev_notice(dev, "packet dropped\n");
			stats->rx_dropped++;
			out_8(&regs->canrflg, canrflg);
			continue;
		}

		if (canrflg & MSCAN_RXF) {
			mscan_get_rx_frame(dev, frame);
			stats->rx_packets++;
			if (!(frame->can_id & CAN_RTR_FLAG))
				stats->rx_bytes += frame->len;
		} else if (canrflg & MSCAN_ERR_IF) {
			mscan_get_err_frame(dev, frame, canrflg);
		}

		work_done++;
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		if (likely(napi_complete_done(&priv->napi, work_done))) {
			clear_bit(F_RX_PROGRESS, &priv->flags);
			if (priv->can.state < CAN_STATE_BUS_OFF)
				out_8(&regs->canrier, priv->shadow_canrier);
		}
	}
	return work_done;
}

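/*
 * Interrupt handler: complete echoed TX frames for all buffers that finished
 * transmission, then hand RX/error work off to NAPI with RX interrupts
 * temporarily masked.
 */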
static irqreturn_t mscan_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	u8 cantier, cantflg, canrflg;
	irqreturn_t ret = IRQ_NONE;

	cantier = in_8(&regs->cantier) & MSCAN_TXE;
	cantflg = in_8(&regs->cantflg) & cantier;

	if (cantier && cantflg) {
		struct list_head *tmp, *pos;

		list_for_each_safe(pos, tmp, &priv->tx_head) {
			struct tx_queue_entry *entry =
				list_entry(pos, struct tx_queue_entry, list);
			u8 mask = entry->mask;

			if (!(cantflg & mask))
				continue;

			out_8(&regs->cantbsel, mask);
			stats->tx_bytes += can_get_echo_skb(dev, entry->id,
							    NULL);
			stats->tx_packets++;
			priv->tx_active &= ~mask;
			list_del(pos);
		}

		if (list_empty(&priv->tx_head)) {
			clear_bit(F_TX_WAIT_ALL, &priv->flags);
			clear_bit(F_TX_PROGRESS, &priv->flags);
			priv->cur_pri = 0;
		} else {
			netif_trans_update(dev);
		}

		if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
			netif_wake_queue(dev);

		out_8(&regs->cantier, priv->tx_active);
		ret = IRQ_HANDLED;
	}

	canrflg = in_8(&regs->canrflg);
	if ((canrflg & ~MSCAN_STAT_MSK) &&
	    !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) {
		if (canrflg & ~MSCAN_STAT_MSK) {
			priv->shadow_canrier = in_8(&regs->canrier);
			out_8(&regs->canrier, 0);
			napi_schedule(&priv->napi);
			ret = IRQ_HANDLED;
		} else {
			clear_bit(F_RX_PROGRESS, &priv->flags);
		}
	}
	return ret;
}

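/* CAN framework callback used to restart the controller after bus-off. */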
static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
{
	int ret = 0;

	switch (mode) {
	case CAN_MODE_START:
		ret = mscan_restart(dev);
		if (ret)
			break;
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
		break;

	default:
		ret = -EOPNOTSUPP;
		break;
	}
	return ret;
}

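/* Program the computed bit timing into the BTR0/BTR1 registers. */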
static int mscan_do_set_bittiming(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct can_bittiming *bt = &priv->can.bittiming;
	u8 btr0, btr1;

	btr0 = BTR0_SET_BRP(bt->brp) | BTR0_SET_SJW(bt->sjw);
	btr1 = (BTR1_SET_TSEG1(bt->prop_seg + bt->phase_seg1) |
		BTR1_SET_TSEG2(bt->phase_seg2) |
		BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES));

	netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);

	out_8(&regs->canbtr0, btr0);
	out_8(&regs->canbtr1, btr1);

	return 0;
}

static int mscan_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	bec->txerr = in_8(&regs->cantxerr);
	bec->rxerr = in_8(&regs->canrxerr);

	return 0;
}

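/*
 * ndo_open: enable the clocks, set up NAPI and the interrupt handler,
 * configure listen-only mode if requested and start the controller.
 */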
static int mscan_open(struct net_device *dev)
{
	int ret;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	ret = clk_prepare_enable(priv->clk_ipg);
	if (ret)
		goto exit_retcode;
	ret = clk_prepare_enable(priv->clk_can);
	if (ret)
		goto exit_dis_ipg_clock;

	/* common open */
	ret = open_candev(dev);
	if (ret)
		goto exit_dis_can_clock;

	napi_enable(&priv->napi);

	ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev);
	if (ret < 0) {
		netdev_err(dev, "failed to attach interrupt\n");
		goto exit_napi_disable;
	}

	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		setbits8(&regs->canctl1, MSCAN_LISTEN);
	else
		clrbits8(&regs->canctl1, MSCAN_LISTEN);

	ret = mscan_start(dev);
	if (ret)
		goto exit_free_irq;

	netif_start_queue(dev);

	return 0;

exit_free_irq:
	free_irq(dev->irq, dev);
exit_napi_disable:
	napi_disable(&priv->napi);
	close_candev(dev);
exit_dis_can_clock:
	clk_disable_unprepare(priv->clk_can);
exit_dis_ipg_clock:
	clk_disable_unprepare(priv->clk_ipg);
exit_retcode:
	return ret;
}

static int mscan_close(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	out_8(&regs->cantier, 0);
	out_8(&regs->canrier, 0);
	mscan_set_mode(dev, MSCAN_INIT_MODE);
	close_candev(dev);
	free_irq(dev->irq, dev);

	clk_disable_unprepare(priv->clk_can);
	clk_disable_unprepare(priv->clk_ipg);

	return 0;
}

static const struct net_device_ops mscan_netdev_ops = {
	.ndo_open	= mscan_open,
	.ndo_stop	= mscan_close,
	.ndo_start_xmit	= mscan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};

static const struct ethtool_ops mscan_ethtool_ops = {
	.get_ts_info = ethtool_op_get_ts_info,
};

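/*
 * Finish hardware setup for a probed device: select the clock source, enable
 * the MSCAN module, configure the acceptance filters to accept everything,
 * put the controller into init mode and register it with the CAN core.
 */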
int register_mscandev(struct net_device *dev, int mscan_clksrc)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	u8 ctl1;

	ctl1 = in_8(&regs->canctl1);
	if (mscan_clksrc)
		ctl1 |= MSCAN_CLKSRC;
	else
		ctl1 &= ~MSCAN_CLKSRC;

	if (priv->type == MSCAN_TYPE_MPC5121) {
		priv->can.do_get_berr_counter = mscan_get_berr_counter;
		ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */
	}

	ctl1 |= MSCAN_CANE;
	out_8(&regs->canctl1, ctl1);
	udelay(100);

	/* acceptance mask/acceptance code (accept everything) */
	out_be16(&regs->canidar1_0, 0);
	out_be16(&regs->canidar3_2, 0);
	out_be16(&regs->canidar5_4, 0);
	out_be16(&regs->canidar7_6, 0);

	out_be16(&regs->canidmr1_0, 0xffff);
	out_be16(&regs->canidmr3_2, 0xffff);
	out_be16(&regs->canidmr5_4, 0xffff);
	out_be16(&regs->canidmr7_6, 0xffff);
	/* Two 32 bit Acceptance Filters */
	out_8(&regs->canidac, MSCAN_AF_32BIT);

	mscan_set_mode(dev, MSCAN_INIT_MODE);

	return register_candev(dev);
}

void unregister_mscandev(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	mscan_set_mode(dev, MSCAN_INIT_MODE);
	clrbits8(&regs->canctl1, MSCAN_CANE);
	unregister_candev(dev);
}

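/*
 * Allocate the netdev together with its mscan private data and initialize
 * the CAN framework callbacks, NAPI context and TX buffer bookkeeping.
 */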
struct net_device *alloc_mscandev(void)
{
	struct net_device *dev;
	struct mscan_priv *priv;
	int i;

	dev = alloc_candev(sizeof(struct mscan_priv), MSCAN_ECHO_SKB_MAX);
	if (!dev)
		return NULL;
	priv = netdev_priv(dev);

	dev->netdev_ops = &mscan_netdev_ops;
	dev->ethtool_ops = &mscan_ethtool_ops;

	dev->flags |= IFF_ECHO;	/* we support local echo */

	netif_napi_add_weight(dev, &priv->napi, mscan_rx_poll, 8);

	priv->can.bittiming_const = &mscan_bittiming_const;
	priv->can.do_set_bittiming = mscan_do_set_bittiming;
	priv->can.do_set_mode = mscan_do_set_mode;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
				       CAN_CTRLMODE_LISTENONLY;

	for (i = 0; i < TX_QUEUE_SIZE; i++) {
		priv->tx_queue[i].id = i;
		priv->tx_queue[i].mask = 1 << i;
	}

	return dev;
}

MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN port driver for MSCAN-based chips");