1/*
2 * SuperH IrDA Driver
3 *
4 * Copyright (C) 2010 Renesas Solutions Corp.
5 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
6 *
7 * Based on sh_sir.c
8 * Copyright (C) 2009 Renesas Solutions Corp.
9 * Copyright 2006-2009 Analog Devices Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16/*
17 * CAUTION
18 *
19 * This driver is very simple.
20 * So, it doesn't have below support now
21 * - MIR/FIR support
22 * - DMA transfer support
23 * - FIFO mode support
24 */
25#include <linux/io.h>
26#include <linux/interrupt.h>
27#include <linux/module.h>
28#include <linux/platform_device.h>
29#include <linux/clk.h>
30#include <net/irda/wrapper.h>
31#include <net/irda/irda_device.h>
32
33#define DRIVER_NAME "sh_irda"
34
35#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
36#define __IRDARAM_LEN 0x13FF
37#else
38#define __IRDARAM_LEN 0x1039
39#endif
40
41#define IRTMR 0x1F00 /* Transfer mode */
42#define IRCFR 0x1F02 /* Configuration */
43#define IRCTR 0x1F04 /* IR control */
44#define IRTFLR 0x1F20 /* Transmit frame length */
45#define IRTCTR 0x1F22 /* Transmit control */
46#define IRRFLR 0x1F40 /* Receive frame length */
47#define IRRCTR 0x1F42 /* Receive control */
48#define SIRISR 0x1F60 /* SIR-UART mode interrupt source */
49#define SIRIMR 0x1F62 /* SIR-UART mode interrupt mask */
50#define SIRICR 0x1F64 /* SIR-UART mode interrupt clear */
51#define SIRBCR 0x1F68 /* SIR-UART mode baud rate count */
52#define MFIRISR 0x1F70 /* MIR/FIR mode interrupt source */
53#define MFIRIMR 0x1F72 /* MIR/FIR mode interrupt mask */
54#define MFIRICR 0x1F74 /* MIR/FIR mode interrupt clear */
55#define CRCCTR 0x1F80 /* CRC engine control */
56#define CRCIR 0x1F86 /* CRC engine input data */
57#define CRCCR 0x1F8A /* CRC engine calculation */
58#define CRCOR 0x1F8E /* CRC engine output data */
59#define FIFOCP 0x1FC0 /* FIFO current pointer */
60#define FIFOFP 0x1FC2 /* FIFO follow pointer */
61#define FIFORSMSK 0x1FC4 /* FIFO receive status mask */
62#define FIFORSOR 0x1FC6 /* FIFO receive status OR */
63#define FIFOSEL 0x1FC8 /* FIFO select */
64#define FIFORS 0x1FCA /* FIFO receive status */
65#define FIFORFL 0x1FCC /* FIFO receive frame length */
66#define FIFORAMCP 0x1FCE /* FIFO RAM current pointer */
67#define FIFORAMFP 0x1FD0 /* FIFO RAM follow pointer */
68#define BIFCTL 0x1FD2 /* BUS interface control */
69#define IRDARAM 0x0000 /* IrDA buffer RAM */
70#define IRDARAM_LEN __IRDARAM_LEN /* - 8/16/32 (read-only for 32) */
71
72/* IRTMR */
73#define TMD_MASK (0x3 << 14) /* Transfer Mode */
74#define TMD_SIR (0x0 << 14)
75#define TMD_MIR (0x3 << 14)
76#define TMD_FIR (0x2 << 14)
77
78#define FIFORIM (1 << 8) /* FIFO receive interrupt mask */
79#define MIM (1 << 4) /* MIR/FIR Interrupt Mask */
80#define SIM (1 << 0) /* SIR Interrupt Mask */
81#define xIM_MASK (FIFORIM | MIM | SIM)
82
83/* IRCFR */
84#define RTO_SHIFT 8 /* shift for Receive Timeout */
85#define RTO (0x3 << RTO_SHIFT)
86
87/* IRTCTR */
88#define ARMOD (1 << 15) /* Auto-Receive Mode */
89#define TE (1 << 0) /* Transmit Enable */
90
91/* IRRFLR */
92#define RFL_MASK (0x1FFF) /* mask for Receive Frame Length */
93
94/* IRRCTR */
95#define RE (1 << 0) /* Receive Enable */
96
97/*
98 * SIRISR, SIRIMR, SIRICR,
99 * MFIRISR, MFIRIMR, MFIRICR
100 */
101#define FRE (1 << 15) /* Frame Receive End */
102#define TROV (1 << 11) /* Transfer Area Overflow */
103#define xIR_9 (1 << 9)
104#define TOT xIR_9 /* for SIR Timeout */
105#define ABTD xIR_9 /* for MIR/FIR Abort Detection */
106#define xIR_8 (1 << 8)
107#define FER xIR_8 /* for SIR Framing Error */
108#define CRCER xIR_8 /* for MIR/FIR CRC error */
109#define FTE (1 << 7) /* Frame Transmit End */
110#define xIR_MASK (FRE | TROV | xIR_9 | xIR_8 | FTE)
111
112/* SIRBCR */
113#define BRC_MASK (0x3F) /* mask for Baud Rate Count */
114
115/* CRCCTR */
116#define CRC_RST (1 << 15) /* CRC Engine Reset */
117#define CRC_CT_MASK 0x0FFF /* mask for CRC Engine Input Data Count */
118
119/* CRCIR */
120#define CRC_IN_MASK 0x0FFF /* mask for CRC Engine Input Data */
121
122/************************************************************************
123
124
125 enum / structure
126
127
128************************************************************************/
/* Hardware transfer mode, mirrors the TMD field programmed into IRTMR. */
enum sh_irda_mode {
	SH_IRDA_NONE = 0,
	SH_IRDA_SIR,
	SH_IRDA_MIR,
	SH_IRDA_FIR,
};

struct sh_irda_self;
/*
 * Per-mode interrupt dispatch table: one callback per xIR status bit
 * (see xIR_MASK). The active table is selected by sh_irda_set_mode().
 */
struct sh_irda_xir_func {
	int (*xir_fre) (struct sh_irda_self *self);	/* Frame Receive End */
	int (*xir_trov) (struct sh_irda_self *self);	/* Transfer area OVerflow */
	int (*xir_9) (struct sh_irda_self *self);	/* TOT (SIR) / ABTD (MIR/FIR) */
	int (*xir_8) (struct sh_irda_self *self);	/* FER (SIR) / CRCER (MIR/FIR) */
	int (*xir_fte) (struct sh_irda_self *self);	/* Frame Transmit End */
};
144
/* Per-device driver state, stored as netdev private data. */
struct sh_irda_self {
	void __iomem		*membase;	/* mapped registers + IRDARAM window */
	unsigned int		irq;
	struct clk		*clk;

	struct net_device	*ndev;

	struct irlap_cb		*irlap;		/* IrLAP instance, valid while open */
	struct qos_info		qos;

	iobuff_t		tx_buff;	/* head aliases IRDARAM directly */
	iobuff_t		rx_buff;	/* kmalloc'd SIR unwrap buffer */

	enum sh_irda_mode	mode;
	spinlock_t		lock;		/* serializes all register access */

	struct sh_irda_xir_func	*xir_func;	/* irq handlers for current mode */
};
163
164/************************************************************************
165
166
167 common function
168
169
170************************************************************************/
/*
 * Locked 16bit register write.
 *
 * Every register access in this driver goes through these helpers so
 * that read-modify-write sequences can rely on the same spinlock.
 */
static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
{
	unsigned long flags;

	spin_lock_irqsave(&self->lock, flags);
	iowrite16(data, self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);
}

/* Locked 16bit register read. */
static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
{
	unsigned long flags;
	u16 ret;

	spin_lock_irqsave(&self->lock, flags);
	ret = ioread16(self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);

	return ret;
}
191
192static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
193 u16 mask, u16 data)
194{
195 unsigned long flags;
196 u16 old, new;
197
198 spin_lock_irqsave(&self->lock, flags);
199 old = ioread16(self->membase + offset);
200 new = (old & ~mask) | data;
201 if (old != new)
202 iowrite16(data, self->membase + offset);
203 spin_unlock_irqrestore(&self->lock, flags);
204}
205
206/************************************************************************
207
208
209 mode function
210
211
212************************************************************************/
213/*=====================================
214 *
215 * common
216 *
217 *=====================================*/
/* Enable/disable the receiver via the RE bit in IRRCTR. */
static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
{
	struct device *dev = &self->ndev->dev;

	sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
	dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
}

/*
 * Program the receive timeout field (IRCFR.RTO).
 *
 * @interval: hardware encoding 0-2; forced to 0 outside SIR mode.
 * Returns 0 on success, -EINVAL for an out-of-range interval.
 */
static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
{
	struct device *dev = &self->ndev->dev;

	if (SH_IRDA_SIR != self->mode)
		interval = 0;

	if (interval < 0 || interval > 2) {
		dev_err(dev, "unsupported timeout interval\n");
		return -EINVAL;
	}

	sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
	return 0;
}

/*
 * Program the SIR baud rate counter (SIRBCR).
 *
 * A negative @baudrate means "no speed change requested" and is
 * silently accepted. Only valid in SIR mode.
 * Returns 0 on success, -EINVAL when not in SIR mode.
 */
static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
{
	struct device *dev = &self->ndev->dev;
	u16 val;

	if (baudrate < 0)
		return 0;

	if (SH_IRDA_SIR != self->mode) {
		dev_err(dev, "it is not SIR mode\n");
		return -EINVAL;
	}

	/*
	 * Baud rate (bits/s) =
	 *	(48 MHz / 26) / (baud rate counter value + 1) x 16
	 */
	val = (48000000 / 26 / 16 / baudrate) - 1;
	dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);

	sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);

	return 0;
}

/* Length in bytes of the frame currently held in IRDARAM (from IRRFLR). */
static int xir_get_rcv_length(struct sh_irda_self *self)
{
	return RFL_MASK & sh_irda_read(self, IRRFLR);
}
271
272/*=====================================
273 *
274 * NONE MODE
275 *
276 *=====================================*/
/*
 * NONE-mode interrupt handlers: no transfer mode is selected, so any
 * interrupt reaching these is unexpected and is only logged.
 */
static int xir_fre(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: frame recv\n");
	return 0;
}

static int xir_trov(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: buffer ram over\n");
	return 0;
}

static int xir_9(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: time over\n");
	return 0;
}

static int xir_8(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: framing error\n");
	return 0;
}

static int xir_fte(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: frame transmit end\n");
	return 0;
}

/* Dispatch table used while mode == SH_IRDA_NONE. */
static struct sh_irda_xir_func xir_func = {
	.xir_fre	= xir_fre,
	.xir_trov	= xir_trov,
	.xir_9		= xir_9,
	.xir_8		= xir_8,
	.xir_fte	= xir_fte,
};

/*=====================================
 *
 *		MIR/FIR MODE
 *
 * MIR/FIR are not supported now
 *=====================================*/
/* MIR/FIR reuse the log-only NONE handlers until real support exists. */
static struct sh_irda_xir_func mfir_func = {
	.xir_fre	= xir_fre,
	.xir_trov	= xir_trov,
	.xir_9		= xir_9,
	.xir_8		= xir_8,
	.xir_fte	= xir_fte,
};
333
334/*=====================================
335 *
336 * SIR MODE
337 *
338 *=====================================*/
/*
 * SIR frame-receive-end: copy the received frame out of IRDARAM.
 *
 * IRDARAM is read 16 bits at a time (one sh_irda_read per two bytes);
 * the two bytes of each word are fed individually to the IrDA SIR
 * unwrapper via the data16/data alias. The receiver is re-enabled
 * once the frame has been consumed.
 */
static int sir_fre(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	u16 data16;
	u8  *data = (u8 *)&data16;
	int len = xir_get_rcv_length(self);
	int i, j;

	/* clamp to the size of the on-chip buffer */
	if (len > IRDARAM_LEN)
		len = IRDARAM_LEN;

	dev_dbg(dev, "frame recv length = %d\n", len);

	for (i = 0; i < len; i++) {
		j = i % 2;
		if (!j)
			data16 = sh_irda_read(self, IRDARAM + i);

		async_unwrap_char(self->ndev, &self->ndev->stats,
				  &self->rx_buff, data[j]);
	}
	self->ndev->last_rx = jiffies;

	sh_irda_rcv_ctrl(self, 1);

	return 0;
}

/* SIR buffer-RAM overflow: log and re-arm the receiver. */
static int sir_trov(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "buffer ram over\n");
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

/* SIR receive timeout: fall back to 9600 baud and re-arm the receiver. */
static int sir_tot(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "time over\n");
	sh_irda_set_baudrate(self, 9600);
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

/* SIR framing error: log and re-arm the receiver. */
static int sir_fer(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "framing error\n");
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

/*
 * SIR frame-transmit-end: the queue was stopped in hard_xmit;
 * wake it now that the hardware has finished sending.
 */
static int sir_fte(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_dbg(dev, "frame transmit end\n");
	netif_wake_queue(self->ndev);

	return 0;
}

/* Dispatch table used while mode == SH_IRDA_SIR. */
static struct sh_irda_xir_func sir_func = {
	.xir_fre	= sir_fre,
	.xir_trov	= sir_trov,
	.xir_9		= sir_tot,
	.xir_8		= sir_fer,
	.xir_fte	= sir_fte,
};
412
/*
 * Select the transfer mode: program IRTMR.TMD and install the matching
 * interrupt dispatch table. Unknown modes fall back to NONE
 * (log-only handlers, TMD cleared).
 */
static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
{
	struct device *dev = &self->ndev->dev;
	struct sh_irda_xir_func	*func;
	const char *name;
	u16 data;

	switch (mode) {
	case SH_IRDA_SIR:
		name	= "SIR";
		data	= TMD_SIR;
		func	= &sir_func;
		break;
	case SH_IRDA_MIR:
		name	= "MIR";
		data	= TMD_MIR;
		func	= &mfir_func;
		break;
	case SH_IRDA_FIR:
		name	= "FIR";
		data	= TMD_FIR;
		func	= &mfir_func;
		break;
	default:
		name	= "NONE";
		data	= 0;
		func	= &xir_func;
		break;
	}

	self->mode = mode;
	self->xir_func = func;
	sh_irda_update_bits(self, IRTMR, TMD_MASK, data);

	dev_dbg(dev, "switch to %s mode", name);
}
449
450/************************************************************************
451
452
453 irq function
454
455
456************************************************************************/
/*
 * Reprogram the interrupt masks for the current mode:
 * first mask everything and clear any pending status, then unmask
 * only the top-level IRTMR bit and the xIR sources of the active
 * mode. In NONE mode everything stays masked (xir_reg == 0).
 */
static void sh_irda_set_irq_mask(struct sh_irda_self *self)
{
	u16 tmr_hole;
	u16 xir_reg;

	/* set all mask */
	sh_irda_update_bits(self, IRTMR,   xIM_MASK, xIM_MASK);
	sh_irda_update_bits(self, SIRIMR,  xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);

	/* clear irq */
	sh_irda_update_bits(self, SIRICR,  xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);

	switch (self->mode) {
	case SH_IRDA_SIR:
		tmr_hole	= SIM;
		xir_reg		= SIRIMR;
		break;
	case SH_IRDA_MIR:
	case SH_IRDA_FIR:
		tmr_hole	= MIM;
		xir_reg		= MFIRIMR;
		break;
	default:
		tmr_hole	= 0;
		xir_reg		= 0;
		break;
	}

	/* open mask */
	if (xir_reg) {
		sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
		sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
	}
}
493
/*
 * Interrupt handler: acknowledge the pending sources, then dispatch
 * each set status bit to the handler table of the current mode.
 *
 * NOTE(review): only the SIR status register (SIRISR) is read and
 * cleared here, never MFIRISR. That is adequate while MIR/FIR are
 * unsupported, but needs revisiting if they are ever enabled.
 */
static irqreturn_t sh_irda_irq(int irq, void *dev_id)
{
	struct sh_irda_self *self = dev_id;
	struct sh_irda_xir_func	*func = self->xir_func;
	u16 isr = sh_irda_read(self, SIRISR);

	/* clear irq */
	sh_irda_write(self, SIRICR, isr);

	if (isr & FRE)
		func->xir_fre(self);
	if (isr & TROV)
		func->xir_trov(self);
	if (isr & xIR_9)
		func->xir_9(self);
	if (isr & xIR_8)
		func->xir_8(self);
	if (isr & FTE)
		func->xir_fte(self);

	return IRQ_HANDLED;
}
516
517/************************************************************************
518
519
520 CRC function
521
522
523************************************************************************/
/* Reset the hardware CRC engine and its input counter. */
static void sh_irda_crc_reset(struct sh_irda_self *self)
{
	sh_irda_write(self, CRCCTR, CRC_RST);
}

/* Feed one input value into the CRC engine. */
static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
{
	sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
}

/* Number of values fed to the CRC engine since the last reset. */
static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
{
	return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
}

/* Current CRC engine output. */
static u16 sh_irda_crc_out(struct sh_irda_self *self)
{
	return sh_irda_read(self, CRCOR);
}

/*
 * CRC engine self-test, run once at open time: feed the known vector
 * CC F5 F1 A7, then verify the input count (4) and the expected
 * result (0x51DF). Returns 0 on success, -EIO if the engine
 * misbehaves. The engine is reset again on exit either way.
 */
static int sh_irda_crc_init(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	int ret = -EIO;
	u16 val;

	sh_irda_crc_reset(self);

	sh_irda_crc_add(self, 0xCC);
	sh_irda_crc_add(self, 0xF5);
	sh_irda_crc_add(self, 0xF1);
	sh_irda_crc_add(self, 0xA7);

	val = sh_irda_crc_cnt(self);
	if (4 != val) {
		dev_err(dev, "CRC count error %x\n", val);
		goto crc_init_out;
	}

	val = sh_irda_crc_out(self);
	if (0x51DF != val) {
		dev_err(dev, "CRC result error%x\n", val);
		goto crc_init_out;
	}

	ret = 0;

crc_init_out:

	sh_irda_crc_reset(self);
	return ret;
}
576
577/************************************************************************
578
579
580 iobuf function
581
582
583************************************************************************/
/*
 * Free the iobuffs. Only rx_buff.head is kfree'd: tx_buff.head points
 * straight into the device's IRDARAM and was never allocated.
 */
static void sh_irda_remove_iobuf(struct sh_irda_self *self)
{
	kfree(self->rx_buff.head);

	self->tx_buff.head = NULL;
	self->tx_buff.data = NULL;
	self->rx_buff.head = NULL;
	self->rx_buff.data = NULL;
}

/*
 * Allocate the receive iobuff and point the transmit iobuff at the
 * on-chip IRDARAM (frames are async-wrapped directly into device RAM,
 * so no host-side tx buffer is needed).
 *
 * NOTE(review): @txsize is currently unused because of the IRDARAM
 * aliasing above — confirm before relying on it.
 * Returns 0 on success, -EINVAL if buffers already exist, -ENOMEM on
 * allocation failure.
 */
static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
{
	if (self->rx_buff.head ||
	    self->tx_buff.head) {
		dev_err(&self->ndev->dev, "iobuff has already existed.");
		return -EINVAL;
	}

	/* rx_buff */
	self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
	if (!self->rx_buff.head)
		return -ENOMEM;

	self->rx_buff.truesize	= rxsize;
	self->rx_buff.in_frame	= FALSE;
	self->rx_buff.state	= OUTSIDE_FRAME;
	self->rx_buff.data	= self->rx_buff.head;

	/* tx_buff */
	self->tx_buff.head	= self->membase + IRDARAM;
	self->tx_buff.truesize	= IRDARAM_LEN;

	return 0;
}
618
619/************************************************************************
620
621
622 net_device_ops function
623
624
625************************************************************************/
/*
 * ndo_start_xmit: stop the queue, disable reception, apply any
 * requested speed change, SIR-wrap the skb directly into IRDARAM and
 * start transmission. The queue is woken again by the frame-transmit
 * -end interrupt (sir_fte).
 *
 * NOTE(review): a speed-change-only skb (len == 0) takes the error
 * path, which resets to 9600 baud rather than keeping the speed just
 * programmed above — confirm this is intended.
 */
static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);
	struct device *dev = &self->ndev->dev;
	int speed = irda_get_next_speed(skb);
	int ret;

	dev_dbg(dev, "hard xmit\n");

	netif_stop_queue(ndev);
	sh_irda_rcv_ctrl(self, 0);

	ret = sh_irda_set_baudrate(self, speed);
	if (ret < 0)
		goto sh_irda_hard_xmit_end;

	self->tx_buff.len = 0;
	if (skb->len) {
		unsigned long flags;

		/* wrap the frame straight into device RAM under the lock */
		spin_lock_irqsave(&self->lock, flags);
		self->tx_buff.len = async_wrap_skb(skb,
						   self->tx_buff.head,
						   self->tx_buff.truesize);
		spin_unlock_irqrestore(&self->lock, flags);

		if (self->tx_buff.len > self->tx_buff.truesize)
			self->tx_buff.len = self->tx_buff.truesize;

		sh_irda_write(self, IRTFLR, self->tx_buff.len);
		sh_irda_write(self, IRTCTR, ARMOD | TE);
	} else
		goto sh_irda_hard_xmit_end;

	dev_kfree_skb(skb);

	return 0;

sh_irda_hard_xmit_end:
	sh_irda_set_baudrate(self, 9600);
	netif_wake_queue(self->ndev);
	sh_irda_rcv_ctrl(self, 1);
	dev_kfree_skb(skb);

	return ret;

}

/* ndo_do_ioctl: required by the IrDA framework, intentionally a no-op. */
static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
{
	/*
	 * FIXME
	 *
	 * This function is needed for irda framework.
	 * But nothing to do now
	 */
	return 0;
}

/* ndo_get_stats: the driver keeps its counters in ndev->stats. */
static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);

	return &self->ndev->stats;
}
691
/*
 * ndo_open: enable the clock, run the CRC engine self-test, bring the
 * hardware up in SIR mode at 9600 baud, create the IrLAP instance,
 * then start the queue, enable reception and unmask interrupts.
 * On any failure the clock is disabled again.
 */
static int sh_irda_open(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);
	int err;

	clk_enable(self->clk);
	err = sh_irda_crc_init(self);
	if (err)
		goto open_err;

	sh_irda_set_mode(self, SH_IRDA_SIR);
	sh_irda_set_timeout(self, 2);
	sh_irda_set_baudrate(self, 9600);

	self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
	if (!self->irlap) {
		err = -ENODEV;
		goto open_err;
	}

	netif_start_queue(ndev);
	sh_irda_rcv_ctrl(self, 1);
	sh_irda_set_irq_mask(self);

	dev_info(&ndev->dev, "opened\n");

	return 0;

open_err:
	clk_disable(self->clk);

	return err;
}
725
726static int sh_irda_stop(struct net_device *ndev)
727{
728 struct sh_irda_self *self = netdev_priv(ndev);
729
730 /* Stop IrLAP */
731 if (self->irlap) {
732 irlap_close(self->irlap);
733 self->irlap = NULL;
734 }
735
736 netif_stop_queue(ndev);
737
738 dev_info(&ndev->dev, "stoped\n");
739
740 return 0;
741}
742
/* net_device callbacks; wired up in sh_irda_probe(). */
static const struct net_device_ops sh_irda_ndo = {
	.ndo_open		= sh_irda_open,
	.ndo_stop		= sh_irda_stop,
	.ndo_start_xmit		= sh_irda_hard_xmit,
	.ndo_do_ioctl		= sh_irda_ioctl,
	.ndo_get_stats		= sh_irda_stats,
};
750
751/************************************************************************
752
753
754 platform_driver function
755
756
757************************************************************************/
758static int __devinit sh_irda_probe(struct platform_device *pdev)
759{
760 struct net_device *ndev;
761 struct sh_irda_self *self;
762 struct resource *res;
763 int irq;
764 int err = -ENOMEM;
765
766 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
767 irq = platform_get_irq(pdev, 0);
768 if (!res || irq < 0) {
769 dev_err(&pdev->dev, "Not enough platform resources.\n");
770 goto exit;
771 }
772
773 ndev = alloc_irdadev(sizeof(*self));
774 if (!ndev)
775 goto exit;
776
777 self = netdev_priv(ndev);
778 self->membase = ioremap_nocache(res->start, resource_size(res));
779 if (!self->membase) {
780 err = -ENXIO;
781 dev_err(&pdev->dev, "Unable to ioremap.\n");
782 goto err_mem_1;
783 }
784
785 err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
786 if (err)
787 goto err_mem_2;
788
789 self->clk = clk_get(&pdev->dev, NULL);
790 if (IS_ERR(self->clk)) {
791 dev_err(&pdev->dev, "cannot get irda clock\n");
792 goto err_mem_3;
793 }
794
795 irda_init_max_qos_capabilies(&self->qos);
796
797 ndev->netdev_ops = &sh_irda_ndo;
798 ndev->irq = irq;
799
800 self->ndev = ndev;
801 self->qos.baud_rate.bits &= IR_9600; /* FIXME */
802 self->qos.min_turn_time.bits = 1; /* 10 ms or more */
803 spin_lock_init(&self->lock);
804
805 irda_qos_bits_to_value(&self->qos);
806
807 err = register_netdev(ndev);
808 if (err)
809 goto err_mem_4;
810
811 platform_set_drvdata(pdev, ndev);
812
813 if (request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self)) {
814 dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
815 goto err_mem_4;
816 }
817
818 dev_info(&pdev->dev, "SuperH IrDA probed\n");
819
820 goto exit;
821
822err_mem_4:
823 clk_put(self->clk);
824err_mem_3:
825 sh_irda_remove_iobuf(self);
826err_mem_2:
827 iounmap(self->membase);
828err_mem_1:
829 free_netdev(ndev);
830exit:
831 return err;
832}
833
834static int __devexit sh_irda_remove(struct platform_device *pdev)
835{
836 struct net_device *ndev = platform_get_drvdata(pdev);
837 struct sh_irda_self *self = netdev_priv(ndev);
838
839 if (!self)
840 return 0;
841
842 unregister_netdev(ndev);
843 clk_put(self->clk);
844 sh_irda_remove_iobuf(self);
845 iounmap(self->membase);
846 free_netdev(ndev);
847 platform_set_drvdata(pdev, NULL);
848
849 return 0;
850}
851
/* Platform driver glue; matched against DRIVER_NAME ("sh_irda"). */
static struct platform_driver sh_irda_driver = {
	.probe   = sh_irda_probe,
	.remove  = __devexit_p(sh_irda_remove),
	.driver  = {
		.name	= DRIVER_NAME,
	},
};
859
/* Module init: register the platform driver. */
static int __init sh_irda_init(void)
{
	return platform_driver_register(&sh_irda_driver);
}

/* Module exit: unregister the platform driver. */
static void __exit sh_irda_exit(void)
{
	platform_driver_unregister(&sh_irda_driver);
}

module_init(sh_irda_init);
module_exit(sh_irda_exit);

MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
MODULE_LICENSE("GPL");
1/*
2 * SuperH IrDA Driver
3 *
4 * Copyright (C) 2010 Renesas Solutions Corp.
5 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6 *
7 * Based on sh_sir.c
8 * Copyright (C) 2009 Renesas Solutions Corp.
9 * Copyright 2006-2009 Analog Devices Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16/*
17 * CAUTION
18 *
19 * This driver is very simple.
20 * So, it doesn't have below support now
21 * - MIR/FIR support
22 * - DMA transfer support
23 * - FIFO mode support
24 */
25#include <linux/io.h>
26#include <linux/interrupt.h>
27#include <linux/module.h>
28#include <linux/platform_device.h>
29#include <linux/pm_runtime.h>
30#include <linux/clk.h>
31#include <net/irda/wrapper.h>
32#include <net/irda/irda_device.h>
33
34#define DRIVER_NAME "sh_irda"
35
36#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
37#define __IRDARAM_LEN 0x13FF
38#else
39#define __IRDARAM_LEN 0x1039
40#endif
41
42#define IRTMR 0x1F00 /* Transfer mode */
43#define IRCFR 0x1F02 /* Configuration */
44#define IRCTR 0x1F04 /* IR control */
45#define IRTFLR 0x1F20 /* Transmit frame length */
46#define IRTCTR 0x1F22 /* Transmit control */
47#define IRRFLR 0x1F40 /* Receive frame length */
48#define IRRCTR 0x1F42 /* Receive control */
49#define SIRISR 0x1F60 /* SIR-UART mode interrupt source */
50#define SIRIMR 0x1F62 /* SIR-UART mode interrupt mask */
51#define SIRICR 0x1F64 /* SIR-UART mode interrupt clear */
52#define SIRBCR 0x1F68 /* SIR-UART mode baud rate count */
53#define MFIRISR 0x1F70 /* MIR/FIR mode interrupt source */
54#define MFIRIMR 0x1F72 /* MIR/FIR mode interrupt mask */
55#define MFIRICR 0x1F74 /* MIR/FIR mode interrupt clear */
56#define CRCCTR 0x1F80 /* CRC engine control */
57#define CRCIR 0x1F86 /* CRC engine input data */
58#define CRCCR 0x1F8A /* CRC engine calculation */
59#define CRCOR 0x1F8E /* CRC engine output data */
60#define FIFOCP 0x1FC0 /* FIFO current pointer */
61#define FIFOFP 0x1FC2 /* FIFO follow pointer */
62#define FIFORSMSK 0x1FC4 /* FIFO receive status mask */
63#define FIFORSOR 0x1FC6 /* FIFO receive status OR */
64#define FIFOSEL 0x1FC8 /* FIFO select */
65#define FIFORS 0x1FCA /* FIFO receive status */
66#define FIFORFL 0x1FCC /* FIFO receive frame length */
67#define FIFORAMCP 0x1FCE /* FIFO RAM current pointer */
68#define FIFORAMFP 0x1FD0 /* FIFO RAM follow pointer */
69#define BIFCTL 0x1FD2 /* BUS interface control */
70#define IRDARAM 0x0000 /* IrDA buffer RAM */
71#define IRDARAM_LEN __IRDARAM_LEN /* - 8/16/32 (read-only for 32) */
72
73/* IRTMR */
74#define TMD_MASK (0x3 << 14) /* Transfer Mode */
75#define TMD_SIR (0x0 << 14)
76#define TMD_MIR (0x3 << 14)
77#define TMD_FIR (0x2 << 14)
78
79#define FIFORIM (1 << 8) /* FIFO receive interrupt mask */
80#define MIM (1 << 4) /* MIR/FIR Interrupt Mask */
81#define SIM (1 << 0) /* SIR Interrupt Mask */
82#define xIM_MASK (FIFORIM | MIM | SIM)
83
84/* IRCFR */
85#define RTO_SHIFT 8 /* shift for Receive Timeout */
86#define RTO (0x3 << RTO_SHIFT)
87
88/* IRTCTR */
89#define ARMOD (1 << 15) /* Auto-Receive Mode */
90#define TE (1 << 0) /* Transmit Enable */
91
92/* IRRFLR */
93#define RFL_MASK (0x1FFF) /* mask for Receive Frame Length */
94
95/* IRRCTR */
96#define RE (1 << 0) /* Receive Enable */
97
98/*
99 * SIRISR, SIRIMR, SIRICR,
100 * MFIRISR, MFIRIMR, MFIRICR
101 */
102#define FRE (1 << 15) /* Frame Receive End */
103#define TROV (1 << 11) /* Transfer Area Overflow */
104#define xIR_9 (1 << 9)
105#define TOT xIR_9 /* for SIR Timeout */
106#define ABTD xIR_9 /* for MIR/FIR Abort Detection */
107#define xIR_8 (1 << 8)
108#define FER xIR_8 /* for SIR Framing Error */
109#define CRCER xIR_8 /* for MIR/FIR CRC error */
110#define FTE (1 << 7) /* Frame Transmit End */
111#define xIR_MASK (FRE | TROV | xIR_9 | xIR_8 | FTE)
112
113/* SIRBCR */
114#define BRC_MASK (0x3F) /* mask for Baud Rate Count */
115
116/* CRCCTR */
117#define CRC_RST (1 << 15) /* CRC Engine Reset */
118#define CRC_CT_MASK 0x0FFF /* mask for CRC Engine Input Data Count */
119
120/* CRCIR */
121#define CRC_IN_MASK 0x0FFF /* mask for CRC Engine Input Data */
122
123/************************************************************************
124
125
126 enum / structure
127
128
129************************************************************************/
130enum sh_irda_mode {
131 SH_IRDA_NONE = 0,
132 SH_IRDA_SIR,
133 SH_IRDA_MIR,
134 SH_IRDA_FIR,
135};
136
137struct sh_irda_self;
138struct sh_irda_xir_func {
139 int (*xir_fre) (struct sh_irda_self *self);
140 int (*xir_trov) (struct sh_irda_self *self);
141 int (*xir_9) (struct sh_irda_self *self);
142 int (*xir_8) (struct sh_irda_self *self);
143 int (*xir_fte) (struct sh_irda_self *self);
144};
145
146struct sh_irda_self {
147 void __iomem *membase;
148 unsigned int irq;
149 struct platform_device *pdev;
150
151 struct net_device *ndev;
152
153 struct irlap_cb *irlap;
154 struct qos_info qos;
155
156 iobuff_t tx_buff;
157 iobuff_t rx_buff;
158
159 enum sh_irda_mode mode;
160 spinlock_t lock;
161
162 struct sh_irda_xir_func *xir_func;
163};
164
165/************************************************************************
166
167
168 common function
169
170
171************************************************************************/
/* Locked 16bit register write; all register access is serialized here. */
static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
{
	unsigned long flags;

	spin_lock_irqsave(&self->lock, flags);
	iowrite16(data, self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);
}

/* Locked 16bit register read. */
static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
{
	unsigned long flags;
	u16 ret;

	spin_lock_irqsave(&self->lock, flags);
	ret = ioread16(self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);

	return ret;
}
192
193static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
194 u16 mask, u16 data)
195{
196 unsigned long flags;
197 u16 old, new;
198
199 spin_lock_irqsave(&self->lock, flags);
200 old = ioread16(self->membase + offset);
201 new = (old & ~mask) | data;
202 if (old != new)
203 iowrite16(data, self->membase + offset);
204 spin_unlock_irqrestore(&self->lock, flags);
205}
206
207/************************************************************************
208
209
210 mode function
211
212
213************************************************************************/
214/*=====================================
215 *
216 * common
217 *
218 *=====================================*/
/* Enable/disable the receiver via IRRCTR.RE. */
static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
{
	struct device *dev = &self->ndev->dev;

	sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
	dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
}

/*
 * Program the receive timeout (IRCFR.RTO); @interval is 0-2, forced
 * to 0 outside SIR mode. Returns -EINVAL if out of range.
 */
static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
{
	struct device *dev = &self->ndev->dev;

	if (SH_IRDA_SIR != self->mode)
		interval = 0;

	if (interval < 0 || interval > 2) {
		dev_err(dev, "unsupported timeout interval\n");
		return -EINVAL;
	}

	sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
	return 0;
}

/*
 * Program the SIR baud rate counter. Negative @baudrate means
 * "no change"; only valid in SIR mode (-EINVAL otherwise).
 */
static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
{
	struct device *dev = &self->ndev->dev;
	u16 val;

	if (baudrate < 0)
		return 0;

	if (SH_IRDA_SIR != self->mode) {
		dev_err(dev, "it is not SIR mode\n");
		return -EINVAL;
	}

	/*
	 * Baud rate (bits/s) =
	 *	(48 MHz / 26) / (baud rate counter value + 1) x 16
	 */
	val = (48000000 / 26 / 16 / baudrate) - 1;
	dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);

	sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);

	return 0;
}

/* Length in bytes of the frame currently in IRDARAM (from IRRFLR). */
static int sh_irda_get_rcv_length(struct sh_irda_self *self)
{
	return RFL_MASK & sh_irda_read(self, IRRFLR);
}
272
273/*=====================================
274 *
275 * NONE MODE
276 *
277 *=====================================*/
/*
 * NONE-mode interrupt handlers: nothing should fire while no mode is
 * selected, so each handler only logs an error.
 */
static int sh_irda_xir_fre(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: frame recv\n");
	return 0;
}

static int sh_irda_xir_trov(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: buffer ram over\n");
	return 0;
}

static int sh_irda_xir_9(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: time over\n");
	return 0;
}

static int sh_irda_xir_8(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: framing error\n");
	return 0;
}

static int sh_irda_xir_fte(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: frame transmit end\n");
	return 0;
}

/* Dispatch table for SH_IRDA_NONE. */
static struct sh_irda_xir_func sh_irda_xir_func = {
	.xir_fre	= sh_irda_xir_fre,
	.xir_trov	= sh_irda_xir_trov,
	.xir_9		= sh_irda_xir_9,
	.xir_8		= sh_irda_xir_8,
	.xir_fte	= sh_irda_xir_fte,
};

/*=====================================
 *
 *		MIR/FIR MODE
 *
 * MIR/FIR are not supported now
 *=====================================*/
/* MIR/FIR reuse the log-only NONE handlers until real support exists. */
static struct sh_irda_xir_func sh_irda_mfir_func = {
	.xir_fre	= sh_irda_xir_fre,
	.xir_trov	= sh_irda_xir_trov,
	.xir_9		= sh_irda_xir_9,
	.xir_8		= sh_irda_xir_8,
	.xir_fte	= sh_irda_xir_fte,
};
334
335/*=====================================
336 *
337 * SIR MODE
338 *
339 *=====================================*/
/*
 * SIR frame-receive handler: drain the received frame out of the
 * on-chip IRDARAM and push it byte-by-byte through the SIR unwrapper.
 */
static int sh_irda_sir_fre(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	u16 data16;
	u8 *data = (u8 *)&data16;	/* byte view of each 16bit register read */
	int len = sh_irda_get_rcv_length(self);
	int i, j;

	/* never read past the end of the IRDARAM window */
	if (len > IRDARAM_LEN)
		len = IRDARAM_LEN;

	dev_dbg(dev, "frame recv length = %d\n", len);

	/*
	 * IRDARAM is read one 16bit word at a time: fetch a fresh word on
	 * every even byte index, then feed the two bytes individually.
	 * NOTE(review): the data[j] byte order assumes the CPU byte layout
	 * matches the IRDARAM layout — confirm on big/little-endian parts.
	 */
	for (i = 0; i < len; i++) {
		j = i % 2;
		if (!j)
			data16 = sh_irda_read(self, IRDARAM + i);

		async_unwrap_char(self->ndev, &self->ndev->stats,
				  &self->rx_buff, data[j]);
	}
	self->ndev->last_rx = jiffies;

	/* re-arm the receiver for the next frame */
	sh_irda_rcv_ctrl(self, 1);

	return 0;
}
367
368static int sh_irda_sir_trov(struct sh_irda_self *self)
369{
370 struct device *dev = &self->ndev->dev;
371
372 dev_err(dev, "buffer ram over\n");
373 sh_irda_rcv_ctrl(self, 1);
374 return 0;
375}
376
377static int sh_irda_sir_tot(struct sh_irda_self *self)
378{
379 struct device *dev = &self->ndev->dev;
380
381 dev_err(dev, "time over\n");
382 sh_irda_set_baudrate(self, 9600);
383 sh_irda_rcv_ctrl(self, 1);
384 return 0;
385}
386
387static int sh_irda_sir_fer(struct sh_irda_self *self)
388{
389 struct device *dev = &self->ndev->dev;
390
391 dev_err(dev, "framing error\n");
392 sh_irda_rcv_ctrl(self, 1);
393 return 0;
394}
395
396static int sh_irda_sir_fte(struct sh_irda_self *self)
397{
398 struct device *dev = &self->ndev->dev;
399
400 dev_dbg(dev, "frame transmit end\n");
401 netif_wake_queue(self->ndev);
402
403 return 0;
404}
405
/* Fully-implemented event dispatch table for SIR mode. */
static struct sh_irda_xir_func sh_irda_sir_func = {
	.xir_fre	= sh_irda_sir_fre,
	.xir_trov	= sh_irda_sir_trov,
	.xir_9		= sh_irda_sir_tot,
	.xir_8		= sh_irda_sir_fer,
	.xir_fte	= sh_irda_sir_fte,
};
413
414static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
415{
416 struct device *dev = &self->ndev->dev;
417 struct sh_irda_xir_func *func;
418 const char *name;
419 u16 data;
420
421 switch (mode) {
422 case SH_IRDA_SIR:
423 name = "SIR";
424 data = TMD_SIR;
425 func = &sh_irda_sir_func;
426 break;
427 case SH_IRDA_MIR:
428 name = "MIR";
429 data = TMD_MIR;
430 func = &sh_irda_mfir_func;
431 break;
432 case SH_IRDA_FIR:
433 name = "FIR";
434 data = TMD_FIR;
435 func = &sh_irda_mfir_func;
436 break;
437 default:
438 name = "NONE";
439 data = 0;
440 func = &sh_irda_xir_func;
441 break;
442 }
443
444 self->mode = mode;
445 self->xir_func = func;
446 sh_irda_update_bits(self, IRTMR, TMD_MASK, data);
447
448 dev_dbg(dev, "switch to %s mode", name);
449}
450
451/************************************************************************
452
453
454 irq function
455
456
457************************************************************************/
/*
 * Reprogram the interrupt masks for the current transfer mode:
 * mask and ack everything first, then unmask only the sources
 * belonging to the active mode.
 */
static void sh_irda_set_irq_mask(struct sh_irda_self *self)
{
	u16 tmr_hole;	/* IRTMR mask bit that gates the mode's interrupts */
	u16 xir_reg;	/* per-mode interrupt mask register (0 = none) */

	/* set all mask */
	sh_irda_update_bits(self, IRTMR, xIM_MASK, xIM_MASK);
	sh_irda_update_bits(self, SIRIMR, xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);

	/* clear irq */
	sh_irda_update_bits(self, SIRICR, xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);

	/* choose which register pair matches the current mode */
	switch (self->mode) {
	case SH_IRDA_SIR:
		tmr_hole	= SIM;
		xir_reg		= SIRIMR;
		break;
	case SH_IRDA_MIR:
	case SH_IRDA_FIR:
		tmr_hole	= MIM;
		xir_reg		= MFIRIMR;
		break;
	default:
		/* "none" mode: leave everything masked */
		tmr_hole	= 0;
		xir_reg		= 0;
		break;
	}

	/* open mask */
	if (xir_reg) {
		sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
		sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
	}
}
494
/*
 * Interrupt handler: ack every pending SIR event, then dispatch
 * each one to the handler table installed for the current mode.
 */
static irqreturn_t sh_irda_irq(int irq, void *dev_id)
{
	struct sh_irda_self *self = dev_id;
	struct sh_irda_xir_func *func = self->xir_func;
	u16 isr = sh_irda_read(self, SIRISR);

	/* clear irq */
	sh_irda_write(self, SIRICR, isr);

	/* one callback per asserted event bit */
	if (isr & FRE)
		func->xir_fre(self);
	if (isr & TROV)
		func->xir_trov(self);
	if (isr & xIR_9)
		func->xir_9(self);
	if (isr & xIR_8)
		func->xir_8(self);
	if (isr & FTE)
		func->xir_fte(self);

	return IRQ_HANDLED;
}
517
518/************************************************************************
519
520
521 CRC function
522
523
524************************************************************************/
/* Reset the hardware CRC engine ahead of a new calculation. */
static void sh_irda_crc_reset(struct sh_irda_self *self)
{
	sh_irda_write(self, CRCCTR, CRC_RST);
}
529
/* Feed one input value (masked to the valid bits) into the CRC engine. */
static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
{
	sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
}
534
/* Read back how many values the CRC engine has consumed so far. */
static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
{
	return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
}
539
/* Read the CRC engine's current result register. */
static u16 sh_irda_crc_out(struct sh_irda_self *self)
{
	return sh_irda_read(self, CRCOR);
}
544
545static int sh_irda_crc_init(struct sh_irda_self *self)
546{
547 struct device *dev = &self->ndev->dev;
548 int ret = -EIO;
549 u16 val;
550
551 sh_irda_crc_reset(self);
552
553 sh_irda_crc_add(self, 0xCC);
554 sh_irda_crc_add(self, 0xF5);
555 sh_irda_crc_add(self, 0xF1);
556 sh_irda_crc_add(self, 0xA7);
557
558 val = sh_irda_crc_cnt(self);
559 if (4 != val) {
560 dev_err(dev, "CRC count error %x\n", val);
561 goto crc_init_out;
562 }
563
564 val = sh_irda_crc_out(self);
565 if (0x51DF != val) {
566 dev_err(dev, "CRC result error%x\n", val);
567 goto crc_init_out;
568 }
569
570 ret = 0;
571
572crc_init_out:
573
574 sh_irda_crc_reset(self);
575 return ret;
576}
577
578/************************************************************************
579
580
581 iobuf function
582
583
584************************************************************************/
585static void sh_irda_remove_iobuf(struct sh_irda_self *self)
586{
587 kfree(self->rx_buff.head);
588
589 self->tx_buff.head = NULL;
590 self->tx_buff.data = NULL;
591 self->rx_buff.head = NULL;
592 self->rx_buff.data = NULL;
593}
594
/*
 * Allocate/initialize the rx and tx buffers.
 *
 * rx_buff is a kmalloc'ed bounce buffer of @rxsize bytes; tx_buff maps
 * straight onto the device's IRDARAM window (no allocation needed).
 * NOTE(review): @txsize is currently unused — the tx capacity is fixed
 * at IRDARAM_LEN. Confirm whether that is intentional.
 *
 * Returns 0 on success, -EINVAL if buffers already exist,
 * -ENOMEM if the rx allocation fails.
 */
static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
{
	if (self->rx_buff.head ||
	    self->tx_buff.head) {
		dev_err(&self->ndev->dev, "iobuff has already existed.");
		return -EINVAL;
	}

	/* rx_buff */
	self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
	if (!self->rx_buff.head)
		return -ENOMEM;

	self->rx_buff.truesize	= rxsize;
	self->rx_buff.in_frame	= FALSE;
	self->rx_buff.state	= OUTSIDE_FRAME;
	self->rx_buff.data	= self->rx_buff.head;

	/* tx_buff: wraps directly into device RAM, capacity fixed */
	self->tx_buff.head	= self->membase + IRDARAM;
	self->tx_buff.truesize	= IRDARAM_LEN;

	return 0;
}
619
620/************************************************************************
621
622
623 net_device_ops function
624
625
626************************************************************************/
/*
 * .ndo_start_xmit: wrap the skb in SIR framing straight into the
 * device's tx RAM and start the transmission. The queue is stopped
 * here and woken again from the frame-transmit-end interrupt
 * (sh_irda_sir_fte) or on the error path below.
 */
static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);
	struct device *dev = &self->ndev->dev;
	int speed = irda_get_next_speed(skb);
	int ret;

	dev_dbg(dev, "hard xmit\n");

	/* no further tx, and no rx while we transmit */
	netif_stop_queue(ndev);
	sh_irda_rcv_ctrl(self, 0);

	ret = sh_irda_set_baudrate(self, speed);
	if (ret < 0)
		goto sh_irda_hard_xmit_end;

	self->tx_buff.len = 0;
	if (skb->len) {
		unsigned long flags;

		/* serialize the wrap into the shared tx buffer */
		spin_lock_irqsave(&self->lock, flags);
		self->tx_buff.len = async_wrap_skb(skb,
						   self->tx_buff.head,
						   self->tx_buff.truesize);
		spin_unlock_irqrestore(&self->lock, flags);

		/* clamp in case the wrapped length exceeds the buffer */
		if (self->tx_buff.len > self->tx_buff.truesize)
			self->tx_buff.len = self->tx_buff.truesize;

		/* program the length, then kick off the transfer */
		sh_irda_write(self, IRTFLR, self->tx_buff.len);
		sh_irda_write(self, IRTCTR, ARMOD | TE);
	} else
		goto sh_irda_hard_xmit_end;	/* empty skb: nothing to send */

	dev_kfree_skb(skb);

	return 0;

sh_irda_hard_xmit_end:
	/* restore a sane state (9600 baud, rx on, queue awake), drop skb */
	sh_irda_set_baudrate(self, 9600);
	netif_wake_queue(self->ndev);
	sh_irda_rcv_ctrl(self, 1);
	dev_kfree_skb(skb);

	return ret;

}
674
static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
{
	/*
	 * FIXME
	 *
	 * The irda framework requires this hook to exist,
	 * but there is currently nothing for it to do.
	 */
	return 0;
}
685
686static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
687{
688 struct sh_irda_self *self = netdev_priv(ndev);
689
690 return &self->ndev->stats;
691}
692
/*
 * .ndo_open: power the device up, verify the CRC engine, configure
 * SIR mode at 9600 baud, open an IrLAP instance and enable rx/irqs.
 * On any failure the runtime-PM reference taken here is dropped.
 */
static int sh_irda_open(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);
	int err;

	pm_runtime_get_sync(&self->pdev->dev);
	err = sh_irda_crc_init(self);
	if (err)
		goto open_err;

	/* default bring-up: SIR mode, timeout 2, 9600 baud */
	sh_irda_set_mode(self, SH_IRDA_SIR);
	sh_irda_set_timeout(self, 2);
	sh_irda_set_baudrate(self, 9600);

	self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
	if (!self->irlap) {
		err = -ENODEV;
		goto open_err;
	}

	netif_start_queue(ndev);
	sh_irda_rcv_ctrl(self, 1);
	sh_irda_set_irq_mask(self);

	dev_info(&ndev->dev, "opened\n");

	return 0;

open_err:
	/* balance the pm_runtime_get_sync() above */
	pm_runtime_put_sync(&self->pdev->dev);

	return err;
}
726
727static int sh_irda_stop(struct net_device *ndev)
728{
729 struct sh_irda_self *self = netdev_priv(ndev);
730
731 /* Stop IrLAP */
732 if (self->irlap) {
733 irlap_close(self->irlap);
734 self->irlap = NULL;
735 }
736
737 netif_stop_queue(ndev);
738 pm_runtime_put_sync(&self->pdev->dev);
739
740 dev_info(&ndev->dev, "stopped\n");
741
742 return 0;
743}
744
/* net_device callbacks exposed to the networking core. */
static const struct net_device_ops sh_irda_ndo = {
	.ndo_open		= sh_irda_open,
	.ndo_stop		= sh_irda_stop,
	.ndo_start_xmit		= sh_irda_hard_xmit,
	.ndo_do_ioctl		= sh_irda_ioctl,
	.ndo_get_stats		= sh_irda_stats,
};
752
753/************************************************************************
754
755
756 platform_driver function
757
758
759************************************************************************/
760static int __devinit sh_irda_probe(struct platform_device *pdev)
761{
762 struct net_device *ndev;
763 struct sh_irda_self *self;
764 struct resource *res;
765 int irq;
766 int err = -ENOMEM;
767
768 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
769 irq = platform_get_irq(pdev, 0);
770 if (!res || irq < 0) {
771 dev_err(&pdev->dev, "Not enough platform resources.\n");
772 goto exit;
773 }
774
775 ndev = alloc_irdadev(sizeof(*self));
776 if (!ndev)
777 goto exit;
778
779 self = netdev_priv(ndev);
780 self->membase = ioremap_nocache(res->start, resource_size(res));
781 if (!self->membase) {
782 err = -ENXIO;
783 dev_err(&pdev->dev, "Unable to ioremap.\n");
784 goto err_mem_1;
785 }
786
787 err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
788 if (err)
789 goto err_mem_2;
790
791 self->pdev = pdev;
792 pm_runtime_enable(&pdev->dev);
793
794 irda_init_max_qos_capabilies(&self->qos);
795
796 ndev->netdev_ops = &sh_irda_ndo;
797 ndev->irq = irq;
798
799 self->ndev = ndev;
800 self->qos.baud_rate.bits &= IR_9600; /* FIXME */
801 self->qos.min_turn_time.bits = 1; /* 10 ms or more */
802 spin_lock_init(&self->lock);
803
804 irda_qos_bits_to_value(&self->qos);
805
806 err = register_netdev(ndev);
807 if (err)
808 goto err_mem_4;
809
810 platform_set_drvdata(pdev, ndev);
811
812 if (request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self)) {
813 dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
814 goto err_mem_4;
815 }
816
817 dev_info(&pdev->dev, "SuperH IrDA probed\n");
818
819 goto exit;
820
821err_mem_4:
822 pm_runtime_disable(&pdev->dev);
823 sh_irda_remove_iobuf(self);
824err_mem_2:
825 iounmap(self->membase);
826err_mem_1:
827 free_netdev(ndev);
828exit:
829 return err;
830}
831
832static int __devexit sh_irda_remove(struct platform_device *pdev)
833{
834 struct net_device *ndev = platform_get_drvdata(pdev);
835 struct sh_irda_self *self = netdev_priv(ndev);
836
837 if (!self)
838 return 0;
839
840 unregister_netdev(ndev);
841 pm_runtime_disable(&pdev->dev);
842 sh_irda_remove_iobuf(self);
843 iounmap(self->membase);
844 free_netdev(ndev);
845 platform_set_drvdata(pdev, NULL);
846
847 return 0;
848}
849
static int sh_irda_runtime_nop(struct device *dev)
{
	/*
	 * Shared ->runtime_suspend()/->runtime_resume() callback that
	 * just succeeds: this driver reprograms all registers after
	 * pm_runtime_get_sync(), so there is nothing to save or
	 * restore here.
	 */
	return 0;
}
861
/* Runtime PM ops: both directions share the same no-op callback. */
static const struct dev_pm_ops sh_irda_pm_ops = {
	.runtime_suspend	= sh_irda_runtime_nop,
	.runtime_resume		= sh_irda_runtime_nop,
};
866
/* Platform driver glue binding probe/remove to the "sh_irda" name. */
static struct platform_driver sh_irda_driver = {
	.probe	= sh_irda_probe,
	.remove	= __devexit_p(sh_irda_remove),
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &sh_irda_pm_ops,
	},
};
875
/* Standard module boilerplate: registers sh_irda_driver on load. */
module_platform_driver(sh_irda_driver);

MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
MODULE_LICENSE("GPL");