/*
 * SuperH IrDA Driver
 *
 * Copyright (C) 2009 Renesas Solutions Corp.
 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
 *
 * Based on bfin_sir.c
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include <asm/clock.h>

#define DRIVER_NAME "sh_sir"

#define RX_PHASE	(1 << 0)
#define TX_PHASE	(1 << 1)
#define TX_COMP_PHASE	(1 << 2) /* tx complete */
#define NONE_PHASE	(1 << 31)

#define IRIF_RINTCLR	0x0016 /* DMA rx interrupt source clear */
#define IRIF_TINTCLR	0x0018 /* DMA tx interrupt source clear */
#define IRIF_SIR0	0x0020 /* IrDA-SIR10 control */
#define IRIF_SIR1	0x0022 /* IrDA-SIR10 baudrate error correction */
#define IRIF_SIR2	0x0024 /* IrDA-SIR10 baudrate count */
#define IRIF_SIR3	0x0026 /* IrDA-SIR10 status */
#define IRIF_SIR_FRM	0x0028 /* Hardware frame processing set */
#define IRIF_SIR_EOF	0x002A /* EOF value */
#define IRIF_SIR_FLG	0x002C /* Flag clear */
#define IRIF_UART_STS2	0x002E /* UART status 2 */
#define IRIF_UART0	0x0030 /* UART control */
#define IRIF_UART1	0x0032 /* UART status */
#define IRIF_UART2	0x0034 /* UART mode */
#define IRIF_UART3	0x0036 /* UART transmit data */
#define IRIF_UART4	0x0038 /* UART receive data */
#define IRIF_UART5	0x003A /* UART interrupt mask */
#define IRIF_UART6	0x003C /* UART baud rate error correction */
#define IRIF_UART7	0x003E /* UART baud rate count set */
#define IRIF_CRC0	0x0040 /* CRC engine control */
#define IRIF_CRC1	0x0042 /* CRC engine input data */
#define IRIF_CRC2	0x0044 /* CRC engine calculation */
#define IRIF_CRC3	0x0046 /* CRC engine output data 1 */
#define IRIF_CRC4	0x0048 /* CRC engine output data 2 */

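/*
 * All of the registers above are 16-bit and are accessed relative to the
 * MMIO resource handed in by the platform device (see sh_sir_read() and
 * sh_sir_write() below).  IRIF_SIR0..3 drive the IrDA pulse/baud logic,
 * IRIF_UART0..7 the embedded UART, and IRIF_CRC0..4 the CRC engine used
 * for the self-test in sh_sir_crc_init().
 */
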
/* IRIF_SIR0 */
#define IRTPW		(1 << 1) /* transmit pulse width select */
#define IRERRC		(1 << 0) /* Clear receive pulse width error */

/* IRIF_SIR3 */
#define IRERR		(1 << 0) /* received pulse width Error */

/* IRIF_SIR_FRM */
#define EOFD		(1 << 9) /* EOF detection flag */
#define FRER		(1 << 8) /* Frame Error bit */
#define FRP		(1 << 0) /* Frame processing set */

/* IRIF_UART_STS2 */
#define IRSME		(1 << 6) /* Receive Sum     Error flag */
#define IROVE		(1 << 5) /* Receive Overrun Error flag */
#define IRFRE		(1 << 4) /* Receive Framing Error flag */
#define IRPRE		(1 << 3) /* Receive Parity  Error flag */

/* IRIF_UART0 */
#define TBEC		(1 << 2) /* Transmit Data Clear */
#define RIE		(1 << 1) /* Receive Enable */
#define TIE		(1 << 0) /* Transmit Enable */

/* IRIF_UART1 */
#define URSME		(1 << 6) /* Receive Sum Error Flag */
#define UROVE		(1 << 5) /* Receive Overrun Error Flag */
#define URFRE		(1 << 4) /* Receive Framing Error Flag */
#define URPRE		(1 << 3) /* Receive Parity Error Flag */
#define RBF		(1 << 2) /* Receive Buffer Full Flag */
#define TSBE		(1 << 1) /* Transmit Shift Buffer Empty Flag */
#define TBE		(1 << 0) /* Transmit Buffer Empty flag */
#define TBCOMP		(TSBE | TBE)

/* IRIF_UART5 */
#define RSEIM		(1 << 6) /* Receive Sum Error Flag IRQ Mask */
#define RBFIM		(1 << 2) /* Receive Buffer Full Flag IRQ Mask */
#define TSBEIM		(1 << 1) /* Transmit Shift Buffer Empty Flag IRQ Mask */
#define TBEIM		(1 << 0) /* Transmit Buffer Empty Flag IRQ Mask */
#define RX_MASK		(RSEIM  | RBFIM)

/* IRIF_CRC0 */
#define CRC_RST		(1 << 15) /* CRC Engine Reset */
#define CRC_CT_MASK	0x0FFF

/************************************************************************


			structure


************************************************************************/
struct sh_sir_self {
	void __iomem		*membase;
	unsigned int		 irq;
	struct clk		*clk;

	struct net_device	*ndev;

	struct irlap_cb		*irlap;
	struct qos_info		qos;

	iobuff_t		tx_buff;
	iobuff_t		rx_buff;
};
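
/*
 * Per-device state: membase/irq/clk describe the hardware instance,
 * ndev/irlap/qos tie it into the IrDA stack, and tx_buff/rx_buff are
 * the SIR wrap/unwrap buffers allocated by sh_sir_init_iobuf().
 */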

/************************************************************************


			common function


************************************************************************/
static void sh_sir_write(struct sh_sir_self *self, u32 offset, u16 data)
{
	iowrite16(data, self->membase + offset);
}

static u16 sh_sir_read(struct sh_sir_self *self, u32 offset)
{
	return ioread16(self->membase + offset);
}

static void sh_sir_update_bits(struct sh_sir_self *self, u32 offset,
			       u16 mask, u16 data)
{
	u16 old, new;

	old = sh_sir_read(self, offset);
	new = (old & ~mask) | data;
	if (old != new)
		sh_sir_write(self, offset, new);
}
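
/*
 * sh_sir_update_bits() is a read-modify-write helper: only the bits in
 * "mask" are replaced with "data", and the register is rewritten only if
 * the value actually changes.  For example, sh_sir_clear_all_err() below
 * uses
 *
 *	sh_sir_update_bits(self, IRIF_SIR0, IRERRC, IRERRC);
 *
 * to set IRERRC while leaving IRTPW untouched.
 */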

/************************************************************************


			CRC function


************************************************************************/
static void sh_sir_crc_reset(struct sh_sir_self *self)
{
	sh_sir_write(self, IRIF_CRC0, CRC_RST);
}

static void sh_sir_crc_add(struct sh_sir_self *self, u8 data)
{
	sh_sir_write(self, IRIF_CRC1, (u16)data);
}

static u16 sh_sir_crc_cnt(struct sh_sir_self *self)
{
	return CRC_CT_MASK & sh_sir_read(self, IRIF_CRC0);
}

static u16 sh_sir_crc_out(struct sh_sir_self *self)
{
	return sh_sir_read(self, IRIF_CRC4);
}

static int sh_sir_crc_init(struct sh_sir_self *self)
{
	struct device *dev = &self->ndev->dev;
	int ret = -EIO;
	u16 val;

	sh_sir_crc_reset(self);

	sh_sir_crc_add(self, 0xCC);
	sh_sir_crc_add(self, 0xF5);
	sh_sir_crc_add(self, 0xF1);
	sh_sir_crc_add(self, 0xA7);

	val = sh_sir_crc_cnt(self);
	if (4 != val) {
		dev_err(dev, "CRC count error %x\n", val);
		goto crc_init_out;
	}

	val = sh_sir_crc_out(self);
	if (0x51DF != val) {
		dev_err(dev, "CRC result error %x\n", val);
		goto crc_init_out;
	}

	ret = 0;

crc_init_out:

	sh_sir_crc_reset(self);
	return ret;
}
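
/*
 * sh_sir_crc_init() acts as a self-test of the CRC engine: it resets the
 * engine, feeds it a fixed four-byte pattern (0xCC 0xF5 0xF1 0xA7) and
 * then checks that the engine counted four input bytes and produced the
 * expected result 0x51DF.  sh_sir_open() refuses to bring the interface
 * up if this check fails.
 */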

/************************************************************************


			baud rate functions


************************************************************************/
#define SCLK_BASE 1843200 /* 1.8432MHz */

static u32 sh_sir_find_sclk(struct clk *irda_clk)
{
	struct cpufreq_frequency_table *freq_table = irda_clk->freq_table;
	struct clk *pclk = clk_get(NULL, "peripheral_clk");
	u32 limit, min = 0xffffffff, tmp;
	int i, index = 0;

	limit = clk_get_rate(pclk);
	clk_put(pclk);

	/* the IrDA clock can not be set above peripheral_clk */
	for (i = 0;
	     freq_table[i].frequency != CPUFREQ_TABLE_END;
	     i++) {
		u32 freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		/* skip rates above peripheral_clk */
		if (freq > limit)
			continue;

		tmp = freq % SCLK_BASE;
		if (tmp < min) {
			min = tmp;
			index = i;
		}
	}

	return freq_table[index].frequency;
}
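
/*
 * sh_sir_find_sclk() picks, from the candidate rates the clock supports,
 * the one that is closest to an exact multiple of SCLK_BASE (1.8432 MHz)
 * without exceeding peripheral_clk.  As a purely hypothetical example,
 * given candidate rates of 22118400, 26000000 and 48000000 Hz with a
 * 33 MHz peripheral_clk limit, 48000000 is skipped (over the limit) and
 * 22118400 wins because 22118400 % 1843200 == 0, while
 * 26000000 % 1843200 == 195200.
 */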

#define ERR_ROUNDING(a) (((a) + 5000) / 10000)
static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
{
	struct clk *clk;
	struct device *dev = &self->ndev->dev;
	u32 rate;
	u16 uabca, uabc;
	u16 irbca, irbc;
	u32 min, rerr, tmp;
	int i;

	/* Baud Rate Error Correction x 10000 */
	u32 rate_err_array[] = {
		   0,  625, 1250, 1875,
		2500, 3125, 3750, 4375,
		5000, 5625, 6250, 6875,
		7500, 8125, 8750, 9375,
	};

	/*
	 * FIXME
	 *
	 * only 9600 bps is supported for now
	 */
	switch (baudrate) {
	case 9600:
		break;
	default:
		dev_err(dev, "unsupported baudrate %d\n", baudrate);
		return -EIO;
	}

	clk = clk_get(NULL, "irda_clk");
	if (IS_ERR(clk)) {
		dev_err(dev, "can not get irda_clk\n");
		return -EIO;
	}

	clk_set_rate(clk, sh_sir_find_sclk(clk));
	rate = clk_get_rate(clk);
	clk_put(clk);

	dev_dbg(dev, "selected sclk = %d\n", rate);

	/*
	 * CALCULATION
	 *
	 * 1843200 = system rate / (irbca + (irbc + 1))
	 */

	irbc = rate / SCLK_BASE;

	tmp = rate - (SCLK_BASE * irbc);
	tmp *= 10000;

	rerr = tmp / SCLK_BASE;

	min = 0xffffffff;
	irbca = 0;
	for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
		tmp = abs(rate_err_array[i] - rerr);
		if (min > tmp) {
			min = tmp;
			irbca = i;
		}
	}

	tmp = rate / (irbc + ERR_ROUNDING(rate_err_array[irbca]));
	if ((SCLK_BASE / 100) < abs(tmp - SCLK_BASE))
		dev_warn(dev, "IrDA freq error margin over %d\n", tmp);

	dev_dbg(dev, "target = %d, result = %d, infrared = %d.%d\n",
	       SCLK_BASE, tmp, irbc, rate_err_array[irbca]);

	irbca = (irbca & 0xF) << 4;
	irbc  = (irbc - 1) & 0xF;

	if (!irbc) {
		dev_err(dev, "sh_sir can not set 0 in IRIF_SIR2\n");
		return -EIO;
	}

	sh_sir_write(self, IRIF_SIR0, IRTPW | IRERRC);
	sh_sir_write(self, IRIF_SIR1, irbca);
	sh_sir_write(self, IRIF_SIR2, irbc);

	/*
	 * CALCULATION
	 *
	 * BaudRate[bps] = system rate / (uabca + (uabc + 1) x 16)
	 */

	uabc = rate / baudrate;
	uabc = (uabc / 16) - 1;
	uabc = (uabc + 1) * 16;

	tmp = rate - (uabc * baudrate);
	tmp *= 10000;

	rerr = tmp / baudrate;

	min = 0xffffffff;
	uabca = 0;
	for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
		tmp = abs(rate_err_array[i] - rerr);
		if (min > tmp) {
			min = tmp;
			uabca = i;
		}
	}

	tmp = rate / (uabc + ERR_ROUNDING(rate_err_array[uabca]));
	if ((baudrate / 100) < abs(tmp - baudrate))
		dev_warn(dev, "UART freq error margin over %d\n", tmp);

	dev_dbg(dev, "target = %d, result = %d, uart = %d.%d\n",
	       baudrate, tmp,
	       uabc, rate_err_array[uabca]);

	uabca = (uabca & 0xF) << 4;
	uabc  = (uabc / 16) - 1;

	sh_sir_write(self, IRIF_UART6, uabca);
	sh_sir_write(self, IRIF_UART7, uabc);

	return 0;
}
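
/*
 * Worked example of the divider maths above, using a purely hypothetical
 * 26 MHz irda_clk (real rates come from sh_sir_find_sclk()):
 *
 *	irbc = 26000000 / 1843200                          = 14
 *	rerr = (26000000 - 14 * 1843200) * 10000 / 1843200 = 1059
 *
 * The closest entry in rate_err_array[] is 1250 (index 2), so the code
 * programs IRIF_SIR2 with 14 - 1 = 13 and IRIF_SIR1 with 2 << 4, and the
 * sanity check computes 26000000 / 14 = 1857142, which is within 1% of
 * SCLK_BASE so no warning is printed.  The UART divider is derived the
 * same way with "baudrate" in place of SCLK_BASE: the rate/baudrate
 * quotient is rounded down to a multiple of 16, and (quotient / 16) - 1
 * is what ends up in IRIF_UART7.
 */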

/************************************************************************


			iobuf function


************************************************************************/
static int __sh_sir_init_iobuf(iobuff_t *io, int size)
{
	io->head = kmalloc(size, GFP_KERNEL);
	if (!io->head)
		return -ENOMEM;

	io->truesize	= size;
	io->in_frame	= FALSE;
	io->state	= OUTSIDE_FRAME;
	io->data	= io->head;

	return 0;
}

static void sh_sir_remove_iobuf(struct sh_sir_self *self)
{
	kfree(self->rx_buff.head);
	kfree(self->tx_buff.head);

	self->rx_buff.head = NULL;
	self->tx_buff.head = NULL;
}

static int sh_sir_init_iobuf(struct sh_sir_self *self, int rxsize, int txsize)
{
	int err = -ENOMEM;

	if (self->rx_buff.head ||
	    self->tx_buff.head) {
		dev_err(&self->ndev->dev, "iobuff already exists\n");
		return err;
	}

	err = __sh_sir_init_iobuf(&self->rx_buff, rxsize);
	if (err)
		goto iobuf_err;

	err = __sh_sir_init_iobuf(&self->tx_buff, txsize);

iobuf_err:
	if (err)
		sh_sir_remove_iobuf(self);

	return err;
}

/************************************************************************


			status function


************************************************************************/
static void sh_sir_clear_all_err(struct sh_sir_self *self)
{
	/* Clear error flag for receive pulse width */
	sh_sir_update_bits(self, IRIF_SIR0, IRERRC, IRERRC);

	/* Clear frame / EOF error flag */
	sh_sir_write(self, IRIF_SIR_FLG, 0xffff);

	/* Clear all status error */
	sh_sir_write(self, IRIF_UART_STS2, 0);
}

static void sh_sir_set_phase(struct sh_sir_self *self, int phase)
{
	u16 uart5 = 0;
	u16 uart0 = 0;

	switch (phase) {
	case TX_PHASE:
		uart5 = TBEIM;
		uart0 = TBEC | TIE;
		break;
	case TX_COMP_PHASE:
		uart5 = TSBEIM;
		uart0 = TIE;
		break;
	case RX_PHASE:
		uart5 = RX_MASK;
		uart0 = RIE;
		break;
	default:
		break;
	}

	sh_sir_write(self, IRIF_UART5, uart5);
	sh_sir_write(self, IRIF_UART0, uart0);
}
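
/*
 * Phase handling: IRIF_UART5 selects which UART events may raise the
 * interrupt and IRIF_UART0 enables the matching direction.  A transmit
 * runs TX_PHASE (one byte written per transmit-buffer-empty interrupt)
 * until tx_buff is drained, then TX_COMP_PHASE (wait for the shift
 * register to empty) and finally falls back to RX_PHASE, which is also
 * the idle state programmed by sh_sir_open().
 */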

static int sh_sir_is_which_phase(struct sh_sir_self *self)
{
	u16 val = sh_sir_read(self, IRIF_UART5);

	if (val & TBEIM)
		return TX_PHASE;

	if (val & TSBEIM)
		return TX_COMP_PHASE;

	if (val & RX_MASK)
		return RX_PHASE;

	return NONE_PHASE;
}

static void sh_sir_tx(struct sh_sir_self *self, int phase)
{
	switch (phase) {
	case TX_PHASE:
		if (0 >= self->tx_buff.len) {
			sh_sir_set_phase(self, TX_COMP_PHASE);
		} else {
			sh_sir_write(self, IRIF_UART3, self->tx_buff.data[0]);
			self->tx_buff.len--;
			self->tx_buff.data++;
		}
		break;
	case TX_COMP_PHASE:
		sh_sir_set_phase(self, RX_PHASE);
		netif_wake_queue(self->ndev);
		break;
	default:
		dev_err(&self->ndev->dev, "should not happen\n");
		break;
	}
}

static int sh_sir_read_data(struct sh_sir_self *self)
{
	u16 val = 0;
	int timeout = 1024;

	while (timeout--) {
		val = sh_sir_read(self, IRIF_UART1);

		/* data received */
		if (val & RBF) {
			if (val & (URSME | UROVE | URFRE | URPRE))
				break;

			return (int)sh_sir_read(self, IRIF_UART4);
		}

		udelay(1);
	}

	dev_err(&self->ndev->dev, "UART1 %04x : STATUS %04x\n",
		val, sh_sir_read(self, IRIF_UART_STS2));

	/* read the data register to clear the error */
	sh_sir_read(self, IRIF_UART4);

	return -1;
}

static void sh_sir_rx(struct sh_sir_self *self)
{
	int timeout = 1024;
	int data;

	while (timeout--) {
		data = sh_sir_read_data(self);
		if (data < 0)
			break;

		async_unwrap_char(self->ndev, &self->ndev->stats,
				  &self->rx_buff, (u8)data);
		self->ndev->last_rx = jiffies;

		if (EOFD & sh_sir_read(self, IRIF_SIR_FRM))
			continue;

		break;
	}
}
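
/*
 * Receive path: each byte pulled out of IRIF_UART4 is fed to
 * async_unwrap_char(), which strips the SIR framing, reassembles the
 * IrLAP frame in rx_buff and hands completed frames to the IrDA stack.
 * The loop keeps draining bytes for as long as the EOFD flag in
 * IRIF_SIR_FRM stays set, and gives up after 1024 iterations or on a
 * receive error.
 */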

static irqreturn_t sh_sir_irq(int irq, void *dev_id)
{
	struct sh_sir_self *self = dev_id;
	struct device *dev = &self->ndev->dev;
	int phase = sh_sir_is_which_phase(self);

	switch (phase) {
	case TX_COMP_PHASE:
	case TX_PHASE:
		sh_sir_tx(self, phase);
		break;
	case RX_PHASE:
		if (sh_sir_read(self, IRIF_SIR3))
			dev_err(dev, "rcv pulse width error occurred\n");

		sh_sir_rx(self);
		sh_sir_clear_all_err(self);
		break;
	default:
		dev_err(dev, "unknown interrupt\n");
	}

	return IRQ_HANDLED;
}

/************************************************************************


			net_device_ops function


************************************************************************/
static int sh_sir_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_sir_self *self = netdev_priv(ndev);
	int speed = irda_get_next_speed(skb);

	if ((0 < speed) &&
	    (9600 != speed)) {
		dev_err(&ndev->dev, "support 9600 only (%d)\n", speed);
		return -EIO;
	}

	netif_stop_queue(ndev);

	self->tx_buff.data = self->tx_buff.head;
	self->tx_buff.len = 0;
	if (skb->len)
		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
						   self->tx_buff.truesize);

	sh_sir_set_phase(self, TX_PHASE);
	dev_kfree_skb(skb);

	return 0;
}
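
/*
 * Transmit path: the queue is stopped, async_wrap_skb() converts the
 * IrLAP frame into its SIR on-the-wire form inside tx_buff, and
 * switching to TX_PHASE lets the interrupt handler push the buffer out
 * one byte per transmit-buffer-empty interrupt.  The queue is woken
 * again from TX_COMP_PHASE once the last byte has left the shift
 * register.
 */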

static int sh_sir_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
{
	/*
	 * FIXME
	 *
	 * This function is required by the IrDA framework,
	 * but there is nothing to do here yet.
	 */
	return 0;
}

static struct net_device_stats *sh_sir_stats(struct net_device *ndev)
{
	struct sh_sir_self *self = netdev_priv(ndev);

	return &self->ndev->stats;
}

static int sh_sir_open(struct net_device *ndev)
{
	struct sh_sir_self *self = netdev_priv(ndev);
	int err;

	clk_enable(self->clk);
	err = sh_sir_crc_init(self);
	if (err)
		goto open_err;

	err = sh_sir_set_baudrate(self, 9600);
	if (err)
		goto open_err;

	self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
	if (!self->irlap) {
		err = -ENODEV;
		goto open_err;
	}

	/*
	 * Now enable the interrupt then start the queue
	 */
	sh_sir_update_bits(self, IRIF_SIR_FRM, FRP, FRP);
	sh_sir_read(self, IRIF_UART1); /* flag clear */
	sh_sir_read(self, IRIF_UART4); /* flag clear */
	sh_sir_set_phase(self, RX_PHASE);

	netif_start_queue(ndev);

	dev_info(&self->ndev->dev, "opened\n");

	return 0;

open_err:
	clk_disable(self->clk);

	return err;
}
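
/*
 * Open sequence: gate the functional clock on, verify the CRC engine,
 * program the 9600 bps dividers, create the IrLAP instance and only
 * then enable hardware frame processing (FRP), clear any stale UART
 * flags and drop into RX_PHASE before the queue is started.  Any
 * failure disables the clock again on the way out.
 */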

static int sh_sir_stop(struct net_device *ndev)
{
	struct sh_sir_self *self = netdev_priv(ndev);

	/* Stop IrLAP */
	if (self->irlap) {
		irlap_close(self->irlap);
		self->irlap = NULL;
	}

	netif_stop_queue(ndev);

	dev_info(&ndev->dev, "stopped\n");

	return 0;
}

static const struct net_device_ops sh_sir_ndo = {
	.ndo_open		= sh_sir_open,
	.ndo_stop		= sh_sir_stop,
	.ndo_start_xmit		= sh_sir_hard_xmit,
	.ndo_do_ioctl		= sh_sir_ioctl,
	.ndo_get_stats		= sh_sir_stats,
};

/************************************************************************


			platform_driver function


************************************************************************/
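/*
 * A board that carries this IP block registers a platform device whose
 * name matches DRIVER_NAME, with one memory resource covering the
 * register window, one IRQ, and a clock that clk_get() can resolve as
 * "irda<id>".  The sketch below is hypothetical; the base address, size
 * and IRQ number are placeholders, not values taken from a real SoC:
 *
 *	static struct resource sh_sir_resources[] = {
 *		{
 *			.start	= 0xa45d0000,	// hypothetical base
 *			.end	= 0xa45d0049,
 *			.flags	= IORESOURCE_MEM,
 *		}, {
 *			.start	= 20,		// hypothetical IRQ
 *			.flags	= IORESOURCE_IRQ,
 *		},
 *	};
 *
 *	static struct platform_device sh_sir_device = {
 *		.name		= "sh_sir",
 *		.id		= 0,	// looked up as clock "irda0"
 *		.num_resources	= ARRAY_SIZE(sh_sir_resources),
 *		.resource	= sh_sir_resources,
 *	};
 *
 *	platform_device_register(&sh_sir_device);
 */
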
static int __devinit sh_sir_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct sh_sir_self *self;
	struct resource *res;
	char clk_name[8];
	int irq;
	int err = -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq < 0) {
		dev_err(&pdev->dev, "Not enough platform resources.\n");
		goto exit;
	}

	ndev = alloc_irdadev(sizeof(*self));
	if (!ndev)
		goto exit;

	self = netdev_priv(ndev);
	self->membase = ioremap_nocache(res->start, resource_size(res));
	if (!self->membase) {
		err = -ENXIO;
		dev_err(&pdev->dev, "Unable to ioremap.\n");
		goto err_mem_1;
	}

	err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
	if (err)
		goto err_mem_2;

	snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
	self->clk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(self->clk)) {
		dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
		err = PTR_ERR(self->clk);
		goto err_mem_3;
	}

	irda_init_max_qos_capabilies(&self->qos);

	ndev->netdev_ops	= &sh_sir_ndo;
	ndev->irq		= irq;

	self->ndev			= ndev;
	self->qos.baud_rate.bits	&= IR_9600; /* FIXME */
	self->qos.min_turn_time.bits	= 1; /* 10 ms or more */

	irda_qos_bits_to_value(&self->qos);

	err = register_netdev(ndev);
	if (err)
		goto err_mem_4;

	platform_set_drvdata(pdev, ndev);

	err = request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self);
	if (err) {
		dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
		goto err_irq;
	}

	dev_info(&pdev->dev, "SuperH IrDA probed\n");

	goto exit;

err_irq:
	unregister_netdev(ndev);
err_mem_4:
	clk_put(self->clk);
err_mem_3:
	sh_sir_remove_iobuf(self);
err_mem_2:
	iounmap(self->membase);
err_mem_1:
	free_netdev(ndev);
exit:
	return err;
}

static int __devexit sh_sir_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_sir_self *self = netdev_priv(ndev);

	if (!self)
		return 0;

	free_irq(ndev->irq, self);
	unregister_netdev(ndev);
	clk_put(self->clk);
	sh_sir_remove_iobuf(self);
	iounmap(self->membase);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver sh_sir_driver = {
	.probe   = sh_sir_probe,
	.remove  = __devexit_p(sh_sir_remove),
	.driver  = {
		.name = DRIVER_NAME,
	},
};

static int __init sh_sir_init(void)
{
	return platform_driver_register(&sh_sir_driver);
}

static void __exit sh_sir_exit(void)
{
	platform_driver_unregister(&sh_sir_driver);
}

module_init(sh_sir_init);
module_exit(sh_sir_exit);

MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
MODULE_LICENSE("GPL");