// SPDX-License-Identifier: GPL-2.0-only
/*
 * Moxa C101 synchronous serial card driver for Linux
 *
 * Copyright (C) 2000-2003 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>
 *
 * Sources of information:
 *    Hitachi HD64570 SCA User's Manual
 *    Moxa C101 User's Manual
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/hdlc.h>
#include <linux/delay.h>
#include <asm/io.h>

#include "hd64570.h"


static const char *version = "Moxa C101 driver version: 1.15";
static const char *devname = "C101";

#undef DEBUG_PKT
#define DEBUG_RINGS

#define C101_PAGE		0x1D00
#define C101_DTR		0x1E00
#define C101_SCA		0x1F00
#define C101_WINDOW_SIZE	0x2000
#define C101_MAPPED_RAM_SIZE	0x4000

#define RAM_SIZE		(256 * 1024)
#define TX_RING_BUFFERS		10
#define RX_RING_BUFFERS		((RAM_SIZE - C101_WINDOW_SIZE) /	\
				 (sizeof(pkt_desc) + HDLC_MAX_MRU) -	\
				 TX_RING_BUFFERS)
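/* Ring sizing: the 256 KB of on-card RAM, minus the reserved first window
 * (C101_WINDOW_SIZE), is divided into slots of one descriptor plus one
 * maximum-size frame (sizeof(pkt_desc) + HDLC_MAX_MRU); TX_RING_BUFFERS of
 * those slots form the TX ring and the remainder make up the RX ring.
 */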

#define CLOCK_BASE	9830400	/* 9.8304 MHz */
#define PAGE0_ALWAYS_MAPPED

static char *hw;		/* pointer to hw=xxx command line string */


typedef struct card_s {
	struct net_device *dev;
	spinlock_t lock;	/* TX lock */
	u8 __iomem *win0base;	/* ISA window base address */
	u32 phy_winbase;	/* ISA physical base address */
	sync_serial_settings settings;
	int rxpart;		/* partial frame received, next frame invalid */
	unsigned short encoding;
	unsigned short parity;
	u16 rx_ring_buffers;	/* number of buffers in a ring */
	u16 tx_ring_buffers;
	u16 buff_offset;	/* offset of first buffer of first channel */
	u16 rxin;		/* rx ring buffer 'in' pointer */
	u16 txin;		/* tx ring buffer 'in' and 'last' pointers */
	u16 txlast;
	u8 rxs, txs, tmc;	/* SCA registers */
	u8 irq;			/* IRQ (3-15) */
	u8 page;

	struct card_s *next_card;
} card_t;

typedef card_t port_t;

static card_t *first_card;
static card_t **new_card = &first_card;
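/* new_card always points at the link (first_card or the last card's
 * next_card) where the next successfully probed card is appended;
 * see c101_run().
 */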


#define sca_in(reg, card)	  readb((card)->win0base + C101_SCA + (reg))
#define sca_out(value, reg, card) writeb(value, (card)->win0base + C101_SCA + (reg))
#define sca_inw(reg, card)	  readw((card)->win0base + C101_SCA + (reg))

/* EDA address register must be set in EDAL, EDAH order - 8 bit ISA bus */
#define sca_outw(value, reg, card) do { \
	writeb(value & 0xFF, (card)->win0base + C101_SCA + (reg)); \
	writeb((value >> 8) & 0xFF, (card)->win0base + C101_SCA + (reg + 1)); \
} while (0)

#define port_to_card(port)	  (port)
#define log_node(port)		  (0)
#define phy_node(port)		  (0)
#define winsize(card)		  (C101_WINDOW_SIZE)
#define win0base(card)		  ((card)->win0base)
#define winbase(card)		  ((card)->win0base + 0x2000)
#define get_port(card, port)	  (card)
static void sca_msci_intr(port_t *port);


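/* Memory layout, as implied by the macros above: the card maps 16 KB of ISA
 * memory (C101_MAPPED_RAM_SIZE).  The low 8 KB always shows RAM page 0
 * (PAGE0_ALWAYS_MAPPED) and holds the control locations - page select at
 * C101_PAGE, DTR at C101_DTR and the SCA registers at C101_SCA.  The upper
 * 8 KB at win0base + 0x2000 is a movable window into the 256 KB on-board
 * RAM, switched by openwin() below.
 */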
static inline u8 sca_get_page(card_t *card)
{
	return card->page;
}

static inline void openwin(card_t *card, u8 page)
{
	card->page = page;
	writeb(page, card->win0base + C101_PAGE);
}

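/* The shared HD64570 support code is pulled in as an include-style template;
 * the macros and inline helpers defined above supply the board-specific
 * register and window accessors it relies on.
 */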
#include "hd64570.c"

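/* DCD is wired to the second MSCI channel (MSCI1, "ch#2" in the comments
 * below), so carrier state is sensed from MSCI1's ST3 register.
 */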
static inline void set_carrier(port_t *port)
{
	if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD))
		netif_carrier_on(port_to_dev(port));
	else
		netif_carrier_off(port_to_dev(port));
}


static void sca_msci_intr(port_t *port)
{
	u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI0 ST1 status */

	/* Reset MSCI TX underrun and CDCD (ignored) status bit */
	sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port);

	if (stat & ST1_UDRN) {
		/* TX Underrun error detected */
		port_to_dev(port)->stats.tx_errors++;
		port_to_dev(port)->stats.tx_fifo_errors++;
	}

	stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */
	/* Reset MSCI1 CDCD status bit - uses ch#2 DCD input */
	sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port);

	if (stat & ST1_CDCD)
		set_carrier(port);
}


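/* Clock selection for the MSCI.  CLOCK_EXT takes both clocks from the line
 * (TXC/RXC inputs); CLOCK_INT clocks both directions from the on-board baud
 * rate generator (TX reuses the BRG-driven RX clock); CLOCK_TXINT keeps RX
 * on the line clock and generates TX from the BRG; CLOCK_TXFROMRX transmits
 * using the recovered RX clock.
 */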
static void c101_set_iface(port_t *port)
{
	u8 rxs = port->rxs & CLK_BRG_MASK;
	u8 txs = port->txs & CLK_BRG_MASK;

	switch (port->settings.clock_type) {
	case CLOCK_INT:
		rxs |= CLK_BRG_RX;	/* BRG output */
		txs |= CLK_RXCLK_TX;	/* RX clock */
		break;

	case CLOCK_TXINT:
		rxs |= CLK_LINE_RX;	/* RXC input */
		txs |= CLK_BRG_TX;	/* BRG output */
		break;

	case CLOCK_TXFROMRX:
		rxs |= CLK_LINE_RX;	/* RXC input */
		txs |= CLK_RXCLK_TX;	/* RX clock */
		break;

	default:			/* EXTernal clock */
		rxs |= CLK_LINE_RX;	/* RXC input */
		txs |= CLK_LINE_TX;	/* TXC input */
	}

	port->rxs = rxs;
	port->txs = txs;
	sca_out(rxs, MSCI1_OFFSET + RXS, port);
	sca_out(txs, MSCI1_OFFSET + TXS, port);
	sca_set_port(port);
}


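/* The card has a single synchronous port but uses both SCA channels: MSCI0
 * handles transmit (TX interrupt and underrun status) while MSCI1 handles
 * receive and the modem lines (DCD in, RTS out), as the interrupt enables
 * below and the hardware notes in the comments suggest.
 */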
static int c101_open(struct net_device *dev)
{
	port_t *port = dev_to_port(dev);
	int result;

	result = hdlc_open(dev);
	if (result)
		return result;

	writeb(1, port->win0base + C101_DTR);
	sca_out(0, MSCI1_OFFSET + CTL, port); /* RTS uses ch#2 output */
	sca_open(dev);
	/* DCD is connected to port 2 !@#$%^& - disable MSCI0 CDCD interrupt */
	sca_out(IE1_UDRN, MSCI0_OFFSET + IE1, port);
	sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port);

	set_carrier(port);

	/* enable MSCI1 CDCD interrupt */
	sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port);
	sca_out(IE0_RXINTA, MSCI1_OFFSET + IE0, port);
	sca_out(0x48, IER0, port); /* TXINT #0 and RXINT #1 */
	c101_set_iface(port);
	return 0;
}


static int c101_close(struct net_device *dev)
{
	port_t *port = dev_to_port(dev);

	sca_close(dev);
	writeb(0, port->win0base + C101_DTR);
	sca_out(CTL_NORTS, MSCI1_OFFSET + CTL, port);
	hdlc_close(dev);
	return 0;
}

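/* SIOCWANDEV get/set of sync_serial_settings is normally driven from user
 * space by the sethdlc utility (see the URL in the header comment), e.g.
 * something like "sethdlc hdlc0 clock int rate 128000" - an illustrative
 * invocation, not taken from this source.  SIOCDEVPRIVATE is only a
 * debugging hook that dumps the rings when DEBUG_RINGS is defined.
 */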
static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	port_t *port = dev_to_port(dev);

#ifdef DEBUG_RINGS
	if (cmd == SIOCDEVPRIVATE) {
		sca_dump_rings(dev);
		printk(KERN_DEBUG "MSCI1: ST: %02x %02x %02x %02x\n",
		       sca_in(MSCI1_OFFSET + ST0, port),
		       sca_in(MSCI1_OFFSET + ST1, port),
		       sca_in(MSCI1_OFFSET + ST2, port),
		       sca_in(MSCI1_OFFSET + ST3, port));
		return 0;
	}
#endif
	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(line, &port->settings, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		if (new_line.clock_type != CLOCK_EXT &&
		    new_line.clock_type != CLOCK_TXFROMRX &&
		    new_line.clock_type != CLOCK_INT &&
		    new_line.clock_type != CLOCK_TXINT)
			return -EINVAL;	/* No such clock setting */

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		memcpy(&port->settings, &new_line, size); /* Update settings */
		c101_set_iface(port);
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}


static void c101_destroy_card(card_t *card)
{
	readb(card->win0base + C101_PAGE); /* Resets SCA? */

	if (card->irq)
		free_irq(card->irq, card);

	if (card->win0base) {
		iounmap(card->win0base);
		release_mem_region(card->phy_winbase, C101_MAPPED_RAM_SIZE);
	}

	free_netdev(card->dev);

	kfree(card);
}

static const struct net_device_ops c101_ops = {
	.ndo_open       = c101_open,
	.ndo_stop       = c101_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = c101_ioctl,
};

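/* Probe one card described by a single "irq,ram" pair from the hw= string:
 * validate the parameters, map the ISA window, set up the SCA and register
 * the resulting HDLC device.
 */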
static int __init c101_run(unsigned long irq, unsigned long winbase)
{
	struct net_device *dev;
	hdlc_device *hdlc;
	card_t *card;
	int result;

	if (irq < 3 || irq > 15 || irq == 6) /* FIXME */ {
		pr_err("invalid IRQ value\n");
		return -ENODEV;
	}

	if (winbase < 0xC0000 || winbase > 0xDFFFF || (winbase & 0x3FFF) != 0) {
		pr_err("invalid RAM value\n");
		return -ENODEV;
	}

	card = kzalloc(sizeof(card_t), GFP_KERNEL);
	if (card == NULL)
		return -ENOBUFS;

	card->dev = alloc_hdlcdev(card);
	if (!card->dev) {
		pr_err("unable to allocate memory\n");
		kfree(card);
		return -ENOBUFS;
	}

	if (request_irq(irq, sca_intr, 0, devname, card)) {
		pr_err("could not allocate IRQ\n");
		c101_destroy_card(card);
		return -EBUSY;
	}
	card->irq = irq;

	if (!request_mem_region(winbase, C101_MAPPED_RAM_SIZE, devname)) {
		pr_err("could not request RAM window\n");
		c101_destroy_card(card);
		return -EBUSY;
	}
	card->phy_winbase = winbase;
	card->win0base = ioremap(winbase, C101_MAPPED_RAM_SIZE);
	if (!card->win0base) {
		pr_err("could not map I/O address\n");
		c101_destroy_card(card);
		return -EFAULT;
	}

	card->tx_ring_buffers = TX_RING_BUFFERS;
	card->rx_ring_buffers = RX_RING_BUFFERS;
	card->buff_offset = C101_WINDOW_SIZE; /* Bytes 1D00-1FFF reserved */

	readb(card->win0base + C101_PAGE); /* Resets SCA? */
	udelay(100);
	writeb(0, card->win0base + C101_PAGE);
	writeb(0, card->win0base + C101_DTR); /* Power-up for RAM? */

	sca_init(card, 0);

	dev = port_to_dev(card);
	hdlc = dev_to_hdlc(dev);

	spin_lock_init(&card->lock);
	dev->irq = irq;
	dev->mem_start = winbase;
	dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1;
	dev->tx_queue_len = 50;
	dev->netdev_ops = &c101_ops;
	hdlc->attach = sca_attach;
	hdlc->xmit = sca_xmit;
	card->settings.clock_type = CLOCK_EXT;

	result = register_hdlc_device(dev);
	if (result) {
		pr_warn("unable to register hdlc device\n");
		c101_destroy_card(card);
		return result;
	}

	sca_init_port(card); /* Set up C101 memory */
	set_carrier(card);

	netdev_info(dev, "Moxa C101 on IRQ%u, using %u TX + %u RX packet rings\n",
		    card->irq, card->tx_ring_buffers, card->rx_ring_buffers);

	*new_card = card;
	new_card = &card->next_card;
	return 0;
}


static int __init c101_init(void)
{
	if (hw == NULL) {
#ifdef MODULE
		pr_info("no card initialized\n");
#endif
		return -EINVAL;	/* no parameters specified, abort */
	}

	pr_info("%s\n", version);

	do {
		unsigned long irq, ram;

		irq = simple_strtoul(hw, &hw, 0);

		if (*hw++ != ',')
			break;
		ram = simple_strtoul(hw, &hw, 0);

		if (*hw == ':' || *hw == '\x0')
			c101_run(irq, ram);

		if (*hw == '\x0')
			return first_card ? 0 : -EINVAL;
	} while (*hw++ == ':');

	pr_err("invalid hardware parameters\n");
	return first_card ? 0 : -EINVAL;
}


static void __exit c101_cleanup(void)
{
	card_t *card = first_card;

	while (card) {
		card_t *ptr = card;
		card = card->next_card;
		unregister_hdlc_device(port_to_dev(ptr));
		c101_destroy_card(ptr);
	}
}


module_init(c101_init);
module_exit(c101_cleanup);

MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Moxa C101 serial port driver");
MODULE_LICENSE("GPL v2");
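
/* Example module load (illustrative values - use the settings that match
 * your card's jumpers):
 *   modprobe c101 hw=3,0xd0000            - one card, IRQ 3, RAM at 0xD0000
 *   modprobe c101 hw=3,0xd0000:5,0xd4000  - two cards, colon-separated
 * The IRQ must be 3-15 (6 is rejected) and the RAM window must lie within
 * 0xC0000-0xDFFFF on a 16 KB boundary, as checked in c101_run().
 */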
module_param(hw, charp, 0444);
MODULE_PARM_DESC(hw, "irq,ram:irq,...");