/*
 * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
 * Copyright (c) 2017, I2SE GmbH
 *
 * Permission to use, copy, modify, and/or distribute this software
 * for any purpose with or without fee is hereby granted, provided
 * that the above copyright notice and this permission notice appear
 * in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
 * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* This module implements the Qualcomm Atheros UART protocol for
 * kernel-based UART device; it is essentially an Ethernet-to-UART
 * serial converter;
 */
25#include <linux/device.h>
26#include <linux/errno.h>
27#include <linux/etherdevice.h>
28#include <linux/if_arp.h>
29#include <linux/if_ether.h>
30#include <linux/jiffies.h>
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/netdevice.h>
34#include <linux/of.h>
35#include <linux/of_net.h>
36#include <linux/sched.h>
37#include <linux/serdev.h>
38#include <linux/skbuff.h>
39#include <linux/types.h>
40
41#include "qca_7k_common.h"
42
43#define QCAUART_DRV_VERSION "0.1.0"
44#define QCAUART_DRV_NAME "qcauart"
45#define QCAUART_TX_TIMEOUT (1 * HZ)
46
47struct qcauart {
48 struct net_device *net_dev;
49 spinlock_t lock; /* transmit lock */
50 struct work_struct tx_work; /* Flushes transmit buffer */
51
52 struct serdev_device *serdev;
53 struct qcafrm_handle frm_handle;
54 struct sk_buff *rx_skb;
55
56 unsigned char *tx_head; /* pointer to next XMIT byte */
57 int tx_left; /* bytes left in XMIT queue */
58 unsigned char *tx_buffer;
59};
60
61static ssize_t
62qca_tty_receive(struct serdev_device *serdev, const u8 *data, size_t count)
63{
64 struct qcauart *qca = serdev_device_get_drvdata(serdev);
65 struct net_device *netdev = qca->net_dev;
66 struct net_device_stats *n_stats = &netdev->stats;
67 size_t i;
68
69 if (!qca->rx_skb) {
70 qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
71 netdev->mtu +
72 VLAN_ETH_HLEN);
73 if (!qca->rx_skb) {
74 n_stats->rx_errors++;
75 n_stats->rx_dropped++;
76 return 0;
77 }
78 }
79
80 for (i = 0; i < count; i++) {
81 s32 retcode;
82
83 retcode = qcafrm_fsm_decode(&qca->frm_handle,
84 qca->rx_skb->data,
85 skb_tailroom(qca->rx_skb),
86 data[i]);
87
88 switch (retcode) {
89 case QCAFRM_GATHER:
90 case QCAFRM_NOHEAD:
91 break;
92 case QCAFRM_NOTAIL:
93 netdev_dbg(netdev, "recv: no RX tail\n");
94 n_stats->rx_errors++;
95 n_stats->rx_dropped++;
96 break;
97 case QCAFRM_INVLEN:
98 netdev_dbg(netdev, "recv: invalid RX length\n");
99 n_stats->rx_errors++;
100 n_stats->rx_dropped++;
101 break;
102 default:
103 n_stats->rx_packets++;
104 n_stats->rx_bytes += retcode;
105 skb_put(qca->rx_skb, retcode);
106 qca->rx_skb->protocol = eth_type_trans(
107 qca->rx_skb, qca->rx_skb->dev);
108 skb_checksum_none_assert(qca->rx_skb);
109 netif_rx(qca->rx_skb);
110 qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
111 netdev->mtu +
112 VLAN_ETH_HLEN);
113 if (!qca->rx_skb) {
114 netdev_dbg(netdev, "recv: out of RX resources\n");
115 n_stats->rx_errors++;
116 return i;
117 }
118 }
119 }
120
121 return i;
122}
123
124/* Write out any remaining transmit buffer. Scheduled when tty is writable */
125static void qcauart_transmit(struct work_struct *work)
126{
127 struct qcauart *qca = container_of(work, struct qcauart, tx_work);
128 struct net_device_stats *n_stats = &qca->net_dev->stats;
129 int written;
130
131 spin_lock_bh(&qca->lock);
132
133 /* First make sure we're connected. */
134 if (!netif_running(qca->net_dev)) {
135 spin_unlock_bh(&qca->lock);
136 return;
137 }
138
139 if (qca->tx_left <= 0) {
140 /* Now serial buffer is almost free & we can start
141 * transmission of another packet
142 */
143 n_stats->tx_packets++;
144 spin_unlock_bh(&qca->lock);
145 netif_wake_queue(qca->net_dev);
146 return;
147 }
148
149 written = serdev_device_write_buf(qca->serdev, qca->tx_head,
150 qca->tx_left);
151 if (written > 0) {
152 qca->tx_left -= written;
153 qca->tx_head += written;
154 }
155 spin_unlock_bh(&qca->lock);
156}
157
158/* Called by the driver when there's room for more data.
159 * Schedule the transmit.
160 */
161static void qca_tty_wakeup(struct serdev_device *serdev)
162{
163 struct qcauart *qca = serdev_device_get_drvdata(serdev);
164
165 schedule_work(&qca->tx_work);
166}
167
168static const struct serdev_device_ops qca_serdev_ops = {
169 .receive_buf = qca_tty_receive,
170 .write_wakeup = qca_tty_wakeup,
171};
172
173static int qcauart_netdev_open(struct net_device *dev)
174{
175 struct qcauart *qca = netdev_priv(dev);
176
177 netif_start_queue(qca->net_dev);
178
179 return 0;
180}
181
182static int qcauart_netdev_close(struct net_device *dev)
183{
184 struct qcauart *qca = netdev_priv(dev);
185
186 netif_stop_queue(dev);
187 flush_work(&qca->tx_work);
188
189 spin_lock_bh(&qca->lock);
190 qca->tx_left = 0;
191 spin_unlock_bh(&qca->lock);
192
193 return 0;
194}
195
196static netdev_tx_t
197qcauart_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
198{
199 struct net_device_stats *n_stats = &dev->stats;
200 struct qcauart *qca = netdev_priv(dev);
201 u8 pad_len = 0;
202 int written;
203 u8 *pos;
204
205 spin_lock(&qca->lock);
206
207 WARN_ON(qca->tx_left);
208
209 if (!netif_running(dev)) {
210 spin_unlock(&qca->lock);
211 netdev_warn(qca->net_dev, "xmit: iface is down\n");
212 goto out;
213 }
214
215 pos = qca->tx_buffer;
216
217 if (skb->len < QCAFRM_MIN_LEN)
218 pad_len = QCAFRM_MIN_LEN - skb->len;
219
220 pos += qcafrm_create_header(pos, skb->len + pad_len);
221
222 memcpy(pos, skb->data, skb->len);
223 pos += skb->len;
224
225 if (pad_len) {
226 memset(pos, 0, pad_len);
227 pos += pad_len;
228 }
229
230 pos += qcafrm_create_footer(pos);
231
232 netif_stop_queue(qca->net_dev);
233
234 written = serdev_device_write_buf(qca->serdev, qca->tx_buffer,
235 pos - qca->tx_buffer);
236 if (written > 0) {
237 qca->tx_left = (pos - qca->tx_buffer) - written;
238 qca->tx_head = qca->tx_buffer + written;
239 n_stats->tx_bytes += written;
240 }
241 spin_unlock(&qca->lock);
242
243 netif_trans_update(dev);
244out:
245 dev_kfree_skb_any(skb);
246 return NETDEV_TX_OK;
247}
248
249static void qcauart_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
250{
251 struct qcauart *qca = netdev_priv(dev);
252
253 netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
254 jiffies, dev_trans_start(dev));
255 dev->stats.tx_errors++;
256 dev->stats.tx_dropped++;
257}
258
259static int qcauart_netdev_init(struct net_device *dev)
260{
261 struct qcauart *qca = netdev_priv(dev);
262 size_t len;
263
264 /* Finish setting up the device info. */
265 dev->mtu = QCAFRM_MAX_MTU;
266 dev->type = ARPHRD_ETHER;
267
268 len = QCAFRM_HEADER_LEN + QCAFRM_MAX_LEN + QCAFRM_FOOTER_LEN;
269 qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL);
270 if (!qca->tx_buffer)
271 return -ENOMEM;
272
273 qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev,
274 qca->net_dev->mtu +
275 VLAN_ETH_HLEN);
276 if (!qca->rx_skb)
277 return -ENOBUFS;
278
279 return 0;
280}
281
282static void qcauart_netdev_uninit(struct net_device *dev)
283{
284 struct qcauart *qca = netdev_priv(dev);
285
286 dev_kfree_skb(qca->rx_skb);
287}
288
289static const struct net_device_ops qcauart_netdev_ops = {
290 .ndo_init = qcauart_netdev_init,
291 .ndo_uninit = qcauart_netdev_uninit,
292 .ndo_open = qcauart_netdev_open,
293 .ndo_stop = qcauart_netdev_close,
294 .ndo_start_xmit = qcauart_netdev_xmit,
295 .ndo_set_mac_address = eth_mac_addr,
296 .ndo_tx_timeout = qcauart_netdev_tx_timeout,
297 .ndo_validate_addr = eth_validate_addr,
298};
299
300static void qcauart_netdev_setup(struct net_device *dev)
301{
302 dev->netdev_ops = &qcauart_netdev_ops;
303 dev->watchdog_timeo = QCAUART_TX_TIMEOUT;
304 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
305 dev->tx_queue_len = 100;
306
307 /* MTU range: 46 - 1500 */
308 dev->min_mtu = QCAFRM_MIN_MTU;
309 dev->max_mtu = QCAFRM_MAX_MTU;
310}
311
312static const struct of_device_id qca_uart_of_match[] = {
313 {
314 .compatible = "qca,qca7000",
315 },
316 {}
317};
318MODULE_DEVICE_TABLE(of, qca_uart_of_match);
319
320static int qca_uart_probe(struct serdev_device *serdev)
321{
322 struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart));
323 struct qcauart *qca;
324 u32 speed = 115200;
325 int ret;
326
327 if (!qcauart_dev)
328 return -ENOMEM;
329
330 qcauart_netdev_setup(qcauart_dev);
331 SET_NETDEV_DEV(qcauart_dev, &serdev->dev);
332
333 qca = netdev_priv(qcauart_dev);
334 if (!qca) {
335 pr_err("qca_uart: Fail to retrieve private structure\n");
336 ret = -ENOMEM;
337 goto free;
338 }
339 qca->net_dev = qcauart_dev;
340 qca->serdev = serdev;
341 qcafrm_fsm_init_uart(&qca->frm_handle);
342
343 spin_lock_init(&qca->lock);
344 INIT_WORK(&qca->tx_work, qcauart_transmit);
345
346 of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);
347
348 ret = of_get_ethdev_address(serdev->dev.of_node, qca->net_dev);
349 if (ret) {
350 eth_hw_addr_random(qca->net_dev);
351 dev_info(&serdev->dev, "Using random MAC address: %pM\n",
352 qca->net_dev->dev_addr);
353 }
354
355 netif_carrier_on(qca->net_dev);
356 serdev_device_set_drvdata(serdev, qca);
357 serdev_device_set_client_ops(serdev, &qca_serdev_ops);
358
359 ret = serdev_device_open(serdev);
360 if (ret) {
361 dev_err(&serdev->dev, "Unable to open device %s\n",
362 qcauart_dev->name);
363 goto free;
364 }
365
366 speed = serdev_device_set_baudrate(serdev, speed);
367 dev_info(&serdev->dev, "Using baudrate: %u\n", speed);
368
369 serdev_device_set_flow_control(serdev, false);
370
371 ret = register_netdev(qcauart_dev);
372 if (ret) {
373 dev_err(&serdev->dev, "Unable to register net device %s\n",
374 qcauart_dev->name);
375 serdev_device_close(serdev);
376 cancel_work_sync(&qca->tx_work);
377 goto free;
378 }
379
380 return 0;
381
382free:
383 free_netdev(qcauart_dev);
384 return ret;
385}
386
387static void qca_uart_remove(struct serdev_device *serdev)
388{
389 struct qcauart *qca = serdev_device_get_drvdata(serdev);
390
391 unregister_netdev(qca->net_dev);
392
393 /* Flush any pending characters in the driver. */
394 serdev_device_close(serdev);
395 cancel_work_sync(&qca->tx_work);
396
397 free_netdev(qca->net_dev);
398}
399
400static struct serdev_device_driver qca_uart_driver = {
401 .probe = qca_uart_probe,
402 .remove = qca_uart_remove,
403 .driver = {
404 .name = QCAUART_DRV_NAME,
405 .of_match_table = qca_uart_of_match,
406 },
407};
408
409module_serdev_device_driver(qca_uart_driver);
410
411MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver");
412MODULE_AUTHOR("Qualcomm Atheros Communications");
413MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
414MODULE_LICENSE("Dual BSD/GPL");
415MODULE_VERSION(QCAUART_DRV_VERSION);
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
 * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
 * Copyright (c) 2017, I2SE GmbH
 */

/* This module implements the Qualcomm Atheros UART protocol for
 * kernel-based UART device; it is essentially an Ethernet-to-UART
 * serial converter;
 */
11
12#include <linux/device.h>
13#include <linux/errno.h>
14#include <linux/etherdevice.h>
15#include <linux/if_arp.h>
16#include <linux/if_ether.h>
17#include <linux/jiffies.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/netdevice.h>
21#include <linux/of.h>
22#include <linux/of_net.h>
23#include <linux/sched.h>
24#include <linux/serdev.h>
25#include <linux/skbuff.h>
26#include <linux/types.h>
27
28#include "qca_7k_common.h"
29
30#define QCAUART_DRV_VERSION "0.1.0"
31#define QCAUART_DRV_NAME "qcauart"
32#define QCAUART_TX_TIMEOUT (1 * HZ)
33
34struct qcauart {
35 struct net_device *net_dev;
36 spinlock_t lock; /* transmit lock */
37 struct work_struct tx_work; /* Flushes transmit buffer */
38
39 struct serdev_device *serdev;
40 struct qcafrm_handle frm_handle;
41 struct sk_buff *rx_skb;
42
43 unsigned char *tx_head; /* pointer to next XMIT byte */
44 int tx_left; /* bytes left in XMIT queue */
45 unsigned char *tx_buffer;
46};
47
48static size_t
49qca_tty_receive(struct serdev_device *serdev, const u8 *data, size_t count)
50{
51 struct qcauart *qca = serdev_device_get_drvdata(serdev);
52 struct net_device *netdev = qca->net_dev;
53 struct net_device_stats *n_stats = &netdev->stats;
54 size_t i;
55
56 if (!qca->rx_skb) {
57 qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
58 netdev->mtu +
59 VLAN_ETH_HLEN);
60 if (!qca->rx_skb) {
61 n_stats->rx_errors++;
62 n_stats->rx_dropped++;
63 return 0;
64 }
65 }
66
67 for (i = 0; i < count; i++) {
68 s32 retcode;
69
70 retcode = qcafrm_fsm_decode(&qca->frm_handle,
71 qca->rx_skb->data,
72 skb_tailroom(qca->rx_skb),
73 data[i]);
74
75 switch (retcode) {
76 case QCAFRM_GATHER:
77 case QCAFRM_NOHEAD:
78 break;
79 case QCAFRM_NOTAIL:
80 netdev_dbg(netdev, "recv: no RX tail\n");
81 n_stats->rx_errors++;
82 n_stats->rx_dropped++;
83 break;
84 case QCAFRM_INVLEN:
85 netdev_dbg(netdev, "recv: invalid RX length\n");
86 n_stats->rx_errors++;
87 n_stats->rx_dropped++;
88 break;
89 default:
90 n_stats->rx_packets++;
91 n_stats->rx_bytes += retcode;
92 skb_put(qca->rx_skb, retcode);
93 qca->rx_skb->protocol = eth_type_trans(
94 qca->rx_skb, qca->rx_skb->dev);
95 skb_checksum_none_assert(qca->rx_skb);
96 netif_rx(qca->rx_skb);
97 qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
98 netdev->mtu +
99 VLAN_ETH_HLEN);
100 if (!qca->rx_skb) {
101 netdev_dbg(netdev, "recv: out of RX resources\n");
102 n_stats->rx_errors++;
103 return i;
104 }
105 }
106 }
107
108 return i;
109}
110
111/* Write out any remaining transmit buffer. Scheduled when tty is writable */
112static void qcauart_transmit(struct work_struct *work)
113{
114 struct qcauart *qca = container_of(work, struct qcauart, tx_work);
115 struct net_device_stats *n_stats = &qca->net_dev->stats;
116 int written;
117
118 spin_lock_bh(&qca->lock);
119
120 /* First make sure we're connected. */
121 if (!netif_running(qca->net_dev)) {
122 spin_unlock_bh(&qca->lock);
123 return;
124 }
125
126 if (qca->tx_left <= 0) {
127 /* Now serial buffer is almost free & we can start
128 * transmission of another packet
129 */
130 n_stats->tx_packets++;
131 spin_unlock_bh(&qca->lock);
132 netif_wake_queue(qca->net_dev);
133 return;
134 }
135
136 written = serdev_device_write_buf(qca->serdev, qca->tx_head,
137 qca->tx_left);
138 if (written > 0) {
139 qca->tx_left -= written;
140 qca->tx_head += written;
141 }
142 spin_unlock_bh(&qca->lock);
143}
144
145/* Called by the driver when there's room for more data.
146 * Schedule the transmit.
147 */
148static void qca_tty_wakeup(struct serdev_device *serdev)
149{
150 struct qcauart *qca = serdev_device_get_drvdata(serdev);
151
152 schedule_work(&qca->tx_work);
153}
154
155static const struct serdev_device_ops qca_serdev_ops = {
156 .receive_buf = qca_tty_receive,
157 .write_wakeup = qca_tty_wakeup,
158};
159
160static int qcauart_netdev_open(struct net_device *dev)
161{
162 struct qcauart *qca = netdev_priv(dev);
163
164 netif_start_queue(qca->net_dev);
165
166 return 0;
167}
168
169static int qcauart_netdev_close(struct net_device *dev)
170{
171 struct qcauart *qca = netdev_priv(dev);
172
173 netif_stop_queue(dev);
174 flush_work(&qca->tx_work);
175
176 spin_lock_bh(&qca->lock);
177 qca->tx_left = 0;
178 spin_unlock_bh(&qca->lock);
179
180 return 0;
181}
182
183static netdev_tx_t
184qcauart_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
185{
186 struct net_device_stats *n_stats = &dev->stats;
187 struct qcauart *qca = netdev_priv(dev);
188 u8 pad_len = 0;
189 int written;
190 u8 *pos;
191
192 spin_lock(&qca->lock);
193
194 WARN_ON(qca->tx_left);
195
196 if (!netif_running(dev)) {
197 spin_unlock(&qca->lock);
198 netdev_warn(qca->net_dev, "xmit: iface is down\n");
199 goto out;
200 }
201
202 pos = qca->tx_buffer;
203
204 if (skb->len < QCAFRM_MIN_LEN)
205 pad_len = QCAFRM_MIN_LEN - skb->len;
206
207 pos += qcafrm_create_header(pos, skb->len + pad_len);
208
209 memcpy(pos, skb->data, skb->len);
210 pos += skb->len;
211
212 if (pad_len) {
213 memset(pos, 0, pad_len);
214 pos += pad_len;
215 }
216
217 pos += qcafrm_create_footer(pos);
218
219 netif_stop_queue(qca->net_dev);
220
221 written = serdev_device_write_buf(qca->serdev, qca->tx_buffer,
222 pos - qca->tx_buffer);
223 if (written > 0) {
224 qca->tx_left = (pos - qca->tx_buffer) - written;
225 qca->tx_head = qca->tx_buffer + written;
226 n_stats->tx_bytes += written;
227 }
228 spin_unlock(&qca->lock);
229
230 netif_trans_update(dev);
231out:
232 dev_kfree_skb_any(skb);
233 return NETDEV_TX_OK;
234}
235
236static void qcauart_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
237{
238 struct qcauart *qca = netdev_priv(dev);
239
240 netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
241 jiffies, dev_trans_start(dev));
242 dev->stats.tx_errors++;
243 dev->stats.tx_dropped++;
244}
245
246static int qcauart_netdev_init(struct net_device *dev)
247{
248 struct qcauart *qca = netdev_priv(dev);
249 size_t len;
250
251 /* Finish setting up the device info. */
252 dev->mtu = QCAFRM_MAX_MTU;
253 dev->type = ARPHRD_ETHER;
254
255 len = QCAFRM_HEADER_LEN + QCAFRM_MAX_LEN + QCAFRM_FOOTER_LEN;
256 qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL);
257 if (!qca->tx_buffer)
258 return -ENOMEM;
259
260 qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev,
261 qca->net_dev->mtu +
262 VLAN_ETH_HLEN);
263 if (!qca->rx_skb)
264 return -ENOBUFS;
265
266 return 0;
267}
268
269static void qcauart_netdev_uninit(struct net_device *dev)
270{
271 struct qcauart *qca = netdev_priv(dev);
272
273 dev_kfree_skb(qca->rx_skb);
274}
275
276static const struct net_device_ops qcauart_netdev_ops = {
277 .ndo_init = qcauart_netdev_init,
278 .ndo_uninit = qcauart_netdev_uninit,
279 .ndo_open = qcauart_netdev_open,
280 .ndo_stop = qcauart_netdev_close,
281 .ndo_start_xmit = qcauart_netdev_xmit,
282 .ndo_set_mac_address = eth_mac_addr,
283 .ndo_tx_timeout = qcauart_netdev_tx_timeout,
284 .ndo_validate_addr = eth_validate_addr,
285};
286
287static void qcauart_netdev_setup(struct net_device *dev)
288{
289 dev->netdev_ops = &qcauart_netdev_ops;
290 dev->watchdog_timeo = QCAUART_TX_TIMEOUT;
291 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
292 dev->tx_queue_len = 100;
293
294 /* MTU range: 46 - 1500 */
295 dev->min_mtu = QCAFRM_MIN_MTU;
296 dev->max_mtu = QCAFRM_MAX_MTU;
297}
298
299static const struct of_device_id qca_uart_of_match[] = {
300 {
301 .compatible = "qca,qca7000",
302 },
303 {}
304};
305MODULE_DEVICE_TABLE(of, qca_uart_of_match);
306
307static int qca_uart_probe(struct serdev_device *serdev)
308{
309 struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart));
310 struct qcauart *qca;
311 u32 speed = 115200;
312 int ret;
313
314 if (!qcauart_dev)
315 return -ENOMEM;
316
317 qcauart_netdev_setup(qcauart_dev);
318 SET_NETDEV_DEV(qcauart_dev, &serdev->dev);
319
320 qca = netdev_priv(qcauart_dev);
321 if (!qca) {
322 pr_err("qca_uart: Fail to retrieve private structure\n");
323 ret = -ENOMEM;
324 goto free;
325 }
326 qca->net_dev = qcauart_dev;
327 qca->serdev = serdev;
328 qcafrm_fsm_init_uart(&qca->frm_handle);
329
330 spin_lock_init(&qca->lock);
331 INIT_WORK(&qca->tx_work, qcauart_transmit);
332
333 of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);
334
335 ret = of_get_ethdev_address(serdev->dev.of_node, qca->net_dev);
336 if (ret) {
337 eth_hw_addr_random(qca->net_dev);
338 dev_info(&serdev->dev, "Using random MAC address: %pM\n",
339 qca->net_dev->dev_addr);
340 }
341
342 netif_carrier_on(qca->net_dev);
343 serdev_device_set_drvdata(serdev, qca);
344 serdev_device_set_client_ops(serdev, &qca_serdev_ops);
345
346 ret = serdev_device_open(serdev);
347 if (ret) {
348 dev_err(&serdev->dev, "Unable to open device %s\n",
349 qcauart_dev->name);
350 goto free;
351 }
352
353 speed = serdev_device_set_baudrate(serdev, speed);
354 dev_info(&serdev->dev, "Using baudrate: %u\n", speed);
355
356 serdev_device_set_flow_control(serdev, false);
357
358 ret = register_netdev(qcauart_dev);
359 if (ret) {
360 dev_err(&serdev->dev, "Unable to register net device %s\n",
361 qcauart_dev->name);
362 serdev_device_close(serdev);
363 cancel_work_sync(&qca->tx_work);
364 goto free;
365 }
366
367 return 0;
368
369free:
370 free_netdev(qcauart_dev);
371 return ret;
372}
373
374static void qca_uart_remove(struct serdev_device *serdev)
375{
376 struct qcauart *qca = serdev_device_get_drvdata(serdev);
377
378 unregister_netdev(qca->net_dev);
379
380 /* Flush any pending characters in the driver. */
381 serdev_device_close(serdev);
382 cancel_work_sync(&qca->tx_work);
383
384 free_netdev(qca->net_dev);
385}
386
387static struct serdev_device_driver qca_uart_driver = {
388 .probe = qca_uart_probe,
389 .remove = qca_uart_remove,
390 .driver = {
391 .name = QCAUART_DRV_NAME,
392 .of_match_table = qca_uart_of_match,
393 },
394};
395
396module_serdev_device_driver(qca_uart_driver);
397
398MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver");
399MODULE_AUTHOR("Qualcomm Atheros Communications");
400MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
401MODULE_LICENSE("Dual BSD/GPL");
402MODULE_VERSION(QCAUART_DRV_VERSION);