v6.8
  1/*
  2 *   Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
  3 *   Copyright (c) 2017, I2SE GmbH
  4 *
  5 *   Permission to use, copy, modify, and/or distribute this software
  6 *   for any purpose with or without fee is hereby granted, provided
  7 *   that the above copyright notice and this permission notice appear
  8 *   in all copies.
  9 *
 10 *   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 11 *   WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 12 *   WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
 13 *   THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
 14 *   CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 15 *   LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
 16 *   NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 17 *   CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 18 */
 19
 20/*   This module implements the Qualcomm Atheros UART protocol for
 21 *   a kernel-based UART device; it is essentially an Ethernet-to-UART
 22 *   serial converter.
 23 */
 24
 25#include <linux/device.h>
 26#include <linux/errno.h>
 27#include <linux/etherdevice.h>
 28#include <linux/if_arp.h>
 29#include <linux/if_ether.h>
 30#include <linux/jiffies.h>
 31#include <linux/kernel.h>
 32#include <linux/module.h>
 33#include <linux/netdevice.h>
 34#include <linux/of.h>
 35#include <linux/of_net.h>
 36#include <linux/sched.h>
 37#include <linux/serdev.h>
 38#include <linux/skbuff.h>
 39#include <linux/types.h>
 40
 41#include "qca_7k_common.h"
 42
 43#define QCAUART_DRV_VERSION "0.1.0"
 44#define QCAUART_DRV_NAME "qcauart"
 45#define QCAUART_TX_TIMEOUT (1 * HZ)
 46
 47struct qcauart {
 48	struct net_device *net_dev;
 49	spinlock_t lock;			/* transmit lock */
 50	struct work_struct tx_work;		/* Flushes transmit buffer   */
 51
 52	struct serdev_device *serdev;
 53	struct qcafrm_handle frm_handle;
 54	struct sk_buff *rx_skb;
 55
 56	unsigned char *tx_head;			/* pointer to next XMIT byte */
 57	int tx_left;				/* bytes left in XMIT queue  */
 58	unsigned char *tx_buffer;
 59};
 60
 61static ssize_t
 62qca_tty_receive(struct serdev_device *serdev, const u8 *data, size_t count)
 63{
 64	struct qcauart *qca = serdev_device_get_drvdata(serdev);
 65	struct net_device *netdev = qca->net_dev;
 66	struct net_device_stats *n_stats = &netdev->stats;
 67	size_t i;
 68
 69	if (!qca->rx_skb) {
 70		qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
 71							netdev->mtu +
 72							VLAN_ETH_HLEN);
 73		if (!qca->rx_skb) {
 74			n_stats->rx_errors++;
 75			n_stats->rx_dropped++;
 76			return 0;
 77		}
 78	}
 79
 80	for (i = 0; i < count; i++) {
 81		s32 retcode;
 82
 83		retcode = qcafrm_fsm_decode(&qca->frm_handle,
 84					    qca->rx_skb->data,
 85					    skb_tailroom(qca->rx_skb),
 86					    data[i]);
 87
 88		switch (retcode) {
 89		case QCAFRM_GATHER:
 90		case QCAFRM_NOHEAD:
 91			break;
 92		case QCAFRM_NOTAIL:
 93			netdev_dbg(netdev, "recv: no RX tail\n");
 94			n_stats->rx_errors++;
 95			n_stats->rx_dropped++;
 96			break;
 97		case QCAFRM_INVLEN:
 98			netdev_dbg(netdev, "recv: invalid RX length\n");
 99			n_stats->rx_errors++;
100			n_stats->rx_dropped++;
101			break;
102		default:
103			n_stats->rx_packets++;
104			n_stats->rx_bytes += retcode;
105			skb_put(qca->rx_skb, retcode);
106			qca->rx_skb->protocol = eth_type_trans(
107						qca->rx_skb, qca->rx_skb->dev);
108			skb_checksum_none_assert(qca->rx_skb);
109			netif_rx(qca->rx_skb);
110			qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
111								netdev->mtu +
112								VLAN_ETH_HLEN);
113			if (!qca->rx_skb) {
114				netdev_dbg(netdev, "recv: out of RX resources\n");
115				n_stats->rx_errors++;
116				return i;
117			}
118		}
119	}
120
121	return i;
122}
123
124/* Write out any remaining transmit buffer. Scheduled when tty is writable */
125static void qcauart_transmit(struct work_struct *work)
126{
127	struct qcauart *qca = container_of(work, struct qcauart, tx_work);
128	struct net_device_stats *n_stats = &qca->net_dev->stats;
129	int written;
130
131	spin_lock_bh(&qca->lock);
132
133	/* First make sure we're connected. */
134	if (!netif_running(qca->net_dev)) {
135		spin_unlock_bh(&qca->lock);
136		return;
137	}
138
139	if (qca->tx_left <= 0)  {
140		/* Now serial buffer is almost free & we can start
141		 * transmission of another packet
142		 */
143		n_stats->tx_packets++;
144		spin_unlock_bh(&qca->lock);
145		netif_wake_queue(qca->net_dev);
146		return;
147	}
148
149	written = serdev_device_write_buf(qca->serdev, qca->tx_head,
150					  qca->tx_left);
151	if (written > 0) {
152		qca->tx_left -= written;
153		qca->tx_head += written;
154	}
155	spin_unlock_bh(&qca->lock);
156}
157
158/* Called by the driver when there's room for more data.
159 * Schedule the transmit.
160 */
161static void qca_tty_wakeup(struct serdev_device *serdev)
162{
163	struct qcauart *qca = serdev_device_get_drvdata(serdev);
164
165	schedule_work(&qca->tx_work);
166}
167
168static const struct serdev_device_ops qca_serdev_ops = {
169	.receive_buf = qca_tty_receive,
170	.write_wakeup = qca_tty_wakeup,
171};
172
173static int qcauart_netdev_open(struct net_device *dev)
174{
175	struct qcauart *qca = netdev_priv(dev);
176
177	netif_start_queue(qca->net_dev);
178
179	return 0;
180}
181
182static int qcauart_netdev_close(struct net_device *dev)
183{
184	struct qcauart *qca = netdev_priv(dev);
185
186	netif_stop_queue(dev);
187	flush_work(&qca->tx_work);
188
189	spin_lock_bh(&qca->lock);
190	qca->tx_left = 0;
191	spin_unlock_bh(&qca->lock);
192
193	return 0;
194}
195
196static netdev_tx_t
197qcauart_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
198{
199	struct net_device_stats *n_stats = &dev->stats;
200	struct qcauart *qca = netdev_priv(dev);
201	u8 pad_len = 0;
202	int written;
203	u8 *pos;
204
205	spin_lock(&qca->lock);
206
207	WARN_ON(qca->tx_left);
208
209	if (!netif_running(dev))  {
210		spin_unlock(&qca->lock);
211		netdev_warn(qca->net_dev, "xmit: iface is down\n");
212		goto out;
213	}
214
215	pos = qca->tx_buffer;
216
217	if (skb->len < QCAFRM_MIN_LEN)
218		pad_len = QCAFRM_MIN_LEN - skb->len;
219
220	pos += qcafrm_create_header(pos, skb->len + pad_len);
221
222	memcpy(pos, skb->data, skb->len);
223	pos += skb->len;
224
225	if (pad_len) {
226		memset(pos, 0, pad_len);
227		pos += pad_len;
228	}
229
230	pos += qcafrm_create_footer(pos);
231
232	netif_stop_queue(qca->net_dev);
233
234	written = serdev_device_write_buf(qca->serdev, qca->tx_buffer,
235					  pos - qca->tx_buffer);
236	if (written > 0) {
237		qca->tx_left = (pos - qca->tx_buffer) - written;
238		qca->tx_head = qca->tx_buffer + written;
239		n_stats->tx_bytes += written;
240	}
241	spin_unlock(&qca->lock);
242
243	netif_trans_update(dev);
244out:
245	dev_kfree_skb_any(skb);
246	return NETDEV_TX_OK;
247}
248
249static void qcauart_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
250{
251	struct qcauart *qca = netdev_priv(dev);
252
253	netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
254		    jiffies, dev_trans_start(dev));
255	dev->stats.tx_errors++;
256	dev->stats.tx_dropped++;
257}
258
259static int qcauart_netdev_init(struct net_device *dev)
260{
261	struct qcauart *qca = netdev_priv(dev);
262	size_t len;
263
264	/* Finish setting up the device info. */
265	dev->mtu = QCAFRM_MAX_MTU;
266	dev->type = ARPHRD_ETHER;
267
268	len = QCAFRM_HEADER_LEN + QCAFRM_MAX_LEN + QCAFRM_FOOTER_LEN;
269	qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL);
270	if (!qca->tx_buffer)
271		return -ENOMEM;
272
273	qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev,
274						qca->net_dev->mtu +
275						VLAN_ETH_HLEN);
276	if (!qca->rx_skb)
277		return -ENOBUFS;
278
279	return 0;
280}
281
282static void qcauart_netdev_uninit(struct net_device *dev)
283{
284	struct qcauart *qca = netdev_priv(dev);
285
286	dev_kfree_skb(qca->rx_skb);
287}
288
289static const struct net_device_ops qcauart_netdev_ops = {
290	.ndo_init = qcauart_netdev_init,
291	.ndo_uninit = qcauart_netdev_uninit,
292	.ndo_open = qcauart_netdev_open,
293	.ndo_stop = qcauart_netdev_close,
294	.ndo_start_xmit = qcauart_netdev_xmit,
295	.ndo_set_mac_address = eth_mac_addr,
296	.ndo_tx_timeout = qcauart_netdev_tx_timeout,
297	.ndo_validate_addr = eth_validate_addr,
298};
299
300static void qcauart_netdev_setup(struct net_device *dev)
301{
302	dev->netdev_ops = &qcauart_netdev_ops;
303	dev->watchdog_timeo = QCAUART_TX_TIMEOUT;
304	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
305	dev->tx_queue_len = 100;
306
307	/* MTU range: 46 - 1500 */
308	dev->min_mtu = QCAFRM_MIN_MTU;
309	dev->max_mtu = QCAFRM_MAX_MTU;
310}
311
312static const struct of_device_id qca_uart_of_match[] = {
313	{
314	 .compatible = "qca,qca7000",
315	},
316	{}
317};
318MODULE_DEVICE_TABLE(of, qca_uart_of_match);
319
320static int qca_uart_probe(struct serdev_device *serdev)
321{
322	struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart));
323	struct qcauart *qca;
324	u32 speed = 115200;
325	int ret;
326
327	if (!qcauart_dev)
328		return -ENOMEM;
329
330	qcauart_netdev_setup(qcauart_dev);
331	SET_NETDEV_DEV(qcauart_dev, &serdev->dev);
332
333	qca = netdev_priv(qcauart_dev);
334	if (!qca) {
335		pr_err("qca_uart: Fail to retrieve private structure\n");
336		ret = -ENOMEM;
337		goto free;
338	}
339	qca->net_dev = qcauart_dev;
340	qca->serdev = serdev;
341	qcafrm_fsm_init_uart(&qca->frm_handle);
342
343	spin_lock_init(&qca->lock);
344	INIT_WORK(&qca->tx_work, qcauart_transmit);
345
346	of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);
347
348	ret = of_get_ethdev_address(serdev->dev.of_node, qca->net_dev);
349	if (ret) {
350		eth_hw_addr_random(qca->net_dev);
351		dev_info(&serdev->dev, "Using random MAC address: %pM\n",
352			 qca->net_dev->dev_addr);
353	}
354
355	netif_carrier_on(qca->net_dev);
356	serdev_device_set_drvdata(serdev, qca);
357	serdev_device_set_client_ops(serdev, &qca_serdev_ops);
358
359	ret = serdev_device_open(serdev);
360	if (ret) {
361		dev_err(&serdev->dev, "Unable to open device %s\n",
362			qcauart_dev->name);
363		goto free;
364	}
365
366	speed = serdev_device_set_baudrate(serdev, speed);
367	dev_info(&serdev->dev, "Using baudrate: %u\n", speed);
368
369	serdev_device_set_flow_control(serdev, false);
370
371	ret = register_netdev(qcauart_dev);
372	if (ret) {
373		dev_err(&serdev->dev, "Unable to register net device %s\n",
374			qcauart_dev->name);
375		serdev_device_close(serdev);
376		cancel_work_sync(&qca->tx_work);
377		goto free;
378	}
379
380	return 0;
381
382free:
383	free_netdev(qcauart_dev);
384	return ret;
385}
386
387static void qca_uart_remove(struct serdev_device *serdev)
388{
389	struct qcauart *qca = serdev_device_get_drvdata(serdev);
390
391	unregister_netdev(qca->net_dev);
392
393	/* Flush any pending characters in the driver. */
394	serdev_device_close(serdev);
395	cancel_work_sync(&qca->tx_work);
396
397	free_netdev(qca->net_dev);
398}
399
400static struct serdev_device_driver qca_uart_driver = {
401	.probe = qca_uart_probe,
402	.remove = qca_uart_remove,
403	.driver = {
404		.name = QCAUART_DRV_NAME,
405		.of_match_table = qca_uart_of_match,
406	},
407};
408
409module_serdev_device_driver(qca_uart_driver);
410
411MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver");
412MODULE_AUTHOR("Qualcomm Atheros Communications");
413MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
414MODULE_LICENSE("Dual BSD/GPL");
415MODULE_VERSION(QCAUART_DRV_VERSION);
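
The transmit path above (qcauart_netdev_xmit) wraps every Ethernet frame in the QCA framing: a header written by qcafrm_create_header(), the payload padded up to QCAFRM_MIN_LEN when it is short, and a trailer from qcafrm_create_footer(); qcauart_netdev_init() sizes tx_buffer for the worst case, QCAFRM_HEADER_LEN + QCAFRM_MAX_LEN + QCAFRM_FOOTER_LEN. The stand-alone C sketch below only mirrors that sizing arithmetic; the numeric constants and the helper name are illustrative assumptions, not values taken from qca_7k_common.h.

/*
 * Minimal user-space sketch of the frame sizing done in qcauart_netdev_xmit():
 * header + payload + pad-to-minimum + footer. The constant values below are
 * assumptions chosen for illustration; the driver takes the real ones from
 * qca_7k_common.h.
 */
#include <stddef.h>
#include <stdio.h>

#define FRM_HEADER_LEN 8   /* assumed length of the framing header */
#define FRM_FOOTER_LEN 2   /* assumed length of the framing footer */
#define FRM_MIN_LEN    60  /* assumed minimum payload (Ethernet minimum frame) */

/* Hypothetical helper: number of bytes handed to serdev_device_write_buf(). */
static size_t framed_len(size_t payload_len)
{
	size_t pad = payload_len < FRM_MIN_LEN ? FRM_MIN_LEN - payload_len : 0;

	return FRM_HEADER_LEN + payload_len + pad + FRM_FOOTER_LEN;
}

int main(void)
{
	/* A 42-byte ARP request is padded up to the 60-byte minimum. */
	printf("42-byte payload   -> %zu bytes on the UART\n", framed_len(42));
	/* A full-sized 1500-byte packet needs no padding. */
	printf("1500-byte payload -> %zu bytes on the UART\n", framed_len(1500));
	return 0;
}

Whatever part of the framed buffer serdev_device_write_buf() cannot accept immediately is left in tx_left/tx_head, the queue stays stopped, and the write_wakeup callback only has to schedule tx_work so qcauart_transmit() can drain the remainder and re-wake the queue.
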
v4.17
  1/*
  2 *   Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
  3 *   Copyright (c) 2017, I2SE GmbH
  4 *
  5 *   Permission to use, copy, modify, and/or distribute this software
  6 *   for any purpose with or without fee is hereby granted, provided
  7 *   that the above copyright notice and this permission notice appear
  8 *   in all copies.
  9 *
 10 *   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 11 *   WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 12 *   WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
 13 *   THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
 14 *   CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 15 *   LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
 16 *   NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 17 *   CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 18 */
 19
 20/*   This module implements the Qualcomm Atheros UART protocol for
 21 *   a kernel-based UART device; it is essentially an Ethernet-to-UART
 22 *   serial converter.
 23 */
 24
 25#include <linux/device.h>
 26#include <linux/errno.h>
 27#include <linux/etherdevice.h>
 28#include <linux/if_arp.h>
 29#include <linux/if_ether.h>
 30#include <linux/jiffies.h>
 31#include <linux/kernel.h>
 32#include <linux/module.h>
 33#include <linux/netdevice.h>
 34#include <linux/of.h>
 35#include <linux/of_device.h>
 36#include <linux/of_net.h>
 37#include <linux/sched.h>
 38#include <linux/serdev.h>
 39#include <linux/skbuff.h>
 40#include <linux/types.h>
 41
 42#include "qca_7k_common.h"
 43
 44#define QCAUART_DRV_VERSION "0.1.0"
 45#define QCAUART_DRV_NAME "qcauart"
 46#define QCAUART_TX_TIMEOUT (1 * HZ)
 47
 48struct qcauart {
 49	struct net_device *net_dev;
 50	spinlock_t lock;			/* transmit lock */
 51	struct work_struct tx_work;		/* Flushes transmit buffer   */
 52
 53	struct serdev_device *serdev;
 54	struct qcafrm_handle frm_handle;
 55	struct sk_buff *rx_skb;
 56
 57	unsigned char *tx_head;			/* pointer to next XMIT byte */
 58	int tx_left;				/* bytes left in XMIT queue  */
 59	unsigned char *tx_buffer;
 60};
 61
 62static int
 63qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
 64		size_t count)
 65{
 66	struct qcauart *qca = serdev_device_get_drvdata(serdev);
 67	struct net_device *netdev = qca->net_dev;
 68	struct net_device_stats *n_stats = &netdev->stats;
 69	size_t i;
 70
 71	if (!qca->rx_skb) {
 72		qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
 73							netdev->mtu +
 74							VLAN_ETH_HLEN);
 75		if (!qca->rx_skb) {
 76			n_stats->rx_errors++;
 77			n_stats->rx_dropped++;
 78			return 0;
 79		}
 80	}
 81
 82	for (i = 0; i < count; i++) {
 83		s32 retcode;
 84
 85		retcode = qcafrm_fsm_decode(&qca->frm_handle,
 86					    qca->rx_skb->data,
 87					    skb_tailroom(qca->rx_skb),
 88					    data[i]);
 89
 90		switch (retcode) {
 91		case QCAFRM_GATHER:
 92		case QCAFRM_NOHEAD:
 93			break;
 94		case QCAFRM_NOTAIL:
 95			netdev_dbg(netdev, "recv: no RX tail\n");
 96			n_stats->rx_errors++;
 97			n_stats->rx_dropped++;
 98			break;
 99		case QCAFRM_INVLEN:
100			netdev_dbg(netdev, "recv: invalid RX length\n");
101			n_stats->rx_errors++;
102			n_stats->rx_dropped++;
103			break;
104		default:
105			n_stats->rx_packets++;
106			n_stats->rx_bytes += retcode;
107			skb_put(qca->rx_skb, retcode);
108			qca->rx_skb->protocol = eth_type_trans(
109						qca->rx_skb, qca->rx_skb->dev);
110			qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
111			netif_rx_ni(qca->rx_skb);
112			qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
113								netdev->mtu +
114								VLAN_ETH_HLEN);
115			if (!qca->rx_skb) {
116				netdev_dbg(netdev, "recv: out of RX resources\n");
117				n_stats->rx_errors++;
118				return i;
119			}
120		}
121	}
122
123	return i;
124}
125
126/* Write out any remaining transmit buffer. Scheduled when tty is writable */
127static void qcauart_transmit(struct work_struct *work)
128{
129	struct qcauart *qca = container_of(work, struct qcauart, tx_work);
130	struct net_device_stats *n_stats = &qca->net_dev->stats;
131	int written;
132
133	spin_lock_bh(&qca->lock);
134
135	/* First make sure we're connected. */
136	if (!netif_running(qca->net_dev)) {
137		spin_unlock_bh(&qca->lock);
138		return;
139	}
140
141	if (qca->tx_left <= 0)  {
142		/* Now serial buffer is almost free & we can start
143		 * transmission of another packet
144		 */
145		n_stats->tx_packets++;
146		spin_unlock_bh(&qca->lock);
147		netif_wake_queue(qca->net_dev);
148		return;
149	}
150
151	written = serdev_device_write_buf(qca->serdev, qca->tx_head,
152					  qca->tx_left);
153	if (written > 0) {
154		qca->tx_left -= written;
155		qca->tx_head += written;
156	}
157	spin_unlock_bh(&qca->lock);
158}
159
160/* Called by the driver when there's room for more data.
161 * Schedule the transmit.
162 */
163static void qca_tty_wakeup(struct serdev_device *serdev)
164{
165	struct qcauart *qca = serdev_device_get_drvdata(serdev);
166
167	schedule_work(&qca->tx_work);
168}
169
170static struct serdev_device_ops qca_serdev_ops = {
171	.receive_buf = qca_tty_receive,
172	.write_wakeup = qca_tty_wakeup,
173};
174
175static int qcauart_netdev_open(struct net_device *dev)
176{
177	struct qcauart *qca = netdev_priv(dev);
178
179	netif_start_queue(qca->net_dev);
180
181	return 0;
182}
183
184static int qcauart_netdev_close(struct net_device *dev)
185{
186	struct qcauart *qca = netdev_priv(dev);
187
188	netif_stop_queue(dev);
189	flush_work(&qca->tx_work);
190
191	spin_lock_bh(&qca->lock);
192	qca->tx_left = 0;
193	spin_unlock_bh(&qca->lock);
194
195	return 0;
196}
197
198static netdev_tx_t
199qcauart_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
200{
201	struct net_device_stats *n_stats = &dev->stats;
202	struct qcauart *qca = netdev_priv(dev);
203	u8 pad_len = 0;
204	int written;
205	u8 *pos;
206
207	spin_lock(&qca->lock);
208
209	WARN_ON(qca->tx_left);
210
211	if (!netif_running(dev))  {
212		spin_unlock(&qca->lock);
213		netdev_warn(qca->net_dev, "xmit: iface is down\n");
214		goto out;
215	}
216
217	pos = qca->tx_buffer;
218
219	if (skb->len < QCAFRM_MIN_LEN)
220		pad_len = QCAFRM_MIN_LEN - skb->len;
221
222	pos += qcafrm_create_header(pos, skb->len + pad_len);
223
224	memcpy(pos, skb->data, skb->len);
225	pos += skb->len;
226
227	if (pad_len) {
228		memset(pos, 0, pad_len);
229		pos += pad_len;
230	}
231
232	pos += qcafrm_create_footer(pos);
233
234	netif_stop_queue(qca->net_dev);
235
236	written = serdev_device_write_buf(qca->serdev, qca->tx_buffer,
237					  pos - qca->tx_buffer);
238	if (written > 0) {
239		qca->tx_left = (pos - qca->tx_buffer) - written;
240		qca->tx_head = qca->tx_buffer + written;
241		n_stats->tx_bytes += written;
242	}
243	spin_unlock(&qca->lock);
244
245	netif_trans_update(dev);
246out:
247	dev_kfree_skb_any(skb);
248	return NETDEV_TX_OK;
249}
250
251static void qcauart_netdev_tx_timeout(struct net_device *dev)
252{
253	struct qcauart *qca = netdev_priv(dev);
254
255	netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
256		    jiffies, dev_trans_start(dev));
257	dev->stats.tx_errors++;
258	dev->stats.tx_dropped++;
259}
260
261static int qcauart_netdev_init(struct net_device *dev)
262{
263	struct qcauart *qca = netdev_priv(dev);
264	size_t len;
265
266	/* Finish setting up the device info. */
267	dev->mtu = QCAFRM_MAX_MTU;
268	dev->type = ARPHRD_ETHER;
269
270	len = QCAFRM_HEADER_LEN + QCAFRM_MAX_LEN + QCAFRM_FOOTER_LEN;
271	qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL);
272	if (!qca->tx_buffer)
273		return -ENOMEM;
274
275	qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev,
276						qca->net_dev->mtu +
277						VLAN_ETH_HLEN);
278	if (!qca->rx_skb)
279		return -ENOBUFS;
280
281	return 0;
282}
283
284static void qcauart_netdev_uninit(struct net_device *dev)
285{
286	struct qcauart *qca = netdev_priv(dev);
287
288	if (qca->rx_skb)
289		dev_kfree_skb(qca->rx_skb);
290}
291
292static const struct net_device_ops qcauart_netdev_ops = {
293	.ndo_init = qcauart_netdev_init,
294	.ndo_uninit = qcauart_netdev_uninit,
295	.ndo_open = qcauart_netdev_open,
296	.ndo_stop = qcauart_netdev_close,
297	.ndo_start_xmit = qcauart_netdev_xmit,
298	.ndo_set_mac_address = eth_mac_addr,
299	.ndo_tx_timeout = qcauart_netdev_tx_timeout,
300	.ndo_validate_addr = eth_validate_addr,
301};
302
303static void qcauart_netdev_setup(struct net_device *dev)
304{
305	dev->netdev_ops = &qcauart_netdev_ops;
306	dev->watchdog_timeo = QCAUART_TX_TIMEOUT;
307	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
308	dev->tx_queue_len = 100;
309
310	/* MTU range: 46 - 1500 */
311	dev->min_mtu = QCAFRM_MIN_MTU;
312	dev->max_mtu = QCAFRM_MAX_MTU;
313}
314
315static const struct of_device_id qca_uart_of_match[] = {
316	{
317	 .compatible = "qca,qca7000",
318	},
319	{}
320};
321MODULE_DEVICE_TABLE(of, qca_uart_of_match);
322
323static int qca_uart_probe(struct serdev_device *serdev)
324{
325	struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart));
326	struct qcauart *qca;
327	const char *mac;
328	u32 speed = 115200;
329	int ret;
330
331	if (!qcauart_dev)
332		return -ENOMEM;
333
334	qcauart_netdev_setup(qcauart_dev);
335	SET_NETDEV_DEV(qcauart_dev, &serdev->dev);
336
337	qca = netdev_priv(qcauart_dev);
338	if (!qca) {
339		pr_err("qca_uart: Fail to retrieve private structure\n");
340		ret = -ENOMEM;
341		goto free;
342	}
343	qca->net_dev = qcauart_dev;
344	qca->serdev = serdev;
345	qcafrm_fsm_init_uart(&qca->frm_handle);
346
347	spin_lock_init(&qca->lock);
348	INIT_WORK(&qca->tx_work, qcauart_transmit);
349
350	of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);
351
352	mac = of_get_mac_address(serdev->dev.of_node);
353
354	if (mac)
355		ether_addr_copy(qca->net_dev->dev_addr, mac);
356
357	if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
358		eth_hw_addr_random(qca->net_dev);
359		dev_info(&serdev->dev, "Using random MAC address: %pM\n",
360			 qca->net_dev->dev_addr);
361	}
362
363	netif_carrier_on(qca->net_dev);
364	serdev_device_set_drvdata(serdev, qca);
365	serdev_device_set_client_ops(serdev, &qca_serdev_ops);
366
367	ret = serdev_device_open(serdev);
368	if (ret) {
369		dev_err(&serdev->dev, "Unable to open device %s\n",
370			qcauart_dev->name);
371		goto free;
372	}
373
374	speed = serdev_device_set_baudrate(serdev, speed);
375	dev_info(&serdev->dev, "Using baudrate: %u\n", speed);
376
377	serdev_device_set_flow_control(serdev, false);
378
379	ret = register_netdev(qcauart_dev);
380	if (ret) {
381		dev_err(&serdev->dev, "Unable to register net device %s\n",
382			qcauart_dev->name);
383		serdev_device_close(serdev);
384		cancel_work_sync(&qca->tx_work);
385		goto free;
386	}
387
388	return 0;
389
390free:
391	free_netdev(qcauart_dev);
392	return ret;
393}
394
395static void qca_uart_remove(struct serdev_device *serdev)
396{
397	struct qcauart *qca = serdev_device_get_drvdata(serdev);
398
399	unregister_netdev(qca->net_dev);
400
401	/* Flush any pending characters in the driver. */
402	serdev_device_close(serdev);
403	cancel_work_sync(&qca->tx_work);
404
405	free_netdev(qca->net_dev);
406}
407
408static struct serdev_device_driver qca_uart_driver = {
409	.probe = qca_uart_probe,
410	.remove = qca_uart_remove,
411	.driver = {
412		.name = QCAUART_DRV_NAME,
413		.of_match_table = of_match_ptr(qca_uart_of_match),
414	},
415};
416
417module_serdev_device_driver(qca_uart_driver);
418
419MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver");
420MODULE_AUTHOR("Qualcomm Atheros Communications");
421MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
422MODULE_LICENSE("Dual BSD/GPL");
423MODULE_VERSION(QCAUART_DRV_VERSION);