// SPDX-License-Identifier: GPL-2.0-or-later
/* MHI Network driver - Network over MHI bus
 *
 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
 */

#include <linux/if_arp.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
#include <linux/wwan.h>

#include "mhi.h"

#define MHI_NET_MIN_MTU		ETH_MIN_MTU
#define MHI_NET_MAX_MTU		0xffff
#define MHI_NET_DEFAULT_MTU	0x4000

/* When set to false, the default netdev (link 0) is not created, and it is up
 * to the user to create the link (via the wwan rtnetlink interface).
 */
static bool create_default_iface = true;
module_param(create_default_iface, bool, 0);
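/* Example (assuming this driver is built as the mhi_net module):
 *   modprobe mhi_net create_default_iface=0
 * then create links manually through the wwan rtnetlink interface
 * (e.g. "ip link add ... type wwan" with iproute2).
 */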

struct mhi_device_info {
	const char *netname;
	const struct mhi_net_proto *proto;
};

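/* ndo_open: queue the initial RX buffer refill work and start the TX queue.
 * Carrier is managed out-of-band (e.g. via QMI control messages), so it is
 * simply asserted here.
 */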
static int mhi_ndo_open(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);

	/* Feed the rx buffer pool */
	schedule_delayed_work(&mhi_netdev->rx_refill, 0);

	/* Carrier is established via out-of-band channel (e.g. qmi) */
	netif_carrier_on(ndev);

	netif_start_queue(ndev);

	return 0;
}

static int mhi_ndo_stop(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	cancel_delayed_work_sync(&mhi_netdev->rx_refill);

	return 0;
}

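/* ndo_start_xmit: give the protocol handler (if any) a chance to adjust the
 * skb, then hand it to the MHI UL (host-to-device) channel. The queue is
 * stopped when the channel ring is full and woken again from the UL
 * completion callback.
 */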
static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
	const struct mhi_net_proto *proto = mhi_netdev->proto;
	struct mhi_device *mdev = mhi_netdev->mdev;
	int err;

	if (proto && proto->tx_fixup) {
		skb = proto->tx_fixup(mhi_netdev, skb);
		if (unlikely(!skb))
			goto exit_drop;
	}

	err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (unlikely(err)) {
		net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
				    ndev->name, err);
		dev_kfree_skb_any(skb);
		goto exit_drop;
	}

	if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;

exit_drop:
	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	u64_stats_inc(&mhi_netdev->stats.tx_dropped);
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	return NETDEV_TX_OK;
}

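/* ndo_get_stats64: snapshot the per-device counters. The u64_stats fetch
 * begin/retry loops guarantee a consistent view of the RX and TX counter
 * groups, which are updated from the MHI completion callbacks.
 */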
static void mhi_ndo_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp);
		stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
		stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
		stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
		stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped);
		stats->rx_length_errors = u64_stats_read(&mhi_netdev->stats.rx_length_errors);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp);
		stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
		stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
		stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
		stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start));
}

static const struct net_device_ops mhi_netdev_ops = {
	.ndo_open               = mhi_ndo_open,
	.ndo_stop               = mhi_ndo_stop,
	.ndo_start_xmit         = mhi_ndo_xmit,
	.ndo_get_stats64	= mhi_ndo_get_stats64,
};

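/* wwan_ops setup callback: configure the netdev as a raw-IP, ARP-less
 * point-to-point interface with no L2 header.
 */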
static void mhi_net_setup(struct net_device *ndev)
{
	ndev->header_ops = NULL;  /* No header */
	ndev->type = ARPHRD_RAWIP;
	ndev->hard_header_len = 0;
	ndev->addr_len = 0;
	ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
	ndev->netdev_ops = &mhi_netdev_ops;
	ndev->mtu = MHI_NET_DEFAULT_MTU;
	ndev->min_mtu = MHI_NET_MIN_MTU;
	ndev->max_mtu = MHI_NET_MAX_MTU;
	ndev->tx_queue_len = 1000;
}

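/* Chain a received fragment onto the current aggregation skb (skbagg_head),
 * used when a packet was split across several MHI transfers (-EOVERFLOW).
 * Returns the head of the aggregate.
 */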
static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
				       struct sk_buff *skb)
{
	struct sk_buff *head = mhi_netdev->skbagg_head;
	struct sk_buff *tail = mhi_netdev->skbagg_tail;

	/* This is non-paged skb chaining using frag_list */
	if (!head) {
		mhi_netdev->skbagg_head = skb;
		return skb;
	}

	if (!skb_shinfo(head)->frag_list)
		skb_shinfo(head)->frag_list = skb;
	else
		tail->next = skb;

	head->len += skb->len;
	head->data_len += skb->len;
	head->truesize += skb->truesize;

	mhi_netdev->skbagg_tail = skb;

	return mhi_netdev->skbagg_head;
}

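/* DL (device-to-host) transfer completion callback: handle transfer errors,
 * re-aggregate oversized packets, account RX statistics, set the skb protocol
 * and pass the packet up the stack (or to the protocol handler). Finally,
 * reschedule the refill work if the RX descriptor queue is running low.
 */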
static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	const struct mhi_net_proto *proto = mhi_netdev->proto;
	struct sk_buff *skb = mhi_res->buf_addr;
	int free_desc_count;

	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	if (unlikely(mhi_res->transaction_status)) {
		switch (mhi_res->transaction_status) {
		case -EOVERFLOW:
			/* Packet cannot fit in one MHI buffer and has been
			 * split over multiple MHI transfers, do re-aggregation.
			 * That usually means the device side MTU is larger than
			 * the host side MTU/MRU. Since this is not optimal,
			 * print a warning (once).
			 */
			netdev_warn_once(mhi_netdev->ndev,
					 "Fragmented packets received, fix MTU?\n");
			skb_put(skb, mhi_res->bytes_xferd);
			mhi_net_skb_agg(mhi_netdev, skb);
			break;
		case -ENOTCONN:
			/* MHI layer stopping/resetting the DL channel */
			dev_kfree_skb_any(skb);
			return;
		default:
			/* Unknown error, simply drop */
			dev_kfree_skb_any(skb);
			u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
			u64_stats_inc(&mhi_netdev->stats.rx_errors);
			u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
		}
	} else {
		skb_put(skb, mhi_res->bytes_xferd);

		if (mhi_netdev->skbagg_head) {
			/* Aggregate the final fragment */
			skb = mhi_net_skb_agg(mhi_netdev, skb);
			mhi_netdev->skbagg_head = NULL;
		}

		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_packets);
		u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);

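		/* The high nibble of the first byte is the IP version field:
		 * 4 for IPv4, 6 for IPv6. Anything else is assumed to be
		 * multiplexed (QMAP) traffic and is tagged as ETH_P_MAP.
		 */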
		switch (skb->data[0] & 0xf0) {
		case 0x40:
			skb->protocol = htons(ETH_P_IP);
			break;
		case 0x60:
			skb->protocol = htons(ETH_P_IPV6);
			break;
		default:
			skb->protocol = htons(ETH_P_MAP);
			break;
		}

		if (proto && proto->rx)
			proto->rx(mhi_netdev, skb);
		else
			netif_rx(skb);
	}

	/* Refill if the RX buffer queue is running low */
	if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
		schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}

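/* UL (host-to-device) transfer completion callback: release the transmitted
 * skb, update TX statistics and wake the netdev queue if it was stopped
 * because the channel ring was full.
 */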
static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb = mhi_res->buf_addr;

	/* Hardware has consumed the buffer, so free the skb (which is not
	 * freed by the MHI stack) and perform accounting.
	 */
	dev_consume_skb_any(skb);

	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {
		/* MHI layer stopping/resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
			return;
		}

		u64_stats_inc(&mhi_netdev->stats.tx_errors);
	} else {
		u64_stats_inc(&mhi_netdev->stats.tx_packets);
		u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_wake_queue(ndev);
}

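/* Delayed work that keeps the MHI DL channel fed with receive buffers, each
 * sized to the configured MRU (or the current MTU as a fallback). If no
 * buffer could be queued at all, the work is rescheduled to retry after a
 * short delay.
 */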
static void mhi_net_rx_refill_work(struct work_struct *work)
{
	struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
						      rx_refill.work);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb;
	unsigned int size;
	int err;

	size = mhi_netdev->mru ? mhi_netdev->mru : READ_ONCE(ndev->mtu);

	while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
		skb = netdev_alloc_skb(ndev, size);
		if (unlikely(!skb))
			break;

		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
		if (unlikely(err)) {
			net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
					    ndev->name, err);
			kfree_skb(skb);
			break;
		}

		/* Do not hog the CPU if rx buffers are consumed faster than
		 * queued (unlikely).
		 */
		cond_resched();
	}

	/* If we're still starved of rx buffers, reschedule later */
	if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
		schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}

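/* wwan_ops newlink callback. It is invoked by the WWAN core when a link is
 * created through rtnetlink (extack set) and directly from mhi_net_probe()
 * for the default link 0 (extack NULL), which is why the netdev registration
 * path differs below.
 */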
static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
			   struct netlink_ext_ack *extack)
{
	const struct mhi_device_info *info;
	struct mhi_device *mhi_dev = ctxt;
	struct mhi_net_dev *mhi_netdev;
	int err;

	info = (struct mhi_device_info *)mhi_dev->id->driver_data;

	/* For now we only support one link (link context 0); the driver must
	 * be reworked to break the 1:1 netdev relationship for MBIM, and to
	 * forward the setup call to rmnet (QMAP), before more links can be
	 * supported.
	 */
	if (if_id != 0)
		return -EINVAL;

	if (dev_get_drvdata(&mhi_dev->dev))
		return -EBUSY;

	mhi_netdev = wwan_netdev_drvpriv(ndev);

	dev_set_drvdata(&mhi_dev->dev, mhi_netdev);
	mhi_netdev->ndev = ndev;
	mhi_netdev->mdev = mhi_dev;
	mhi_netdev->skbagg_head = NULL;
	mhi_netdev->proto = info->proto;

	INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
	u64_stats_init(&mhi_netdev->stats.rx_syncp);
	u64_stats_init(&mhi_netdev->stats.tx_syncp);

	/* Start MHI channels */
	err = mhi_prepare_for_transfer(mhi_dev);
	if (err)
		return err;

	/* Number of transfer descriptors determines size of the queue */
	mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	if (extack)
		err = register_netdevice(ndev);
	else
		err = register_netdev(ndev);
	if (err)
		return err;

	if (mhi_netdev->proto) {
		err = mhi_netdev->proto->init(mhi_netdev);
		if (err)
			goto out_err_proto;
	}

	return 0;

out_err_proto:
	unregister_netdevice(ndev);
	return err;
}

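/* wwan_ops dellink callback, also used from mhi_net_remove() for the default
 * link: unregister the netdev, stop the MHI channels and drop any partially
 * aggregated packet still held by the driver.
 */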
static void mhi_net_dellink(void *ctxt, struct net_device *ndev,
			    struct list_head *head)
{
	struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
	struct mhi_device *mhi_dev = ctxt;

	if (head)
		unregister_netdevice_queue(ndev, head);
	else
		unregister_netdev(ndev);

	mhi_unprepare_from_transfer(mhi_dev);

	kfree_skb(mhi_netdev->skbagg_head);

	dev_set_drvdata(&mhi_dev->dev, NULL);
}

static const struct wwan_ops mhi_wwan_ops = {
	.priv_size = sizeof(struct mhi_net_dev),
	.setup = mhi_net_setup,
	.newlink = mhi_net_newlink,
	.dellink = mhi_net_dellink,
};

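/* MHI driver probe: register the WWAN link ops for this controller and,
 * unless disabled via the create_default_iface module parameter, create and
 * register the default network interface (link 0).
 */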
static int mhi_net_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data;
	struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
	struct net_device *ndev;
	int err;

	err = wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_wwan_ops, mhi_dev,
				WWAN_NO_DEFAULT_LINK);
	if (err)
		return err;

	if (!create_default_iface)
		return 0;

	/* Create a default interface which is used as either the RMNET
	 * real-dev, MBIM link 0 or IP link 0.
	 */
	ndev = alloc_netdev(sizeof(struct mhi_net_dev), info->netname,
			    NET_NAME_PREDICTABLE, mhi_net_setup);
	if (!ndev) {
		err = -ENOMEM;
		goto err_unregister;
	}

	SET_NETDEV_DEV(ndev, &mhi_dev->dev);

	err = mhi_net_newlink(mhi_dev, ndev, 0, NULL);
	if (err)
		goto err_release;

	return 0;

err_release:
	free_netdev(ndev);
err_unregister:
	wwan_unregister_ops(&cntrl->mhi_dev->dev);

	return err;
}

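/* MHI driver remove: unregister the WWAN ops (the WWAN core tears down any
 * rtnetlink-created links) and remove the default interface if it was
 * created at probe time.
 */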
static void mhi_net_remove(struct mhi_device *mhi_dev)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;

	/* WWAN core takes care of removing remaining links */
	wwan_unregister_ops(&cntrl->mhi_dev->dev);

	if (create_default_iface)
		mhi_net_dellink(mhi_dev, mhi_netdev->ndev, NULL);
}

static const struct mhi_device_info mhi_hwip0 = {
	.netname = "mhi_hwip%d",
};

static const struct mhi_device_info mhi_swip0 = {
	.netname = "mhi_swip%d",
};

static const struct mhi_device_info mhi_hwip0_mbim = {
	.netname = "mhi_mbim%d",
	.proto = &proto_mbim,
};

static const struct mhi_device_id mhi_net_id_table[] = {
	/* Hardware accelerated data PATH (to modem IPA), protocol agnostic */
	{ .chan = "IP_HW0", .driver_data = (kernel_ulong_t)&mhi_hwip0 },
	/* Software data PATH (to modem CPU) */
	{ .chan = "IP_SW0", .driver_data = (kernel_ulong_t)&mhi_swip0 },
	/* Hardware accelerated data PATH (to modem IPA), MBIM protocol */
	{ .chan = "IP_HW0_MBIM", .driver_data = (kernel_ulong_t)&mhi_hwip0_mbim },
	{}
};
MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);

static struct mhi_driver mhi_net_driver = {
	.probe = mhi_net_probe,
	.remove = mhi_net_remove,
	.dl_xfer_cb = mhi_net_dl_callback,
	.ul_xfer_cb = mhi_net_ul_callback,
	.id_table = mhi_net_id_table,
	.driver = {
		.name = "mhi_net",
		.owner = THIS_MODULE,
	},
};

module_mhi_driver(mhi_net_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Network over MHI");
MODULE_LICENSE("GPL v2");