v6.9.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
 *  and Sakari Ailus <sakari.ailus@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_DESCRIPTION("ST-Ericsson CAIF modem protocol support");
MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
        struct cflayer layer;
        struct list_head list;
        struct net_device *netdev;
        int __percpu *pcpu_refcnt;
        spinlock_t flow_lock;
        struct sk_buff *xoff_skb;
        void (*xoff_skb_dtor)(struct sk_buff *skb);
        bool xoff;
};

struct caif_device_entry_list {
        struct list_head list;
        /* Protects simultaneous deletes in the list */
        struct mutex lock;
};

struct caif_net {
        struct cfcnfg *cfg;
        struct caif_device_entry_list caifdevs;
};

static unsigned int caif_net_id;
static int q_high = 50; /* Percent */

struct cfcnfg *get_cfcnfg(struct net *net)
{
        struct caif_net *caifn;
        caifn = net_generic(net, caif_net_id);
        return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
        struct caif_net *caifn;
        caifn = net_generic(net, caif_net_id);
        return &caifn->caifdevs;
}

static void caifd_put(struct caif_device_entry *e)
{
        this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
        this_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
        int i, refcnt = 0;
        for_each_possible_cpu(i)
                refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
        return refcnt;
}

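/*
 * Illustrative note, not part of the kernel source: the per-CPU counters
 * above give a lock-free hold/put on the fast path and only pay for a
 * cross-CPU sum when the exact value is needed. A minimal usage sketch:
 *
 *      caifd_hold(caifd);                      // cheap per-CPU increment
 *      ...use caifd outside the RCU section...
 *      caifd_put(caifd);                       // cheap per-CPU decrement
 *      if (caifd_refcnt_read(caifd) == 0)
 *              ;                               // no CPU still holds a reference
 */
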
/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
        struct caif_device_entry *caifd;

        caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
        if (!caifd)
                return NULL;
        caifd->pcpu_refcnt = alloc_percpu(int);
        if (!caifd->pcpu_refcnt) {
                kfree(caifd);
                return NULL;
        }
        caifd->netdev = dev;
        dev_hold(dev);
        return caifd;
}

static struct caif_device_entry *caif_get(struct net_device *dev)
{
        struct caif_device_entry_list *caifdevs =
            caif_device_list(dev_net(dev));
        struct caif_device_entry *caifd;

        list_for_each_entry_rcu(caifd, &caifdevs->list, list,
                                lockdep_rtnl_is_held()) {
                if (caifd->netdev == dev)
                        return caifd;
        }
        return NULL;
}

static void caif_flow_cb(struct sk_buff *skb)
{
        struct caif_device_entry *caifd;
        void (*dtor)(struct sk_buff *skb) = NULL;
        bool send_xoff;

        WARN_ON(skb->dev == NULL);

        rcu_read_lock();
        caifd = caif_get(skb->dev);

        WARN_ON(caifd == NULL);
        if (!caifd) {
                rcu_read_unlock();
                return;
        }

        caifd_hold(caifd);
        rcu_read_unlock();

        spin_lock_bh(&caifd->flow_lock);
        send_xoff = caifd->xoff;
        caifd->xoff = false;
        dtor = caifd->xoff_skb_dtor;

        if (WARN_ON(caifd->xoff_skb != skb))
                skb = NULL;

        caifd->xoff_skb = NULL;
        caifd->xoff_skb_dtor = NULL;

        spin_unlock_bh(&caifd->flow_lock);

        if (dtor && skb)
                dtor(skb);

        if (send_xoff)
                caifd->layer.up->
                        ctrlcmd(caifd->layer.up,
                                _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
                                caifd->layer.id);
        caifd_put(caifd);
}

static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
        int err, high = 0, qlen = 0;
        struct caif_device_entry *caifd =
            container_of(layer, struct caif_device_entry, layer);
        struct sk_buff *skb;
        struct netdev_queue *txq;

        rcu_read_lock_bh();

        skb = cfpkt_tonative(pkt);
        skb->dev = caifd->netdev;
        skb_reset_network_header(skb);
        skb->protocol = htons(ETH_P_CAIF);

        /* Check if we need to handle xoff */
        if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
                goto noxoff;

        if (unlikely(caifd->xoff))
                goto noxoff;

        if (likely(!netif_queue_stopped(caifd->netdev))) {
                struct Qdisc *sch;

                /* If we run with a TX queue, check if the queue is too long */
                txq = netdev_get_tx_queue(skb->dev, 0);
                sch = rcu_dereference_bh(txq->qdisc);
                if (likely(qdisc_is_empty(sch)))
                        goto noxoff;

                /* The explicit qdisc length can only be checked for !NOLOCK
                 * qdiscs; otherwise always set flow off.
                 */
                high = (caifd->netdev->tx_queue_len * q_high) / 100;
                if (!(sch->flags & TCQ_F_NOLOCK) && likely(sch->q.qlen < high))
                        goto noxoff;
        }

        /* Hold lock while accessing xoff */
        spin_lock_bh(&caifd->flow_lock);
        if (caifd->xoff) {
                spin_unlock_bh(&caifd->flow_lock);
                goto noxoff;
        }

        /*
         * Handle flow off: we do this by temporarily hijacking this
         * skb's destructor function and replacing it with our own
         * flow-on callback. The callback will set flow on and call
         * the original destructor.
         */

        pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
                        netif_queue_stopped(caifd->netdev),
                        qlen, high);
        caifd->xoff = true;
        caifd->xoff_skb = skb;
        caifd->xoff_skb_dtor = skb->destructor;
        skb->destructor = caif_flow_cb;
        spin_unlock_bh(&caifd->flow_lock);

        caifd->layer.up->ctrlcmd(caifd->layer.up,
                                        _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
                                        caifd->layer.id);
noxoff:
        rcu_read_unlock_bh();

        err = dev_queue_xmit(skb);
        if (err > 0)
                err = -EIO;

        return err;
}

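/*
 * Worked example, not part of the kernel source: with the default q_high
 * of 50 (percent) and a device tx_queue_len of 1000, transmit() computes
 * high = (1000 * 50) / 100 = 500, so flow is turned off once roughly 500
 * packets are queued on a lock-based qdisc.
 */
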
/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *pkttype, struct net_device *orig_dev)
{
        struct cfpkt *pkt;
        struct caif_device_entry *caifd;
        int err;

        pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

        rcu_read_lock();
        caifd = caif_get(dev);

        if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
                        !netif_oper_up(caifd->netdev)) {
                rcu_read_unlock();
                kfree_skb(skb);
                return NET_RX_DROP;
        }

        /* Hold reference to netdevice while using CAIF stack */
        caifd_hold(caifd);
        rcu_read_unlock();

        err = caifd->layer.up->receive(caifd->layer.up, pkt);

        /* For -EILSEQ the packet is not freed so free it now */
        if (err == -EILSEQ)
                cfpkt_destroy(pkt);

        /* Release reference to stack upwards */
        caifd_put(caifd);

        if (err != 0)
                err = NET_RX_DROP;
        return err;
}

static struct packet_type caif_packet_type __read_mostly = {
        .type = cpu_to_be16(ETH_P_CAIF),
        .func = receive,
};

static void dev_flowctrl(struct net_device *dev, int on)
{
        struct caif_device_entry *caifd;

        rcu_read_lock();

        caifd = caif_get(dev);
        if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
                rcu_read_unlock();
                return;
        }

        caifd_hold(caifd);
        rcu_read_unlock();

        caifd->layer.up->ctrlcmd(caifd->layer.up,
                                 on ?
                                 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
                                 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
                                 caifd->layer.id);
        caifd_put(caifd);
}

int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
                     struct cflayer *link_support, int head_room,
                     struct cflayer **layer,
                     int (**rcv_func)(struct sk_buff *, struct net_device *,
                                      struct packet_type *,
                                      struct net_device *))
{
        struct caif_device_entry *caifd;
        enum cfcnfg_phy_preference pref;
        struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
        struct caif_device_entry_list *caifdevs;
        int res;

        caifdevs = caif_device_list(dev_net(dev));
        caifd = caif_device_alloc(dev);
        if (!caifd)
                return -ENOMEM;
        *layer = &caifd->layer;
        spin_lock_init(&caifd->flow_lock);

        switch (caifdev->link_select) {
        case CAIF_LINK_HIGH_BANDW:
                pref = CFPHYPREF_HIGH_BW;
                break;
        case CAIF_LINK_LOW_LATENCY:
                pref = CFPHYPREF_LOW_LAT;
                break;
        default:
                pref = CFPHYPREF_HIGH_BW;
                break;
        }
        mutex_lock(&caifdevs->lock);
        list_add_rcu(&caifd->list, &caifdevs->list);

        strscpy(caifd->layer.name, dev->name,
                sizeof(caifd->layer.name));
        caifd->layer.transmit = transmit;
        res = cfcnfg_add_phy_layer(cfg,
                                dev,
                                &caifd->layer,
                                pref,
                                link_support,
                                caifdev->use_fcs,
                                head_room);
        mutex_unlock(&caifdevs->lock);
        if (rcv_func)
                *rcv_func = receive;
        return res;
}
EXPORT_SYMBOL(caif_enroll_dev);

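/*
 * Illustrative sketch, not from the kernel tree: a link-layer driver for a
 * hypothetical CAIF transport could enroll its net_device roughly like this
 * (the name foo_register() is made up for the example). In-tree callers
 * pass a cfserl link-support layer when fragmentation is used, as
 * caif_device_notify() below does.
 *
 *      static int foo_register(struct net_device *dev)
 *      {
 *              struct caif_dev_common *common = netdev_priv(dev);
 *              struct cflayer *layer;
 *
 *              return caif_enroll_dev(dev, common, NULL, 0, &layer, NULL);
 *      }
 */
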
/* Notify CAIF of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
                              void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct caif_device_entry *caifd = NULL;
        struct caif_dev_common *caifdev;
        struct cfcnfg *cfg;
        struct cflayer *layer, *link_support;
        int head_room = 0;
        struct caif_device_entry_list *caifdevs;
        int res;

        cfg = get_cfcnfg(dev_net(dev));
        caifdevs = caif_device_list(dev_net(dev));

        caifd = caif_get(dev);
        if (caifd == NULL && dev->type != ARPHRD_CAIF)
                return 0;

        switch (what) {
        case NETDEV_REGISTER:
                if (caifd != NULL)
                        break;

                caifdev = netdev_priv(dev);

                link_support = NULL;
                if (caifdev->use_frag) {
                        head_room = 1;
                        link_support = cfserl_create(dev->ifindex,
                                                        caifdev->use_stx);
                        if (!link_support) {
                                pr_warn("Out of memory\n");
                                break;
                        }
                }
                res = caif_enroll_dev(dev, caifdev, link_support, head_room,
                                &layer, NULL);
                if (res)
                        cfserl_release(link_support);
                caifdev->flowctrl = dev_flowctrl;
                break;

        case NETDEV_UP:
                rcu_read_lock();

                caifd = caif_get(dev);
                if (caifd == NULL) {
                        rcu_read_unlock();
                        break;
                }

                caifd->xoff = false;
                cfcnfg_set_phy_state(cfg, &caifd->layer, true);
                rcu_read_unlock();

                break;

        case NETDEV_DOWN:
                rcu_read_lock();

                caifd = caif_get(dev);
                if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
                        rcu_read_unlock();
                        return -EINVAL;
                }

                cfcnfg_set_phy_state(cfg, &caifd->layer, false);
                caifd_hold(caifd);
                rcu_read_unlock();

                caifd->layer.up->ctrlcmd(caifd->layer.up,
                                         _CAIF_CTRLCMD_PHYIF_DOWN_IND,
                                         caifd->layer.id);

                spin_lock_bh(&caifd->flow_lock);

                /*
                 * Replace our xoff-destructor with the original destructor.
                 * We trust that skb->destructor is *always* called before
                 * the skb reference becomes invalid. The hijacked SKB
                 * destructor takes the flow_lock, so manipulating
                 * skb->destructor here should be safe.
                 */
                if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
                        caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

                caifd->xoff = false;
                caifd->xoff_skb_dtor = NULL;
                caifd->xoff_skb = NULL;

                spin_unlock_bh(&caifd->flow_lock);
                caifd_put(caifd);
                break;

        case NETDEV_UNREGISTER:
                mutex_lock(&caifdevs->lock);

                caifd = caif_get(dev);
                if (caifd == NULL) {
                        mutex_unlock(&caifdevs->lock);
                        break;
                }
                list_del_rcu(&caifd->list);

                /*
                 * NETDEV_UNREGISTER is called repeatedly until all reference
                 * counts for the net-device are released. If references to
                 * caifd are still held, simply ignore NETDEV_UNREGISTER and
                 * wait for the next call to NETDEV_UNREGISTER.
                 *
                 * If any packets are in flight down the CAIF stack,
                 * cfcnfg_del_phy_layer will return nonzero.
                 * If no packets are in flight, the CAIF stack associated
                 * with the net-device being unregistered is freed.
                 */

                if (caifd_refcnt_read(caifd) != 0 ||
                        cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

                        pr_info("Wait for device inuse\n");
                        /* Re-enroll the device if the CAIF stack is still in use */
                        list_add_rcu(&caifd->list, &caifdevs->list);
                        mutex_unlock(&caifdevs->lock);
                        break;
                }

                synchronize_rcu();
                dev_put(caifd->netdev);
                free_percpu(caifd->pcpu_refcnt);
                kfree(caifd);

                mutex_unlock(&caifdevs->lock);
                break;
        }
        return 0;
}

static struct notifier_block caif_device_notifier = {
        .notifier_call = caif_device_notify,
        .priority = 0,
};

/* Per-namespace CAIF device handling */
static int caif_init_net(struct net *net)
{
        struct caif_net *caifn = net_generic(net, caif_net_id);
        INIT_LIST_HEAD(&caifn->caifdevs.list);
        mutex_init(&caifn->caifdevs.lock);

        caifn->cfg = cfcnfg_create();
        if (!caifn->cfg)
                return -ENOMEM;

        return 0;
}

static void caif_exit_net(struct net *net)
{
        struct caif_device_entry *caifd, *tmp;
        struct caif_device_entry_list *caifdevs =
            caif_device_list(net);
        struct cfcnfg *cfg = get_cfcnfg(net);

        rtnl_lock();
        mutex_lock(&caifdevs->lock);

        list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
                int i = 0;
                list_del_rcu(&caifd->list);
                cfcnfg_set_phy_state(cfg, &caifd->layer, false);

                while (i < 10 &&
                        (caifd_refcnt_read(caifd) != 0 ||
                        cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

                        pr_info("Wait for device inuse\n");
                        msleep(250);
                        i++;
                }
                synchronize_rcu();
                dev_put(caifd->netdev);
                free_percpu(caifd->pcpu_refcnt);
                kfree(caifd);
        }
        cfcnfg_remove(cfg);

        mutex_unlock(&caifdevs->lock);
        rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
        .init = caif_init_net,
        .exit = caif_exit_net,
        .id   = &caif_net_id,
        .size = sizeof(struct caif_net),
};

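/*
 * Note, not part of the kernel source: because caif_net_ops sets both .id
 * and .size, the pernet core allocates a struct caif_net for every network
 * namespace and runs caif_init_net()/caif_exit_net() against it;
 * net_generic(net, caif_net_id) in get_cfcnfg() and caif_device_list()
 * then returns that per-namespace instance.
 */
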
/* Initialize the CAIF device list */
static int __init caif_device_init(void)
{
        int result;

        result = register_pernet_subsys(&caif_net_ops);

        if (result)
                return result;

        register_netdevice_notifier(&caif_device_notifier);
        dev_add_pack(&caif_packet_type);

        return result;
}

static void __exit caif_device_exit(void)
{
        unregister_netdevice_notifier(&caif_device_notifier);
        dev_remove_pack(&caif_packet_type);
        unregister_pernet_subsys(&caif_net_ops);
}

module_init(caif_device_init);
module_exit(caif_device_exit);
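
The v5.4 revision of the same file follows. Compared with v6.9.4 it lacks MODULE_DESCRIPTION(), caif_enroll_dev() returns void so the NETDEV_REGISTER handler cannot detect or clean up an enrollment failure, strlcpy() is used instead of strscpy(), the xoff flag is written with 0/1 rather than false/true, and list_for_each_entry_rcu() is called without the lockdep condition argument.
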
v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
 *  and Sakari Ailus <sakari.ailus@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
        struct cflayer layer;
        struct list_head list;
        struct net_device *netdev;
        int __percpu *pcpu_refcnt;
        spinlock_t flow_lock;
        struct sk_buff *xoff_skb;
        void (*xoff_skb_dtor)(struct sk_buff *skb);
        bool xoff;
};

struct caif_device_entry_list {
        struct list_head list;
        /* Protects simultaneous deletes in the list */
        struct mutex lock;
};

struct caif_net {
        struct cfcnfg *cfg;
        struct caif_device_entry_list caifdevs;
};

static unsigned int caif_net_id;
static int q_high = 50; /* Percent */

struct cfcnfg *get_cfcnfg(struct net *net)
{
        struct caif_net *caifn;
        caifn = net_generic(net, caif_net_id);
        return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
        struct caif_net *caifn;
        caifn = net_generic(net, caif_net_id);
        return &caifn->caifdevs;
}

static void caifd_put(struct caif_device_entry *e)
{
        this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
        this_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
        int i, refcnt = 0;
        for_each_possible_cpu(i)
                refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
        return refcnt;
}

/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
        struct caif_device_entry *caifd;

        caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
        if (!caifd)
                return NULL;
        caifd->pcpu_refcnt = alloc_percpu(int);
        if (!caifd->pcpu_refcnt) {
                kfree(caifd);
                return NULL;
        }
        caifd->netdev = dev;
        dev_hold(dev);
        return caifd;
}

static struct caif_device_entry *caif_get(struct net_device *dev)
{
        struct caif_device_entry_list *caifdevs =
            caif_device_list(dev_net(dev));
        struct caif_device_entry *caifd;

        list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
                if (caifd->netdev == dev)
                        return caifd;
        }
        return NULL;
}

static void caif_flow_cb(struct sk_buff *skb)
{
        struct caif_device_entry *caifd;
        void (*dtor)(struct sk_buff *skb) = NULL;
        bool send_xoff;

        WARN_ON(skb->dev == NULL);

        rcu_read_lock();
        caifd = caif_get(skb->dev);

        WARN_ON(caifd == NULL);
        if (!caifd) {
                rcu_read_unlock();
                return;
        }

        caifd_hold(caifd);
        rcu_read_unlock();

        spin_lock_bh(&caifd->flow_lock);
        send_xoff = caifd->xoff;
        caifd->xoff = 0;
        dtor = caifd->xoff_skb_dtor;

        if (WARN_ON(caifd->xoff_skb != skb))
                skb = NULL;

        caifd->xoff_skb = NULL;
        caifd->xoff_skb_dtor = NULL;

        spin_unlock_bh(&caifd->flow_lock);

        if (dtor && skb)
                dtor(skb);

        if (send_xoff)
                caifd->layer.up->
                        ctrlcmd(caifd->layer.up,
                                _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
                                caifd->layer.id);
        caifd_put(caifd);
}

static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
        int err, high = 0, qlen = 0;
        struct caif_device_entry *caifd =
            container_of(layer, struct caif_device_entry, layer);
        struct sk_buff *skb;
        struct netdev_queue *txq;

        rcu_read_lock_bh();

        skb = cfpkt_tonative(pkt);
        skb->dev = caifd->netdev;
        skb_reset_network_header(skb);
        skb->protocol = htons(ETH_P_CAIF);

        /* Check if we need to handle xoff */
        if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
                goto noxoff;

        if (unlikely(caifd->xoff))
                goto noxoff;

        if (likely(!netif_queue_stopped(caifd->netdev))) {
                struct Qdisc *sch;

                /* If we run with a TX queue, check if the queue is too long */
                txq = netdev_get_tx_queue(skb->dev, 0);
                sch = rcu_dereference_bh(txq->qdisc);
                if (likely(qdisc_is_empty(sch)))
                        goto noxoff;

                /* The explicit qdisc length can only be checked for !NOLOCK
                 * qdiscs; otherwise always set flow off.
                 */
                high = (caifd->netdev->tx_queue_len * q_high) / 100;
                if (!(sch->flags & TCQ_F_NOLOCK) && likely(sch->q.qlen < high))
                        goto noxoff;
        }

        /* Hold lock while accessing xoff */
        spin_lock_bh(&caifd->flow_lock);
        if (caifd->xoff) {
                spin_unlock_bh(&caifd->flow_lock);
                goto noxoff;
        }

        /*
         * Handle flow off: we do this by temporarily hijacking this
         * skb's destructor function and replacing it with our own
         * flow-on callback. The callback will set flow on and call
         * the original destructor.
         */

        pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
                        netif_queue_stopped(caifd->netdev),
                        qlen, high);
        caifd->xoff = 1;
        caifd->xoff_skb = skb;
        caifd->xoff_skb_dtor = skb->destructor;
        skb->destructor = caif_flow_cb;
        spin_unlock_bh(&caifd->flow_lock);

        caifd->layer.up->ctrlcmd(caifd->layer.up,
                                        _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
                                        caifd->layer.id);
noxoff:
        rcu_read_unlock_bh();

        err = dev_queue_xmit(skb);
        if (err > 0)
                err = -EIO;

        return err;
}

/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *pkttype, struct net_device *orig_dev)
{
        struct cfpkt *pkt;
        struct caif_device_entry *caifd;
        int err;

        pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

        rcu_read_lock();
        caifd = caif_get(dev);

        if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
                        !netif_oper_up(caifd->netdev)) {
                rcu_read_unlock();
                kfree_skb(skb);
                return NET_RX_DROP;
        }

        /* Hold reference to netdevice while using CAIF stack */
        caifd_hold(caifd);
        rcu_read_unlock();

        err = caifd->layer.up->receive(caifd->layer.up, pkt);

        /* For -EILSEQ the packet is not freed, so free it now */
        if (err == -EILSEQ)
                cfpkt_destroy(pkt);

        /* Release reference to stack upwards */
        caifd_put(caifd);

        if (err != 0)
                err = NET_RX_DROP;
        return err;
}

static struct packet_type caif_packet_type __read_mostly = {
        .type = cpu_to_be16(ETH_P_CAIF),
        .func = receive,
};

static void dev_flowctrl(struct net_device *dev, int on)
{
        struct caif_device_entry *caifd;

        rcu_read_lock();

        caifd = caif_get(dev);
        if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
                rcu_read_unlock();
                return;
        }

        caifd_hold(caifd);
        rcu_read_unlock();

        caifd->layer.up->ctrlcmd(caifd->layer.up,
                                 on ?
                                 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
                                 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
                                 caifd->layer.id);
        caifd_put(caifd);
}

void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
                     struct cflayer *link_support, int head_room,
                     struct cflayer **layer,
                     int (**rcv_func)(struct sk_buff *, struct net_device *,
                                      struct packet_type *,
                                      struct net_device *))
{
        struct caif_device_entry *caifd;
        enum cfcnfg_phy_preference pref;
        struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
        struct caif_device_entry_list *caifdevs;

        caifdevs = caif_device_list(dev_net(dev));
        caifd = caif_device_alloc(dev);
        if (!caifd)
                return;
        *layer = &caifd->layer;
        spin_lock_init(&caifd->flow_lock);

        switch (caifdev->link_select) {
        case CAIF_LINK_HIGH_BANDW:
                pref = CFPHYPREF_HIGH_BW;
                break;
        case CAIF_LINK_LOW_LATENCY:
                pref = CFPHYPREF_LOW_LAT;
                break;
        default:
                pref = CFPHYPREF_HIGH_BW;
                break;
        }
        mutex_lock(&caifdevs->lock);
        list_add_rcu(&caifd->list, &caifdevs->list);

        strlcpy(caifd->layer.name, dev->name,
                sizeof(caifd->layer.name));
        caifd->layer.transmit = transmit;
        cfcnfg_add_phy_layer(cfg,
                                dev,
                                &caifd->layer,
                                pref,
                                link_support,
                                caifdev->use_fcs,
                                head_room);
        mutex_unlock(&caifdevs->lock);
        if (rcv_func)
                *rcv_func = receive;
}
EXPORT_SYMBOL(caif_enroll_dev);

/* Notify CAIF of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
                              void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct caif_device_entry *caifd = NULL;
        struct caif_dev_common *caifdev;
        struct cfcnfg *cfg;
        struct cflayer *layer, *link_support;
        int head_room = 0;
        struct caif_device_entry_list *caifdevs;

        cfg = get_cfcnfg(dev_net(dev));
        caifdevs = caif_device_list(dev_net(dev));

        caifd = caif_get(dev);
        if (caifd == NULL && dev->type != ARPHRD_CAIF)
                return 0;

        switch (what) {
        case NETDEV_REGISTER:
                if (caifd != NULL)
                        break;

                caifdev = netdev_priv(dev);

                link_support = NULL;
                if (caifdev->use_frag) {
                        head_room = 1;
                        link_support = cfserl_create(dev->ifindex,
                                                        caifdev->use_stx);
                        if (!link_support) {
                                pr_warn("Out of memory\n");
                                break;
                        }
                }
                caif_enroll_dev(dev, caifdev, link_support, head_room,
                                &layer, NULL);
                caifdev->flowctrl = dev_flowctrl;
                break;

        case NETDEV_UP:
                rcu_read_lock();

                caifd = caif_get(dev);
                if (caifd == NULL) {
                        rcu_read_unlock();
                        break;
                }

                caifd->xoff = 0;
                cfcnfg_set_phy_state(cfg, &caifd->layer, true);
                rcu_read_unlock();

                break;

        case NETDEV_DOWN:
                rcu_read_lock();

                caifd = caif_get(dev);
                if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
                        rcu_read_unlock();
                        return -EINVAL;
                }

                cfcnfg_set_phy_state(cfg, &caifd->layer, false);
                caifd_hold(caifd);
                rcu_read_unlock();

                caifd->layer.up->ctrlcmd(caifd->layer.up,
                                         _CAIF_CTRLCMD_PHYIF_DOWN_IND,
                                         caifd->layer.id);

                spin_lock_bh(&caifd->flow_lock);

                /*
                 * Replace our xoff-destructor with the original destructor.
                 * We trust that skb->destructor is *always* called before
                 * the skb reference becomes invalid. The hijacked SKB
                 * destructor takes the flow_lock, so manipulating
                 * skb->destructor here should be safe.
                 */
                if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
                        caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

                caifd->xoff = 0;
                caifd->xoff_skb_dtor = NULL;
                caifd->xoff_skb = NULL;

                spin_unlock_bh(&caifd->flow_lock);
                caifd_put(caifd);
                break;

        case NETDEV_UNREGISTER:
                mutex_lock(&caifdevs->lock);

                caifd = caif_get(dev);
                if (caifd == NULL) {
                        mutex_unlock(&caifdevs->lock);
                        break;
                }
                list_del_rcu(&caifd->list);

                /*
                 * NETDEV_UNREGISTER is called repeatedly until all reference
                 * counts for the net-device are released. If references to
                 * caifd are still held, simply ignore NETDEV_UNREGISTER and
                 * wait for the next call to NETDEV_UNREGISTER.
                 *
                 * If any packets are in flight down the CAIF stack,
                 * cfcnfg_del_phy_layer will return nonzero.
                 * If no packets are in flight, the CAIF stack associated
                 * with the net-device being unregistered is freed.
                 */

                if (caifd_refcnt_read(caifd) != 0 ||
                        cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

                        pr_info("Wait for device inuse\n");
                        /* Re-enroll the device if the CAIF stack is still in use */
                        list_add_rcu(&caifd->list, &caifdevs->list);
                        mutex_unlock(&caifdevs->lock);
                        break;
                }

                synchronize_rcu();
                dev_put(caifd->netdev);
                free_percpu(caifd->pcpu_refcnt);
                kfree(caifd);

                mutex_unlock(&caifdevs->lock);
                break;
        }
        return 0;
}

static struct notifier_block caif_device_notifier = {
        .notifier_call = caif_device_notify,
        .priority = 0,
};

/* Per-namespace CAIF device handling */
static int caif_init_net(struct net *net)
{
        struct caif_net *caifn = net_generic(net, caif_net_id);
        INIT_LIST_HEAD(&caifn->caifdevs.list);
        mutex_init(&caifn->caifdevs.lock);

        caifn->cfg = cfcnfg_create();
        if (!caifn->cfg)
                return -ENOMEM;

        return 0;
}

static void caif_exit_net(struct net *net)
{
        struct caif_device_entry *caifd, *tmp;
        struct caif_device_entry_list *caifdevs =
            caif_device_list(net);
        struct cfcnfg *cfg = get_cfcnfg(net);

        rtnl_lock();
        mutex_lock(&caifdevs->lock);

        list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
                int i = 0;
                list_del_rcu(&caifd->list);
                cfcnfg_set_phy_state(cfg, &caifd->layer, false);

                while (i < 10 &&
                        (caifd_refcnt_read(caifd) != 0 ||
                        cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

                        pr_info("Wait for device inuse\n");
                        msleep(250);
                        i++;
                }
                synchronize_rcu();
                dev_put(caifd->netdev);
                free_percpu(caifd->pcpu_refcnt);
                kfree(caifd);
        }
        cfcnfg_remove(cfg);

        mutex_unlock(&caifdevs->lock);
        rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
        .init = caif_init_net,
        .exit = caif_exit_net,
        .id   = &caif_net_id,
        .size = sizeof(struct caif_net),
};

/* Initialize the CAIF device list */
static int __init caif_device_init(void)
{
        int result;

        result = register_pernet_subsys(&caif_net_ops);

        if (result)
                return result;

        register_netdevice_notifier(&caif_device_notifier);
        dev_add_pack(&caif_packet_type);

        return result;
}

static void __exit caif_device_exit(void)
{
        unregister_netdevice_notifier(&caif_device_notifier);
        dev_remove_pack(&caif_packet_type);
        unregister_pernet_subsys(&caif_net_ops);
}

module_init(caif_device_init);
module_exit(caif_device_exit);