v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * CAIF Interface registration.
  4 * Copyright (C) ST-Ericsson AB 2010
  5 * Author:	Sjur Brendeland
  6 *
  7 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
  8 *  and Sakari Ailus <sakari.ailus@nokia.com>
  9 */
 10
 11#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
 12
 13#include <linux/kernel.h>
 14#include <linux/if_arp.h>
 15#include <linux/net.h>
 16#include <linux/netdevice.h>
 17#include <linux/mutex.h>
 18#include <linux/module.h>
 19#include <linux/spinlock.h>
 20#include <net/netns/generic.h>
 21#include <net/net_namespace.h>
 22#include <net/pkt_sched.h>
 23#include <net/caif/caif_device.h>
 24#include <net/caif/caif_layer.h>
 25#include <net/caif/caif_dev.h>
 26#include <net/caif/cfpkt.h>
 27#include <net/caif/cfcnfg.h>
 28#include <net/caif/cfserl.h>
 29
 30MODULE_LICENSE("GPL");
 31
 32/* Used for local tracking of the CAIF net devices */
 33struct caif_device_entry {
 34	struct cflayer layer;
 35	struct list_head list;
 36	struct net_device *netdev;
 37	int __percpu *pcpu_refcnt;
 38	spinlock_t flow_lock;
 39	struct sk_buff *xoff_skb;
 40	void (*xoff_skb_dtor)(struct sk_buff *skb);
 41	bool xoff;
 42};
 43
 44struct caif_device_entry_list {
 45	struct list_head list;
  46	/* Protects simultaneous deletes in the list */
 47	struct mutex lock;
 48};
 49
 50struct caif_net {
 51	struct cfcnfg *cfg;
 52	struct caif_device_entry_list caifdevs;
 53};
 54
 55static unsigned int caif_net_id;
 56static int q_high = 50; /* Percent */
 57
 58struct cfcnfg *get_cfcnfg(struct net *net)
 59{
 60	struct caif_net *caifn;
 61	caifn = net_generic(net, caif_net_id);
 62	return caifn->cfg;
 63}
 64EXPORT_SYMBOL(get_cfcnfg);
 65
 66static struct caif_device_entry_list *caif_device_list(struct net *net)
 67{
 68	struct caif_net *caifn;
 69	caifn = net_generic(net, caif_net_id);
 70	return &caifn->caifdevs;
 71}
 72
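    /*
     * Lightweight per-CPU reference counting for a caif_device_entry.
     * caifd_hold()/caifd_put() only touch the local CPU counter;
     * caifd_refcnt_read() sums over all CPUs and is used on the slow
     * (unregister/teardown) paths to check whether the entry is still in use.
     */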
 73static void caifd_put(struct caif_device_entry *e)
 74{
 75	this_cpu_dec(*e->pcpu_refcnt);
 76}
 77
 78static void caifd_hold(struct caif_device_entry *e)
 79{
 80	this_cpu_inc(*e->pcpu_refcnt);
 81}
 82
 83static int caifd_refcnt_read(struct caif_device_entry *e)
 84{
 85	int i, refcnt = 0;
 86	for_each_possible_cpu(i)
 87		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
 88	return refcnt;
 89}
 90
 91/* Allocate new CAIF device. */
 92static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
 93{
 94	struct caif_device_entry *caifd;
 95
 96	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
 97	if (!caifd)
 98		return NULL;
 99	caifd->pcpu_refcnt = alloc_percpu(int);
100	if (!caifd->pcpu_refcnt) {
101		kfree(caifd);
102		return NULL;
103	}
104	caifd->netdev = dev;
105	dev_hold(dev);
106	return caifd;
107}
108
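    /*
     * Look up the caif_device_entry for a net_device. Callers must hold
     * rcu_read_lock() or the RTNL lock while the entry is used.
     */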
109static struct caif_device_entry *caif_get(struct net_device *dev)
110{
111	struct caif_device_entry_list *caifdevs =
112	    caif_device_list(dev_net(dev));
113	struct caif_device_entry *caifd;
114
115	list_for_each_entry_rcu(caifd, &caifdevs->list, list,
116				lockdep_rtnl_is_held()) {
117		if (caifd->netdev == dev)
118			return caifd;
119	}
120	return NULL;
121}
122
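    /*
     * skb destructor installed by transmit() when flow is turned off.
     * It runs the original destructor and signals FLOW_ON up the CAIF
     * stack once the queued skb is finally released.
     */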
123static void caif_flow_cb(struct sk_buff *skb)
124{
125	struct caif_device_entry *caifd;
126	void (*dtor)(struct sk_buff *skb) = NULL;
127	bool send_xoff;
128
129	WARN_ON(skb->dev == NULL);
130
131	rcu_read_lock();
132	caifd = caif_get(skb->dev);
133
134	WARN_ON(caifd == NULL);
135	if (!caifd) {
136		rcu_read_unlock();
137		return;
138	}
139
140	caifd_hold(caifd);
141	rcu_read_unlock();
142
143	spin_lock_bh(&caifd->flow_lock);
144	send_xoff = caifd->xoff;
145	caifd->xoff = false;
146	dtor = caifd->xoff_skb_dtor;
147
148	if (WARN_ON(caifd->xoff_skb != skb))
149		skb = NULL;
150
151	caifd->xoff_skb = NULL;
152	caifd->xoff_skb_dtor = NULL;
153
154	spin_unlock_bh(&caifd->flow_lock);
155
156	if (dtor && skb)
157		dtor(skb);
158
159	if (send_xoff)
160		caifd->layer.up->
161			ctrlcmd(caifd->layer.up,
162				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
163				caifd->layer.id);
164	caifd_put(caifd);
165}
166
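    /*
     * Transmit a CAIF packet on the underlying net device. If the device
     * has a TX queue that is stopped or filled beyond q_high percent of
     * tx_queue_len, flow is turned off towards the CAIF stack and turned
     * back on from caif_flow_cb() when the packet is released.
     */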
167static int transmit(struct cflayer *layer, struct cfpkt *pkt)
168{
169	int err, high = 0, qlen = 0;
170	struct caif_device_entry *caifd =
171	    container_of(layer, struct caif_device_entry, layer);
172	struct sk_buff *skb;
173	struct netdev_queue *txq;
174
175	rcu_read_lock_bh();
176
177	skb = cfpkt_tonative(pkt);
178	skb->dev = caifd->netdev;
179	skb_reset_network_header(skb);
180	skb->protocol = htons(ETH_P_CAIF);
181
182	/* Check if we need to handle xoff */
183	if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
184		goto noxoff;
185
186	if (unlikely(caifd->xoff))
187		goto noxoff;
188
189	if (likely(!netif_queue_stopped(caifd->netdev))) {
190		struct Qdisc *sch;
191
 192		/* If we run with a TX queue, check if the queue is too long */
193		txq = netdev_get_tx_queue(skb->dev, 0);
194		sch = rcu_dereference_bh(txq->qdisc);
195		if (likely(qdisc_is_empty(sch)))
196			goto noxoff;
197
 198		/* The qdisc length can only be checked explicitly for
 199		 * !NOLOCK qdiscs; otherwise always set flow off.
 200		 */
201		high = (caifd->netdev->tx_queue_len * q_high) / 100;
202		if (!(sch->flags & TCQ_F_NOLOCK) && likely(sch->q.qlen < high))
203			goto noxoff;
204	}
205
206	/* Hold lock while accessing xoff */
207	spin_lock_bh(&caifd->flow_lock);
208	if (caifd->xoff) {
209		spin_unlock_bh(&caifd->flow_lock);
210		goto noxoff;
211	}
212
 213	/*
 214	 * Handle flow off: we do this by temporarily hijacking this
 215	 * skb's destructor function and replacing it with our own
 216	 * flow-on callback. The callback will set flow on and call
 217	 * the original destructor.
 218	 */
219
220	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
221			netif_queue_stopped(caifd->netdev),
222			qlen, high);
223	caifd->xoff = true;
224	caifd->xoff_skb = skb;
225	caifd->xoff_skb_dtor = skb->destructor;
226	skb->destructor = caif_flow_cb;
227	spin_unlock_bh(&caifd->flow_lock);
228
229	caifd->layer.up->ctrlcmd(caifd->layer.up,
230					_CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
231					caifd->layer.id);
232noxoff:
233	rcu_read_unlock_bh();
234
235	err = dev_queue_xmit(skb);
236	if (err > 0)
237		err = -EIO;
238
239	return err;
240}
241
242/*
243 * Stuff received packets into the CAIF stack.
244 * On error, returns non-zero and releases the skb.
245 */
246static int receive(struct sk_buff *skb, struct net_device *dev,
247		   struct packet_type *pkttype, struct net_device *orig_dev)
248{
249	struct cfpkt *pkt;
250	struct caif_device_entry *caifd;
251	int err;
252
253	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
254
255	rcu_read_lock();
256	caifd = caif_get(dev);
257
258	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
259			!netif_oper_up(caifd->netdev)) {
260		rcu_read_unlock();
261		kfree_skb(skb);
262		return NET_RX_DROP;
263	}
264
265	/* Hold reference to netdevice while using CAIF stack */
266	caifd_hold(caifd);
267	rcu_read_unlock();
268
269	err = caifd->layer.up->receive(caifd->layer.up, pkt);
270
271	/* For -EILSEQ the packet is not freed so free it now */
272	if (err == -EILSEQ)
273		cfpkt_destroy(pkt);
274
275	/* Release reference to stack upwards */
276	caifd_put(caifd);
277
278	if (err != 0)
279		err = NET_RX_DROP;
280	return err;
281}
282
283static struct packet_type caif_packet_type __read_mostly = {
284	.type = cpu_to_be16(ETH_P_CAIF),
285	.func = receive,
286};
287
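    /*
     * Forward a flow-control notification from the device driver up the
     * CAIF stack: FLOW_ON when @on is non-zero, FLOW_OFF otherwise.
     */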
288static void dev_flowctrl(struct net_device *dev, int on)
289{
290	struct caif_device_entry *caifd;
291
292	rcu_read_lock();
293
294	caifd = caif_get(dev);
295	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
296		rcu_read_unlock();
297		return;
298	}
299
300	caifd_hold(caifd);
301	rcu_read_unlock();
302
303	caifd->layer.up->ctrlcmd(caifd->layer.up,
304				 on ?
305				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
306				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
307				 caifd->layer.id);
308	caifd_put(caifd);
309}
310
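    /*
     * Enroll a net device with the CAIF stack: allocate a tracking entry,
     * add it to the per-namespace device list and register it as a physical
     * layer with the CAIF configuration. On success *layer points to the
     * new layer and, if requested, *rcv_func is set to the CAIF packet
     * receive handler. Returns 0 on success or a negative errno.
     */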
311int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
312		     struct cflayer *link_support, int head_room,
313		     struct cflayer **layer,
314		     int (**rcv_func)(struct sk_buff *, struct net_device *,
315				      struct packet_type *,
316				      struct net_device *))
317{
318	struct caif_device_entry *caifd;
319	enum cfcnfg_phy_preference pref;
320	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
321	struct caif_device_entry_list *caifdevs;
322	int res;
323
324	caifdevs = caif_device_list(dev_net(dev));
325	caifd = caif_device_alloc(dev);
326	if (!caifd)
327		return -ENOMEM;
328	*layer = &caifd->layer;
329	spin_lock_init(&caifd->flow_lock);
330
331	switch (caifdev->link_select) {
332	case CAIF_LINK_HIGH_BANDW:
333		pref = CFPHYPREF_HIGH_BW;
334		break;
335	case CAIF_LINK_LOW_LATENCY:
336		pref = CFPHYPREF_LOW_LAT;
337		break;
338	default:
339		pref = CFPHYPREF_HIGH_BW;
340		break;
341	}
342	mutex_lock(&caifdevs->lock);
343	list_add_rcu(&caifd->list, &caifdevs->list);
344
345	strscpy(caifd->layer.name, dev->name,
346		sizeof(caifd->layer.name));
347	caifd->layer.transmit = transmit;
348	res = cfcnfg_add_phy_layer(cfg,
349				dev,
350				&caifd->layer,
351				pref,
352				link_support,
353				caifdev->use_fcs,
354				head_room);
355	mutex_unlock(&caifdevs->lock);
356	if (rcv_func)
357		*rcv_func = receive;
358	return res;
359}
360EXPORT_SYMBOL(caif_enroll_dev);
361
362/* notify Caif of device events */
363static int caif_device_notify(struct notifier_block *me, unsigned long what,
364			      void *ptr)
365{
366	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
367	struct caif_device_entry *caifd = NULL;
368	struct caif_dev_common *caifdev;
369	struct cfcnfg *cfg;
370	struct cflayer *layer, *link_support;
371	int head_room = 0;
372	struct caif_device_entry_list *caifdevs;
373	int res;
374
375	cfg = get_cfcnfg(dev_net(dev));
376	caifdevs = caif_device_list(dev_net(dev));
377
378	caifd = caif_get(dev);
379	if (caifd == NULL && dev->type != ARPHRD_CAIF)
380		return 0;
381
382	switch (what) {
383	case NETDEV_REGISTER:
384		if (caifd != NULL)
385			break;
386
387		caifdev = netdev_priv(dev);
388
389		link_support = NULL;
390		if (caifdev->use_frag) {
391			head_room = 1;
392			link_support = cfserl_create(dev->ifindex,
393							caifdev->use_stx);
394			if (!link_support) {
395				pr_warn("Out of memory\n");
396				break;
397			}
398		}
399		res = caif_enroll_dev(dev, caifdev, link_support, head_room,
400				&layer, NULL);
401		if (res)
402			cfserl_release(link_support);
403		caifdev->flowctrl = dev_flowctrl;
404		break;
405
406	case NETDEV_UP:
407		rcu_read_lock();
408
409		caifd = caif_get(dev);
410		if (caifd == NULL) {
411			rcu_read_unlock();
412			break;
413		}
414
415		caifd->xoff = false;
416		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
417		rcu_read_unlock();
418
419		break;
420
421	case NETDEV_DOWN:
422		rcu_read_lock();
423
424		caifd = caif_get(dev);
425		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
426			rcu_read_unlock();
427			return -EINVAL;
428		}
429
430		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
431		caifd_hold(caifd);
432		rcu_read_unlock();
433
434		caifd->layer.up->ctrlcmd(caifd->layer.up,
435					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
436					 caifd->layer.id);
437
438		spin_lock_bh(&caifd->flow_lock);
439
 440		/*
 441		 * Replace our xoff-destructor with the original destructor.
 442		 * We trust that skb->destructor is *always* called before
 443		 * the skb reference becomes invalid. The hijacked SKB destructor
 444		 * takes the flow_lock, so manipulating skb->destructor here
 445		 * should be safe.
 446		 */
447		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
448			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;
449
450		caifd->xoff = false;
451		caifd->xoff_skb_dtor = NULL;
452		caifd->xoff_skb = NULL;
453
454		spin_unlock_bh(&caifd->flow_lock);
455		caifd_put(caifd);
456		break;
457
458	case NETDEV_UNREGISTER:
459		mutex_lock(&caifdevs->lock);
460
461		caifd = caif_get(dev);
462		if (caifd == NULL) {
463			mutex_unlock(&caifdevs->lock);
464			break;
465		}
466		list_del_rcu(&caifd->list);
467
468		/*
469		 * NETDEV_UNREGISTER is called repeatedly until all reference
470		 * counts for the net-device are released. If references to
 471		 * caifd are still held, simply ignore NETDEV_UNREGISTER and wait for
472		 * the next call to NETDEV_UNREGISTER.
473		 *
474		 * If any packets are in flight down the CAIF Stack,
475		 * cfcnfg_del_phy_layer will return nonzero.
476		 * If no packets are in flight, the CAIF Stack associated
 477		 * with the net-device being unregistered is freed.
478		 */
479
480		if (caifd_refcnt_read(caifd) != 0 ||
481			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {
482
483			pr_info("Wait for device inuse\n");
 484			/* Enroll device if the CAIF stack is still in use */
485			list_add_rcu(&caifd->list, &caifdevs->list);
486			mutex_unlock(&caifdevs->lock);
487			break;
488		}
489
490		synchronize_rcu();
491		dev_put(caifd->netdev);
492		free_percpu(caifd->pcpu_refcnt);
493		kfree(caifd);
494
495		mutex_unlock(&caifdevs->lock);
496		break;
497	}
498	return 0;
499}
500
501static struct notifier_block caif_device_notifier = {
502	.notifier_call = caif_device_notify,
503	.priority = 0,
504};
505
506/* Per-namespace Caif devices handling */
507static int caif_init_net(struct net *net)
508{
509	struct caif_net *caifn = net_generic(net, caif_net_id);
510	INIT_LIST_HEAD(&caifn->caifdevs.list);
511	mutex_init(&caifn->caifdevs.lock);
512
513	caifn->cfg = cfcnfg_create();
514	if (!caifn->cfg)
515		return -ENOMEM;
516
517	return 0;
518}
519
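    /*
     * Per-namespace teardown: unlink every CAIF device, waiting briefly
     * (up to 10 x 250 ms) for outstanding references and in-flight packets
     * before freeing each entry, then remove the CAIF configuration.
     */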
520static void caif_exit_net(struct net *net)
521{
522	struct caif_device_entry *caifd, *tmp;
523	struct caif_device_entry_list *caifdevs =
524	    caif_device_list(net);
525	struct cfcnfg *cfg =  get_cfcnfg(net);
526
527	rtnl_lock();
528	mutex_lock(&caifdevs->lock);
529
530	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
531		int i = 0;
532		list_del_rcu(&caifd->list);
533		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
534
535		while (i < 10 &&
536			(caifd_refcnt_read(caifd) != 0 ||
537			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {
538
539			pr_info("Wait for device inuse\n");
540			msleep(250);
541			i++;
542		}
543		synchronize_rcu();
544		dev_put(caifd->netdev);
545		free_percpu(caifd->pcpu_refcnt);
546		kfree(caifd);
547	}
548	cfcnfg_remove(cfg);
549
550	mutex_unlock(&caifdevs->lock);
551	rtnl_unlock();
552}
553
554static struct pernet_operations caif_net_ops = {
555	.init = caif_init_net,
556	.exit = caif_exit_net,
557	.id   = &caif_net_id,
558	.size = sizeof(struct caif_net),
559};
560
561/* Initialize Caif devices list */
562static int __init caif_device_init(void)
563{
564	int result;
565
566	result = register_pernet_subsys(&caif_net_ops);
567
568	if (result)
569		return result;
570
571	register_netdevice_notifier(&caif_device_notifier);
572	dev_add_pack(&caif_packet_type);
573
574	return result;
575}
576
577static void __exit caif_device_exit(void)
578{
579	unregister_netdevice_notifier(&caif_device_notifier);
580	dev_remove_pack(&caif_packet_type);
581	unregister_pernet_subsys(&caif_net_ops);
582}
583
584module_init(caif_device_init);
585module_exit(caif_device_exit);
v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * CAIF Interface registration.
  4 * Copyright (C) ST-Ericsson AB 2010
  5 * Author:	Sjur Brendeland
  6 *
  7 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
  8 *  and Sakari Ailus <sakari.ailus@nokia.com>
  9 */
 10
 11#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
 12
 13#include <linux/kernel.h>
 14#include <linux/if_arp.h>
 15#include <linux/net.h>
 16#include <linux/netdevice.h>
 17#include <linux/mutex.h>
 18#include <linux/module.h>
 19#include <linux/spinlock.h>
 20#include <net/netns/generic.h>
 21#include <net/net_namespace.h>
 22#include <net/pkt_sched.h>
 23#include <net/caif/caif_device.h>
 24#include <net/caif/caif_layer.h>
 25#include <net/caif/caif_dev.h>
 26#include <net/caif/cfpkt.h>
 27#include <net/caif/cfcnfg.h>
 28#include <net/caif/cfserl.h>
 29
 30MODULE_LICENSE("GPL");
 31
 32/* Used for local tracking of the CAIF net devices */
 33struct caif_device_entry {
 34	struct cflayer layer;
 35	struct list_head list;
 36	struct net_device *netdev;
 37	int __percpu *pcpu_refcnt;
 38	spinlock_t flow_lock;
 39	struct sk_buff *xoff_skb;
 40	void (*xoff_skb_dtor)(struct sk_buff *skb);
 41	bool xoff;
 42};
 43
 44struct caif_device_entry_list {
 45	struct list_head list;
  46	/* Protects simultaneous deletes in the list */
 47	struct mutex lock;
 48};
 49
 50struct caif_net {
 51	struct cfcnfg *cfg;
 52	struct caif_device_entry_list caifdevs;
 53};
 54
 55static unsigned int caif_net_id;
 56static int q_high = 50; /* Percent */
 57
 58struct cfcnfg *get_cfcnfg(struct net *net)
 59{
 60	struct caif_net *caifn;
 61	caifn = net_generic(net, caif_net_id);
 62	return caifn->cfg;
 63}
 64EXPORT_SYMBOL(get_cfcnfg);
 65
 66static struct caif_device_entry_list *caif_device_list(struct net *net)
 67{
 68	struct caif_net *caifn;
 69	caifn = net_generic(net, caif_net_id);
 70	return &caifn->caifdevs;
 71}
 72
 73static void caifd_put(struct caif_device_entry *e)
 74{
 75	this_cpu_dec(*e->pcpu_refcnt);
 76}
 77
 78static void caifd_hold(struct caif_device_entry *e)
 79{
 80	this_cpu_inc(*e->pcpu_refcnt);
 81}
 82
 83static int caifd_refcnt_read(struct caif_device_entry *e)
 84{
 85	int i, refcnt = 0;
 86	for_each_possible_cpu(i)
 87		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
 88	return refcnt;
 89}
 90
 91/* Allocate new CAIF device. */
 92static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
 93{
 94	struct caif_device_entry *caifd;
 95
 96	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
 97	if (!caifd)
 98		return NULL;
 99	caifd->pcpu_refcnt = alloc_percpu(int);
100	if (!caifd->pcpu_refcnt) {
101		kfree(caifd);
102		return NULL;
103	}
104	caifd->netdev = dev;
105	dev_hold(dev);
106	return caifd;
107}
108
109static struct caif_device_entry *caif_get(struct net_device *dev)
110{
111	struct caif_device_entry_list *caifdevs =
112	    caif_device_list(dev_net(dev));
113	struct caif_device_entry *caifd;
114
115	list_for_each_entry_rcu(caifd, &caifdevs->list, list,
116				lockdep_rtnl_is_held()) {
117		if (caifd->netdev == dev)
118			return caifd;
119	}
120	return NULL;
121}
122
123static void caif_flow_cb(struct sk_buff *skb)
124{
125	struct caif_device_entry *caifd;
126	void (*dtor)(struct sk_buff *skb) = NULL;
127	bool send_xoff;
128
129	WARN_ON(skb->dev == NULL);
130
131	rcu_read_lock();
132	caifd = caif_get(skb->dev);
133
134	WARN_ON(caifd == NULL);
135	if (!caifd) {
136		rcu_read_unlock();
137		return;
138	}
139
140	caifd_hold(caifd);
141	rcu_read_unlock();
142
143	spin_lock_bh(&caifd->flow_lock);
144	send_xoff = caifd->xoff;
145	caifd->xoff = false;
146	dtor = caifd->xoff_skb_dtor;
147
148	if (WARN_ON(caifd->xoff_skb != skb))
149		skb = NULL;
150
151	caifd->xoff_skb = NULL;
152	caifd->xoff_skb_dtor = NULL;
153
154	spin_unlock_bh(&caifd->flow_lock);
155
156	if (dtor && skb)
157		dtor(skb);
158
159	if (send_xoff)
160		caifd->layer.up->
161			ctrlcmd(caifd->layer.up,
162				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
163				caifd->layer.id);
164	caifd_put(caifd);
165}
166
167static int transmit(struct cflayer *layer, struct cfpkt *pkt)
168{
169	int err, high = 0, qlen = 0;
170	struct caif_device_entry *caifd =
171	    container_of(layer, struct caif_device_entry, layer);
172	struct sk_buff *skb;
173	struct netdev_queue *txq;
174
175	rcu_read_lock_bh();
176
177	skb = cfpkt_tonative(pkt);
178	skb->dev = caifd->netdev;
179	skb_reset_network_header(skb);
180	skb->protocol = htons(ETH_P_CAIF);
181
182	/* Check if we need to handle xoff */
183	if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
184		goto noxoff;
185
186	if (unlikely(caifd->xoff))
187		goto noxoff;
188
189	if (likely(!netif_queue_stopped(caifd->netdev))) {
190		struct Qdisc *sch;
191
 192		/* If we run with a TX queue, check if the queue is too long */
193		txq = netdev_get_tx_queue(skb->dev, 0);
194		sch = rcu_dereference_bh(txq->qdisc);
195		if (likely(qdisc_is_empty(sch)))
196			goto noxoff;
197
 198		/* The qdisc length can only be checked explicitly for
 199		 * !NOLOCK qdiscs; otherwise always set flow off.
 200		 */
201		high = (caifd->netdev->tx_queue_len * q_high) / 100;
202		if (!(sch->flags & TCQ_F_NOLOCK) && likely(sch->q.qlen < high))
203			goto noxoff;
204	}
205
206	/* Hold lock while accessing xoff */
207	spin_lock_bh(&caifd->flow_lock);
208	if (caifd->xoff) {
209		spin_unlock_bh(&caifd->flow_lock);
210		goto noxoff;
211	}
212
 213	/*
 214	 * Handle flow off: we do this by temporarily hijacking this
 215	 * skb's destructor function and replacing it with our own
 216	 * flow-on callback. The callback will set flow on and call
 217	 * the original destructor.
 218	 */
219
220	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
221			netif_queue_stopped(caifd->netdev),
222			qlen, high);
223	caifd->xoff = true;
224	caifd->xoff_skb = skb;
225	caifd->xoff_skb_dtor = skb->destructor;
226	skb->destructor = caif_flow_cb;
227	spin_unlock_bh(&caifd->flow_lock);
228
229	caifd->layer.up->ctrlcmd(caifd->layer.up,
230					_CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
231					caifd->layer.id);
232noxoff:
233	rcu_read_unlock_bh();
234
235	err = dev_queue_xmit(skb);
236	if (err > 0)
237		err = -EIO;
238
239	return err;
240}
241
242/*
243 * Stuff received packets into the CAIF stack.
244 * On error, returns non-zero and releases the skb.
245 */
246static int receive(struct sk_buff *skb, struct net_device *dev,
247		   struct packet_type *pkttype, struct net_device *orig_dev)
248{
249	struct cfpkt *pkt;
250	struct caif_device_entry *caifd;
251	int err;
252
253	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
254
255	rcu_read_lock();
256	caifd = caif_get(dev);
257
258	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
259			!netif_oper_up(caifd->netdev)) {
260		rcu_read_unlock();
261		kfree_skb(skb);
262		return NET_RX_DROP;
263	}
264
265	/* Hold reference to netdevice while using CAIF stack */
266	caifd_hold(caifd);
267	rcu_read_unlock();
268
269	err = caifd->layer.up->receive(caifd->layer.up, pkt);
270
 271	/* For -EILSEQ the packet is not freed so free it now */
272	if (err == -EILSEQ)
273		cfpkt_destroy(pkt);
274
275	/* Release reference to stack upwards */
276	caifd_put(caifd);
277
278	if (err != 0)
279		err = NET_RX_DROP;
280	return err;
281}
282
283static struct packet_type caif_packet_type __read_mostly = {
284	.type = cpu_to_be16(ETH_P_CAIF),
285	.func = receive,
286};
287
288static void dev_flowctrl(struct net_device *dev, int on)
289{
290	struct caif_device_entry *caifd;
291
292	rcu_read_lock();
293
294	caifd = caif_get(dev);
295	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
296		rcu_read_unlock();
297		return;
298	}
299
300	caifd_hold(caifd);
301	rcu_read_unlock();
302
303	caifd->layer.up->ctrlcmd(caifd->layer.up,
304				 on ?
305				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
306				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
307				 caifd->layer.id);
308	caifd_put(caifd);
309}
310
311void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
312		     struct cflayer *link_support, int head_room,
313		     struct cflayer **layer,
314		     int (**rcv_func)(struct sk_buff *, struct net_device *,
315				      struct packet_type *,
316				      struct net_device *))
317{
318	struct caif_device_entry *caifd;
319	enum cfcnfg_phy_preference pref;
320	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
321	struct caif_device_entry_list *caifdevs;
322
323	caifdevs = caif_device_list(dev_net(dev));
324	caifd = caif_device_alloc(dev);
325	if (!caifd)
326		return;
327	*layer = &caifd->layer;
328	spin_lock_init(&caifd->flow_lock);
329
330	switch (caifdev->link_select) {
331	case CAIF_LINK_HIGH_BANDW:
332		pref = CFPHYPREF_HIGH_BW;
333		break;
334	case CAIF_LINK_LOW_LATENCY:
335		pref = CFPHYPREF_LOW_LAT;
336		break;
337	default:
338		pref = CFPHYPREF_HIGH_BW;
339		break;
340	}
341	mutex_lock(&caifdevs->lock);
342	list_add_rcu(&caifd->list, &caifdevs->list);
343
344	strlcpy(caifd->layer.name, dev->name,
345		sizeof(caifd->layer.name));
346	caifd->layer.transmit = transmit;
347	cfcnfg_add_phy_layer(cfg,
348				dev,
349				&caifd->layer,
350				pref,
351				link_support,
352				caifdev->use_fcs,
353				head_room);
354	mutex_unlock(&caifdevs->lock);
355	if (rcv_func)
356		*rcv_func = receive;
357}
358EXPORT_SYMBOL(caif_enroll_dev);
359
360/* notify Caif of device events */
361static int caif_device_notify(struct notifier_block *me, unsigned long what,
362			      void *ptr)
363{
364	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
365	struct caif_device_entry *caifd = NULL;
366	struct caif_dev_common *caifdev;
367	struct cfcnfg *cfg;
368	struct cflayer *layer, *link_support;
369	int head_room = 0;
370	struct caif_device_entry_list *caifdevs;
371
372	cfg = get_cfcnfg(dev_net(dev));
373	caifdevs = caif_device_list(dev_net(dev));
374
375	caifd = caif_get(dev);
376	if (caifd == NULL && dev->type != ARPHRD_CAIF)
377		return 0;
378
379	switch (what) {
380	case NETDEV_REGISTER:
381		if (caifd != NULL)
382			break;
383
384		caifdev = netdev_priv(dev);
385
386		link_support = NULL;
387		if (caifdev->use_frag) {
388			head_room = 1;
389			link_support = cfserl_create(dev->ifindex,
390							caifdev->use_stx);
391			if (!link_support) {
392				pr_warn("Out of memory\n");
393				break;
394			}
395		}
396		caif_enroll_dev(dev, caifdev, link_support, head_room,
397				&layer, NULL);
398		caifdev->flowctrl = dev_flowctrl;
399		break;
400
401	case NETDEV_UP:
402		rcu_read_lock();
403
404		caifd = caif_get(dev);
405		if (caifd == NULL) {
406			rcu_read_unlock();
407			break;
408		}
409
410		caifd->xoff = false;
411		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
412		rcu_read_unlock();
413
414		break;
415
416	case NETDEV_DOWN:
417		rcu_read_lock();
418
419		caifd = caif_get(dev);
420		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
421			rcu_read_unlock();
422			return -EINVAL;
423		}
424
425		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
426		caifd_hold(caifd);
427		rcu_read_unlock();
428
429		caifd->layer.up->ctrlcmd(caifd->layer.up,
430					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
431					 caifd->layer.id);
432
433		spin_lock_bh(&caifd->flow_lock);
434
 435		/*
 436		 * Replace our xoff-destructor with the original destructor.
 437		 * We trust that skb->destructor is *always* called before
 438		 * the skb reference becomes invalid. The hijacked SKB destructor
 439		 * takes the flow_lock, so manipulating skb->destructor here
 440		 * should be safe.
 441		 */
442		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
443			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;
444
445		caifd->xoff = false;
446		caifd->xoff_skb_dtor = NULL;
447		caifd->xoff_skb = NULL;
448
449		spin_unlock_bh(&caifd->flow_lock);
450		caifd_put(caifd);
451		break;
452
453	case NETDEV_UNREGISTER:
454		mutex_lock(&caifdevs->lock);
455
456		caifd = caif_get(dev);
457		if (caifd == NULL) {
458			mutex_unlock(&caifdevs->lock);
459			break;
460		}
461		list_del_rcu(&caifd->list);
462
463		/*
464		 * NETDEV_UNREGISTER is called repeatedly until all reference
465		 * counts for the net-device are released. If references to
 466		 * caifd are still held, simply ignore NETDEV_UNREGISTER and wait for
467		 * the next call to NETDEV_UNREGISTER.
468		 *
469		 * If any packets are in flight down the CAIF Stack,
470		 * cfcnfg_del_phy_layer will return nonzero.
471		 * If no packets are in flight, the CAIF Stack associated
 472		 * with the net-device being unregistered is freed.
473		 */
474
475		if (caifd_refcnt_read(caifd) != 0 ||
476			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {
477
478			pr_info("Wait for device inuse\n");
 479			/* Enroll device if the CAIF stack is still in use */
480			list_add_rcu(&caifd->list, &caifdevs->list);
481			mutex_unlock(&caifdevs->lock);
482			break;
483		}
484
485		synchronize_rcu();
486		dev_put(caifd->netdev);
487		free_percpu(caifd->pcpu_refcnt);
488		kfree(caifd);
489
490		mutex_unlock(&caifdevs->lock);
491		break;
492	}
493	return 0;
494}
495
496static struct notifier_block caif_device_notifier = {
497	.notifier_call = caif_device_notify,
498	.priority = 0,
499};
500
501/* Per-namespace Caif devices handling */
502static int caif_init_net(struct net *net)
503{
504	struct caif_net *caifn = net_generic(net, caif_net_id);
505	INIT_LIST_HEAD(&caifn->caifdevs.list);
506	mutex_init(&caifn->caifdevs.lock);
507
508	caifn->cfg = cfcnfg_create();
509	if (!caifn->cfg)
510		return -ENOMEM;
511
512	return 0;
513}
514
515static void caif_exit_net(struct net *net)
516{
517	struct caif_device_entry *caifd, *tmp;
518	struct caif_device_entry_list *caifdevs =
519	    caif_device_list(net);
520	struct cfcnfg *cfg =  get_cfcnfg(net);
521
522	rtnl_lock();
523	mutex_lock(&caifdevs->lock);
524
525	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
526		int i = 0;
527		list_del_rcu(&caifd->list);
528		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
529
530		while (i < 10 &&
531			(caifd_refcnt_read(caifd) != 0 ||
532			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {
533
534			pr_info("Wait for device inuse\n");
535			msleep(250);
536			i++;
537		}
538		synchronize_rcu();
539		dev_put(caifd->netdev);
540		free_percpu(caifd->pcpu_refcnt);
541		kfree(caifd);
542	}
543	cfcnfg_remove(cfg);
544
545	mutex_unlock(&caifdevs->lock);
546	rtnl_unlock();
547}
548
549static struct pernet_operations caif_net_ops = {
550	.init = caif_init_net,
551	.exit = caif_exit_net,
552	.id   = &caif_net_id,
553	.size = sizeof(struct caif_net),
554};
555
556/* Initialize Caif devices list */
557static int __init caif_device_init(void)
558{
559	int result;
560
561	result = register_pernet_subsys(&caif_net_ops);
562
563	if (result)
564		return result;
565
566	register_netdevice_notifier(&caif_device_notifier);
567	dev_add_pack(&caif_packet_type);
568
569	return result;
570}
571
572static void __exit caif_device_exit(void)
573{
574	unregister_netdevice_notifier(&caif_device_notifier);
575	dev_remove_pack(&caif_packet_type);
576	unregister_pernet_subsys(&caif_net_ops);
577}
578
579module_init(caif_device_init);
580module_exit(caif_device_exit);