net/caif/caif_dev.c (v3.5.6)
  1/*
  2 * CAIF Interface registration.
  3 * Copyright (C) ST-Ericsson AB 2010
  4 * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
  5 * License terms: GNU General Public License (GPL) version 2
  6 *
  7 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
  8 *  and Sakari Ailus <sakari.ailus@nokia.com>
  9 */
 10
 11#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
 12
 13#include <linux/kernel.h>
 14#include <linux/if_arp.h>
 15#include <linux/net.h>
 16#include <linux/netdevice.h>
 17#include <linux/mutex.h>
 18#include <linux/module.h>
 19#include <linux/spinlock.h>
 20#include <net/netns/generic.h>
 21#include <net/net_namespace.h>
 22#include <net/pkt_sched.h>
 23#include <net/caif/caif_device.h>
 24#include <net/caif/caif_layer.h>
 25#include <net/caif/cfpkt.h>
 26#include <net/caif/cfcnfg.h>
 27#include <net/caif/cfserl.h>
 28
 29MODULE_LICENSE("GPL");
 30
 31/* Used for local tracking of the CAIF net devices */
 32struct caif_device_entry {
 33	struct cflayer layer;
 34	struct list_head list;
 35	struct net_device *netdev;
 36	int __percpu *pcpu_refcnt;
 37	spinlock_t flow_lock;
 38	struct sk_buff *xoff_skb;
 39	void (*xoff_skb_dtor)(struct sk_buff *skb);
 40	bool xoff;
 41};
 42
 43struct caif_device_entry_list {
 44	struct list_head list;
 45	/* Protects simultaneous deletes in list */
 46	struct mutex lock;
 47};
 48
 49struct caif_net {
 50	struct cfcnfg *cfg;
 51	struct caif_device_entry_list caifdevs;
 52};
 53
 54static int caif_net_id;
 55static int q_high = 50; /* Percent */
 56
 57struct cfcnfg *get_cfcnfg(struct net *net)
 58{
 59	struct caif_net *caifn;
 60	caifn = net_generic(net, caif_net_id);
 61	return caifn->cfg;
 62}
 63EXPORT_SYMBOL(get_cfcnfg);
 64
 65static struct caif_device_entry_list *caif_device_list(struct net *net)
 66{
 67	struct caif_net *caifn;
 68	caifn = net_generic(net, caif_net_id);
 69	return &caifn->caifdevs;
 70}
 71
 72static void caifd_put(struct caif_device_entry *e)
 73{
 74	this_cpu_dec(*e->pcpu_refcnt);
 75}
 76
 77static void caifd_hold(struct caif_device_entry *e)
 78{
 79	this_cpu_inc(*e->pcpu_refcnt);
 80}
 81
 82static int caifd_refcnt_read(struct caif_device_entry *e)
 83{
 84	int i, refcnt = 0;
 85	for_each_possible_cpu(i)
 86		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
 87	return refcnt;
 88}
 89
 90/* Allocate new CAIF device. */
 91static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
 92{
 93	struct caif_device_entry_list *caifdevs;
 94	struct caif_device_entry *caifd;
 95
 96	caifdevs = caif_device_list(dev_net(dev));
 97
 98	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
 99	if (!caifd)
100		return NULL;
101	caifd->pcpu_refcnt = alloc_percpu(int);
102	if (!caifd->pcpu_refcnt) {
103		kfree(caifd);
104		return NULL;
105	}
106	caifd->netdev = dev;
107	dev_hold(dev);
108	return caifd;
109}
110
111static struct caif_device_entry *caif_get(struct net_device *dev)
112{
113	struct caif_device_entry_list *caifdevs =
114	    caif_device_list(dev_net(dev));
115	struct caif_device_entry *caifd;
116
117	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
118		if (caifd->netdev == dev)
119			return caifd;
120	}
121	return NULL;
122}
123
124void caif_flow_cb(struct sk_buff *skb)
125{
126	struct caif_device_entry *caifd;
127	void (*dtor)(struct sk_buff *skb) = NULL;
128	bool send_xoff;
129
130	WARN_ON(skb->dev == NULL);
131
132	rcu_read_lock();
133	caifd = caif_get(skb->dev);
134	caifd_hold(caifd);
135	rcu_read_unlock();
136
137	spin_lock_bh(&caifd->flow_lock);
138	send_xoff = caifd->xoff;
139	caifd->xoff = 0;
140	dtor = caifd->xoff_skb_dtor;
141
142	if (WARN_ON(caifd->xoff_skb != skb))
143		skb = NULL;
144
145	caifd->xoff_skb = NULL;
146	caifd->xoff_skb_dtor = NULL;
147
148	spin_unlock_bh(&caifd->flow_lock);
149
150	if (dtor && skb)
151		dtor(skb);
152
153	if (send_xoff)
154		caifd->layer.up->
155			ctrlcmd(caifd->layer.up,
156				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
157				caifd->layer.id);
158	caifd_put(caifd);
159}
160
161static int transmit(struct cflayer *layer, struct cfpkt *pkt)
162{
163	int err, high = 0, qlen = 0;
164	struct caif_device_entry *caifd =
165	    container_of(layer, struct caif_device_entry, layer);
166	struct sk_buff *skb;
167	struct netdev_queue *txq;
168
169	rcu_read_lock_bh();
170
171	skb = cfpkt_tonative(pkt);
172	skb->dev = caifd->netdev;
173	skb_reset_network_header(skb);
174	skb->protocol = htons(ETH_P_CAIF);
175
176	/* Check if we need to handle xoff */
177	if (likely(caifd->netdev->tx_queue_len == 0))
178		goto noxoff;
179
180	if (unlikely(caifd->xoff))
181		goto noxoff;
182
183	if (likely(!netif_queue_stopped(caifd->netdev))) {
184		/* If we run with a TX queue, check if the queue is too long*/
185		txq = netdev_get_tx_queue(skb->dev, 0);
186		qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));
187
188		if (likely(qlen == 0))
189			goto noxoff;
190
191		high = (caifd->netdev->tx_queue_len * q_high) / 100;
192		if (likely(qlen < high))
193			goto noxoff;
194	}
195
196	/* Hold lock while accessing xoff */
197	spin_lock_bh(&caifd->flow_lock);
198	if (caifd->xoff) {
199		spin_unlock_bh(&caifd->flow_lock);
200		goto noxoff;
201	}
202
203	/*
204	 * Handle flow off: we do this by temporarily hijacking this
205	 * skb's destructor function, replacing it with our own
206	 * flow-on callback. The callback will set flow-on and call
207	 * the original destructor.
208	 */
209
210	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
211			netif_queue_stopped(caifd->netdev),
212			qlen, high);
213	caifd->xoff = 1;
214	caifd->xoff_skb = skb;
215	caifd->xoff_skb_dtor = skb->destructor;
216	skb->destructor = caif_flow_cb;
217	spin_unlock_bh(&caifd->flow_lock);
218
219	caifd->layer.up->ctrlcmd(caifd->layer.up,
220					_CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
221					caifd->layer.id);
222noxoff:
223	rcu_read_unlock_bh();
224
225	err = dev_queue_xmit(skb);
226	if (err > 0)
227		err = -EIO;
228
229	return err;
230}
231
232/*
233 * Stuff received packets into the CAIF stack.
234 * On error, returns non-zero and releases the skb.
235 */
236static int receive(struct sk_buff *skb, struct net_device *dev,
237		   struct packet_type *pkttype, struct net_device *orig_dev)
238{
239	struct cfpkt *pkt;
240	struct caif_device_entry *caifd;
241	int err;
242
243	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
244
245	rcu_read_lock();
246	caifd = caif_get(dev);
247
248	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
249			!netif_oper_up(caifd->netdev)) {
250		rcu_read_unlock();
251		kfree_skb(skb);
252		return NET_RX_DROP;
253	}
254
255	/* Hold reference to netdevice while using CAIF stack */
256	caifd_hold(caifd);
257	rcu_read_unlock();
258
259	err = caifd->layer.up->receive(caifd->layer.up, pkt);
260
261	/* For -EILSEQ the packet is not freed, so free it now */
262	if (err == -EILSEQ)
263		cfpkt_destroy(pkt);
264
265	/* Release reference to stack upwards */
266	caifd_put(caifd);
267
268	if (err != 0)
269		err = NET_RX_DROP;
270	return err;
271}
272
273static struct packet_type caif_packet_type __read_mostly = {
274	.type = cpu_to_be16(ETH_P_CAIF),
275	.func = receive,
276};
277
278static void dev_flowctrl(struct net_device *dev, int on)
279{
280	struct caif_device_entry *caifd;
281
282	rcu_read_lock();
283
284	caifd = caif_get(dev);
285	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
286		rcu_read_unlock();
287		return;
288	}
289
290	caifd_hold(caifd);
291	rcu_read_unlock();
292
293	caifd->layer.up->ctrlcmd(caifd->layer.up,
294				 on ?
295				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
296				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
297				 caifd->layer.id);
298	caifd_put(caifd);
299}
300
301void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
302			struct cflayer *link_support, int head_room,
303			struct cflayer **layer, int (**rcv_func)(
304				struct sk_buff *, struct net_device *,
305				struct packet_type *, struct net_device *))
306{
307	struct caif_device_entry *caifd;
308	enum cfcnfg_phy_preference pref;
309	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
310	struct caif_device_entry_list *caifdevs;
311
312	caifdevs = caif_device_list(dev_net(dev));
313	caifd = caif_device_alloc(dev);
314	if (!caifd)
315		return;
316	*layer = &caifd->layer;
317	spin_lock_init(&caifd->flow_lock);
318
319	switch (caifdev->link_select) {
320	case CAIF_LINK_HIGH_BANDW:
321		pref = CFPHYPREF_HIGH_BW;
322		break;
323	case CAIF_LINK_LOW_LATENCY:
324		pref = CFPHYPREF_LOW_LAT;
325		break;
326	default:
327		pref = CFPHYPREF_HIGH_BW;
328		break;
329	}
330	mutex_lock(&caifdevs->lock);
331	list_add_rcu(&caifd->list, &caifdevs->list);
332
333	strncpy(caifd->layer.name, dev->name,
334		sizeof(caifd->layer.name) - 1);
335	caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
336	caifd->layer.transmit = transmit;
337	cfcnfg_add_phy_layer(cfg,
338				dev,
339				&caifd->layer,
340				pref,
341				link_support,
342				caifdev->use_fcs,
343				head_room);
344	mutex_unlock(&caifdevs->lock);
345	if (rcv_func)
346		*rcv_func = receive;
347}
348EXPORT_SYMBOL(caif_enroll_dev);
349
350/* notify Caif of device events */
351static int caif_device_notify(struct notifier_block *me, unsigned long what,
352			      void *arg)
353{
354	struct net_device *dev = arg;
355	struct caif_device_entry *caifd = NULL;
356	struct caif_dev_common *caifdev;
357	struct cfcnfg *cfg;
358	struct cflayer *layer, *link_support;
359	int head_room = 0;
360	struct caif_device_entry_list *caifdevs;
361
362	cfg = get_cfcnfg(dev_net(dev));
363	caifdevs = caif_device_list(dev_net(dev));
364
365	caifd = caif_get(dev);
366	if (caifd == NULL && dev->type != ARPHRD_CAIF)
367		return 0;
368
369	switch (what) {
370	case NETDEV_REGISTER:
371		if (caifd != NULL)
372			break;
373
374		caifdev = netdev_priv(dev);
375
376		link_support = NULL;
377		if (caifdev->use_frag) {
378			head_room = 1;
379			link_support = cfserl_create(dev->ifindex,
380							caifdev->use_stx);
381			if (!link_support) {
382				pr_warn("Out of memory\n");
383				break;
384			}
385		}
386		caif_enroll_dev(dev, caifdev, link_support, head_room,
387				&layer, NULL);
388		caifdev->flowctrl = dev_flowctrl;
389		break;
390
391	case NETDEV_UP:
392		rcu_read_lock();
393
394		caifd = caif_get(dev);
395		if (caifd == NULL) {
396			rcu_read_unlock();
397			break;
398		}
399
400		caifd->xoff = 0;
401		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
402		rcu_read_unlock();
403
404		break;
405
406	case NETDEV_DOWN:
407		rcu_read_lock();
408
409		caifd = caif_get(dev);
410		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
411			rcu_read_unlock();
412			return -EINVAL;
413		}
414
415		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
416		caifd_hold(caifd);
417		rcu_read_unlock();
418
419		caifd->layer.up->ctrlcmd(caifd->layer.up,
420					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
421					 caifd->layer.id);
422
423		spin_lock_bh(&caifd->flow_lock);
424
425		/*
426		 * Replace our xoff-destructor with the original destructor.
427		 * We trust that skb->destructor is *always* called before
428		 * the skb reference becomes invalid. The hijacked SKB destructor
429		 * takes the flow_lock so manipulating the skb->destructor here
430		 * should be safe.
431		*/
432		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
433			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;
434
435		caifd->xoff = 0;
436		caifd->xoff_skb_dtor = NULL;
437		caifd->xoff_skb = NULL;
438
439		spin_unlock_bh(&caifd->flow_lock);
440		caifd_put(caifd);
441		break;
442
443	case NETDEV_UNREGISTER:
444		mutex_lock(&caifdevs->lock);
445
446		caifd = caif_get(dev);
447		if (caifd == NULL) {
448			mutex_unlock(&caifdevs->lock);
449			break;
450		}
451		list_del_rcu(&caifd->list);
452
453		/*
454		 * NETDEV_UNREGISTER is called repeatedly until all reference
455		 * counts for the net-device are released. If references to
456		 * caifd are still held, simply ignore NETDEV_UNREGISTER and wait for
457		 * the next call to NETDEV_UNREGISTER.
458		 *
459		 * If any packets are in flight down the CAIF Stack,
460		 * cfcnfg_del_phy_layer will return nonzero.
461		 * If no packets are in flight, the CAIF Stack associated
462		 * with the net-device un-registering is freed.
463		 */
464
465		if (caifd_refcnt_read(caifd) != 0 ||
466			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {
467
468			pr_info("Wait for device inuse\n");
469			/* Enroll device if CAIF Stack is still in use */
470			list_add_rcu(&caifd->list, &caifdevs->list);
471			mutex_unlock(&caifdevs->lock);
472			break;
473		}
474
475		synchronize_rcu();
476		dev_put(caifd->netdev);
477		free_percpu(caifd->pcpu_refcnt);
478		kfree(caifd);
479
480		mutex_unlock(&caifdevs->lock);
481		break;
482	}
483	return 0;
484}
485
486static struct notifier_block caif_device_notifier = {
487	.notifier_call = caif_device_notify,
488	.priority = 0,
489};
490
491/* Per-namespace Caif devices handling */
492static int caif_init_net(struct net *net)
493{
494	struct caif_net *caifn = net_generic(net, caif_net_id);
495	INIT_LIST_HEAD(&caifn->caifdevs.list);
496	mutex_init(&caifn->caifdevs.lock);
497
498	caifn->cfg = cfcnfg_create();
499	if (!caifn->cfg)
500		return -ENOMEM;
501
502	return 0;
503}
504
505static void caif_exit_net(struct net *net)
506{
507	struct caif_device_entry *caifd, *tmp;
508	struct caif_device_entry_list *caifdevs =
509	    caif_device_list(net);
510	struct cfcnfg *cfg =  get_cfcnfg(net);
511
512	rtnl_lock();
513	mutex_lock(&caifdevs->lock);
514
515	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
516		int i = 0;
517		list_del_rcu(&caifd->list);
518		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
519
520		while (i < 10 &&
521			(caifd_refcnt_read(caifd) != 0 ||
522			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {
523
524			pr_info("Wait for device inuse\n");
525			msleep(250);
526			i++;
527		}
528		synchronize_rcu();
529		dev_put(caifd->netdev);
530		free_percpu(caifd->pcpu_refcnt);
531		kfree(caifd);
532	}
533	cfcnfg_remove(cfg);
534
535	mutex_unlock(&caifdevs->lock);
536	rtnl_unlock();
537}
538
539static struct pernet_operations caif_net_ops = {
540	.init = caif_init_net,
541	.exit = caif_exit_net,
542	.id   = &caif_net_id,
543	.size = sizeof(struct caif_net),
544};
545
546/* Initialize Caif devices list */
547static int __init caif_device_init(void)
548{
549	int result;
550
551	result = register_pernet_subsys(&caif_net_ops);
552
553	if (result)
554		return result;
555
556	register_netdevice_notifier(&caif_device_notifier);
557	dev_add_pack(&caif_packet_type);
558
559	return result;
560}
561
562static void __exit caif_device_exit(void)
563{
564	unregister_netdevice_notifier(&caif_device_notifier);
565	dev_remove_pack(&caif_packet_type);
566	unregister_pernet_subsys(&caif_net_ops);
567}
568
569module_init(caif_device_init);
570module_exit(caif_device_exit);
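
A note on the flow-control scheme above: transmit() does not stop the device queue when it fills up. Instead it temporarily hijacks the destructor of the skb that crossed the threshold, signals flow-off to the layer above, and relies on caif_flow_cb() running when the device finally releases that skb to signal flow-on again and invoke the original destructor. The fragment below is a minimal, self-contained sketch of that pattern, not code from the kernel tree: my_dev, my_xoff and my_flow_cb are invented names, the per-device state is a static instance rather than being looked up from skb->dev under RCU, and the CAIF ctrlcmd signalling is only indicated in comments.

/* Sketch of the destructor-hijack flow control used by transmit()/caif_flow_cb(). */
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct my_dev {
	spinlock_t flow_lock;			/* assumed initialised with spin_lock_init() */
	bool xoff;				/* flow currently stopped? */
	struct sk_buff *xoff_skb;		/* skb whose destructor was borrowed */
	void (*xoff_skb_dtor)(struct sk_buff *skb);
};

static struct my_dev my_dev_instance;		/* the real code finds this via skb->dev */

/* Runs when the device finally frees the throttling skb. */
static void my_flow_cb(struct sk_buff *skb)
{
	struct my_dev *d = &my_dev_instance;
	void (*dtor)(struct sk_buff *skb);

	spin_lock_bh(&d->flow_lock);
	d->xoff = false;			/* queue has drained: flow may resume */
	dtor = d->xoff_skb_dtor;
	d->xoff_skb = NULL;
	d->xoff_skb_dtor = NULL;
	spin_unlock_bh(&d->flow_lock);

	/* the real code sends _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND upwards here */

	if (dtor)
		dtor(skb);			/* preserve the original socket accounting */
}

/* Called from the transmit path when the TX queue grows past the threshold. */
static void my_xoff(struct my_dev *d, struct sk_buff *skb)
{
	spin_lock_bh(&d->flow_lock);
	d->xoff = true;				/* stop flow until my_flow_cb() fires */
	d->xoff_skb = skb;
	d->xoff_skb_dtor = skb->destructor;	/* remember the original destructor */
	skb->destructor = my_flow_cb;		/* get a callback when the skb is freed */
	spin_unlock_bh(&d->flow_lock);

	/* the real code sends _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND upwards here */
}
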
net/caif/caif_dev.c (v4.6)
  1/*
  2 * CAIF Interface registration.
  3 * Copyright (C) ST-Ericsson AB 2010
  4 * Author:	Sjur Brendeland
  5 * License terms: GNU General Public License (GPL) version 2
  6 *
  7 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
  8 *  and Sakari Ailus <sakari.ailus@nokia.com>
  9 */
 10
 11#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
 12
 13#include <linux/kernel.h>
 14#include <linux/if_arp.h>
 15#include <linux/net.h>
 16#include <linux/netdevice.h>
 17#include <linux/mutex.h>
 18#include <linux/module.h>
 19#include <linux/spinlock.h>
 20#include <net/netns/generic.h>
 21#include <net/net_namespace.h>
 22#include <net/pkt_sched.h>
 23#include <net/caif/caif_device.h>
 24#include <net/caif/caif_layer.h>
 25#include <net/caif/caif_dev.h>
 26#include <net/caif/cfpkt.h>
 27#include <net/caif/cfcnfg.h>
 28#include <net/caif/cfserl.h>
 29
 30MODULE_LICENSE("GPL");
 31
 32/* Used for local tracking of the CAIF net devices */
 33struct caif_device_entry {
 34	struct cflayer layer;
 35	struct list_head list;
 36	struct net_device *netdev;
 37	int __percpu *pcpu_refcnt;
 38	spinlock_t flow_lock;
 39	struct sk_buff *xoff_skb;
 40	void (*xoff_skb_dtor)(struct sk_buff *skb);
 41	bool xoff;
 42};
 43
 44struct caif_device_entry_list {
 45	struct list_head list;
 46	/* Protects simultaneous deletes in list */
 47	struct mutex lock;
 48};
 49
 50struct caif_net {
 51	struct cfcnfg *cfg;
 52	struct caif_device_entry_list caifdevs;
 53};
 54
 55static int caif_net_id;
 56static int q_high = 50; /* Percent */
 57
 58struct cfcnfg *get_cfcnfg(struct net *net)
 59{
 60	struct caif_net *caifn;
 61	caifn = net_generic(net, caif_net_id);
 62	return caifn->cfg;
 63}
 64EXPORT_SYMBOL(get_cfcnfg);
 65
 66static struct caif_device_entry_list *caif_device_list(struct net *net)
 67{
 68	struct caif_net *caifn;
 69	caifn = net_generic(net, caif_net_id);
 70	return &caifn->caifdevs;
 71}
 72
 73static void caifd_put(struct caif_device_entry *e)
 74{
 75	this_cpu_dec(*e->pcpu_refcnt);
 76}
 77
 78static void caifd_hold(struct caif_device_entry *e)
 79{
 80	this_cpu_inc(*e->pcpu_refcnt);
 81}
 82
 83static int caifd_refcnt_read(struct caif_device_entry *e)
 84{
 85	int i, refcnt = 0;
 86	for_each_possible_cpu(i)
 87		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
 88	return refcnt;
 89}
 90
 91/* Allocate new CAIF device. */
 92static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
 93{
 94	struct caif_device_entry *caifd;
 95
 96	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
 97	if (!caifd)
 98		return NULL;
 99	caifd->pcpu_refcnt = alloc_percpu(int);
100	if (!caifd->pcpu_refcnt) {
101		kfree(caifd);
102		return NULL;
103	}
104	caifd->netdev = dev;
105	dev_hold(dev);
106	return caifd;
107}
108
109static struct caif_device_entry *caif_get(struct net_device *dev)
110{
111	struct caif_device_entry_list *caifdevs =
112	    caif_device_list(dev_net(dev));
113	struct caif_device_entry *caifd;
114
115	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
116		if (caifd->netdev == dev)
117			return caifd;
118	}
119	return NULL;
120}
121
122static void caif_flow_cb(struct sk_buff *skb)
123{
124	struct caif_device_entry *caifd;
125	void (*dtor)(struct sk_buff *skb) = NULL;
126	bool send_xoff;
127
128	WARN_ON(skb->dev == NULL);
129
130	rcu_read_lock();
131	caifd = caif_get(skb->dev);
132
133	WARN_ON(caifd == NULL);
134	if (caifd == NULL)
135		return;
136
137	caifd_hold(caifd);
138	rcu_read_unlock();
139
140	spin_lock_bh(&caifd->flow_lock);
141	send_xoff = caifd->xoff;
142	caifd->xoff = 0;
143	dtor = caifd->xoff_skb_dtor;
144
145	if (WARN_ON(caifd->xoff_skb != skb))
146		skb = NULL;
147
148	caifd->xoff_skb = NULL;
149	caifd->xoff_skb_dtor = NULL;
150
151	spin_unlock_bh(&caifd->flow_lock);
152
153	if (dtor && skb)
154		dtor(skb);
155
156	if (send_xoff)
157		caifd->layer.up->
158			ctrlcmd(caifd->layer.up,
159				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
160				caifd->layer.id);
161	caifd_put(caifd);
162}
163
164static int transmit(struct cflayer *layer, struct cfpkt *pkt)
165{
166	int err, high = 0, qlen = 0;
167	struct caif_device_entry *caifd =
168	    container_of(layer, struct caif_device_entry, layer);
169	struct sk_buff *skb;
170	struct netdev_queue *txq;
171
172	rcu_read_lock_bh();
173
174	skb = cfpkt_tonative(pkt);
175	skb->dev = caifd->netdev;
176	skb_reset_network_header(skb);
177	skb->protocol = htons(ETH_P_CAIF);
178
179	/* Check if we need to handle xoff */
180	if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
181		goto noxoff;
182
183	if (unlikely(caifd->xoff))
184		goto noxoff;
185
186	if (likely(!netif_queue_stopped(caifd->netdev))) {
187		/* If we run with a TX queue, check if the queue is too long*/
188		txq = netdev_get_tx_queue(skb->dev, 0);
189		qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));
190
191		if (likely(qlen == 0))
192			goto noxoff;
193
194		high = (caifd->netdev->tx_queue_len * q_high) / 100;
195		if (likely(qlen < high))
196			goto noxoff;
197	}
198
199	/* Hold lock while accessing xoff */
200	spin_lock_bh(&caifd->flow_lock);
201	if (caifd->xoff) {
202		spin_unlock_bh(&caifd->flow_lock);
203		goto noxoff;
204	}
205
206	/*
207	 * Handle flow off: we do this by temporarily hijacking this
208	 * skb's destructor function, replacing it with our own
209	 * flow-on callback. The callback will set flow-on and call
210	 * the original destructor.
211	 */
212
213	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
214			netif_queue_stopped(caifd->netdev),
215			qlen, high);
216	caifd->xoff = 1;
217	caifd->xoff_skb = skb;
218	caifd->xoff_skb_dtor = skb->destructor;
219	skb->destructor = caif_flow_cb;
220	spin_unlock_bh(&caifd->flow_lock);
221
222	caifd->layer.up->ctrlcmd(caifd->layer.up,
223					_CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
224					caifd->layer.id);
225noxoff:
226	rcu_read_unlock_bh();
227
228	err = dev_queue_xmit(skb);
229	if (err > 0)
230		err = -EIO;
231
232	return err;
233}
234
235/*
236 * Stuff received packets into the CAIF stack.
237 * On error, returns non-zero and releases the skb.
238 */
239static int receive(struct sk_buff *skb, struct net_device *dev,
240		   struct packet_type *pkttype, struct net_device *orig_dev)
241{
242	struct cfpkt *pkt;
243	struct caif_device_entry *caifd;
244	int err;
245
246	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
247
248	rcu_read_lock();
249	caifd = caif_get(dev);
250
251	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
252			!netif_oper_up(caifd->netdev)) {
253		rcu_read_unlock();
254		kfree_skb(skb);
255		return NET_RX_DROP;
256	}
257
258	/* Hold reference to netdevice while using CAIF stack */
259	caifd_hold(caifd);
260	rcu_read_unlock();
261
262	err = caifd->layer.up->receive(caifd->layer.up, pkt);
263
264	/* For -EILSEQ the packet is not freed, so free it now */
265	if (err == -EILSEQ)
266		cfpkt_destroy(pkt);
267
268	/* Release reference to stack upwards */
269	caifd_put(caifd);
270
271	if (err != 0)
272		err = NET_RX_DROP;
273	return err;
274}
275
276static struct packet_type caif_packet_type __read_mostly = {
277	.type = cpu_to_be16(ETH_P_CAIF),
278	.func = receive,
279};
280
281static void dev_flowctrl(struct net_device *dev, int on)
282{
283	struct caif_device_entry *caifd;
284
285	rcu_read_lock();
286
287	caifd = caif_get(dev);
288	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
289		rcu_read_unlock();
290		return;
291	}
292
293	caifd_hold(caifd);
294	rcu_read_unlock();
295
296	caifd->layer.up->ctrlcmd(caifd->layer.up,
297				 on ?
298				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
299				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
300				 caifd->layer.id);
301	caifd_put(caifd);
302}
303
304void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
305		     struct cflayer *link_support, int head_room,
306		     struct cflayer **layer,
307		     int (**rcv_func)(struct sk_buff *, struct net_device *,
308				      struct packet_type *,
309				      struct net_device *))
310{
311	struct caif_device_entry *caifd;
312	enum cfcnfg_phy_preference pref;
313	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
314	struct caif_device_entry_list *caifdevs;
315
316	caifdevs = caif_device_list(dev_net(dev));
317	caifd = caif_device_alloc(dev);
318	if (!caifd)
319		return;
320	*layer = &caifd->layer;
321	spin_lock_init(&caifd->flow_lock);
322
323	switch (caifdev->link_select) {
324	case CAIF_LINK_HIGH_BANDW:
325		pref = CFPHYPREF_HIGH_BW;
326		break;
327	case CAIF_LINK_LOW_LATENCY:
328		pref = CFPHYPREF_LOW_LAT;
329		break;
330	default:
331		pref = CFPHYPREF_HIGH_BW;
332		break;
333	}
334	mutex_lock(&caifdevs->lock);
335	list_add_rcu(&caifd->list, &caifdevs->list);
336
337	strncpy(caifd->layer.name, dev->name,
338		sizeof(caifd->layer.name) - 1);
339	caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
340	caifd->layer.transmit = transmit;
341	cfcnfg_add_phy_layer(cfg,
342				dev,
343				&caifd->layer,
344				pref,
345				link_support,
346				caifdev->use_fcs,
347				head_room);
348	mutex_unlock(&caifdevs->lock);
349	if (rcv_func)
350		*rcv_func = receive;
351}
352EXPORT_SYMBOL(caif_enroll_dev);
353
354/* notify Caif of device events */
355static int caif_device_notify(struct notifier_block *me, unsigned long what,
356			      void *ptr)
357{
358	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
359	struct caif_device_entry *caifd = NULL;
360	struct caif_dev_common *caifdev;
361	struct cfcnfg *cfg;
362	struct cflayer *layer, *link_support;
363	int head_room = 0;
364	struct caif_device_entry_list *caifdevs;
365
366	cfg = get_cfcnfg(dev_net(dev));
367	caifdevs = caif_device_list(dev_net(dev));
368
369	caifd = caif_get(dev);
370	if (caifd == NULL && dev->type != ARPHRD_CAIF)
371		return 0;
372
373	switch (what) {
374	case NETDEV_REGISTER:
375		if (caifd != NULL)
376			break;
377
378		caifdev = netdev_priv(dev);
379
380		link_support = NULL;
381		if (caifdev->use_frag) {
382			head_room = 1;
383			link_support = cfserl_create(dev->ifindex,
384							caifdev->use_stx);
385			if (!link_support) {
386				pr_warn("Out of memory\n");
387				break;
388			}
389		}
390		caif_enroll_dev(dev, caifdev, link_support, head_room,
391				&layer, NULL);
392		caifdev->flowctrl = dev_flowctrl;
393		break;
394
395	case NETDEV_UP:
396		rcu_read_lock();
397
398		caifd = caif_get(dev);
399		if (caifd == NULL) {
400			rcu_read_unlock();
401			break;
402		}
403
404		caifd->xoff = 0;
405		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
406		rcu_read_unlock();
407
408		break;
409
410	case NETDEV_DOWN:
411		rcu_read_lock();
412
413		caifd = caif_get(dev);
414		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
415			rcu_read_unlock();
416			return -EINVAL;
417		}
418
419		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
420		caifd_hold(caifd);
421		rcu_read_unlock();
422
423		caifd->layer.up->ctrlcmd(caifd->layer.up,
424					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
425					 caifd->layer.id);
426
427		spin_lock_bh(&caifd->flow_lock);
428
429		/*
430		 * Replace our xoff-destructor with the original destructor.
431		 * We trust that skb->destructor is *always* called before
432		 * the skb reference becomes invalid. The hijacked SKB destructor
433		 * takes the flow_lock so manipulating the skb->destructor here
434		 * should be safe.
435		*/
436		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
437			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;
438
439		caifd->xoff = 0;
440		caifd->xoff_skb_dtor = NULL;
441		caifd->xoff_skb = NULL;
442
443		spin_unlock_bh(&caifd->flow_lock);
444		caifd_put(caifd);
445		break;
446
447	case NETDEV_UNREGISTER:
448		mutex_lock(&caifdevs->lock);
449
450		caifd = caif_get(dev);
451		if (caifd == NULL) {
452			mutex_unlock(&caifdevs->lock);
453			break;
454		}
455		list_del_rcu(&caifd->list);
456
457		/*
458		 * NETDEV_UNREGISTER is called repeatedly until all reference
459		 * counts for the net-device are released. If references to
460		 * caifd are still held, simply ignore NETDEV_UNREGISTER and wait for
461		 * the next call to NETDEV_UNREGISTER.
462		 *
463		 * If any packets are in flight down the CAIF Stack,
464		 * cfcnfg_del_phy_layer will return nonzero.
465		 * If no packets are in flight, the CAIF Stack associated
466		 * with the net-device un-registering is freed.
467		 */
468
469		if (caifd_refcnt_read(caifd) != 0 ||
470			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {
471
472			pr_info("Wait for device inuse\n");
473			/* Enroll device if CAIF Stack is still in use */
474			list_add_rcu(&caifd->list, &caifdevs->list);
475			mutex_unlock(&caifdevs->lock);
476			break;
477		}
478
479		synchronize_rcu();
480		dev_put(caifd->netdev);
481		free_percpu(caifd->pcpu_refcnt);
482		kfree(caifd);
483
484		mutex_unlock(&caifdevs->lock);
485		break;
486	}
487	return 0;
488}
489
490static struct notifier_block caif_device_notifier = {
491	.notifier_call = caif_device_notify,
492	.priority = 0,
493};
494
495/* Per-namespace Caif devices handling */
496static int caif_init_net(struct net *net)
497{
498	struct caif_net *caifn = net_generic(net, caif_net_id);
499	INIT_LIST_HEAD(&caifn->caifdevs.list);
500	mutex_init(&caifn->caifdevs.lock);
501
502	caifn->cfg = cfcnfg_create();
503	if (!caifn->cfg)
504		return -ENOMEM;
505
506	return 0;
507}
508
509static void caif_exit_net(struct net *net)
510{
511	struct caif_device_entry *caifd, *tmp;
512	struct caif_device_entry_list *caifdevs =
513	    caif_device_list(net);
514	struct cfcnfg *cfg =  get_cfcnfg(net);
515
516	rtnl_lock();
517	mutex_lock(&caifdevs->lock);
518
519	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
520		int i = 0;
521		list_del_rcu(&caifd->list);
522		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
523
524		while (i < 10 &&
525			(caifd_refcnt_read(caifd) != 0 ||
526			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {
527
528			pr_info("Wait for device inuse\n");
529			msleep(250);
530			i++;
531		}
532		synchronize_rcu();
533		dev_put(caifd->netdev);
534		free_percpu(caifd->pcpu_refcnt);
535		kfree(caifd);
536	}
537	cfcnfg_remove(cfg);
538
539	mutex_unlock(&caifdevs->lock);
540	rtnl_unlock();
541}
542
543static struct pernet_operations caif_net_ops = {
544	.init = caif_init_net,
545	.exit = caif_exit_net,
546	.id   = &caif_net_id,
547	.size = sizeof(struct caif_net),
548};
549
550/* Initialize Caif devices list */
551static int __init caif_device_init(void)
552{
553	int result;
554
555	result = register_pernet_subsys(&caif_net_ops);
556
557	if (result)
558		return result;
559
560	register_netdevice_notifier(&caif_device_notifier);
561	dev_add_pack(&caif_packet_type);
562
563	return result;
564}
565
566static void __exit caif_device_exit(void)
567{
568	unregister_netdevice_notifier(&caif_device_notifier);
569	dev_remove_pack(&caif_packet_type);
570	unregister_pernet_subsys(&caif_net_ops);
571}
572
573module_init(caif_device_init);
574module_exit(caif_device_exit);
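
For context on how a device ever reaches the NETDEV_REGISTER branch of caif_device_notify(): a CAIF link-layer driver registers an ordinary net_device of type ARPHRD_CAIF whose private area begins with struct caif_dev_common, because the notifier reads the settings through netdev_priv(dev) and later stores its dev_flowctrl callback there. The sketch below illustrates that contract under those assumptions; my_caif_priv, my_caif_setup and "mycaif%d" are invented names, and real drivers (for example caif_serial) add their own line-discipline or bus handling on top.

/* Hypothetical minimal CAIF link-layer driver setup, illustrative only. */
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <net/caif/caif_device.h>

struct my_caif_priv {
	struct caif_dev_common common;	/* must be first: caif_device_notify() casts netdev_priv() */
	/* driver-specific state would follow here */
};

static void my_caif_setup(struct net_device *dev)
{
	struct my_caif_priv *priv = netdev_priv(dev);

	dev->type = ARPHRD_CAIF;		/* lets caif_device_notify() enroll this device */
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->priv_flags |= IFF_NO_QUEUE;	/* no qdisc: the v4.6 transmit() then skips the xoff logic */

	priv->common.link_select = CAIF_LINK_LOW_LATENCY;
	priv->common.use_frag = true;		/* request the cfserl framing layer */
	priv->common.use_stx = true;
	priv->common.use_fcs = true;
	/* priv->common.flowctrl is filled in by caif_device_notify() on NETDEV_REGISTER */
}

/*
 * Registration would then be the usual:
 *
 *	dev = alloc_netdev(sizeof(struct my_caif_priv), "mycaif%d",
 *			   NET_NAME_UNKNOWN, my_caif_setup);
 *	err = register_netdev(dev);
 *
 * register_netdev() raises NETDEV_REGISTER, which makes caif_device_notify()
 * call caif_enroll_dev() and hook the device into the CAIF stack.
 */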