// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
 * and Sakari Ailus <sakari.ailus@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;
	struct list_head list;
	struct net_device *netdev;
	int __percpu *pcpu_refcnt;
	spinlock_t flow_lock;		/* Protects the xoff members below */
	struct sk_buff *xoff_skb;	/* skb whose destructor was hijacked */
	void (*xoff_skb_dtor)(struct sk_buff *skb);
	bool xoff;			/* True while flow is off towards the stack */
};

struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in list */
	struct mutex lock;
};

struct caif_net {
	struct cfcnfg *cfg;
	struct caif_device_entry_list caifdevs;
};

static unsigned int caif_net_id;
static int q_high = 50; /* Flow-off threshold, in percent of tx_queue_len */

struct cfcnfg *get_cfcnfg(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return &caifn->caifdevs;
}

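/*
 * Entry reference counting uses a per-CPU counter instead of an atomic_t:
 * caifd_hold()/caifd_put() only touch the local CPU's slot, keeping the
 * hot RX/TX paths cheap, while caifd_refcnt_read() sums every CPU's slot
 * when teardown needs the full count. Individual slots may go transiently
 * negative; only the sum is meaningful.
 */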
static void caifd_put(struct caif_device_entry *e)
{
	this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
	this_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
	int i, refcnt = 0;
	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
	return refcnt;
}

/* Allocate a new CAIF device entry; holds a netdev reference until teardown. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry *caifd;

	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
	if (!caifd)
		return NULL;
	caifd->pcpu_refcnt = alloc_percpu(int);
	if (!caifd->pcpu_refcnt) {
		kfree(caifd);
		return NULL;
	}
	caifd->netdev = dev;
	dev_hold(dev);
	return caifd;
}

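/*
 * Look up the entry for a net_device. Callers must hold rcu_read_lock()
 * or the RTNL, matching the lockdep condition on the list walk below.
 */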
static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
		caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;

	list_for_each_entry_rcu(caifd, &caifdevs->list, list,
				lockdep_rtnl_is_held()) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}

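/*
 * Flow-on callback, installed by transmit() as the destructor of the skb
 * that tripped the high-water mark. When the driver finally releases that
 * skb, the queue has drained, so we restore and run the original
 * destructor and, if flow was off, signal FLOW_ON back up the CAIF stack.
 */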
static void caif_flow_cb(struct sk_buff *skb)
{
	struct caif_device_entry *caifd;
	void (*dtor)(struct sk_buff *skb) = NULL;
	bool send_xoff;

	WARN_ON(skb->dev == NULL);

	rcu_read_lock();
	caifd = caif_get(skb->dev);

	WARN_ON(caifd == NULL);
	if (!caifd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	spin_lock_bh(&caifd->flow_lock);
	send_xoff = caifd->xoff;
	caifd->xoff = false;
	dtor = caifd->xoff_skb_dtor;

	if (WARN_ON(caifd->xoff_skb != skb))
		skb = NULL;

	caifd->xoff_skb = NULL;
	caifd->xoff_skb_dtor = NULL;

	spin_unlock_bh(&caifd->flow_lock);

	if (dtor && skb)
		dtor(skb);

	if (send_xoff)
		caifd->layer.up->
			ctrlcmd(caifd->layer.up,
				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
				caifd->layer.id);
	caifd_put(caifd);
}

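/*
 * Send a packet out through the net device, applying backpressure first:
 * if the device queues and the qdisc backlog reaches q_high percent of
 * tx_queue_len (e.g. tx_queue_len = 1000 and q_high = 50 give a high-water
 * mark of 500 packets), signal FLOW_OFF upwards and hijack this skb's
 * destructor so caif_flow_cb() can signal FLOW_ON once it is consumed.
 */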
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	int err, high = 0;
	struct caif_device_entry *caifd =
		container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb;
	struct netdev_queue *txq;

	rcu_read_lock_bh();

	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_CAIF);

	/* Check if we need to handle xoff */
	if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
		goto noxoff;

	if (unlikely(caifd->xoff))
		goto noxoff;

	if (likely(!netif_queue_stopped(caifd->netdev))) {
		struct Qdisc *sch;

		/* If we run with a TX queue, check if the queue is too long */
		txq = netdev_get_tx_queue(skb->dev, 0);
		sch = rcu_dereference_bh(txq->qdisc);
		if (likely(qdisc_is_empty(sch)))
			goto noxoff;

		/* The explicit qdisc length can only be read for !NOLOCK
		 * qdiscs; for lockless qdiscs always set flow off.
		 */
		high = (caifd->netdev->tx_queue_len * q_high) / 100;
		if (!(sch->flags & TCQ_F_NOLOCK) && likely(sch->q.qlen < high))
			goto noxoff;
	}

	/* Hold lock while accessing xoff */
	spin_lock_bh(&caifd->flow_lock);
	if (caifd->xoff) {
		spin_unlock_bh(&caifd->flow_lock);
		goto noxoff;
	}

	/*
	 * Handle flow off: we do this by temporarily hijacking this
	 * skb's destructor function and replacing it with our own
	 * flow-on callback. The callback will set flow-on and call
	 * the original destructor.
	 */

	pr_debug("queue has stopped(%d) or is full (high watermark %d)\n",
		 netif_queue_stopped(caifd->netdev), high);
	caifd->xoff = true;
	caifd->xoff_skb = skb;
	caifd->xoff_skb_dtor = skb->destructor;
	skb->destructor = caif_flow_cb;
	spin_unlock_bh(&caifd->flow_lock);

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
noxoff:
	rcu_read_unlock_bh();

	err = dev_queue_xmit(skb);
	if (err > 0)
		err = -EIO;

	return err;
}

/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	int err;

	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

	rcu_read_lock();
	caifd = caif_get(dev);

	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
	    !netif_oper_up(caifd->netdev)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Hold reference to netdevice while using CAIF stack */
	caifd_hold(caifd);
	rcu_read_unlock();

	err = caifd->layer.up->receive(caifd->layer.up, pkt);

	/* For -EILSEQ the packet is not freed upstream, so free it now */
	if (err == -EILSEQ)
		cfpkt_destroy(pkt);

	/* Release reference to stack upwards */
	caifd_put(caifd);

	if (err != 0)
		err = NET_RX_DROP;
	return err;
}

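/* Registered via dev_add_pack() so receive() sees all ETH_P_CAIF frames */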
static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};

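/*
 * Flow-control hook handed to link drivers via caifdev->flowctrl; it
 * translates the driver's on/off signal into a FLOW_ON/FLOW_OFF
 * indication towards the CAIF stack.
 */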
static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd;

	rcu_read_lock();

	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
	caifd_put(caifd);
}

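/*
 * Register a net device with the CAIF stack: allocate its tracking entry,
 * pick a PHY preference from caifdev->link_select, add it to the per-net
 * list and plumb it into the configuration layer. A minimal caller sketch
 * (names are illustrative, not from this file) from a driver's setup path:
 *
 *	struct cflayer *phy_layer;
 *	int err = caif_enroll_dev(dev, caifdev, NULL, 0, &phy_layer, NULL);
 *	if (err)
 *		goto fail;
 *
 * Pass a cfserl layer as link_support (with head_room = 1) when the link
 * needs fragmentation, as caif_device_notify() does below.
 */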
int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
		    struct cflayer *link_support, int head_room,
		    struct cflayer **layer,
		    int (**rcv_func)(struct sk_buff *, struct net_device *,
				     struct packet_type *,
				     struct net_device *))
{
	struct caif_device_entry *caifd;
	enum cfcnfg_phy_preference pref;
	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
	struct caif_device_entry_list *caifdevs;
	int res;

	caifdevs = caif_device_list(dev_net(dev));
	caifd = caif_device_alloc(dev);
	if (!caifd)
		return -ENOMEM;
	*layer = &caifd->layer;
	spin_lock_init(&caifd->flow_lock);

	switch (caifdev->link_select) {
	case CAIF_LINK_HIGH_BANDW:
		pref = CFPHYPREF_HIGH_BW;
		break;
	case CAIF_LINK_LOW_LATENCY:
		pref = CFPHYPREF_LOW_LAT;
		break;
	default:
		pref = CFPHYPREF_HIGH_BW;
		break;
	}
	mutex_lock(&caifdevs->lock);
	list_add_rcu(&caifd->list, &caifdevs->list);

	strlcpy(caifd->layer.name, dev->name,
		sizeof(caifd->layer.name));
	caifd->layer.transmit = transmit;
	res = cfcnfg_add_phy_layer(cfg,
				   dev,
				   &caifd->layer,
				   pref,
				   link_support,
				   caifdev->use_fcs,
				   head_room);
	mutex_unlock(&caifdevs->lock);
	if (rcv_func)
		*rcv_func = receive;
	return res;
}
EXPORT_SYMBOL(caif_enroll_dev);

/* Notify CAIF of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	struct cfcnfg *cfg;
	struct cflayer *layer, *link_support;
	int head_room = 0;
	struct caif_device_entry_list *caifdevs;
	int res;

	cfg = get_cfcnfg(dev_net(dev));
	caifdevs = caif_device_list(dev_net(dev));

	caifd = caif_get(dev);
	if (caifd == NULL && dev->type != ARPHRD_CAIF)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		if (caifd != NULL)
			break;

		caifdev = netdev_priv(dev);

		link_support = NULL;
		if (caifdev->use_frag) {
			head_room = 1;
			link_support = cfserl_create(dev->ifindex,
						     caifdev->use_stx);
			if (!link_support) {
				pr_warn("Out of memory\n");
				break;
			}
		}
		res = caif_enroll_dev(dev, caifdev, link_support, head_room,
				      &layer, NULL);
		if (res)
			cfserl_release(link_support);
		caifdev->flowctrl = dev_flowctrl;
		break;

	case NETDEV_UP:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (caifd == NULL) {
			rcu_read_unlock();
			break;
		}

		caifd->xoff = false;
		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
		rcu_read_unlock();

		break;

	case NETDEV_DOWN:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
			rcu_read_unlock();
			return -EINVAL;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
		caifd_hold(caifd);
		rcu_read_unlock();

		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);

		spin_lock_bh(&caifd->flow_lock);

		/*
		 * Replace our xoff-destructor with the original destructor.
		 * We trust that skb->destructor *always* is called before
		 * the skb reference is invalid. The hijacked SKB destructor
		 * takes the flow_lock, so manipulating skb->destructor here
		 * should be safe.
		 */
		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

		caifd->xoff = false;
		caifd->xoff_skb_dtor = NULL;
		caifd->xoff_skb = NULL;

		spin_unlock_bh(&caifd->flow_lock);
		caifd_put(caifd);
		break;

	case NETDEV_UNREGISTER:
		mutex_lock(&caifdevs->lock);

		caifd = caif_get(dev);
		if (caifd == NULL) {
			mutex_unlock(&caifdevs->lock);
			break;
		}
		list_del_rcu(&caifd->list);

		/*
		 * NETDEV_UNREGISTER is called repeatedly until all reference
		 * counts for the net-device are released. If references to
		 * caifd are taken, simply ignore NETDEV_UNREGISTER and wait
		 * for the next call to NETDEV_UNREGISTER.
		 *
		 * If any packets are in flight down the CAIF Stack,
		 * cfcnfg_del_phy_layer will return nonzero.
		 * If no packets are in flight, the CAIF Stack associated
		 * with the net-device un-registering is freed.
		 */

		if (caifd_refcnt_read(caifd) != 0 ||
		    cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

			pr_info("Wait for device inuse\n");
			/* Re-enroll the device if the CAIF stack is still in use */
			list_add_rcu(&caifd->list, &caifdevs->list);
			mutex_unlock(&caifdevs->lock);
			break;
		}

		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);

		mutex_unlock(&caifdevs->lock);
		break;
	}
	return 0;
}

static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};

/* Per-namespace Caif devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);
	INIT_LIST_HEAD(&caifn->caifdevs.list);
	mutex_init(&caifn->caifdevs.lock);

	caifn->cfg = cfcnfg_create();
	if (!caifn->cfg)
		return -ENOMEM;

	return 0;
}

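/*
 * Namespace teardown. Unlike NETDEV_UNREGISTER, this cannot retry on a
 * later event, so it polls instead: in-flight users get up to ten 250 ms
 * grace periods (~2.5 s per device) to drain before the entry is freed.
 */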
static void caif_exit_net(struct net *net)
{
	struct caif_device_entry *caifd, *tmp;
	struct caif_device_entry_list *caifdevs =
		caif_device_list(net);
	struct cfcnfg *cfg = get_cfcnfg(net);

	rtnl_lock();
	mutex_lock(&caifdevs->lock);

	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
		int i = 0;
		list_del_rcu(&caifd->list);
		cfcnfg_set_phy_state(cfg, &caifd->layer, false);

		while (i < 10 &&
		       (caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

			pr_info("Wait for device inuse\n");
			msleep(250);
			i++;
		}
		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);
	}
	cfcnfg_remove(cfg);

	mutex_unlock(&caifdevs->lock);
	rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id = &caif_net_id,
	.size = sizeof(struct caif_net),
};

/* Register the CAIF subsystem: pernet state, netdev notifier and packet type */
static int __init caif_device_init(void)
{
	int result;

	result = register_pernet_subsys(&caif_net_ops);

	if (result)
		return result;

	register_netdevice_notifier(&caif_device_notifier);
	dev_add_pack(&caif_packet_type);

	return result;
}

static void __exit caif_device_exit(void)
{
	unregister_netdevice_notifier(&caif_device_notifier);
	dev_remove_pack(&caif_packet_type);
	unregister_pernet_subsys(&caif_net_ops);
}

module_init(caif_device_init);
module_exit(caif_device_exit);