// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
 * and Sakari Ailus <sakari.ailus@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_DESCRIPTION("ST-Ericsson CAIF modem protocol support");
MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;
	struct list_head list;
	struct net_device *netdev;
	int __percpu *pcpu_refcnt;
	spinlock_t flow_lock;
	struct sk_buff *xoff_skb;
	void (*xoff_skb_dtor)(struct sk_buff *skb);
	bool xoff;
};

struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in the list */
	struct mutex lock;
};

struct caif_net {
	struct cfcnfg *cfg;
	struct caif_device_entry_list caifdevs;
};

static unsigned int caif_net_id;
static int q_high = 50; /* Percent */

struct cfcnfg *get_cfcnfg(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);
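
/*
 * get_cfcnfg() is the accessor other CAIF modules use to reach this
 * namespace's configuration object; all per-net state below is
 * resolved the same way, through net_generic() keyed by caif_net_id.
 */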

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return &caifn->caifdevs;
}

static void caifd_put(struct caif_device_entry *e)
{
	this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
	this_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
	int i, refcnt = 0;
	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
	return refcnt;
}
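
/*
 * The per-CPU reference count above trades a precise atomic counter
 * for cheap hold/put on the fast path: caifd_hold() and caifd_put()
 * are plain per-CPU increments and decrements taken under the RCU
 * read lock. The total is only summed by caifd_refcnt_read() on the
 * teardown paths (NETDEV_UNREGISTER and caif_exit_net()); a nonzero
 * sum there means the entry is still in use and teardown is retried.
 */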

/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry *caifd;

	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
	if (!caifd)
		return NULL;
	caifd->pcpu_refcnt = alloc_percpu(int);
	if (!caifd->pcpu_refcnt) {
		kfree(caifd);
		return NULL;
	}
	caifd->netdev = dev;
	dev_hold(dev);
	return caifd;
}

static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;

	list_for_each_entry_rcu(caifd, &caifdevs->list, list,
				lockdep_rtnl_is_held()) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}

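/*
 * caif_flow_cb() is installed as skb->destructor by transmit() below
 * when flow is turned off. Once the driver releases the hijacked skb,
 * this callback restores and runs the original destructor, then
 * signals FLOW_ON back up the CAIF stack.
 */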
static void caif_flow_cb(struct sk_buff *skb)
{
	struct caif_device_entry *caifd;
	void (*dtor)(struct sk_buff *skb) = NULL;
	bool send_xoff;

	WARN_ON(skb->dev == NULL);

	rcu_read_lock();
	caifd = caif_get(skb->dev);

	WARN_ON(caifd == NULL);
	if (!caifd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	spin_lock_bh(&caifd->flow_lock);
	send_xoff = caifd->xoff;
	caifd->xoff = false;
	dtor = caifd->xoff_skb_dtor;

	if (WARN_ON(caifd->xoff_skb != skb))
		skb = NULL;

	caifd->xoff_skb = NULL;
	caifd->xoff_skb_dtor = NULL;

	spin_unlock_bh(&caifd->flow_lock);

	if (dtor && skb)
		dtor(skb);

	if (send_xoff)
		caifd->layer.up->
			ctrlcmd(caifd->layer.up,
				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
				caifd->layer.id);
	caifd_put(caifd);
}

static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	int err, high = 0, qlen = 0;
	struct caif_device_entry *caifd =
	    container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb;
	struct netdev_queue *txq;

	rcu_read_lock_bh();

	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_CAIF);

	/* Check if we need to handle xoff */
	if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
		goto noxoff;

	if (unlikely(caifd->xoff))
		goto noxoff;

	if (likely(!netif_queue_stopped(caifd->netdev))) {
		struct Qdisc *sch;

		/* If we run with a TX queue, check if the queue is too long */
		txq = netdev_get_tx_queue(skb->dev, 0);
		sch = rcu_dereference_bh(txq->qdisc);
		if (likely(qdisc_is_empty(sch)))
			goto noxoff;

		/* The explicit qdisc length (q.qlen) can only be checked
		 * for !NOLOCK qdiscs; for lockless qdiscs always set
		 * flow off.
		 */
		high = (caifd->netdev->tx_queue_len * q_high) / 100;
		if (!(sch->flags & TCQ_F_NOLOCK) && likely(sch->q.qlen < high))
			goto noxoff;
	}

	/* Hold lock while accessing xoff */
	spin_lock_bh(&caifd->flow_lock);
	if (caifd->xoff) {
		spin_unlock_bh(&caifd->flow_lock);
		goto noxoff;
	}

	/*
	 * Handle flow off: we do this by temporarily hijacking this
	 * skb's destructor function and replacing it with our own
	 * flow-on callback. The callback will set flow-on and call
	 * the original destructor.
	 */

	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
			netif_queue_stopped(caifd->netdev),
			qlen, high);
	caifd->xoff = true;
	caifd->xoff_skb = skb;
	caifd->xoff_skb_dtor = skb->destructor;
	skb->destructor = caif_flow_cb;
	spin_unlock_bh(&caifd->flow_lock);

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				_CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				caifd->layer.id);
noxoff:
	rcu_read_unlock_bh();

	err = dev_queue_xmit(skb);
	if (err > 0)
		err = -EIO;

	return err;
}
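
/*
 * A worked example of the flow-off threshold above (illustrative
 * numbers, not taken from this file): with the default q_high = 50
 * and a device tx_queue_len of 1000, high = (1000 * 50) / 100 = 500,
 * so transmit() signals FLOW_OFF once a locked qdisc is holding 500
 * or more packets, well before the queue itself overflows.
 */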

/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	int err;

	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

	rcu_read_lock();
	caifd = caif_get(dev);

	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
			!netif_oper_up(caifd->netdev)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Hold reference to netdevice while using CAIF stack */
	caifd_hold(caifd);
	rcu_read_unlock();

	err = caifd->layer.up->receive(caifd->layer.up, pkt);

	/* For -EILSEQ the packet is not freed, so free it now */
	if (err == -EILSEQ)
		cfpkt_destroy(pkt);

	/* Release reference to stack upwards */
	caifd_put(caifd);

	if (err != 0)
		err = NET_RX_DROP;
	return err;
}

static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};
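
/*
 * caif_packet_type is registered with dev_add_pack() in
 * caif_device_init() below; since no device is pinned in the
 * packet_type, receive() sees every incoming ETH_P_CAIF frame no
 * matter which CAIF-capable net_device it arrived on.
 */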

static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd;

	rcu_read_lock();

	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
	caifd_put(caifd);
}

int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
		    struct cflayer *link_support, int head_room,
		    struct cflayer **layer,
		    int (**rcv_func)(struct sk_buff *, struct net_device *,
				     struct packet_type *,
				     struct net_device *))
{
	struct caif_device_entry *caifd;
	enum cfcnfg_phy_preference pref;
	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
	struct caif_device_entry_list *caifdevs;
	int res;

	caifdevs = caif_device_list(dev_net(dev));
	caifd = caif_device_alloc(dev);
	if (!caifd)
		return -ENOMEM;
	*layer = &caifd->layer;
	spin_lock_init(&caifd->flow_lock);

	switch (caifdev->link_select) {
	case CAIF_LINK_HIGH_BANDW:
		pref = CFPHYPREF_HIGH_BW;
		break;
	case CAIF_LINK_LOW_LATENCY:
		pref = CFPHYPREF_LOW_LAT;
		break;
	default:
		pref = CFPHYPREF_HIGH_BW;
		break;
	}
	mutex_lock(&caifdevs->lock);
	list_add_rcu(&caifd->list, &caifdevs->list);

	strscpy(caifd->layer.name, dev->name,
		sizeof(caifd->layer.name));
	caifd->layer.transmit = transmit;
	res = cfcnfg_add_phy_layer(cfg,
				dev,
				&caifd->layer,
				pref,
				link_support,
				caifdev->use_fcs,
				head_room);
	mutex_unlock(&caifdevs->lock);
	if (rcv_func)
		*rcv_func = receive;
	return res;
}
EXPORT_SYMBOL(caif_enroll_dev);
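
/*
 * A minimal usage sketch for caif_enroll_dev() (hypothetical driver,
 * not taken from this file). A CAIF link layer that needs no serial
 * framing might enroll its net_device roughly like this:
 *
 *	struct caif_dev_common common = {
 *		.link_select = CAIF_LINK_LOW_LATENCY,
 *		.use_frag = false,
 *		.use_fcs = true,
 *	};
 *	struct cflayer *layer;
 *	int err;
 *
 *	err = caif_enroll_dev(dev, &common, NULL, 0, &layer, NULL);
 *	if (err)
 *		return err;
 *
 * Passing NULL for rcv_func keeps delivery through caif_packet_type;
 * a driver that consumes frames itself can request the receive()
 * callback instead.
 */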

/* notify Caif of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	struct cfcnfg *cfg;
	struct cflayer *layer, *link_support;
	int head_room = 0;
	struct caif_device_entry_list *caifdevs;
	int res;

	cfg = get_cfcnfg(dev_net(dev));
	caifdevs = caif_device_list(dev_net(dev));

	caifd = caif_get(dev);
	if (caifd == NULL && dev->type != ARPHRD_CAIF)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		if (caifd != NULL)
			break;

		caifdev = netdev_priv(dev);

		link_support = NULL;
		if (caifdev->use_frag) {
			head_room = 1;
			link_support = cfserl_create(dev->ifindex,
							caifdev->use_stx);
			if (!link_support) {
				pr_warn("Out of memory\n");
				break;
			}
		}
		res = caif_enroll_dev(dev, caifdev, link_support, head_room,
				&layer, NULL);
		if (res)
			cfserl_release(link_support);
		caifdev->flowctrl = dev_flowctrl;
		break;

	case NETDEV_UP:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (caifd == NULL) {
			rcu_read_unlock();
			break;
		}

		caifd->xoff = false;
		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
		rcu_read_unlock();

		break;

	case NETDEV_DOWN:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
			rcu_read_unlock();
			return -EINVAL;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
		caifd_hold(caifd);
		rcu_read_unlock();

		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);

		spin_lock_bh(&caifd->flow_lock);

		/*
		 * Replace our xoff-destructor with the original destructor.
		 * We trust that skb->destructor is *always* called before
		 * the skb reference becomes invalid. The hijacked SKB
		 * destructor takes the flow_lock, so manipulating the
		 * skb->destructor here should be safe.
		 */
		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

		caifd->xoff = false;
		caifd->xoff_skb_dtor = NULL;
		caifd->xoff_skb = NULL;

		spin_unlock_bh(&caifd->flow_lock);
		caifd_put(caifd);
		break;

	case NETDEV_UNREGISTER:
		mutex_lock(&caifdevs->lock);

		caifd = caif_get(dev);
		if (caifd == NULL) {
			mutex_unlock(&caifdevs->lock);
			break;
		}
		list_del_rcu(&caifd->list);

		/*
		 * NETDEV_UNREGISTER is called repeatedly until all reference
		 * counts for the net-device are released. If references to
		 * caifd are still held, simply ignore NETDEV_UNREGISTER and
		 * wait for the next call to NETDEV_UNREGISTER.
		 *
		 * If any packets are in flight down the CAIF Stack,
		 * cfcnfg_del_phy_layer will return nonzero.
		 * If no packets are in flight, the CAIF Stack associated
		 * with the net-device being unregistered is freed.
		 */

		if (caifd_refcnt_read(caifd) != 0 ||
		    cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

			pr_info("Wait for device inuse\n");
			/* Re-enroll the device if the CAIF stack is still in use */
			list_add_rcu(&caifd->list, &caifdevs->list);
			mutex_unlock(&caifdevs->lock);
			break;
		}

		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);

		mutex_unlock(&caifdevs->lock);
		break;
	}
	return 0;
}
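
/*
 * Summary of the notifier above: NETDEV_REGISTER enrolls the device
 * into the CAIF stack (optionally wrapped in a cfserl fragmentation
 * layer), NETDEV_UP/NETDEV_DOWN toggle the physical-layer state and
 * propagate flow indications, and NETDEV_UNREGISTER unlinks and frees
 * the entry once no references or in-flight packets remain.
 */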

static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};

/* Per-namespace Caif devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);
	INIT_LIST_HEAD(&caifn->caifdevs.list);
	mutex_init(&caifn->caifdevs.lock);

	caifn->cfg = cfcnfg_create();
	if (!caifn->cfg)
		return -ENOMEM;

	return 0;
}

static void caif_exit_net(struct net *net)
{
	struct caif_device_entry *caifd, *tmp;
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(net);
	struct cfcnfg *cfg = get_cfcnfg(net);

	rtnl_lock();
	mutex_lock(&caifdevs->lock);

	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
		int i = 0;
		list_del_rcu(&caifd->list);
		cfcnfg_set_phy_state(cfg, &caifd->layer, false);

		while (i < 10 &&
			(caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

			pr_info("Wait for device inuse\n");
			msleep(250);
			i++;
		}
		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);
	}
	cfcnfg_remove(cfg);

	mutex_unlock(&caifdevs->lock);
	rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id = &caif_net_id,
	.size = sizeof(struct caif_net),
};

/* Initialize Caif devices list */
static int __init caif_device_init(void)
{
	int result;

	result = register_pernet_subsys(&caif_net_ops);

	if (result)
		return result;

	register_netdevice_notifier(&caif_device_notifier);
	dev_add_pack(&caif_packet_type);

	return result;
}

static void __exit caif_device_exit(void)
{
	unregister_netdevice_notifier(&caif_device_notifier);
	dev_remove_pack(&caif_packet_type);
	unregister_pernet_subsys(&caif_net_ops);
}

module_init(caif_device_init);
module_exit(caif_device_exit);