/*
   Copyright (c) 2013-2014 Intel Corp.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 and
   only version 2 as published by the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.
*/

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/debugfs.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include <net/6lowpan.h> /* for the compression support */

#define VERSION "0.1"

static struct dentry *lowpan_enable_debugfs;
static struct dentry *lowpan_control_debugfs;

#define IFACE_NAME_TEMPLATE "bt%d"

struct skb_cb {
	struct in6_addr addr;
	struct in6_addr gw;
	struct l2cap_chan *chan;
	int status;
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
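
/* struct skb_cb is stored in the skb control buffer, so it must fit
 * within the 48 bytes of skb->cb. It carries the destination address,
 * the next-hop gateway and the chosen L2CAP channel from header_create()
 * and setup_header() through to bt_xmit() and send_pkt().
 */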

/* The devices list contains the devices for which we are acting
 * as a proxy. The BT 6LoWPAN device is a virtual device that
 * connects to the Bluetooth LE device. The real connection to the
 * BT device is done via the l2cap layer. There exists one virtual
 * device per BT 6LoWPAN network (=hciX device).
 * The list contains struct lowpan_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_SPINLOCK(devices_lock);

static bool enable_6lowpan;

/* We listen for incoming connections via this channel
 */
static struct l2cap_chan *listen_chan;

struct lowpan_peer {
	struct list_head list;
	struct rcu_head rcu;
	struct l2cap_chan *chan;

	/* peer addresses in various formats */
	unsigned char eui64_addr[EUI64_ADDR_LEN];
	struct in6_addr peer_addr;
};

struct lowpan_dev {
	struct list_head list;

	struct hci_dev *hdev;
	struct net_device *netdev;
	struct list_head peers;
	atomic_t peer_count; /* number of items in peers list */

	struct work_struct delete_netdev;
	struct delayed_work notify_peers;
};

static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
{
	return (struct lowpan_dev *)lowpan_priv(netdev)->priv;
}

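/* The peers list is traversed under RCU; writers add and remove entries
 * while holding devices_lock, hence the list_add_rcu()/list_del_rcu()
 * calls and the kfree_rcu() that defers freeing until readers are done.
 */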
static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_add_rcu(&peer->list, &dev->peers);
	atomic_inc(&dev->peer_count);
}

static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_del_rcu(&peer->list);
	kfree_rcu(peer, rcu);

	module_put(THIS_MODULE);

	if (atomic_dec_and_test(&dev->peer_count)) {
		BT_DBG("last peer");
		return true;
	}

	return false;
}

static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
						 bdaddr_t *ba, __u8 type)
{
	struct lowpan_peer *peer;

	BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
	       ba, type);

	rcu_read_lock();

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		BT_DBG("dst addr %pMR dst type %d",
		       &peer->chan->dst, peer->chan->dst_type);

		if (bacmp(&peer->chan->dst, ba))
			continue;

		if (type == peer->chan->dst_type) {
			rcu_read_unlock();
			return peer;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static inline struct lowpan_peer *__peer_lookup_chan(struct lowpan_dev *dev,
						     struct l2cap_chan *chan)
{
	struct lowpan_peer *peer;

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		if (peer->chan == chan)
			return peer;
	}

	return NULL;
}

static inline struct lowpan_peer *__peer_lookup_conn(struct lowpan_dev *dev,
						     struct l2cap_conn *conn)
{
	struct lowpan_peer *peer;

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		if (peer->chan->conn == conn)
			return peer;
	}

	return NULL;
}

static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
						  struct in6_addr *daddr,
						  struct sk_buff *skb)
{
	struct lowpan_peer *peer;
	struct in6_addr *nexthop;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	int count = atomic_read(&dev->peer_count);

	BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt);

	/* If we have multiple 6lowpan peers, then check where we should
	 * send the packet. If only one peer exists, then we can send the
	 * packet right away.
	 */
	if (count == 1) {
		rcu_read_lock();
		peer = list_first_or_null_rcu(&dev->peers, struct lowpan_peer,
					      list);
		rcu_read_unlock();
		return peer;
	}

	if (!rt) {
		nexthop = &lowpan_cb(skb)->gw;

		if (ipv6_addr_any(nexthop))
			return NULL;
	} else {
		nexthop = rt6_nexthop(rt, daddr);

		/* We need to remember the address because it is needed
		 * by bt_xmit() when sending the packet. In bt_xmit(), the
		 * destination routing info is not set.
		 */
		memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr));
	}

	BT_DBG("gw %pI6c", nexthop);

	rcu_read_lock();

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		BT_DBG("dst addr %pMR dst type %d ip %pI6c",
		       &peer->chan->dst, peer->chan->dst_type,
		       &peer->peer_addr);

		if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) {
			rcu_read_unlock();
			return peer;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry;
	struct lowpan_peer *peer = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		peer = __peer_lookup_conn(entry, conn);
		if (peer)
			break;
	}

	rcu_read_unlock();

	return peer;
}

static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry;
	struct lowpan_dev *dev = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		if (conn->hcon->hdev == entry->hdev) {
			dev = entry;
			break;
		}
	}

	rcu_read_unlock();

	return dev;
}

static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *skb_cp;

	skb_cp = skb_copy(skb, GFP_ATOMIC);
	if (!skb_cp)
		return NET_RX_DROP;

	return netif_rx_ni(skb_cp);
}

static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
			   struct l2cap_chan *chan)
{
	const u8 *saddr, *daddr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;

	dev = lowpan_dev(netdev);

	rcu_read_lock();
	peer = __peer_lookup_chan(dev, chan);
	rcu_read_unlock();
	if (!peer)
		return -EINVAL;

	saddr = peer->eui64_addr;
	daddr = dev->netdev->dev_addr;

	return lowpan_header_decompress(skb, netdev, daddr, saddr);
}

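/* The first byte of every frame received from the peer is the 6LoWPAN
 * dispatch value: 0x41 means an uncompressed IPv6 packet follows
 * (RFC 4944), while the 011xxxxx bit pattern marks an IPHC-compressed
 * header (RFC 6282). Anything else is dropped below.
 */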
static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
		    struct l2cap_chan *chan)
{
	struct sk_buff *local_skb;
	int ret;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_6LOWPAN || !skb->len)
		goto drop;

	skb_reset_network_header(skb);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto drop;

	/* check that it's our buffer */
	if (lowpan_is_ipv6(*skb_network_header(skb))) {
		/* Pull off the 1-byte 6lowpan dispatch header. */
		skb_pull(skb, 1);

		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;
		local_skb->dev = dev;

		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		consume_skb(local_skb);
		consume_skb(skb);
	} else if (lowpan_is_iphc(*skb_network_header(skb))) {
		local_skb = skb_clone(skb, GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->dev = dev;

		ret = iphc_decompress(local_skb, dev, chan);
		if (ret < 0) {
			kfree_skb(local_skb);
			goto drop;
		}

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		if (give_skb_to_upper(local_skb, dev)
		    != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		consume_skb(local_skb);
		consume_skb(skb);
	} else {
		goto drop;
	}

	return NET_RX_SUCCESS;

drop:
	dev->stats.rx_dropped++;
	return NET_RX_DROP;
}

/* Packet from BT LE device */
static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	int err;

	peer = lookup_peer(chan->conn);
	if (!peer)
		return -ENOENT;

	dev = lookup_dev(chan->conn);
	if (!dev || !dev->netdev)
		return -ENOENT;

	err = recv_pkt(skb, dev->netdev, chan);
	if (err) {
		BT_DBG("recv pkt %d", err);
		err = -EAGAIN;
	}

	return err;
}

static u8 get_addr_type_from_eui64(u8 byte)
{
	/* Check the universal (0) / local (1) bit */
	return ((byte & 0x02) ? BDADDR_LE_RANDOM : BDADDR_LE_PUBLIC);
}

static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
{
	u8 *eui64 = ip6_daddr->s6_addr + 8;

	addr->b[0] = eui64[7];
	addr->b[1] = eui64[6];
	addr->b[2] = eui64[5];
	addr->b[3] = eui64[2];
	addr->b[4] = eui64[1];
	addr->b[5] = eui64[0];
}

static void convert_dest_bdaddr(struct in6_addr *ip6_daddr,
				bdaddr_t *addr, u8 *addr_type)
{
	copy_to_bdaddr(ip6_daddr, addr);

	/* Toggle the U/L bit that we got from the IPv6 address so that
	 * we recover the proper BD address and its type.
	 */
	addr->b[5] ^= 0x02;

	*addr_type = get_addr_type_from_eui64(addr->b[5]);
}

static int setup_header(struct sk_buff *skb, struct net_device *netdev,
			bdaddr_t *peer_addr, u8 *peer_addr_type)
{
	struct in6_addr ipv6_daddr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr, *any = BDADDR_ANY;
	u8 *daddr = any->b;
	int err, status = 0;

	dev = lowpan_dev(netdev);

	memcpy(&ipv6_daddr, &lowpan_cb(skb)->addr, sizeof(ipv6_daddr));

	if (ipv6_addr_is_multicast(&ipv6_daddr)) {
		lowpan_cb(skb)->chan = NULL;
	} else {
		u8 addr_type;

		/* Get the destination BT device from the skb.
		 * If there is no such peer then discard the packet.
		 */
		convert_dest_bdaddr(&ipv6_daddr, &addr, &addr_type);

		BT_DBG("dest addr %pMR type %d IP %pI6c", &addr,
		       addr_type, &ipv6_daddr);

		peer = peer_lookup_ba(dev, &addr, addr_type);
		if (!peer) {
			/* The packet might be sent to the 6lowpan interface
			 * because of routing (either via the default route
			 * or a user-set route), so get the peer according to
			 * the destination address.
			 */
			peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
			if (!peer) {
				BT_DBG("no such peer %pMR found", &addr);
				return -ENOENT;
			}
		}

		daddr = peer->eui64_addr;
		*peer_addr = addr;
		*peer_addr_type = addr_type;
		lowpan_cb(skb)->chan = peer->chan;

		status = 1;
	}

	lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr);

	err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
	if (err < 0)
		return err;

	return status;
}

static int header_create(struct sk_buff *skb, struct net_device *netdev,
			 unsigned short type, const void *_daddr,
			 const void *_saddr, unsigned int len)
{
	struct ipv6hdr *hdr;

	if (type != ETH_P_IPV6)
		return -EINVAL;

	hdr = ipv6_hdr(skb);

	memcpy(&lowpan_cb(skb)->addr, &hdr->daddr, sizeof(struct in6_addr));

	return 0;
}

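/* l2cap_chan_send() returns the number of bytes queued on success. A
 * return of 0 means the channel ran out of LE flow-control credits and
 * was suspended; in that case chan_suspend_cb() has stored -EAGAIN in
 * lowpan_cb(skb)->status, which send_pkt() below reports to the caller.
 */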
/* Packet to BT LE device */
static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
		    struct net_device *netdev)
{
	struct msghdr msg;
	struct kvec iv;
	int err;

	/* Remember the skb so that we can send EAGAIN to the caller if
	 * we run out of credits.
	 */
	chan->data = skb;

	iv.iov_base = skb->data;
	iv.iov_len = skb->len;

	memset(&msg, 0, sizeof(msg));
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iv, 1, skb->len);

	err = l2cap_chan_send(chan, &msg, skb->len);
	if (err > 0) {
		netdev->stats.tx_bytes += err;
		netdev->stats.tx_packets++;
		return 0;
	}

	if (!err)
		err = lowpan_cb(skb)->status;

	if (err < 0) {
		if (err == -EAGAIN)
			netdev->stats.tx_dropped++;
		else
			netdev->stats.tx_errors++;
	}

	return err;
}

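/* BT LE links are point-to-point, so multicast is emulated below by
 * unicasting a clone of the skb to every connected peer on the
 * interface.
 */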
static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{
	struct sk_buff *local_skb;
	struct lowpan_dev *entry;
	int err = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		struct lowpan_peer *pentry;
		struct lowpan_dev *dev;

		if (entry->netdev != netdev)
			continue;

		dev = lowpan_dev(entry->netdev);

		list_for_each_entry_rcu(pentry, &dev->peers, list) {
			int ret;

			local_skb = skb_clone(skb, GFP_ATOMIC);

			BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
			       netdev->name,
			       &pentry->chan->dst, pentry->chan->dst_type,
			       &pentry->peer_addr, pentry->chan);
			ret = send_pkt(pentry->chan, local_skb, netdev);
			if (ret < 0)
				err = ret;

			kfree_skb(local_skb);
		}
	}

	rcu_read_unlock();

	return err;
}

static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int err = 0;
	bdaddr_t addr;
	u8 addr_type;

	/* We must take a copy of the skb before we modify/replace the
	 * IPv6 header, as the header could be used elsewhere.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return NET_XMIT_DROP;

	/* Return values from setup_header()
	 * <0 - error, packet is dropped
	 *  0 - this is a multicast packet
	 *  1 - this is a unicast packet
	 */
	err = setup_header(skb, netdev, &addr, &addr_type);
	if (err < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	if (err) {
		if (lowpan_cb(skb)->chan) {
			BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
			       netdev->name, &addr, addr_type,
			       &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan);
			err = send_pkt(lowpan_cb(skb)->chan, skb, netdev);
		} else {
			err = -ENOENT;
		}
	} else {
		/* We need to send the packet to every device behind this
		 * interface.
		 */
		err = send_mcast_pkt(skb, netdev);
	}

	dev_kfree_skb(skb);

	if (err)
		BT_DBG("ERROR: xmit failed (%d)", err);

	return err < 0 ? NET_XMIT_DROP : err;
}

static struct lock_class_key bt_tx_busylock;
static struct lock_class_key bt_netdev_xmit_lock_key;

static void bt_set_lockdep_class_one(struct net_device *dev,
				     struct netdev_queue *txq,
				     void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &bt_netdev_xmit_lock_key);
}

static int bt_dev_init(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, bt_set_lockdep_class_one, NULL);
	dev->qdisc_tx_busylock = &bt_tx_busylock;

	return 0;
}

static const struct net_device_ops netdev_ops = {
	.ndo_init		= bt_dev_init,
	.ndo_start_xmit		= bt_xmit,
};

static struct header_ops header_ops = {
	.create	= header_create,
};

static void netdev_setup(struct net_device *dev)
{
	dev->hard_header_len	= 0;
	dev->needed_tailroom	= 0;
	dev->flags		= IFF_RUNNING | IFF_POINTOPOINT |
				  IFF_MULTICAST;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &netdev_ops;
	dev->header_ops		= &header_ops;
	dev->destructor		= free_netdev;
}

static struct device_type bt_type = {
	.name	= "bluetooth",
};

static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
{
	/* addr is the BT address in little-endian format */
	eui[0] = addr[5];
	eui[1] = addr[4];
	eui[2] = addr[3];
	eui[3] = 0xFF;
	eui[4] = 0xFE;
	eui[5] = addr[2];
	eui[6] = addr[1];
	eui[7] = addr[0];

	/* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */
	if (addr_type == BDADDR_LE_PUBLIC)
		eui[0] &= ~0x02;
	else
		eui[0] |= 0x02;

	BT_DBG("type %d addr %*phC", addr_type, 8, eui);
}

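/* Illustrative example for set_addr() above (hypothetical address): the
 * public BT address 00:1A:7D:DA:71:13 maps to the EUI-64
 * 00:1A:7D:FF:FE:DA:71:13, with 0xFFFE inserted in the middle and the
 * U/L bit cleared; a random address would have the U/L bit set instead.
 */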
static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
			 u8 addr_type)
{
	netdev->addr_assign_type = NET_ADDR_PERM;
	set_addr(netdev->dev_addr, addr->b, addr_type);
}

static void ifup(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_open(netdev);
	if (err < 0)
		BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
	rtnl_unlock();
}

static void ifdown(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_close(netdev);
	if (err < 0)
		BT_INFO("iface %s cannot be closed (%d)", netdev->name, err);
	rtnl_unlock();
}

static void do_notify_peers(struct work_struct *work)
{
	struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
					      notify_peers.work);

	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}

static bool is_bt_6lowpan(struct hci_conn *hcon)
{
	if (hcon->type != LE_LINK)
		return false;

	if (!enable_6lowpan)
		return false;

	return true;
}

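/* The channel uses LE credit-based flow control and an incoming MTU of
 * 1280 bytes, the IPv6 minimum link MTU, as required by the Bluetooth
 * Internet Protocol Support Profile (IPSP).
 */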
static struct l2cap_chan *chan_create(void)
{
	struct l2cap_chan *chan;

	chan = l2cap_chan_create();
	if (!chan)
		return NULL;

	l2cap_chan_set_defaults(chan);

	chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
	chan->mode = L2CAP_MODE_LE_FLOWCTL;
	chan->imtu = 1280;

	return chan;
}

static void set_ip_addr_bits(u8 addr_type, u8 *addr)
{
	if (addr_type == BDADDR_LE_PUBLIC)
		*addr |= 0x02;
	else
		*addr &= ~0x02;
}

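/* Continuing the hypothetical example above for add_peer_chan() below:
 * for the public address 00:1A:7D:DA:71:13, set_addr() yields the IID
 * 00:1A:7D:FF:FE:DA:71:13, which set_ip_addr_bits() then flips to
 * 02:1A:7D:FF:FE:DA:71:13, so the peer's link-local address becomes
 * fe80::21a:7dff:feda:7113. eui64_addr keeps the pre-flip value.
 */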
static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
					struct lowpan_dev *dev)
{
	struct lowpan_peer *peer;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		return NULL;

	peer->chan = chan;
	memset(&peer->peer_addr, 0, sizeof(struct in6_addr));

	/* RFC 2464 ch. 5 */
	peer->peer_addr.s6_addr[0] = 0xFE;
	peer->peer_addr.s6_addr[1] = 0x80;
	set_addr((u8 *)&peer->peer_addr.s6_addr + 8, chan->dst.b,
		 chan->dst_type);

	memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
	       EUI64_ADDR_LEN);

	/* The IPv6 address needs to have the U/L bit set properly,
	 * so toggle it back here.
	 */
	set_ip_addr_bits(chan->dst_type, (u8 *)&peer->peer_addr.s6_addr + 8);

	spin_lock(&devices_lock);
	INIT_LIST_HEAD(&peer->list);
	peer_add(dev, peer);
	spin_unlock(&devices_lock);

	/* Notifying peers about us needs to be done without locks held */
	INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

	return peer->chan;
}

static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
{
	struct net_device *netdev;
	int err = 0;

	netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev)),
			      IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
			      netdev_setup);
	if (!netdev)
		return -ENOMEM;

	set_dev_addr(netdev, &chan->src, chan->src_type);

	netdev->netdev_ops = &netdev_ops;
	SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
	SET_NETDEV_DEVTYPE(netdev, &bt_type);

	*dev = lowpan_dev(netdev);
	(*dev)->netdev = netdev;
	(*dev)->hdev = chan->conn->hcon->hdev;
	INIT_LIST_HEAD(&(*dev)->peers);

	spin_lock(&devices_lock);
	INIT_LIST_HEAD(&(*dev)->list);
	list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
	spin_unlock(&devices_lock);

	err = lowpan_register_netdev(netdev, LOWPAN_LLTYPE_BTLE);
	if (err < 0) {
		BT_INFO("register_netdev failed %d", err);
		spin_lock(&devices_lock);
		list_del_rcu(&(*dev)->list);
		spin_unlock(&devices_lock);
		free_netdev(netdev);
		goto out;
	}

	BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
	       netdev->ifindex, &chan->dst, chan->dst_type,
	       &chan->src, chan->src_type);
	set_bit(__LINK_STATE_PRESENT, &netdev->state);

	return 0;

out:
	return err;
}

static inline void chan_ready_cb(struct l2cap_chan *chan)
{
	struct lowpan_dev *dev;

	dev = lookup_dev(chan->conn);

	BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev);

	if (!dev) {
		if (setup_netdev(chan, &dev) < 0) {
			l2cap_chan_del(chan, -ENOENT);
			return;
		}
	}

	if (!try_module_get(THIS_MODULE))
		return;

	add_peer_chan(chan, dev);
	ifup(dev->netdev);
}

static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan)
{
	struct l2cap_chan *chan;

	chan = chan_create();
	if (!chan)
		return NULL;

	chan->ops = pchan->ops;

	BT_DBG("chan %p pchan %p", chan, pchan);

	return chan;
}

static void delete_netdev(struct work_struct *work)
{
	struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
						delete_netdev);

	lowpan_unregister_netdev(entry->netdev);

	/* The entry pointer is deleted by the netdev destructor. */
}

static void chan_close_cb(struct l2cap_chan *chan)
{
	struct lowpan_dev *entry;
	struct lowpan_dev *dev = NULL;
	struct lowpan_peer *peer;
	int err = -ENOENT;
	bool last = false, remove = true;

	BT_DBG("chan %p conn %p", chan, chan->conn);

	if (chan->conn && chan->conn->hcon) {
		if (!is_bt_6lowpan(chan->conn->hcon))
			return;

		/* If conn is set, then the netdev is also there and we should
		 * not remove it.
		 */
		remove = false;
	}

	spin_lock(&devices_lock);

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		dev = lowpan_dev(entry->netdev);
		peer = __peer_lookup_chan(dev, chan);
		if (peer) {
			last = peer_del(dev, peer);
			err = 0;

			BT_DBG("dev %p removing %speer %p", dev,
			       last ? "last " : "1 ", peer);
			BT_DBG("chan %p orig refcnt %d", chan,
			       atomic_read(&chan->kref.refcount));

			l2cap_chan_put(chan);
			break;
		}
	}

	if (!err && last && dev && !atomic_read(&dev->peer_count)) {
		spin_unlock(&devices_lock);

		cancel_delayed_work_sync(&dev->notify_peers);

		ifdown(dev->netdev);

		if (remove) {
			INIT_WORK(&entry->delete_netdev, delete_netdev);
			schedule_work(&entry->delete_netdev);
		}
	} else {
		spin_unlock(&devices_lock);
	}

	return;
}

static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err)
{
	BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn,
	       state_to_string(state), err);
}

static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
					 unsigned long hdr_len,
					 unsigned long len, int nb)
{
	/* Note that we must allocate using GFP_ATOMIC here as
	 * this function is called originally from netdev hard xmit
	 * function in atomic context.
	 */
	return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
}

static void chan_suspend_cb(struct l2cap_chan *chan)
{
	struct sk_buff *skb = chan->data;

	BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);

	if (!skb)
		return;

	lowpan_cb(skb)->status = -EAGAIN;
}

static void chan_resume_cb(struct l2cap_chan *chan)
{
	struct sk_buff *skb = chan->data;

	BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);

	if (!skb)
		return;

	lowpan_cb(skb)->status = 0;
}

static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
{
	return L2CAP_CONN_TIMEOUT;
}

static const struct l2cap_ops bt_6lowpan_chan_ops = {
	.name			= "L2CAP 6LoWPAN channel",
	.new_connection		= chan_new_conn_cb,
	.recv			= chan_recv_cb,
	.close			= chan_close_cb,
	.state_change		= chan_state_change_cb,
	.ready			= chan_ready_cb,
	.resume			= chan_resume_cb,
	.suspend		= chan_suspend_cb,
	.get_sndtimeo		= chan_get_sndtimeo_cb,
	.alloc_skb		= chan_alloc_skb_cb,

	.teardown		= l2cap_chan_no_teardown,
	.defer			= l2cap_chan_no_defer,
	.set_shutdown		= l2cap_chan_no_set_shutdown,
};

static inline __u8 bdaddr_type(__u8 type)
{
	if (type == ADDR_LE_DEV_PUBLIC)
		return BDADDR_LE_PUBLIC;
	else
		return BDADDR_LE_RANDOM;
}

static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
{
	struct l2cap_chan *chan;
	int err;

	chan = chan_create();
	if (!chan)
		return -EINVAL;

	chan->ops = &bt_6lowpan_chan_ops;

	err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
				 addr, dst_type);

	BT_DBG("chan %p err %d", chan, err);
	if (err < 0)
		l2cap_chan_put(chan);

	return err;
}

static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
{
	struct lowpan_peer *peer;

	BT_DBG("conn %p dst type %d", conn, dst_type);

	peer = lookup_peer(conn);
	if (!peer)
		return -ENOENT;

	BT_DBG("peer %p chan %p", peer, peer->chan);

	l2cap_chan_close(peer->chan, ENOENT);

	return 0;
}

static struct l2cap_chan *bt_6lowpan_listen(void)
{
	bdaddr_t *addr = BDADDR_ANY;
	struct l2cap_chan *chan;
	int err;

	if (!enable_6lowpan)
		return NULL;

	chan = chan_create();
	if (!chan)
		return NULL;

	chan->ops = &bt_6lowpan_chan_ops;
	chan->state = BT_LISTEN;
	chan->src_type = BDADDR_LE_PUBLIC;

	atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);

	BT_DBG("chan %p src type %d", chan, chan->src_type);

	err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
	if (err) {
		l2cap_chan_put(chan);
		BT_ERR("psm cannot be added err %d", err);
		return NULL;
	}

	return chan;
}

static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
			  struct l2cap_conn **conn)
{
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	bdaddr_t *src = BDADDR_ANY;
	int n;

	n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
		   &addr->b[5], &addr->b[4], &addr->b[3],
		   &addr->b[2], &addr->b[1], &addr->b[0],
		   addr_type);

	if (n < 7)
		return -EINVAL;

	hdev = hci_get_route(addr, src);
	if (!hdev)
		return -ENOENT;

	hci_dev_lock(hdev);
	hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
	hci_dev_unlock(hdev);

	if (!hcon)
		return -ENOENT;

	*conn = (struct l2cap_conn *)hcon->l2cap_data;

	BT_DBG("conn %p dst %pMR type %d", *conn, &hcon->dst, hcon->dst_type);

	return 0;
}

static void disconnect_all_peers(void)
{
	struct lowpan_dev *entry;
	struct lowpan_peer *peer, *tmp_peer, *new_peer;
	struct list_head peers;

	INIT_LIST_HEAD(&peers);

	/* We make a separate list of peers because close_cb() will
	 * modify the device's peers list, and it is better not to mess
	 * with the same list at the same time.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		list_for_each_entry_rcu(peer, &entry->peers, list) {
			new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
			if (!new_peer)
				break;

			new_peer->chan = peer->chan;
			INIT_LIST_HEAD(&new_peer->list);

			list_add(&new_peer->list, &peers);
		}
	}

	rcu_read_unlock();

	spin_lock(&devices_lock);
	list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
		l2cap_chan_close(peer->chan, ENOENT);

		list_del_rcu(&peer->list);
		kfree_rcu(peer, rcu);
	}
	spin_unlock(&devices_lock);
}

struct set_enable {
	struct work_struct work;
	bool flag;
};

static void do_enable_set(struct work_struct *work)
{
	struct set_enable *set_enable = container_of(work,
						     struct set_enable, work);

	if (!set_enable->flag || enable_6lowpan != set_enable->flag)
		/* Disconnect existing connections if 6lowpan is
		 * disabled
		 */
		disconnect_all_peers();

	enable_6lowpan = set_enable->flag;

	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	listen_chan = bt_6lowpan_listen();

	kfree(set_enable);
}

static int lowpan_enable_set(void *data, u64 val)
{
	struct set_enable *set_enable;

	set_enable = kzalloc(sizeof(*set_enable), GFP_KERNEL);
	if (!set_enable)
		return -ENOMEM;

	set_enable->flag = !!val;
	INIT_WORK(&set_enable->work, do_enable_set);

	schedule_work(&set_enable->work);

	return 0;
}

static int lowpan_enable_get(void *data, u64 *val)
{
	*val = enable_6lowpan;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get,
			lowpan_enable_set, "%llu\n");

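/* Usage sketch for the debugfs interface (illustrative, with a
 * hypothetical peer address):
 *
 *   echo 1 > /sys/kernel/debug/bluetooth/6lowpan_enable
 *   echo "connect 00:1A:7D:DA:71:13 1" > \
 *		/sys/kernel/debug/bluetooth/6lowpan_control
 *   echo "disconnect 00:1A:7D:DA:71:13 1" > \
 *		/sys/kernel/debug/bluetooth/6lowpan_control
 *
 * The trailing number is the LE address type as parsed by
 * get_l2cap_conn() above (1 = public, 2 = random). Reading
 * 6lowpan_control lists the connected peers.
 */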
static ssize_t lowpan_control_write(struct file *fp,
				    const char __user *user_buffer,
				    size_t count,
				    loff_t *position)
{
	char buf[32];
	size_t buf_size = min(count, sizeof(buf) - 1);
	int ret;
	bdaddr_t addr;
	u8 addr_type;
	struct l2cap_conn *conn = NULL;

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (memcmp(buf, "connect ", 8) == 0) {
		ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn);
		if (ret == -EINVAL)
			return ret;

		if (listen_chan) {
			l2cap_chan_close(listen_chan, 0);
			l2cap_chan_put(listen_chan);
			listen_chan = NULL;
		}

		if (conn) {
			struct lowpan_peer *peer;

			if (!is_bt_6lowpan(conn->hcon))
				return -EINVAL;

			peer = lookup_peer(conn);
			if (peer) {
				BT_DBG("6LoWPAN connection already exists");
				return -EALREADY;
			}

			BT_DBG("conn %p dst %pMR type %d user %d", conn,
			       &conn->hcon->dst, conn->hcon->dst_type,
			       addr_type);
		}

		ret = bt_6lowpan_connect(&addr, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	if (memcmp(buf, "disconnect ", 11) == 0) {
		ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn);
		if (ret < 0)
			return ret;

		ret = bt_6lowpan_disconnect(conn, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	return count;
}

static int lowpan_control_show(struct seq_file *f, void *ptr)
{
	struct lowpan_dev *entry;
	struct lowpan_peer *peer;

	spin_lock(&devices_lock);

	list_for_each_entry(entry, &bt_6lowpan_devices, list) {
		list_for_each_entry(peer, &entry->peers, list)
			seq_printf(f, "%pMR (type %u)\n",
				   &peer->chan->dst, peer->chan->dst_type);
	}

	spin_unlock(&devices_lock);

	return 0;
}

static int lowpan_control_open(struct inode *inode, struct file *file)
{
	return single_open(file, lowpan_control_show, inode->i_private);
}

static const struct file_operations lowpan_control_fops = {
	.open		= lowpan_control_open,
	.read		= seq_read,
	.write		= lowpan_control_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void disconnect_devices(void)
{
	struct lowpan_dev *entry, *tmp, *new_dev;
	struct list_head devices;

	INIT_LIST_HEAD(&devices);

	/* We make a separate list of devices because the unregister_netdev()
	 * will call device_event() which will also want to modify the same
	 * devices list.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
		if (!new_dev)
			break;

		new_dev->netdev = entry->netdev;
		INIT_LIST_HEAD(&new_dev->list);

		list_add_rcu(&new_dev->list, &devices);
	}

	rcu_read_unlock();

	list_for_each_entry_safe(entry, tmp, &devices, list) {
		ifdown(entry->netdev);
		BT_DBG("Unregistering netdev %s %p",
		       entry->netdev->name, entry->netdev);
		lowpan_unregister_netdev(entry->netdev);
		kfree(entry);
	}
}

static int device_event(struct notifier_block *unused,
			unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct lowpan_dev *entry;

	if (netdev->type != ARPHRD_6LOWPAN)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		spin_lock(&devices_lock);
		list_for_each_entry(entry, &bt_6lowpan_devices, list) {
			if (entry->netdev == netdev) {
				BT_DBG("Unregistered netdev %s %p",
				       netdev->name, netdev);
				list_del(&entry->list);
				break;
			}
		}
		spin_unlock(&devices_lock);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block bt_6lowpan_dev_notifier = {
	.notifier_call = device_event,
};

static int __init bt_6lowpan_init(void)
{
	lowpan_enable_debugfs = debugfs_create_file("6lowpan_enable", 0644,
						    bt_debugfs, NULL,
						    &lowpan_enable_fops);
	lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
						     bt_debugfs, NULL,
						     &lowpan_control_fops);

	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

static void __exit bt_6lowpan_exit(void)
{
	debugfs_remove(lowpan_enable_debugfs);
	debugfs_remove(lowpan_control_debugfs);

	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	disconnect_devices();

	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

module_init(bt_6lowpan_init);
module_exit(bt_6lowpan_exit);

MODULE_AUTHOR("Jukka Rissanen <jukka.rissanen@linux.intel.com>");
MODULE_DESCRIPTION("Bluetooth 6LoWPAN");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
1/*
2 Copyright (c) 2013-2014 Intel Corp.
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License version 2 and
6 only version 2 as published by the Free Software Foundation.
7
8 This program is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU General Public License for more details.
12*/
13
14#include <linux/if_arp.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17#include <linux/module.h>
18#include <linux/debugfs.h>
19
20#include <net/ipv6.h>
21#include <net/ip6_route.h>
22#include <net/addrconf.h>
23#include <net/pkt_sched.h>
24
25#include <net/bluetooth/bluetooth.h>
26#include <net/bluetooth/hci_core.h>
27#include <net/bluetooth/l2cap.h>
28
29#include <net/6lowpan.h> /* for the compression support */
30
31#define VERSION "0.1"
32
33static struct dentry *lowpan_enable_debugfs;
34static struct dentry *lowpan_control_debugfs;
35
36#define IFACE_NAME_TEMPLATE "bt%d"
37
38struct skb_cb {
39 struct in6_addr addr;
40 struct in6_addr gw;
41 struct l2cap_chan *chan;
42};
43#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
44
45/* The devices list contains those devices that we are acting
46 * as a proxy. The BT 6LoWPAN device is a virtual device that
47 * connects to the Bluetooth LE device. The real connection to
48 * BT device is done via l2cap layer. There exists one
49 * virtual device / one BT 6LoWPAN network (=hciX device).
50 * The list contains struct lowpan_dev elements.
51 */
52static LIST_HEAD(bt_6lowpan_devices);
53static DEFINE_SPINLOCK(devices_lock);
54
55static bool enable_6lowpan;
56
57/* We are listening incoming connections via this channel
58 */
59static struct l2cap_chan *listen_chan;
60
61struct lowpan_peer {
62 struct list_head list;
63 struct rcu_head rcu;
64 struct l2cap_chan *chan;
65
66 /* peer addresses in various formats */
67 unsigned char lladdr[ETH_ALEN];
68 struct in6_addr peer_addr;
69};
70
71struct lowpan_btle_dev {
72 struct list_head list;
73
74 struct hci_dev *hdev;
75 struct net_device *netdev;
76 struct list_head peers;
77 atomic_t peer_count; /* number of items in peers list */
78
79 struct work_struct delete_netdev;
80 struct delayed_work notify_peers;
81};
82
83static inline struct lowpan_btle_dev *
84lowpan_btle_dev(const struct net_device *netdev)
85{
86 return (struct lowpan_btle_dev *)lowpan_dev(netdev)->priv;
87}
88
89static inline void peer_add(struct lowpan_btle_dev *dev,
90 struct lowpan_peer *peer)
91{
92 list_add_rcu(&peer->list, &dev->peers);
93 atomic_inc(&dev->peer_count);
94}
95
96static inline bool peer_del(struct lowpan_btle_dev *dev,
97 struct lowpan_peer *peer)
98{
99 list_del_rcu(&peer->list);
100 kfree_rcu(peer, rcu);
101
102 module_put(THIS_MODULE);
103
104 if (atomic_dec_and_test(&dev->peer_count)) {
105 BT_DBG("last peer");
106 return true;
107 }
108
109 return false;
110}
111
112static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_btle_dev *dev,
113 bdaddr_t *ba, __u8 type)
114{
115 struct lowpan_peer *peer;
116
117 BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
118 ba, type);
119
120 rcu_read_lock();
121
122 list_for_each_entry_rcu(peer, &dev->peers, list) {
123 BT_DBG("dst addr %pMR dst type %d",
124 &peer->chan->dst, peer->chan->dst_type);
125
126 if (bacmp(&peer->chan->dst, ba))
127 continue;
128
129 if (type == peer->chan->dst_type) {
130 rcu_read_unlock();
131 return peer;
132 }
133 }
134
135 rcu_read_unlock();
136
137 return NULL;
138}
139
140static inline struct lowpan_peer *
141__peer_lookup_chan(struct lowpan_btle_dev *dev, struct l2cap_chan *chan)
142{
143 struct lowpan_peer *peer;
144
145 list_for_each_entry_rcu(peer, &dev->peers, list) {
146 if (peer->chan == chan)
147 return peer;
148 }
149
150 return NULL;
151}
152
153static inline struct lowpan_peer *
154__peer_lookup_conn(struct lowpan_btle_dev *dev, struct l2cap_conn *conn)
155{
156 struct lowpan_peer *peer;
157
158 list_for_each_entry_rcu(peer, &dev->peers, list) {
159 if (peer->chan->conn == conn)
160 return peer;
161 }
162
163 return NULL;
164}
165
166static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
167 struct in6_addr *daddr,
168 struct sk_buff *skb)
169{
170 struct lowpan_peer *peer;
171 struct in6_addr *nexthop;
172 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
173 int count = atomic_read(&dev->peer_count);
174
175 BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt);
176
177 /* If we have multiple 6lowpan peers, then check where we should
178 * send the packet. If only one peer exists, then we can send the
179 * packet right away.
180 */
181 if (count == 1) {
182 rcu_read_lock();
183 peer = list_first_or_null_rcu(&dev->peers, struct lowpan_peer,
184 list);
185 rcu_read_unlock();
186 return peer;
187 }
188
189 if (!rt) {
190 nexthop = &lowpan_cb(skb)->gw;
191
192 if (ipv6_addr_any(nexthop))
193 return NULL;
194 } else {
195 nexthop = rt6_nexthop(rt, daddr);
196
197 /* We need to remember the address because it is needed
198 * by bt_xmit() when sending the packet. In bt_xmit(), the
199 * destination routing info is not set.
200 */
201 memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr));
202 }
203
204 BT_DBG("gw %pI6c", nexthop);
205
206 rcu_read_lock();
207
208 list_for_each_entry_rcu(peer, &dev->peers, list) {
209 BT_DBG("dst addr %pMR dst type %d ip %pI6c",
210 &peer->chan->dst, peer->chan->dst_type,
211 &peer->peer_addr);
212
213 if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) {
214 rcu_read_unlock();
215 return peer;
216 }
217 }
218
219 rcu_read_unlock();
220
221 return NULL;
222}
223
224static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
225{
226 struct lowpan_btle_dev *entry;
227 struct lowpan_peer *peer = NULL;
228
229 rcu_read_lock();
230
231 list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
232 peer = __peer_lookup_conn(entry, conn);
233 if (peer)
234 break;
235 }
236
237 rcu_read_unlock();
238
239 return peer;
240}
241
242static struct lowpan_btle_dev *lookup_dev(struct l2cap_conn *conn)
243{
244 struct lowpan_btle_dev *entry;
245 struct lowpan_btle_dev *dev = NULL;
246
247 rcu_read_lock();
248
249 list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
250 if (conn->hcon->hdev == entry->hdev) {
251 dev = entry;
252 break;
253 }
254 }
255
256 rcu_read_unlock();
257
258 return dev;
259}
260
261static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
262{
263 struct sk_buff *skb_cp;
264
265 skb_cp = skb_copy(skb, GFP_ATOMIC);
266 if (!skb_cp)
267 return NET_RX_DROP;
268
269 return netif_rx_ni(skb_cp);
270}
271
272static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
273 struct lowpan_peer *peer)
274{
275 const u8 *saddr;
276
277 saddr = peer->lladdr;
278
279 return lowpan_header_decompress(skb, netdev, netdev->dev_addr, saddr);
280}
281
282static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
283 struct lowpan_peer *peer)
284{
285 struct sk_buff *local_skb;
286 int ret;
287
288 if (!netif_running(dev))
289 goto drop;
290
291 if (dev->type != ARPHRD_6LOWPAN || !skb->len)
292 goto drop;
293
294 skb_reset_network_header(skb);
295
296 skb = skb_share_check(skb, GFP_ATOMIC);
297 if (!skb)
298 goto drop;
299
300 /* check that it's our buffer */
301 if (lowpan_is_ipv6(*skb_network_header(skb))) {
302 /* Pull off the 1-byte of 6lowpan header. */
303 skb_pull(skb, 1);
304
305 /* Copy the packet so that the IPv6 header is
306 * properly aligned.
307 */
308 local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
309 skb_tailroom(skb), GFP_ATOMIC);
310 if (!local_skb)
311 goto drop;
312
313 local_skb->protocol = htons(ETH_P_IPV6);
314 local_skb->pkt_type = PACKET_HOST;
315 local_skb->dev = dev;
316
317 skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
318
319 if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
320 kfree_skb(local_skb);
321 goto drop;
322 }
323
324 dev->stats.rx_bytes += skb->len;
325 dev->stats.rx_packets++;
326
327 consume_skb(local_skb);
328 consume_skb(skb);
329 } else if (lowpan_is_iphc(*skb_network_header(skb))) {
330 local_skb = skb_clone(skb, GFP_ATOMIC);
331 if (!local_skb)
332 goto drop;
333
334 local_skb->dev = dev;
335
336 ret = iphc_decompress(local_skb, dev, peer);
337 if (ret < 0) {
338 BT_DBG("iphc_decompress failed: %d", ret);
339 kfree_skb(local_skb);
340 goto drop;
341 }
342
343 local_skb->protocol = htons(ETH_P_IPV6);
344 local_skb->pkt_type = PACKET_HOST;
345
346 if (give_skb_to_upper(local_skb, dev)
347 != NET_RX_SUCCESS) {
348 kfree_skb(local_skb);
349 goto drop;
350 }
351
352 dev->stats.rx_bytes += skb->len;
353 dev->stats.rx_packets++;
354
355 consume_skb(local_skb);
356 consume_skb(skb);
357 } else {
358 BT_DBG("unknown packet type");
359 goto drop;
360 }
361
362 return NET_RX_SUCCESS;
363
364drop:
365 dev->stats.rx_dropped++;
366 return NET_RX_DROP;
367}
368
369/* Packet from BT LE device */
370static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
371{
372 struct lowpan_btle_dev *dev;
373 struct lowpan_peer *peer;
374 int err;
375
376 peer = lookup_peer(chan->conn);
377 if (!peer)
378 return -ENOENT;
379
380 dev = lookup_dev(chan->conn);
381 if (!dev || !dev->netdev)
382 return -ENOENT;
383
384 err = recv_pkt(skb, dev->netdev, peer);
385 if (err) {
386 BT_DBG("recv pkt %d", err);
387 err = -EAGAIN;
388 }
389
390 return err;
391}
392
393static int setup_header(struct sk_buff *skb, struct net_device *netdev,
394 bdaddr_t *peer_addr, u8 *peer_addr_type)
395{
396 struct in6_addr ipv6_daddr;
397 struct ipv6hdr *hdr;
398 struct lowpan_btle_dev *dev;
399 struct lowpan_peer *peer;
400 u8 *daddr;
401 int err, status = 0;
402
403 hdr = ipv6_hdr(skb);
404
405 dev = lowpan_btle_dev(netdev);
406
407 memcpy(&ipv6_daddr, &hdr->daddr, sizeof(ipv6_daddr));
408
409 if (ipv6_addr_is_multicast(&ipv6_daddr)) {
410 lowpan_cb(skb)->chan = NULL;
411 daddr = NULL;
412 } else {
413 BT_DBG("dest IP %pI6c", &ipv6_daddr);
414
415 /* The packet might be sent to 6lowpan interface
416 * because of routing (either via default route
417 * or user set route) so get peer according to
418 * the destination address.
419 */
420 peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
421 if (!peer) {
422 BT_DBG("no such peer");
423 return -ENOENT;
424 }
425
426 daddr = peer->lladdr;
427 *peer_addr = peer->chan->dst;
428 *peer_addr_type = peer->chan->dst_type;
429 lowpan_cb(skb)->chan = peer->chan;
430
431 status = 1;
432 }
433
434 lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr);
435
436 err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
437 if (err < 0)
438 return err;
439
440 return status;
441}
442
443static int header_create(struct sk_buff *skb, struct net_device *netdev,
444 unsigned short type, const void *_daddr,
445 const void *_saddr, unsigned int len)
446{
447 if (type != ETH_P_IPV6)
448 return -EINVAL;
449
450 return 0;
451}
452
453/* Packet to BT LE device */
454static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
455 struct net_device *netdev)
456{
457 struct msghdr msg;
458 struct kvec iv;
459 int err;
460
461 /* Remember the skb so that we can send EAGAIN to the caller if
462 * we run out of credits.
463 */
464 chan->data = skb;
465
466 iv.iov_base = skb->data;
467 iv.iov_len = skb->len;
468
469 memset(&msg, 0, sizeof(msg));
470 iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iv, 1, skb->len);
471
472 err = l2cap_chan_send(chan, &msg, skb->len);
473 if (err > 0) {
474 netdev->stats.tx_bytes += err;
475 netdev->stats.tx_packets++;
476 return 0;
477 }
478
479 if (err < 0)
480 netdev->stats.tx_errors++;
481
482 return err;
483}
484
485static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
486{
487 struct sk_buff *local_skb;
488 struct lowpan_btle_dev *entry;
489 int err = 0;
490
491 rcu_read_lock();
492
493 list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
494 struct lowpan_peer *pentry;
495 struct lowpan_btle_dev *dev;
496
497 if (entry->netdev != netdev)
498 continue;
499
500 dev = lowpan_btle_dev(entry->netdev);
501
502 list_for_each_entry_rcu(pentry, &dev->peers, list) {
503 int ret;
504
505 local_skb = skb_clone(skb, GFP_ATOMIC);
506
507 BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
508 netdev->name,
509 &pentry->chan->dst, pentry->chan->dst_type,
510 &pentry->peer_addr, pentry->chan);
511 ret = send_pkt(pentry->chan, local_skb, netdev);
512 if (ret < 0)
513 err = ret;
514
515 kfree_skb(local_skb);
516 }
517 }
518
519 rcu_read_unlock();
520
521 return err;
522}
523
524static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
525{
526 int err = 0;
527 bdaddr_t addr;
528 u8 addr_type;
529
530 /* We must take a copy of the skb before we modify/replace the ipv6
531 * header as the header could be used elsewhere
532 */
533 skb = skb_unshare(skb, GFP_ATOMIC);
534 if (!skb)
535 return NET_XMIT_DROP;
536
537 /* Return values from setup_header()
538 * <0 - error, packet is dropped
539 * 0 - this is a multicast packet
540 * 1 - this is unicast packet
541 */
542 err = setup_header(skb, netdev, &addr, &addr_type);
543 if (err < 0) {
544 kfree_skb(skb);
545 return NET_XMIT_DROP;
546 }
547
548 if (err) {
549 if (lowpan_cb(skb)->chan) {
550 BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
551 netdev->name, &addr, addr_type,
552 &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan);
553 err = send_pkt(lowpan_cb(skb)->chan, skb, netdev);
554 } else {
555 err = -ENOENT;
556 }
557 } else {
558 /* We need to send the packet to every device behind this
559 * interface.
560 */
561 err = send_mcast_pkt(skb, netdev);
562 }
563
564 dev_kfree_skb(skb);
565
566 if (err)
567 BT_DBG("ERROR: xmit failed (%d)", err);
568
569 return err < 0 ? NET_XMIT_DROP : err;
570}
571
572static int bt_dev_init(struct net_device *dev)
573{
574 netdev_lockdep_set_classes(dev);
575
576 return 0;
577}
578
579static const struct net_device_ops netdev_ops = {
580 .ndo_init = bt_dev_init,
581 .ndo_start_xmit = bt_xmit,
582};
583
584static struct header_ops header_ops = {
585 .create = header_create,
586};
587
588static void netdev_setup(struct net_device *dev)
589{
590 dev->hard_header_len = 0;
591 dev->needed_tailroom = 0;
592 dev->flags = IFF_RUNNING | IFF_MULTICAST;
593 dev->watchdog_timeo = 0;
594 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
595
596 dev->netdev_ops = &netdev_ops;
597 dev->header_ops = &header_ops;
598 dev->needs_free_netdev = true;
599}
600
601static struct device_type bt_type = {
602 .name = "bluetooth",
603};
604
605static void ifup(struct net_device *netdev)
606{
607 int err;
608
609 rtnl_lock();
610 err = dev_open(netdev);
611 if (err < 0)
612 BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
613 rtnl_unlock();
614}
615
616static void ifdown(struct net_device *netdev)
617{
618 rtnl_lock();
619 dev_close(netdev);
620 rtnl_unlock();
621}
622
623static void do_notify_peers(struct work_struct *work)
624{
625 struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev,
626 notify_peers.work);
627
628 netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
629}
630
631static bool is_bt_6lowpan(struct hci_conn *hcon)
632{
633 if (hcon->type != LE_LINK)
634 return false;
635
636 if (!enable_6lowpan)
637 return false;
638
639 return true;
640}
641
642static struct l2cap_chan *chan_create(void)
643{
644 struct l2cap_chan *chan;
645
646 chan = l2cap_chan_create();
647 if (!chan)
648 return NULL;
649
650 l2cap_chan_set_defaults(chan);
651
652 chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
653 chan->mode = L2CAP_MODE_LE_FLOWCTL;
654 chan->imtu = 1280;
655
656 return chan;
657}
658
659static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
660 struct lowpan_btle_dev *dev,
661 bool new_netdev)
662{
663 struct lowpan_peer *peer;
664
665 peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
666 if (!peer)
667 return NULL;
668
669 peer->chan = chan;
670 memset(&peer->peer_addr, 0, sizeof(struct in6_addr));
671
672 baswap((void *)peer->lladdr, &chan->dst);
673
674 lowpan_iphc_uncompress_eui48_lladdr(&peer->peer_addr, peer->lladdr);
675
676 spin_lock(&devices_lock);
677 INIT_LIST_HEAD(&peer->list);
678 peer_add(dev, peer);
679 spin_unlock(&devices_lock);
680
681 /* Notifying peers about us needs to be done without locks held */
682 if (new_netdev)
683 INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
684 schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));
685
686 return peer->chan;
687}
688
689static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev)
690{
691 struct net_device *netdev;
692 int err = 0;
693
694 netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)),
695 IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
696 netdev_setup);
697 if (!netdev)
698 return -ENOMEM;
699
700 netdev->addr_assign_type = NET_ADDR_PERM;
701 baswap((void *)netdev->dev_addr, &chan->src);
702
703 netdev->netdev_ops = &netdev_ops;
704 SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
705 SET_NETDEV_DEVTYPE(netdev, &bt_type);
706
707 *dev = lowpan_btle_dev(netdev);
708 (*dev)->netdev = netdev;
709 (*dev)->hdev = chan->conn->hcon->hdev;
710 INIT_LIST_HEAD(&(*dev)->peers);
711
712 spin_lock(&devices_lock);
713 INIT_LIST_HEAD(&(*dev)->list);
714 list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
715 spin_unlock(&devices_lock);
716
717 err = lowpan_register_netdev(netdev, LOWPAN_LLTYPE_BTLE);
718 if (err < 0) {
719 BT_INFO("register_netdev failed %d", err);
720 spin_lock(&devices_lock);
721 list_del_rcu(&(*dev)->list);
722 spin_unlock(&devices_lock);
723 free_netdev(netdev);
724 goto out;
725 }
726
727 BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
728 netdev->ifindex, &chan->dst, chan->dst_type,
729 &chan->src, chan->src_type);
730 set_bit(__LINK_STATE_PRESENT, &netdev->state);
731
732 return 0;
733
734out:
735 return err;
736}
737
738static inline void chan_ready_cb(struct l2cap_chan *chan)
739{
740 struct lowpan_btle_dev *dev;
741 bool new_netdev = false;
742
743 dev = lookup_dev(chan->conn);
744
745 BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev);
746
747 if (!dev) {
748 if (setup_netdev(chan, &dev) < 0) {
749 l2cap_chan_del(chan, -ENOENT);
750 return;
751 }
752 new_netdev = true;
753 }
754
755 if (!try_module_get(THIS_MODULE))
756 return;
757
758 add_peer_chan(chan, dev, new_netdev);
759 ifup(dev->netdev);
760}
761
762static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan)
763{
764 struct l2cap_chan *chan;
765
766 chan = chan_create();
767 if (!chan)
768 return NULL;
769
770 chan->ops = pchan->ops;
771
772 BT_DBG("chan %p pchan %p", chan, pchan);
773
774 return chan;
775}
776
777static void delete_netdev(struct work_struct *work)
778{
779 struct lowpan_btle_dev *entry = container_of(work,
780 struct lowpan_btle_dev,
781 delete_netdev);
782
783 lowpan_unregister_netdev(entry->netdev);
784
785 /* The entry pointer is deleted by the netdev destructor. */
786}
787
788static void chan_close_cb(struct l2cap_chan *chan)
789{
790 struct lowpan_btle_dev *entry;
791 struct lowpan_btle_dev *dev = NULL;
792 struct lowpan_peer *peer;
793 int err = -ENOENT;
794 bool last = false, remove = true;
795
796 BT_DBG("chan %p conn %p", chan, chan->conn);
797
798 if (chan->conn && chan->conn->hcon) {
799 if (!is_bt_6lowpan(chan->conn->hcon))
800 return;
801
802 /* If conn is set, then the netdev is also there and we should
803 * not remove it.
804 */
805 remove = false;
806 }
807
808 spin_lock(&devices_lock);
809
810 list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
811 dev = lowpan_btle_dev(entry->netdev);
812 peer = __peer_lookup_chan(dev, chan);
813 if (peer) {
814 last = peer_del(dev, peer);
815 err = 0;
816
817 BT_DBG("dev %p removing %speer %p", dev,
818 last ? "last " : "1 ", peer);
819 BT_DBG("chan %p orig refcnt %d", chan,
820 kref_read(&chan->kref));
821
822 l2cap_chan_put(chan);
823 break;
824 }
825 }
826
827 if (!err && last && dev && !atomic_read(&dev->peer_count)) {
828 spin_unlock(&devices_lock);
829
830 cancel_delayed_work_sync(&dev->notify_peers);
831
832 ifdown(dev->netdev);
833
834 if (remove) {
835 INIT_WORK(&entry->delete_netdev, delete_netdev);
836 schedule_work(&entry->delete_netdev);
837 }
838 } else {
839 spin_unlock(&devices_lock);
840 }
841
842 return;
843}
844
845static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err)
846{
847 BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn,
848 state_to_string(state), err);
849}
850
851static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
852 unsigned long hdr_len,
853 unsigned long len, int nb)
854{
855 /* Note that we must allocate using GFP_ATOMIC here as
856 * this function is called originally from netdev hard xmit
857 * function in atomic context.
858 */
859 return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
860}
861
static void chan_suspend_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *dev;

	BT_DBG("chan %p suspend", chan);

	dev = lookup_dev(chan->conn);
	if (!dev || !dev->netdev)
		return;

	netif_stop_queue(dev->netdev);
}

static void chan_resume_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *dev;

	BT_DBG("chan %p resume", chan);

	dev = lookup_dev(chan->conn);
	if (!dev || !dev->netdev)
		return;

	netif_wake_queue(dev->netdev);
}

static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
{
	return L2CAP_CONN_TIMEOUT;
}

static const struct l2cap_ops bt_6lowpan_chan_ops = {
	.name			= "L2CAP 6LoWPAN channel",
	.new_connection		= chan_new_conn_cb,
	.recv			= chan_recv_cb,
	.close			= chan_close_cb,
	.state_change		= chan_state_change_cb,
	.ready			= chan_ready_cb,
	.resume			= chan_resume_cb,
	.suspend		= chan_suspend_cb,
	.get_sndtimeo		= chan_get_sndtimeo_cb,
	.alloc_skb		= chan_alloc_skb_cb,

	.teardown		= l2cap_chan_no_teardown,
	.defer			= l2cap_chan_no_defer,
	.set_shutdown		= l2cap_chan_no_set_shutdown,
};

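/* Map an HCI LE address type to the corresponding bdaddr type used by
 * the L2CAP layer.
 */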
static inline __u8 bdaddr_type(__u8 type)
{
	if (type == ADDR_LE_DEV_PUBLIC)
		return BDADDR_LE_PUBLIC;
	else
		return BDADDR_LE_RANDOM;
}

static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
{
	struct l2cap_chan *chan;
	int err;

	chan = chan_create();
	if (!chan)
		return -EINVAL;

	chan->ops = &bt_6lowpan_chan_ops;

	err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
				 addr, dst_type);

	BT_DBG("chan %p err %d", chan, err);
	if (err < 0)
		l2cap_chan_put(chan);

	return err;
}

static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
{
	struct lowpan_peer *peer;

	BT_DBG("conn %p dst type %d", conn, dst_type);

	peer = lookup_peer(conn);
	if (!peer)
		return -ENOENT;

	BT_DBG("peer %p chan %p", peer, peer->chan);

	l2cap_chan_close(peer->chan, ENOENT);

	return 0;
}

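/* Create the listening channel for incoming IPSP connections. The
 * channel listens on BDADDR_ANY with a public LE source address type
 * and is bound to L2CAP_PSM_IPSP (0x0023, the PSM assigned to the
 * Internet Protocol Support Profile).
 */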
static struct l2cap_chan *bt_6lowpan_listen(void)
{
	bdaddr_t *addr = BDADDR_ANY;
	struct l2cap_chan *chan;
	int err;

	if (!enable_6lowpan)
		return NULL;

	chan = chan_create();
	if (!chan)
		return NULL;

	chan->ops = &bt_6lowpan_chan_ops;
	chan->state = BT_LISTEN;
	chan->src_type = BDADDR_LE_PUBLIC;

	atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);

	BT_DBG("chan %p src type %d", chan, chan->src_type);

	err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
	if (err) {
		l2cap_chan_put(chan);
		BT_ERR("psm cannot be added err %d", err);
		return NULL;
	}

	return chan;
}

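/* Parse a peer specification of the form "xx:xx:xx:xx:xx:xx <type>",
 * a colon-separated Bluetooth address followed by the LE address type
 * (e.g. "00:1B:DC:00:00:01 1"; the address here is illustrative only),
 * and look up the matching LE connection, if one exists.
 */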
static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
			  struct l2cap_conn **conn)
{
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int n;

	n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
		   &addr->b[5], &addr->b[4], &addr->b[3],
		   &addr->b[2], &addr->b[1], &addr->b[0],
		   addr_type);

	if (n < 7)
		return -EINVAL;

	/* The LE_PUBLIC address type is ignored because of BDADDR_ANY */
	hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC);
	if (!hdev)
		return -ENOENT;

	hci_dev_lock(hdev);
	hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
	hci_dev_unlock(hdev);

	if (!hcon)
		return -ENOENT;

	*conn = (struct l2cap_conn *)hcon->l2cap_data;

	BT_DBG("conn %p dst %pMR type %d", *conn, &hcon->dst, hcon->dst_type);

	return 0;
}

static void disconnect_all_peers(void)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer, *tmp_peer, *new_peer;
	struct list_head peers;

	INIT_LIST_HEAD(&peers);

	/* Build a private copy of the peer list first: close_cb() will
	 * modify the per-device peer lists, so we must not iterate them
	 * while the channels are being closed.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		list_for_each_entry_rcu(peer, &entry->peers, list) {
			new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
			if (!new_peer)
				break;

			new_peer->chan = peer->chan;
			INIT_LIST_HEAD(&new_peer->list);

			list_add(&new_peer->list, &peers);
		}
	}

	rcu_read_unlock();

	spin_lock(&devices_lock);
	list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
		l2cap_chan_close(peer->chan, ENOENT);

		list_del_rcu(&peer->list);
		kfree_rcu(peer, rcu);
	}
	spin_unlock(&devices_lock);
}

struct set_enable {
	struct work_struct work;
	bool flag;
};

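/* Runs from the system workqueue in process context, so it may sleep
 * while tearing down connections; the debugfs write handler only
 * schedules this work and returns immediately.
 */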
static void do_enable_set(struct work_struct *work)
{
	struct set_enable *set_enable = container_of(work,
						     struct set_enable, work);

	if (!set_enable->flag || enable_6lowpan != set_enable->flag)
		/* Disconnect existing connections if 6lowpan is being
		 * disabled or if its state is toggled.
		 */
		disconnect_all_peers();

	enable_6lowpan = set_enable->flag;

	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	listen_chan = bt_6lowpan_listen();

	kfree(set_enable);
}

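/* Example usage (a sketch, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   echo 1 > /sys/kernel/debug/bluetooth/6lowpan_enable
 *
 * enables 6LoWPAN support and starts the IPSP listener; echoing 0
 * disables it and disconnects existing peers.
 */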
static int lowpan_enable_set(void *data, u64 val)
{
	struct set_enable *set_enable;

	set_enable = kzalloc(sizeof(*set_enable), GFP_KERNEL);
	if (!set_enable)
		return -ENOMEM;

	set_enable->flag = !!val;
	INIT_WORK(&set_enable->work, do_enable_set);

	schedule_work(&set_enable->work);

	return 0;
}

static int lowpan_enable_get(void *data, u64 *val)
{
	*val = enable_6lowpan;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get,
			lowpan_enable_set, "%llu\n");

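/* Accepts "connect <addr> <type>" and "disconnect <addr> <type>"
 * commands. For example (illustrative address, assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *   echo "connect 00:1B:DC:00:00:01 2" > \
 *		/sys/kernel/debug/bluetooth/6lowpan_control
 *
 * where the trailing number is the LE address type (1 for public,
 * 2 for random).
 */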
static ssize_t lowpan_control_write(struct file *fp,
				    const char __user *user_buffer,
				    size_t count,
				    loff_t *position)
{
	char buf[32];
	size_t buf_size = min(count, sizeof(buf) - 1);
	int ret;
	bdaddr_t addr;
	u8 addr_type;
	struct l2cap_conn *conn = NULL;

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (memcmp(buf, "connect ", 8) == 0) {
		ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn);
		if (ret == -EINVAL)
			return ret;

		if (listen_chan) {
			l2cap_chan_close(listen_chan, 0);
			l2cap_chan_put(listen_chan);
			listen_chan = NULL;
		}

		if (conn) {
			struct lowpan_peer *peer;

			if (!is_bt_6lowpan(conn->hcon))
				return -EINVAL;

			peer = lookup_peer(conn);
			if (peer) {
				BT_DBG("6LoWPAN connection already exists");
				return -EALREADY;
			}

			BT_DBG("conn %p dst %pMR type %d user %d", conn,
			       &conn->hcon->dst, conn->hcon->dst_type,
			       addr_type);
		}

		ret = bt_6lowpan_connect(&addr, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	if (memcmp(buf, "disconnect ", 11) == 0) {
		ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn);
		if (ret < 0)
			return ret;

		ret = bt_6lowpan_disconnect(conn, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	return count;
}

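/* Reading the control file lists the currently connected peers, one
 * per line, e.g. "00:1B:DC:00:00:01 (type 2)" (illustrative address).
 */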
static int lowpan_control_show(struct seq_file *f, void *ptr)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer;

	spin_lock(&devices_lock);

	list_for_each_entry(entry, &bt_6lowpan_devices, list) {
		list_for_each_entry(peer, &entry->peers, list)
			seq_printf(f, "%pMR (type %u)\n",
				   &peer->chan->dst, peer->chan->dst_type);
	}

	spin_unlock(&devices_lock);

	return 0;
}

static int lowpan_control_open(struct inode *inode, struct file *file)
{
	return single_open(file, lowpan_control_show, inode->i_private);
}

static const struct file_operations lowpan_control_fops = {
	.open		= lowpan_control_open,
	.read		= seq_read,
	.write		= lowpan_control_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void disconnect_devices(void)
{
	struct lowpan_btle_dev *entry, *tmp, *new_dev;
	struct list_head devices;

	INIT_LIST_HEAD(&devices);

	/* Build a private copy of the device list first, because
	 * unregister_netdev() triggers device_event(), which also
	 * modifies bt_6lowpan_devices.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
		if (!new_dev)
			break;

		new_dev->netdev = entry->netdev;
		INIT_LIST_HEAD(&new_dev->list);

		list_add_rcu(&new_dev->list, &devices);
	}

	rcu_read_unlock();

	list_for_each_entry_safe(entry, tmp, &devices, list) {
		ifdown(entry->netdev);
		BT_DBG("Unregistering netdev %s %p",
		       entry->netdev->name, entry->netdev);
		lowpan_unregister_netdev(entry->netdev);
		kfree(entry);
	}
}

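/* Netdevice notifier: when one of our 6LoWPAN interfaces is
 * unregistered, remove its entry from bt_6lowpan_devices so the list
 * never points at a freed netdev.
 */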
static int device_event(struct notifier_block *unused,
			unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct lowpan_btle_dev *entry;

	if (netdev->type != ARPHRD_6LOWPAN)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		spin_lock(&devices_lock);
		list_for_each_entry(entry, &bt_6lowpan_devices, list) {
			if (entry->netdev == netdev) {
				BT_DBG("Unregistered netdev %s %p",
				       netdev->name, netdev);
				list_del(&entry->list);
				break;
			}
		}
		spin_unlock(&devices_lock);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block bt_6lowpan_dev_notifier = {
	.notifier_call = device_event,
};

static int __init bt_6lowpan_init(void)
{
	lowpan_enable_debugfs = debugfs_create_file("6lowpan_enable", 0644,
						    bt_debugfs, NULL,
						    &lowpan_enable_fops);
	lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
						     bt_debugfs, NULL,
						     &lowpan_control_fops);

	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

static void __exit bt_6lowpan_exit(void)
{
	debugfs_remove(lowpan_enable_debugfs);
	debugfs_remove(lowpan_control_debugfs);

	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	disconnect_devices();

	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

module_init(bt_6lowpan_init);
module_exit(bt_6lowpan_exit);

MODULE_AUTHOR("Jukka Rissanen <jukka.rissanen@linux.intel.com>");
MODULE_DESCRIPTION("Bluetooth 6LoWPAN");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");