xref: /openbmc/linux/net/bluetooth/6lowpan.c (revision 842ed298)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3    Copyright (c) 2013-2014 Intel Corp.
4 
5 */
6 
7 #include <linux/if_arp.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/module.h>
11 #include <linux/debugfs.h>
12 
13 #include <net/ipv6.h>
14 #include <net/ip6_route.h>
15 #include <net/addrconf.h>
16 #include <net/pkt_sched.h>
17 
18 #include <net/bluetooth/bluetooth.h>
19 #include <net/bluetooth/hci_core.h>
20 #include <net/bluetooth/l2cap.h>
21 
22 #include <net/6lowpan.h> /* for the compression support */
23 
24 #define VERSION "0.1"
25 
26 static struct dentry *lowpan_enable_debugfs;
27 static struct dentry *lowpan_control_debugfs;
28 
29 #define IFACE_NAME_TEMPLATE "bt%d"
30 
31 struct skb_cb {
32 	struct in6_addr addr;
33 	struct in6_addr gw;
34 	struct l2cap_chan *chan;
35 };
36 #define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
37 
38 /* The devices list contains the devices for which we are acting
39  * as a proxy. The BT 6LoWPAN device is a virtual device that
40  * connects to the Bluetooth LE device. The real connection to the
41  * BT device is made via the L2CAP layer. There is one virtual
42  * device per BT 6LoWPAN network (i.e. per hciX device).
43  * The list contains struct lowpan_btle_dev elements.
44  */
45 static LIST_HEAD(bt_6lowpan_devices);
46 static DEFINE_SPINLOCK(devices_lock);
47 
48 static bool enable_6lowpan;
49 
50 /* We are listening for incoming connections via this channel.
51  */
52 static struct l2cap_chan *listen_chan;
53 static DEFINE_MUTEX(set_lock);
54 
55 struct lowpan_peer {
56 	struct list_head list;
57 	struct rcu_head rcu;
58 	struct l2cap_chan *chan;
59 
60 	/* peer addresses in various formats */
61 	unsigned char lladdr[ETH_ALEN];
62 	struct in6_addr peer_addr;
63 };
64 
65 struct lowpan_btle_dev {
66 	struct list_head list;
67 
68 	struct hci_dev *hdev;
69 	struct net_device *netdev;
70 	struct list_head peers;
71 	atomic_t peer_count; /* number of items in peers list */
72 
73 	struct work_struct delete_netdev;
74 	struct delayed_work notify_peers;
75 };
76 
77 static inline struct lowpan_btle_dev *
78 lowpan_btle_dev(const struct net_device *netdev)
79 {
80 	return (struct lowpan_btle_dev *)lowpan_dev(netdev)->priv;
81 }
82 
83 static inline void peer_add(struct lowpan_btle_dev *dev,
84 			    struct lowpan_peer *peer)
85 {
86 	list_add_rcu(&peer->list, &dev->peers);
87 	atomic_inc(&dev->peer_count);
88 }
89 
90 static inline bool peer_del(struct lowpan_btle_dev *dev,
91 			    struct lowpan_peer *peer)
92 {
93 	list_del_rcu(&peer->list);
94 	kfree_rcu(peer, rcu);
95 
96 	module_put(THIS_MODULE);
97 
98 	if (atomic_dec_and_test(&dev->peer_count)) {
99 		BT_DBG("last peer");
100 		return true;
101 	}
102 
103 	return false;
104 }
105 
106 static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_btle_dev *dev,
107 						 bdaddr_t *ba, __u8 type)
108 {
109 	struct lowpan_peer *peer;
110 
111 	BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
112 	       ba, type);
113 
114 	rcu_read_lock();
115 
116 	list_for_each_entry_rcu(peer, &dev->peers, list) {
117 		BT_DBG("dst addr %pMR dst type %d",
118 		       &peer->chan->dst, peer->chan->dst_type);
119 
120 		if (bacmp(&peer->chan->dst, ba))
121 			continue;
122 
123 		if (type == peer->chan->dst_type) {
124 			rcu_read_unlock();
125 			return peer;
126 		}
127 	}
128 
129 	rcu_read_unlock();
130 
131 	return NULL;
132 }
133 
134 static inline struct lowpan_peer *
135 __peer_lookup_chan(struct lowpan_btle_dev *dev, struct l2cap_chan *chan)
136 {
137 	struct lowpan_peer *peer;
138 
139 	list_for_each_entry_rcu(peer, &dev->peers, list) {
140 		if (peer->chan == chan)
141 			return peer;
142 	}
143 
144 	return NULL;
145 }
146 
147 static inline struct lowpan_peer *
148 __peer_lookup_conn(struct lowpan_btle_dev *dev, struct l2cap_conn *conn)
149 {
150 	struct lowpan_peer *peer;
151 
152 	list_for_each_entry_rcu(peer, &dev->peers, list) {
153 		if (peer->chan->conn == conn)
154 			return peer;
155 	}
156 
157 	return NULL;
158 }
159 
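/* Find the peer that should receive a packet sent to the given IPv6
 * destination: work out the next hop (the destination itself, a
 * previously remembered gateway, or the route's next hop), match it
 * against the peers' IPv6 addresses and, failing that, against their
 * link-layer addresses via the neighbour cache (for addresses
 * assigned by SLAAC).
 */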
160 static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
161 						  struct in6_addr *daddr,
162 						  struct sk_buff *skb)
163 {
164 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
165 	int count = atomic_read(&dev->peer_count);
166 	const struct in6_addr *nexthop;
167 	struct lowpan_peer *peer;
168 	struct neighbour *neigh;
169 
170 	BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt);
171 
172 	if (!rt) {
173 		if (ipv6_addr_any(&lowpan_cb(skb)->gw)) {
174 			/* There is neither a route nor a gateway;
175 			 * the destination is probably a direct peer.
176 			 */
177 			nexthop = daddr;
178 		} else {
179 			/* There is a known gateway.
180 			 */
181 			nexthop = &lowpan_cb(skb)->gw;
182 		}
183 	} else {
184 		nexthop = rt6_nexthop(rt, daddr);
185 
186 		/* We need to remember the address because it is needed
187 		 * by bt_xmit() when sending the packet. In bt_xmit(), the
188 		 * destination routing info is not set.
189 		 */
190 		memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr));
191 	}
192 
193 	BT_DBG("gw %pI6c", nexthop);
194 
195 	rcu_read_lock();
196 
197 	list_for_each_entry_rcu(peer, &dev->peers, list) {
198 		BT_DBG("dst addr %pMR dst type %d ip %pI6c",
199 		       &peer->chan->dst, peer->chan->dst_type,
200 		       &peer->peer_addr);
201 
202 		if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) {
203 			rcu_read_unlock();
204 			return peer;
205 		}
206 	}
207 
208 	/* Use the neighbour cache for matching addresses assigned by SLAAC.
209 	 */
210 	neigh = __ipv6_neigh_lookup(dev->netdev, nexthop);
211 	if (neigh) {
212 		list_for_each_entry_rcu(peer, &dev->peers, list) {
213 			if (!memcmp(neigh->ha, peer->lladdr, ETH_ALEN)) {
214 				neigh_release(neigh);
215 				rcu_read_unlock();
216 				return peer;
217 			}
218 		}
219 		neigh_release(neigh);
220 	}
221 
222 	rcu_read_unlock();
223 
224 	return NULL;
225 }
226 
227 static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
228 {
229 	struct lowpan_btle_dev *entry;
230 	struct lowpan_peer *peer = NULL;
231 
232 	rcu_read_lock();
233 
234 	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
235 		peer = __peer_lookup_conn(entry, conn);
236 		if (peer)
237 			break;
238 	}
239 
240 	rcu_read_unlock();
241 
242 	return peer;
243 }
244 
245 static struct lowpan_btle_dev *lookup_dev(struct l2cap_conn *conn)
246 {
247 	struct lowpan_btle_dev *entry;
248 	struct lowpan_btle_dev *dev = NULL;
249 
250 	rcu_read_lock();
251 
252 	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
253 		if (conn->hcon->hdev == entry->hdev) {
254 			dev = entry;
255 			break;
256 		}
257 	}
258 
259 	rcu_read_unlock();
260 
261 	return dev;
262 }
263 
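/* Hand a copy of the skb to the network stack; the caller keeps
 * ownership of the original buffer.
 */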
264 static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
265 {
266 	struct sk_buff *skb_cp;
267 
268 	skb_cp = skb_copy(skb, GFP_ATOMIC);
269 	if (!skb_cp)
270 		return NET_RX_DROP;
271 
272 	return netif_rx_ni(skb_cp);
273 }
274 
275 static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
276 			   struct lowpan_peer *peer)
277 {
278 	const u8 *saddr;
279 
280 	saddr = peer->lladdr;
281 
282 	return lowpan_header_decompress(skb, netdev, netdev->dev_addr, saddr);
283 }
284 
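/* Handle a packet received from a peer. The packet is either an
 * uncompressed IPv6 packet carrying the one-byte 6LoWPAN IPv6
 * dispatch value, or an IPHC-compressed packet that is decompressed
 * before being passed up the stack.
 */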
285 static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
286 		    struct lowpan_peer *peer)
287 {
288 	struct sk_buff *local_skb;
289 	int ret;
290 
291 	if (!netif_running(dev))
292 		goto drop;
293 
294 	if (dev->type != ARPHRD_6LOWPAN || !skb->len)
295 		goto drop;
296 
297 	skb_reset_network_header(skb);
298 
299 	skb = skb_share_check(skb, GFP_ATOMIC);
300 	if (!skb)
301 		goto drop;
302 
303 	/* check that it's our buffer */
304 	if (lowpan_is_ipv6(*skb_network_header(skb))) {
305 		/* Pull off the 1-byte 6LoWPAN IPv6 dispatch value. */
306 		skb_pull(skb, 1);
307 
308 		/* Copy the packet so that the IPv6 header is
309 		 * properly aligned.
310 		 */
311 		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
312 					    skb_tailroom(skb), GFP_ATOMIC);
313 		if (!local_skb)
314 			goto drop;
315 
316 		local_skb->protocol = htons(ETH_P_IPV6);
317 		local_skb->pkt_type = PACKET_HOST;
318 		local_skb->dev = dev;
319 
320 		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
321 
322 		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
323 			kfree_skb(local_skb);
324 			goto drop;
325 		}
326 
327 		dev->stats.rx_bytes += skb->len;
328 		dev->stats.rx_packets++;
329 
330 		consume_skb(local_skb);
331 		consume_skb(skb);
332 	} else if (lowpan_is_iphc(*skb_network_header(skb))) {
333 		local_skb = skb_clone(skb, GFP_ATOMIC);
334 		if (!local_skb)
335 			goto drop;
336 
337 		local_skb->dev = dev;
338 
339 		ret = iphc_decompress(local_skb, dev, peer);
340 		if (ret < 0) {
341 			BT_DBG("iphc_decompress failed: %d", ret);
342 			kfree_skb(local_skb);
343 			goto drop;
344 		}
345 
346 		local_skb->protocol = htons(ETH_P_IPV6);
347 		local_skb->pkt_type = PACKET_HOST;
348 
349 		if (give_skb_to_upper(local_skb, dev)
350 				!= NET_RX_SUCCESS) {
351 			kfree_skb(local_skb);
352 			goto drop;
353 		}
354 
355 		dev->stats.rx_bytes += skb->len;
356 		dev->stats.rx_packets++;
357 
358 		consume_skb(local_skb);
359 		consume_skb(skb);
360 	} else {
361 		BT_DBG("unknown packet type");
362 		goto drop;
363 	}
364 
365 	return NET_RX_SUCCESS;
366 
367 drop:
368 	dev->stats.rx_dropped++;
369 	return NET_RX_DROP;
370 }
371 
372 /* Packet from BT LE device */
373 static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
374 {
375 	struct lowpan_btle_dev *dev;
376 	struct lowpan_peer *peer;
377 	int err;
378 
379 	peer = lookup_peer(chan->conn);
380 	if (!peer)
381 		return -ENOENT;
382 
383 	dev = lookup_dev(chan->conn);
384 	if (!dev || !dev->netdev)
385 		return -ENOENT;
386 
387 	err = recv_pkt(skb, dev->netdev, peer);
388 	if (err) {
389 		BT_DBG("recv pkt %d", err);
390 		err = -EAGAIN;
391 	}
392 
393 	return err;
394 }
395 
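/* Compress the IPv6 header of an outgoing packet and resolve the
 * destination peer. Returns a negative error code, 0 for a multicast
 * packet or 1 for a unicast packet (see bt_xmit()).
 */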
396 static int setup_header(struct sk_buff *skb, struct net_device *netdev,
397 			bdaddr_t *peer_addr, u8 *peer_addr_type)
398 {
399 	struct in6_addr ipv6_daddr;
400 	struct ipv6hdr *hdr;
401 	struct lowpan_btle_dev *dev;
402 	struct lowpan_peer *peer;
403 	u8 *daddr;
404 	int err, status = 0;
405 
406 	hdr = ipv6_hdr(skb);
407 
408 	dev = lowpan_btle_dev(netdev);
409 
410 	memcpy(&ipv6_daddr, &hdr->daddr, sizeof(ipv6_daddr));
411 
412 	if (ipv6_addr_is_multicast(&ipv6_daddr)) {
413 		lowpan_cb(skb)->chan = NULL;
414 		daddr = NULL;
415 	} else {
416 		BT_DBG("dest IP %pI6c", &ipv6_daddr);
417 
418 		/* The packet might be sent to the 6LoWPAN interface
419 		 * because of routing (either via the default route
420 		 * or a user-set route), so get the peer according to
421 		 * the destination address.
422 		 */
423 		peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
424 		if (!peer) {
425 			BT_DBG("no such peer");
426 			return -ENOENT;
427 		}
428 
429 		daddr = peer->lladdr;
430 		*peer_addr = peer->chan->dst;
431 		*peer_addr_type = peer->chan->dst_type;
432 		lowpan_cb(skb)->chan = peer->chan;
433 
434 		status = 1;
435 	}
436 
437 	lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr);
438 
439 	err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
440 	if (err < 0)
441 		return err;
442 
443 	return status;
444 }
445 
446 static int header_create(struct sk_buff *skb, struct net_device *netdev,
447 			 unsigned short type, const void *_daddr,
448 			 const void *_saddr, unsigned int len)
449 {
450 	if (type != ETH_P_IPV6)
451 		return -EINVAL;
452 
453 	return 0;
454 }
455 
456 /* Packet to BT LE device */
457 static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
458 		    struct net_device *netdev)
459 {
460 	struct msghdr msg;
461 	struct kvec iv;
462 	int err;
463 
464 	/* Remember the skb so that we can send EAGAIN to the caller if
465 	 * we run out of credits.
466 	 */
467 	chan->data = skb;
468 
469 	iv.iov_base = skb->data;
470 	iv.iov_len = skb->len;
471 
472 	memset(&msg, 0, sizeof(msg));
473 	iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, skb->len);
474 
475 	err = l2cap_chan_send(chan, &msg, skb->len);
476 	if (err > 0) {
477 		netdev->stats.tx_bytes += err;
478 		netdev->stats.tx_packets++;
479 		return 0;
480 	}
481 
482 	if (err < 0)
483 		netdev->stats.tx_errors++;
484 
485 	return err;
486 }
487 
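/* Send a multicast packet by cloning it to every peer connected to
 * this interface.
 */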
488 static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
489 {
490 	struct sk_buff *local_skb;
491 	struct lowpan_btle_dev *entry;
492 	int err = 0;
493 
494 	rcu_read_lock();
495 
496 	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
497 		struct lowpan_peer *pentry;
498 		struct lowpan_btle_dev *dev;
499 
500 		if (entry->netdev != netdev)
501 			continue;
502 
503 		dev = lowpan_btle_dev(entry->netdev);
504 
505 		list_for_each_entry_rcu(pentry, &dev->peers, list) {
506 			int ret;
507 
508 			local_skb = skb_clone(skb, GFP_ATOMIC);
509 
510 			BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
511 			       netdev->name,
512 			       &pentry->chan->dst, pentry->chan->dst_type,
513 			       &pentry->peer_addr, pentry->chan);
514 			ret = send_pkt(pentry->chan, local_skb, netdev);
515 			if (ret < 0)
516 				err = ret;
517 
518 			kfree_skb(local_skb);
519 		}
520 	}
521 
522 	rcu_read_unlock();
523 
524 	return err;
525 }
526 
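/* ndo_start_xmit handler: compress the IPv6 header and send the
 * packet either to a single peer (unicast) or to all peers behind
 * the interface (multicast).
 */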
527 static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
528 {
529 	int err = 0;
530 	bdaddr_t addr;
531 	u8 addr_type;
532 
533 	/* We must take a copy of the skb before we modify/replace the IPv6
534 	 * header, as the header could be used elsewhere.
535 	 */
536 	skb = skb_unshare(skb, GFP_ATOMIC);
537 	if (!skb)
538 		return NET_XMIT_DROP;
539 
540 	/* Return values from setup_header()
541 	 *  <0 - error, packet is dropped
542 	 *   0 - this is a multicast packet
543 	 *   1 - this is a unicast packet
544 	 */
545 	err = setup_header(skb, netdev, &addr, &addr_type);
546 	if (err < 0) {
547 		kfree_skb(skb);
548 		return NET_XMIT_DROP;
549 	}
550 
551 	if (err) {
552 		if (lowpan_cb(skb)->chan) {
553 			BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
554 			       netdev->name, &addr, addr_type,
555 			       &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan);
556 			err = send_pkt(lowpan_cb(skb)->chan, skb, netdev);
557 		} else {
558 			err = -ENOENT;
559 		}
560 	} else {
561 		/* We need to send the packet to every device behind this
562 		 * interface.
563 		 */
564 		err = send_mcast_pkt(skb, netdev);
565 	}
566 
567 	dev_kfree_skb(skb);
568 
569 	if (err)
570 		BT_DBG("ERROR: xmit failed (%d)", err);
571 
572 	return err < 0 ? NET_XMIT_DROP : err;
573 }
574 
575 static int bt_dev_init(struct net_device *dev)
576 {
577 	netdev_lockdep_set_classes(dev);
578 
579 	return 0;
580 }
581 
582 static const struct net_device_ops netdev_ops = {
583 	.ndo_init		= bt_dev_init,
584 	.ndo_start_xmit		= bt_xmit,
585 };
586 
587 static const struct header_ops header_ops = {
588 	.create	= header_create,
589 };
590 
591 static void netdev_setup(struct net_device *dev)
592 {
593 	dev->hard_header_len	= 0;
594 	dev->needed_tailroom	= 0;
595 	dev->flags		= IFF_RUNNING | IFF_MULTICAST;
596 	dev->watchdog_timeo	= 0;
597 	dev->tx_queue_len	= DEFAULT_TX_QUEUE_LEN;
598 
599 	dev->netdev_ops		= &netdev_ops;
600 	dev->header_ops		= &header_ops;
601 	dev->needs_free_netdev	= true;
602 }
603 
604 static struct device_type bt_type = {
605 	.name	= "bluetooth",
606 };
607 
608 static void ifup(struct net_device *netdev)
609 {
610 	int err;
611 
612 	rtnl_lock();
613 	err = dev_open(netdev, NULL);
614 	if (err < 0)
615 		BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
616 	rtnl_unlock();
617 }
618 
619 static void ifdown(struct net_device *netdev)
620 {
621 	rtnl_lock();
622 	dev_close(netdev);
623 	rtnl_unlock();
624 }
625 
626 static void do_notify_peers(struct work_struct *work)
627 {
628 	struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev,
629 						   notify_peers.work);
630 
631 	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
632 }
633 
634 static bool is_bt_6lowpan(struct hci_conn *hcon)
635 {
636 	if (hcon->type != LE_LINK)
637 		return false;
638 
639 	if (!enable_6lowpan)
640 		return false;
641 
642 	return true;
643 }
644 
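/* Create an L2CAP channel with the defaults used for IPSP: a
 * connection-oriented channel in LE credit based flow control mode
 * with an incoming MTU of 1280, the IPv6 minimum MTU.
 */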
645 static struct l2cap_chan *chan_create(void)
646 {
647 	struct l2cap_chan *chan;
648 
649 	chan = l2cap_chan_create();
650 	if (!chan)
651 		return NULL;
652 
653 	l2cap_chan_set_defaults(chan);
654 
655 	chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
656 	chan->mode = L2CAP_MODE_LE_FLOWCTL;
657 	chan->imtu = 1280;
658 
659 	return chan;
660 }
661 
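/* Allocate a peer entry for the channel, derive its link-layer
 * address (and from it an IPv6 address) from the channel's
 * destination bdaddr, and add it to the device's peer list.
 */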
662 static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
663 					struct lowpan_btle_dev *dev,
664 					bool new_netdev)
665 {
666 	struct lowpan_peer *peer;
667 
668 	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
669 	if (!peer)
670 		return NULL;
671 
672 	peer->chan = chan;
673 	memset(&peer->peer_addr, 0, sizeof(struct in6_addr));
674 
675 	baswap((void *)peer->lladdr, &chan->dst);
676 
677 	lowpan_iphc_uncompress_eui48_lladdr(&peer->peer_addr, peer->lladdr);
678 
679 	spin_lock(&devices_lock);
680 	INIT_LIST_HEAD(&peer->list);
681 	peer_add(dev, peer);
682 	spin_unlock(&devices_lock);
683 
684 	/* Notifying peers about us needs to be done without locks held */
685 	if (new_netdev)
686 		INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
687 	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));
688 
689 	return peer->chan;
690 }
691 
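/* Allocate and register a "bt%d" 6LoWPAN network device for the HCI
 * device behind the channel and add it to the bt_6lowpan_devices
 * list.
 */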
692 static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev)
693 {
694 	struct net_device *netdev;
695 	int err = 0;
696 
697 	netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)),
698 			      IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
699 			      netdev_setup);
700 	if (!netdev)
701 		return -ENOMEM;
702 
703 	netdev->addr_assign_type = NET_ADDR_PERM;
704 	baswap((void *)netdev->dev_addr, &chan->src);
705 
706 	netdev->netdev_ops = &netdev_ops;
707 	SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
708 	SET_NETDEV_DEVTYPE(netdev, &bt_type);
709 
710 	*dev = lowpan_btle_dev(netdev);
711 	(*dev)->netdev = netdev;
712 	(*dev)->hdev = chan->conn->hcon->hdev;
713 	INIT_LIST_HEAD(&(*dev)->peers);
714 
715 	spin_lock(&devices_lock);
716 	INIT_LIST_HEAD(&(*dev)->list);
717 	list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
718 	spin_unlock(&devices_lock);
719 
720 	err = lowpan_register_netdev(netdev, LOWPAN_LLTYPE_BTLE);
721 	if (err < 0) {
722 		BT_INFO("register_netdev failed %d", err);
723 		spin_lock(&devices_lock);
724 		list_del_rcu(&(*dev)->list);
725 		spin_unlock(&devices_lock);
726 		free_netdev(netdev);
727 		goto out;
728 	}
729 
730 	BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
731 	       netdev->ifindex, &chan->dst, chan->dst_type,
732 	       &chan->src, chan->src_type);
733 	set_bit(__LINK_STATE_PRESENT, &netdev->state);
734 
735 	return 0;
736 
737 out:
738 	return err;
739 }
740 
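/* Channel ready callback: create the 6LoWPAN netdev for this HCI
 * device if it does not exist yet, add the new peer and bring the
 * interface up.
 */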
741 static inline void chan_ready_cb(struct l2cap_chan *chan)
742 {
743 	struct lowpan_btle_dev *dev;
744 	bool new_netdev = false;
745 
746 	dev = lookup_dev(chan->conn);
747 
748 	BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev);
749 
750 	if (!dev) {
751 		if (setup_netdev(chan, &dev) < 0) {
752 			l2cap_chan_del(chan, -ENOENT);
753 			return;
754 		}
755 		new_netdev = true;
756 	}
757 
758 	if (!try_module_get(THIS_MODULE))
759 		return;
760 
761 	add_peer_chan(chan, dev, new_netdev);
762 	ifup(dev->netdev);
763 }
764 
765 static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan)
766 {
767 	struct l2cap_chan *chan;
768 
769 	chan = chan_create();
770 	if (!chan)
771 		return NULL;
772 
773 	chan->ops = pchan->ops;
774 
775 	BT_DBG("chan %p pchan %p", chan, pchan);
776 
777 	return chan;
778 }
779 
780 static void delete_netdev(struct work_struct *work)
781 {
782 	struct lowpan_btle_dev *entry = container_of(work,
783 						     struct lowpan_btle_dev,
784 						     delete_netdev);
785 
786 	lowpan_unregister_netdev(entry->netdev);
787 
788 	/* The entry pointer is deleted by the netdev destructor. */
789 }
790 
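/* Channel close callback: remove the peer that used this channel.
 * When the last peer of a device goes away, bring the interface down
 * and, unless the connection is still present, schedule removal of
 * the netdev.
 */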
791 static void chan_close_cb(struct l2cap_chan *chan)
792 {
793 	struct lowpan_btle_dev *entry;
794 	struct lowpan_btle_dev *dev = NULL;
795 	struct lowpan_peer *peer;
796 	int err = -ENOENT;
797 	bool last = false, remove = true;
798 
799 	BT_DBG("chan %p conn %p", chan, chan->conn);
800 
801 	if (chan->conn && chan->conn->hcon) {
802 		if (!is_bt_6lowpan(chan->conn->hcon))
803 			return;
804 
805 		/* If conn is set, then the netdev is also there and we should
806 		 * not remove it.
807 		 */
808 		remove = false;
809 	}
810 
811 	spin_lock(&devices_lock);
812 
813 	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
814 		dev = lowpan_btle_dev(entry->netdev);
815 		peer = __peer_lookup_chan(dev, chan);
816 		if (peer) {
817 			last = peer_del(dev, peer);
818 			err = 0;
819 
820 			BT_DBG("dev %p removing %speer %p", dev,
821 			       last ? "last " : "1 ", peer);
822 			BT_DBG("chan %p orig refcnt %d", chan,
823 			       kref_read(&chan->kref));
824 
825 			l2cap_chan_put(chan);
826 			break;
827 		}
828 	}
829 
830 	if (!err && last && dev && !atomic_read(&dev->peer_count)) {
831 		spin_unlock(&devices_lock);
832 
833 		cancel_delayed_work_sync(&dev->notify_peers);
834 
835 		ifdown(dev->netdev);
836 
837 		if (remove) {
838 			INIT_WORK(&entry->delete_netdev, delete_netdev);
839 			schedule_work(&entry->delete_netdev);
840 		}
841 	} else {
842 		spin_unlock(&devices_lock);
843 	}
844 
845 	return;
846 }
847 
848 static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err)
849 {
850 	BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn,
851 	       state_to_string(state), err);
852 }
853 
854 static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
855 					 unsigned long hdr_len,
856 					 unsigned long len, int nb)
857 {
858 	/* Note that we must allocate using GFP_ATOMIC here as
859 	 * this function is originally called from the netdev hard
860 	 * xmit function in atomic context.
861 	 */
862 	return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
863 }
864 
865 static void chan_suspend_cb(struct l2cap_chan *chan)
866 {
867 	struct lowpan_btle_dev *dev;
868 
869 	BT_DBG("chan %p suspend", chan);
870 
871 	dev = lookup_dev(chan->conn);
872 	if (!dev || !dev->netdev)
873 		return;
874 
875 	netif_stop_queue(dev->netdev);
876 }
877 
878 static void chan_resume_cb(struct l2cap_chan *chan)
879 {
880 	struct lowpan_btle_dev *dev;
881 
882 	BT_DBG("chan %p resume", chan);
883 
884 	dev = lookup_dev(chan->conn);
885 	if (!dev || !dev->netdev)
886 		return;
887 
888 	netif_wake_queue(dev->netdev);
889 }
890 
891 static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
892 {
893 	return L2CAP_CONN_TIMEOUT;
894 }
895 
896 static const struct l2cap_ops bt_6lowpan_chan_ops = {
897 	.name			= "L2CAP 6LoWPAN channel",
898 	.new_connection		= chan_new_conn_cb,
899 	.recv			= chan_recv_cb,
900 	.close			= chan_close_cb,
901 	.state_change		= chan_state_change_cb,
902 	.ready			= chan_ready_cb,
903 	.resume			= chan_resume_cb,
904 	.suspend		= chan_suspend_cb,
905 	.get_sndtimeo		= chan_get_sndtimeo_cb,
906 	.alloc_skb		= chan_alloc_skb_cb,
907 
908 	.teardown		= l2cap_chan_no_teardown,
909 	.defer			= l2cap_chan_no_defer,
910 	.set_shutdown		= l2cap_chan_no_set_shutdown,
911 };
912 
913 static inline __u8 bdaddr_type(__u8 type)
914 {
915 	if (type == ADDR_LE_DEV_PUBLIC)
916 		return BDADDR_LE_PUBLIC;
917 	else
918 		return BDADDR_LE_RANDOM;
919 }
920 
921 static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
922 {
923 	struct l2cap_chan *chan;
924 	int err;
925 
926 	chan = chan_create();
927 	if (!chan)
928 		return -EINVAL;
929 
930 	chan->ops = &bt_6lowpan_chan_ops;
931 
932 	err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
933 				 addr, dst_type);
934 
935 	BT_DBG("chan %p err %d", chan, err);
936 	if (err < 0)
937 		l2cap_chan_put(chan);
938 
939 	return err;
940 }
941 
942 static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
943 {
944 	struct lowpan_peer *peer;
945 
946 	BT_DBG("conn %p dst type %d", conn, dst_type);
947 
948 	peer = lookup_peer(conn);
949 	if (!peer)
950 		return -ENOENT;
951 
952 	BT_DBG("peer %p chan %p", peer, peer->chan);
953 
954 	l2cap_chan_close(peer->chan, ENOENT);
955 
956 	return 0;
957 }
958 
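/* Create the channel that listens for incoming IPSP connections when
 * 6LoWPAN support is enabled.
 */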
959 static struct l2cap_chan *bt_6lowpan_listen(void)
960 {
961 	bdaddr_t *addr = BDADDR_ANY;
962 	struct l2cap_chan *chan;
963 	int err;
964 
965 	if (!enable_6lowpan)
966 		return NULL;
967 
968 	chan = chan_create();
969 	if (!chan)
970 		return NULL;
971 
972 	chan->ops = &bt_6lowpan_chan_ops;
973 	chan->state = BT_LISTEN;
974 	chan->src_type = BDADDR_LE_PUBLIC;
975 
976 	atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);
977 
978 	BT_DBG("chan %p src type %d", chan, chan->src_type);
979 
980 	err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
981 	if (err) {
982 		l2cap_chan_put(chan);
983 		BT_ERR("psm cannot be added err %d", err);
984 		return NULL;
985 	}
986 
987 	return chan;
988 }
989 
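/* Parse a "<bdaddr> <addr type>" string written to the debugfs
 * control file and look up the matching LE connection.
 */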
990 static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
991 			  struct l2cap_conn **conn)
992 {
993 	struct hci_conn *hcon;
994 	struct hci_dev *hdev;
995 	int n;
996 
997 	n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
998 		   &addr->b[5], &addr->b[4], &addr->b[3],
999 		   &addr->b[2], &addr->b[1], &addr->b[0],
1000 		   addr_type);
1001 
1002 	if (n < 7)
1003 		return -EINVAL;
1004 
1005 	/* The LE_PUBLIC address type is ignored because of BDADDR_ANY */
1006 	hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC);
1007 	if (!hdev)
1008 		return -ENOENT;
1009 
1010 	hci_dev_lock(hdev);
1011 	hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
1012 	hci_dev_unlock(hdev);
1013 
1014 	if (!hcon)
1015 		return -ENOENT;
1016 
1017 	*conn = (struct l2cap_conn *)hcon->l2cap_data;
1018 
1019 	BT_DBG("conn %p dst %pMR type %d", *conn, &hcon->dst, hcon->dst_type);
1020 
1021 	return 0;
1022 }
1023 
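/* Close the L2CAP channel of every known peer; used when 6LoWPAN
 * support is disabled or its state changes via debugfs.
 */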
1024 static void disconnect_all_peers(void)
1025 {
1026 	struct lowpan_btle_dev *entry;
1027 	struct lowpan_peer *peer, *tmp_peer, *new_peer;
1028 	struct list_head peers;
1029 
1030 	INIT_LIST_HEAD(&peers);
1031 
1032 	/* We make a separate list of peers because close_cb() will
1033 	 * modify the device's peer list, so it is better not to
1034 	 * iterate and modify the same list at the same time.
1035 	 */
1036 
1037 	rcu_read_lock();
1038 
1039 	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
1040 		list_for_each_entry_rcu(peer, &entry->peers, list) {
1041 			new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
1042 			if (!new_peer)
1043 				break;
1044 
1045 			new_peer->chan = peer->chan;
1046 			INIT_LIST_HEAD(&new_peer->list);
1047 
1048 			list_add(&new_peer->list, &peers);
1049 		}
1050 	}
1051 
1052 	rcu_read_unlock();
1053 
1054 	spin_lock(&devices_lock);
1055 	list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
1056 		l2cap_chan_close(peer->chan, ENOENT);
1057 
1058 		list_del_rcu(&peer->list);
1059 		kfree_rcu(peer, rcu);
1060 	}
1061 	spin_unlock(&devices_lock);
1062 }
1063 
1064 struct set_enable {
1065 	struct work_struct work;
1066 	bool flag;
1067 };
1068 
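/* Worker for the 6lowpan_enable debugfs attribute: disconnect peers
 * when needed, update the enable flag and recreate the listening
 * channel.
 */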
1069 static void do_enable_set(struct work_struct *work)
1070 {
1071 	struct set_enable *set_enable = container_of(work,
1072 						     struct set_enable, work);
1073 
1074 	if (!set_enable->flag || enable_6lowpan != set_enable->flag)
1075 		/* Disconnect existing connections if 6LoWPAN is
1076 		 * being disabled or its state is changing.
1077 		 */
1078 		disconnect_all_peers();
1079 
1080 	enable_6lowpan = set_enable->flag;
1081 
1082 	mutex_lock(&set_lock);
1083 	if (listen_chan) {
1084 		l2cap_chan_close(listen_chan, 0);
1085 		l2cap_chan_put(listen_chan);
1086 	}
1087 
1088 	listen_chan = bt_6lowpan_listen();
1089 	mutex_unlock(&set_lock);
1090 
1091 	kfree(set_enable);
1092 }
1093 
1094 static int lowpan_enable_set(void *data, u64 val)
1095 {
1096 	struct set_enable *set_enable;
1097 
1098 	set_enable = kzalloc(sizeof(*set_enable), GFP_KERNEL);
1099 	if (!set_enable)
1100 		return -ENOMEM;
1101 
1102 	set_enable->flag = !!val;
1103 	INIT_WORK(&set_enable->work, do_enable_set);
1104 
1105 	schedule_work(&set_enable->work);
1106 
1107 	return 0;
1108 }
1109 
1110 static int lowpan_enable_get(void *data, u64 *val)
1111 {
1112 	*val = enable_6lowpan;
1113 	return 0;
1114 }
1115 
1116 DEFINE_DEBUGFS_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get,
1117 			 lowpan_enable_set, "%llu\n");
1118 
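/* Handle writes to the 6lowpan_control debugfs file. Supported
 * commands are "connect <bdaddr> <type>" and
 * "disconnect <bdaddr> <type>".
 */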
1119 static ssize_t lowpan_control_write(struct file *fp,
1120 				    const char __user *user_buffer,
1121 				    size_t count,
1122 				    loff_t *position)
1123 {
1124 	char buf[32];
1125 	size_t buf_size = min(count, sizeof(buf) - 1);
1126 	int ret;
1127 	bdaddr_t addr;
1128 	u8 addr_type;
1129 	struct l2cap_conn *conn = NULL;
1130 
1131 	if (copy_from_user(buf, user_buffer, buf_size))
1132 		return -EFAULT;
1133 
1134 	buf[buf_size] = '\0';
1135 
1136 	if (memcmp(buf, "connect ", 8) == 0) {
1137 		ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn);
1138 		if (ret == -EINVAL)
1139 			return ret;
1140 
1141 		mutex_lock(&set_lock);
1142 		if (listen_chan) {
1143 			l2cap_chan_close(listen_chan, 0);
1144 			l2cap_chan_put(listen_chan);
1145 			listen_chan = NULL;
1146 		}
1147 		mutex_unlock(&set_lock);
1148 
1149 		if (conn) {
1150 			struct lowpan_peer *peer;
1151 
1152 			if (!is_bt_6lowpan(conn->hcon))
1153 				return -EINVAL;
1154 
1155 			peer = lookup_peer(conn);
1156 			if (peer) {
1157 				BT_DBG("6LoWPAN connection already exists");
1158 				return -EALREADY;
1159 			}
1160 
1161 			BT_DBG("conn %p dst %pMR type %d user %d", conn,
1162 			       &conn->hcon->dst, conn->hcon->dst_type,
1163 			       addr_type);
1164 		}
1165 
1166 		ret = bt_6lowpan_connect(&addr, addr_type);
1167 		if (ret < 0)
1168 			return ret;
1169 
1170 		return count;
1171 	}
1172 
1173 	if (memcmp(buf, "disconnect ", 11) == 0) {
1174 		ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn);
1175 		if (ret < 0)
1176 			return ret;
1177 
1178 		ret = bt_6lowpan_disconnect(conn, addr_type);
1179 		if (ret < 0)
1180 			return ret;
1181 
1182 		return count;
1183 	}
1184 
1185 	return count;
1186 }
1187 
1188 static int lowpan_control_show(struct seq_file *f, void *ptr)
1189 {
1190 	struct lowpan_btle_dev *entry;
1191 	struct lowpan_peer *peer;
1192 
1193 	spin_lock(&devices_lock);
1194 
1195 	list_for_each_entry(entry, &bt_6lowpan_devices, list) {
1196 		list_for_each_entry(peer, &entry->peers, list)
1197 			seq_printf(f, "%pMR (type %u)\n",
1198 				   &peer->chan->dst, peer->chan->dst_type);
1199 	}
1200 
1201 	spin_unlock(&devices_lock);
1202 
1203 	return 0;
1204 }
1205 
1206 static int lowpan_control_open(struct inode *inode, struct file *file)
1207 {
1208 	return single_open(file, lowpan_control_show, inode->i_private);
1209 }
1210 
1211 static const struct file_operations lowpan_control_fops = {
1212 	.open		= lowpan_control_open,
1213 	.read		= seq_read,
1214 	.write		= lowpan_control_write,
1215 	.llseek		= seq_lseek,
1216 	.release	= single_release,
1217 };
1218 
1219 static void disconnect_devices(void)
1220 {
1221 	struct lowpan_btle_dev *entry, *tmp, *new_dev;
1222 	struct list_head devices;
1223 
1224 	INIT_LIST_HEAD(&devices);
1225 
1226 	/* We make a separate list of devices because unregister_netdev()
1227 	 * will call device_event(), which will also want to modify the same
1228 	 * devices list.
1229 	 */
1230 
1231 	rcu_read_lock();
1232 
1233 	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
1234 		new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
1235 		if (!new_dev)
1236 			break;
1237 
1238 		new_dev->netdev = entry->netdev;
1239 		INIT_LIST_HEAD(&new_dev->list);
1240 
1241 		list_add_rcu(&new_dev->list, &devices);
1242 	}
1243 
1244 	rcu_read_unlock();
1245 
1246 	list_for_each_entry_safe(entry, tmp, &devices, list) {
1247 		ifdown(entry->netdev);
1248 		BT_DBG("Unregistering netdev %s %p",
1249 		       entry->netdev->name, entry->netdev);
1250 		lowpan_unregister_netdev(entry->netdev);
1251 		kfree(entry);
1252 	}
1253 }
1254 
1255 static int device_event(struct notifier_block *unused,
1256 			unsigned long event, void *ptr)
1257 {
1258 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
1259 	struct lowpan_btle_dev *entry;
1260 
1261 	if (netdev->type != ARPHRD_6LOWPAN)
1262 		return NOTIFY_DONE;
1263 
1264 	switch (event) {
1265 	case NETDEV_UNREGISTER:
1266 		spin_lock(&devices_lock);
1267 		list_for_each_entry(entry, &bt_6lowpan_devices, list) {
1268 			if (entry->netdev == netdev) {
1269 				BT_DBG("Unregistered netdev %s %p",
1270 				       netdev->name, netdev);
1271 				list_del(&entry->list);
1272 				break;
1273 			}
1274 		}
1275 		spin_unlock(&devices_lock);
1276 		break;
1277 	}
1278 
1279 	return NOTIFY_DONE;
1280 }
1281 
1282 static struct notifier_block bt_6lowpan_dev_notifier = {
1283 	.notifier_call = device_event,
1284 };
1285 
1286 static int __init bt_6lowpan_init(void)
1287 {
1288 	lowpan_enable_debugfs = debugfs_create_file_unsafe("6lowpan_enable",
1289 							   0644, bt_debugfs,
1290 							   NULL,
1291 							   &lowpan_enable_fops);
1292 	lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
1293 						     bt_debugfs, NULL,
1294 						     &lowpan_control_fops);
1295 
1296 	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
1297 }
1298 
1299 static void __exit bt_6lowpan_exit(void)
1300 {
1301 	debugfs_remove(lowpan_enable_debugfs);
1302 	debugfs_remove(lowpan_control_debugfs);
1303 
1304 	if (listen_chan) {
1305 		l2cap_chan_close(listen_chan, 0);
1306 		l2cap_chan_put(listen_chan);
1307 	}
1308 
1309 	disconnect_devices();
1310 
1311 	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
1312 }
1313 
1314 module_init(bt_6lowpan_init);
1315 module_exit(bt_6lowpan_exit);
1316 
1317 MODULE_AUTHOR("Jukka Rissanen <jukka.rissanen@linux.intel.com>");
1318 MODULE_DESCRIPTION("Bluetooth 6LoWPAN");
1319 MODULE_VERSION(VERSION);
1320 MODULE_LICENSE("GPL");
1321