xref: /openbmc/linux/net/bluetooth/hci_sock.c (revision e8f6f3b4)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI sockets. */
26 
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
33 
/* Number of sockets currently bound to the monitor channel; the
 * monitor delivery paths are skipped entirely while this is zero.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* Per-socket state. bt_sock must remain the first member so the
 * plain cast in hci_pi() above stays valid.
 */
struct hci_pinfo {
	struct bt_sock    bt;        /* common Bluetooth socket base */
	struct hci_dev    *hdev;     /* bound device, NULL when unbound */
	struct hci_filter filter;    /* raw-channel packet/event filter */
	__u32             cmsg_mask; /* HCI_CMSG_* ancillary data flags */
	unsigned short    channel;   /* HCI_CHANNEL_* this socket bound to */
};
48 
/* Test bit nr in the __u32-word bitmap at addr.
 *
 * The filter masks are arrays of __u32 words: bit nr lives in word
 * nr / 32 at position nr % 32.  The pointer is const-qualified so the
 * read-only hci_sec_filter tables can be passed without discarding
 * their const qualifier (callers with writable masks are unaffected).
 */
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
53 
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

/* Whitelist describing what an unprivileged (no CAP_NET_RAW) raw
 * socket may receive and send: allowed packet types, allowed events
 * and, per command group (OGF), a bitmap of allowed commands (OCF).
 */
struct hci_sec_filter {
	__u32 type_mask;      /* one bit per HCI packet type */
	__u32 event_mask[2];  /* 64-bit bitmap of allowed events */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4]; /* allowed OCFs per OGF */
};
62 
/* Default security filter for unprivileged raw sockets; each set bit
 * permits the corresponding packet type, event or command.
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
83 
/* Global list of all open HCI sockets, protected by its rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
87 
88 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
89 {
90 	struct hci_filter *flt;
91 	int flt_type, flt_event;
92 
93 	/* Apply filter */
94 	flt = &hci_pi(sk)->filter;
95 
96 	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
97 		flt_type = 0;
98 	else
99 		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
100 
101 	if (!test_bit(flt_type, &flt->type_mask))
102 		return true;
103 
104 	/* Extra filter for event packets only */
105 	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
106 		return false;
107 
108 	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
109 
110 	if (!hci_test_bit(flt_event, &flt->event_mask))
111 		return true;
112 
113 	/* Check filter only when opcode is set */
114 	if (!flt->opcode)
115 		return false;
116 
117 	if (flt_event == HCI_EV_CMD_COMPLETE &&
118 	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
119 		return true;
120 
121 	if (flt_event == HCI_EV_CMD_STATUS &&
122 	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
123 		return true;
124 
125 	return false;
126 }
127 
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Only bound sockets attached to this device qualify */
		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* Raw sockets get the per-socket filter applied */
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* User channel only sees incoming event, ACL and
			 * SCO traffic; everything else is suppressed.
			 */
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom; done lazily
			 * so nothing is copied when no socket matches.
			 */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		/* Each receiver gets its own clone of the shared copy */
		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
185 
186 /* Send frame to control socket */
187 void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
188 {
189 	struct sock *sk;
190 
191 	BT_DBG("len %d", skb->len);
192 
193 	read_lock(&hci_sk_list.lock);
194 
195 	sk_for_each(sk, &hci_sk_list.head) {
196 		struct sk_buff *nskb;
197 
198 		/* Skip the original socket */
199 		if (sk == skip_sk)
200 			continue;
201 
202 		if (sk->sk_state != BT_BOUND)
203 			continue;
204 
205 		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
206 			continue;
207 
208 		nskb = skb_clone(skb, GFP_ATOMIC);
209 		if (!nskb)
210 			continue;
211 
212 		if (sock_queue_rcv_skb(sk, nskb))
213 			kfree_skb(nskb);
214 	}
215 
216 	read_unlock(&hci_sk_list.lock);
217 }
218 
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	/* Fast path: nobody is listening on the monitor channel */
	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map the HCI packet type (and direction for data packets) to
	 * the corresponding monitor protocol opcode.
	 */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		/* Unknown packet types are not forwarded */
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom; done
			 * lazily so nothing is copied when no monitor
			 * socket matches.
			 */
			skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE,
						      GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
293 
294 static void send_monitor_event(struct sk_buff *skb)
295 {
296 	struct sock *sk;
297 
298 	BT_DBG("len %d", skb->len);
299 
300 	read_lock(&hci_sk_list.lock);
301 
302 	sk_for_each(sk, &hci_sk_list.head) {
303 		struct sk_buff *nskb;
304 
305 		if (sk->sk_state != BT_BOUND)
306 			continue;
307 
308 		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
309 			continue;
310 
311 		nskb = skb_clone(skb, GFP_ATOMIC);
312 		if (!nskb)
313 			continue;
314 
315 		if (sock_queue_rcv_skb(sk, nskb))
316 			kfree_skb(nskb);
317 	}
318 
319 	read_unlock(&hci_sk_list.lock);
320 }
321 
/* Build a monitor-channel index event (HCI_MON_NEW_INDEX or
 * HCI_MON_DEL_INDEX) for the given device.  Returns a freshly
 * allocated skb owned by the caller, or NULL on allocation failure
 * or unsupported event.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		/* NEW_INDEX carries type, bus, address and name */
		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		/* DEL_INDEX has no payload beyond the header */
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the monitor header in front of the payload */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
365 
/* Replay a NEW_INDEX event for every registered controller to a
 * freshly bound monitor socket, so it learns the current index state
 * before live events start flowing.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}
385 
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	/* Synthesize a stack-internal HCI event carrying the payload */
	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so it passes socket delivery checks */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
412 
/* Notify listeners about a device lifecycle event and, on device
 * unregistration, detach every socket still bound to it.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event  = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				/* Wake the socket with EPIPE and drop the
				 * device reference it was holding.
				 */
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
455 
/* Release an HCI socket: undo channel-specific bookkeeping, drop the
 * device reference and free queued skbs.  Always returns 0.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	/* Closing a monitor socket may re-enable the fast path that
	 * skips monitor delivery entirely.
	 */
	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* Give the device back to the management
			 * interface and shut it down.
			 */
			mgmt_index_added(hdev);
			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
			hci_dev_close(hdev->id);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
492 
493 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
494 {
495 	bdaddr_t bdaddr;
496 	int err;
497 
498 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
499 		return -EFAULT;
500 
501 	hci_dev_lock(hdev);
502 
503 	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
504 
505 	hci_dev_unlock(hdev);
506 
507 	return err;
508 }
509 
510 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
511 {
512 	bdaddr_t bdaddr;
513 	int err;
514 
515 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
516 		return -EFAULT;
517 
518 	hci_dev_lock(hdev);
519 
520 	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
521 
522 	hci_dev_unlock(hdev);
523 
524 	return err;
525 }
526 
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	/* Devices claimed by a user channel socket or not yet
	 * configured do not service legacy ioctls.
	 */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is obsolete; still enforce the old
		 * permission check before refusing.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
570 
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	/* Only raw channel sockets support the legacy ioctl interface */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* Drop the socket lock: the commands below act on global
	 * device state (and may sleep), not on this socket.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Everything else needs the socket's bound device; retake the
	 * lock before touching per-socket state.
	 */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
643 
/* Bind an HCI socket to a channel (and, for raw/user channels, to a
 * device).  Performs the per-channel capability checks and device
 * reference accounting.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy only what the caller supplied; the remainder of haddr
	 * stays zeroed so short addresses are handled safely.
	 */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* HCI_DEV_NONE means "all devices"; otherwise pin the
		 * requested device and mark it promiscuous.
		 */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel requires one specific device */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* The device must be completely idle before it can be
		 * handed over to exclusive user space control.
		 */
		if (test_bit(HCI_UP, &hdev->flags) ||
		    test_bit(HCI_INIT, &hdev->flags) ||
		    test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags)) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Only one user channel socket per device */
		if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		/* Hide the device from the management interface while
		 * it is under user space control.
		 */
		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			/* Roll back the exclusive claim on failure */
			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
			mgmt_index_added(hdev);
			hci_dev_put(hdev);
			goto done;
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* Bring the new listener up to date on the currently
		 * registered controllers before live events start.
		 */
		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}


	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
785 
786 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
787 			    int *addr_len, int peer)
788 {
789 	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
790 	struct sock *sk = sock->sk;
791 	struct hci_dev *hdev;
792 	int err = 0;
793 
794 	BT_DBG("sock %p sk %p", sock, sk);
795 
796 	if (peer)
797 		return -EOPNOTSUPP;
798 
799 	lock_sock(sk);
800 
801 	hdev = hci_pi(sk)->hdev;
802 	if (!hdev) {
803 		err = -EBADFD;
804 		goto done;
805 	}
806 
807 	*addr_len = sizeof(*haddr);
808 	haddr->hci_family = AF_BLUETOOTH;
809 	haddr->hci_dev    = hdev->id;
810 	haddr->hci_channel= hci_pi(sk)->channel;
811 
812 done:
813 	release_sock(sk);
814 	return err;
815 }
816 
/* Attach the ancillary data this socket asked for (packet direction
 * and/or receive timestamp) to an outgoing recvmsg().
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks expect a compat_timeval layout */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
853 
/* Receive one queued frame.  Truncates to the caller's buffer length
 * (setting MSG_TRUNC) and adds per-channel ancillary data.
 */
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	/* Raw sockets use the legacy HCI cmsg interface; the other
	 * channels only get standard socket timestamps.
	 */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
898 
/* Transmit one frame from userspace.  The first payload byte is the
 * HCI packet type; routing and permission checks depend on channel
 * and packet type.
 */
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum: type byte plus a command/data header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_CONTROL:
		/* Control traffic is handled by the management code */
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		/* Monitor sockets are read-only */
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* Pull the leading packet-type byte out of the payload */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security whitelist require
		 * CAP_NET_RAW.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor-specific commands (OGF 0x3f) bypass the
		 * command queue and go straight to the driver.
		 */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw data packets always require CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1018 
/* Set raw-channel socket options: ancillary-data flags and the
 * packet/event filter.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Seed uf with the current filter so a short write
		 * from userspace leaves the untouched fields intact.
		 */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged sockets cannot widen their filter past
		 * the security whitelist.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1101 
/* Read back raw-channel socket options set via setsockopt above. */
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		/* Copy the kernel filter into the userspace layout;
		 * memset avoids leaking uninitialized stack padding.
		 */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1167 
/* proto_ops for HCI sockets; unsupported datagram operations are
 * stubbed with the sock_no_* helpers.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1187 
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for
 * the full hci_pinfo, not just struct sock.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1193 
/* Create a new HCI socket (SOCK_RAW only) and link it into the
 * global socket list.
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
1222 
/* Registered with bt_sock_register() for BTPROTO_HCI sockets */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1228 
/* Register the HCI socket protocol, family and procfs entry.
 * Unwinds prior registrations on any failure.
 */
int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
1258 
/* Tear down everything hci_sock_init() registered, in reverse order */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}
1265