xref: /openbmc/linux/net/bluetooth/hci_sock.c (revision 1ab142d4)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI sockets. */
26 
27 #include <linux/module.h>
28 
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/slab.h>
34 #include <linux/poll.h>
35 #include <linux/fcntl.h>
36 #include <linux/init.h>
37 #include <linux/skbuff.h>
38 #include <linux/workqueue.h>
39 #include <linux/interrupt.h>
40 #include <linux/compat.h>
41 #include <linux/socket.h>
42 #include <linux/ioctl.h>
43 #include <net/sock.h>
44 
45 #include <asm/system.h>
46 #include <linux/uaccess.h>
47 #include <asm/unaligned.h>
48 
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/hci_mon.h>
52 
/* Count of sockets bound to the HCI monitor channel; hci_send_to_monitor()
 * and hci_sock_dev_event() only mirror traffic while this is non-zero.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
54 
55 /* ----- HCI socket interface ----- */
56 
57 static inline int hci_test_bit(int nr, void *addr)
58 {
59 	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
60 }
61 
62 /* Security filter */
63 static struct hci_sec_filter hci_sec_filter = {
64 	/* Packet types */
65 	0x10,
66 	/* Events */
67 	{ 0x1000d9fe, 0x0000b00c },
68 	/* Commands */
69 	{
70 		{ 0x0 },
71 		/* OGF_LINK_CTL */
72 		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
73 		/* OGF_LINK_POLICY */
74 		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
75 		/* OGF_HOST_CTL */
76 		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
77 		/* OGF_INFO_PARAM */
78 		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
79 		/* OGF_STATUS_PARAM */
80 		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
81 	}
82 };
83 
/* Global list of all HCI sockets, protected by its embedded rwlock;
 * readers iterate it to fan frames out to every interested socket.
 */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
87 
88 /* Send frame to RAW socket */
/* Send frame to RAW socket.
 *
 * Deliver @skb to every bound HCI_CHANNEL_RAW socket attached to @hdev,
 * except the socket the frame originated from, applying each socket's
 * per-socket filter. A single private copy (with the packet-type byte
 * pushed in front) is created lazily and then cloned per receiver.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		/* Only bound sockets attached to this device qualify */
		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		/* Apply filter */
		flt = &hci_pi(sk)->filter;

		/* Vendor packets match against filter bit 0; every other
		 * packet type uses its own (masked) value as the bit index.
		 */
		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
				0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
			continue;

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			/* With an opcode filter set, Command Complete/Status
			 * events must carry that opcode (at payload offset 3
			 * resp. 4) to pass.
			 */
			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Drop the clone if the socket's receive queue is full */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* Release our reference; queued clones keep the data alive */
	kfree_skb(skb_copy);
}
158 
159 /* Send frame to control socket */
160 void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
161 {
162 	struct sock *sk;
163 	struct hlist_node *node;
164 
165 	BT_DBG("len %d", skb->len);
166 
167 	read_lock(&hci_sk_list.lock);
168 
169 	sk_for_each(sk, node, &hci_sk_list.head) {
170 		struct sk_buff *nskb;
171 
172 		/* Skip the original socket */
173 		if (sk == skip_sk)
174 			continue;
175 
176 		if (sk->sk_state != BT_BOUND)
177 			continue;
178 
179 		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
180 			continue;
181 
182 		nskb = skb_clone(skb, GFP_ATOMIC);
183 		if (!nskb)
184 			continue;
185 
186 		if (sock_queue_rcv_skb(sk, nskb))
187 			kfree_skb(nskb);
188 	}
189 
190 	read_unlock(&hci_sk_list.lock);
191 }
192 
193 /* Send frame to monitor socket */
/* Send frame to monitor socket.
 *
 * Mirror @skb from @hdev to every bound HCI_CHANNEL_MONITOR socket,
 * prefixed with a hci_mon_hdr describing opcode/index/length. Cheap
 * early-out when no monitor is attached (monitor_promisc == 0).
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and direction for data packets) to the monitor
	 * opcode; unknown types are not mirrored at all.
	 */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		/* One shared copy with the monitor header is built lazily,
		 * then cloned per receiver.
		 */
		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Drop the clone if the receive queue is full */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
267 
268 static void send_monitor_event(struct sk_buff *skb)
269 {
270 	struct sock *sk;
271 	struct hlist_node *node;
272 
273 	BT_DBG("len %d", skb->len);
274 
275 	read_lock(&hci_sk_list.lock);
276 
277 	sk_for_each(sk, node, &hci_sk_list.head) {
278 		struct sk_buff *nskb;
279 
280 		if (sk->sk_state != BT_BOUND)
281 			continue;
282 
283 		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
284 			continue;
285 
286 		nskb = skb_clone(skb, GFP_ATOMIC);
287 		if (!nskb)
288 			continue;
289 
290 		if (sock_queue_rcv_skb(sk, nskb))
291 			kfree_skb(nskb);
292 	}
293 
294 	read_unlock(&hci_sk_list.lock);
295 }
296 
/* Build a monitor-channel event skb for @event on @hdev.
 *
 * Supports HCI_DEV_REG (NEW_INDEX with device type/bus/address/name) and
 * HCI_DEV_UNREG (DEL_INDEX, no payload). Returns a timestamped skb with
 * the hci_mon_hdr prepended, or NULL on unknown event / allocation failure.
 * Caller owns (and must free or queue) the returned skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* Fixed-width 8-byte name field; not NUL-terminated if the
		 * device name is longer.
		 */
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the monitor header; len covers only the payload */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
340 
341 static void send_monitor_replay(struct sock *sk)
342 {
343 	struct hci_dev *hdev;
344 
345 	read_lock(&hci_dev_list_lock);
346 
347 	list_for_each_entry(hdev, &hci_dev_list, list) {
348 		struct sk_buff *skb;
349 
350 		skb = create_monitor_event(hdev, HCI_DEV_REG);
351 		if (!skb)
352 			continue;
353 
354 		if (sock_queue_rcv_skb(sk, skb))
355 			kfree_skb(skb);
356 	}
357 
358 	read_unlock(&hci_dev_list_lock);
359 }
360 
361 /* Generate internal stack event */
/* Generate internal stack event.
 *
 * Synthesize an HCI_EV_STACK_INTERNAL event carrying @type plus @dlen
 * bytes of @data and deliver it to raw sockets via hci_send_to_sock().
 * @hdev may be NULL (e.g. device add/remove notifications). Allocation
 * failure silently drops the event.
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so it looks like a received event to readers */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
388 
/* Notify sockets about a device lifecycle @event on @hdev.
 *
 * Mirrors the event to monitor sockets, emits an HCI_EV_SI_DEVICE stack
 * event to raw sockets, and on HCI_DEV_UNREG detaches every socket still
 * bound to the vanishing device (EPIPE, back to BT_OPEN).
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event  = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			/* Per-socket lock nested inside the list read lock */
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				/* Drop the reference taken at bind time */
				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
432 
/* Release an HCI socket: undo monitor promiscuity, unlink from the global
 * list, drop the device reference/promisc count taken at bind, and purge
 * pending queues before releasing the sock reference.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
463 
464 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
465 {
466 	bdaddr_t bdaddr;
467 	int err;
468 
469 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
470 		return -EFAULT;
471 
472 	hci_dev_lock(hdev);
473 
474 	err = hci_blacklist_add(hdev, &bdaddr, 0);
475 
476 	hci_dev_unlock(hdev);
477 
478 	return err;
479 }
480 
481 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
482 {
483 	bdaddr_t bdaddr;
484 	int err;
485 
486 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
487 		return -EFAULT;
488 
489 	hci_dev_lock(hdev);
490 
491 	err = hci_blacklist_del(hdev, &bdaddr, 0);
492 
493 	hci_dev_unlock(hdev);
494 
495 	return err;
496 }
497 
498 /* Ioctls that require bound socket */
/* Ioctls that require bound socket.
 *
 * Called with the socket lock held; @sk must be bound to a device or
 * -EBADFD is returned. Privileged operations check CAP_NET_ADMIN;
 * unknown commands are forwarded to the driver's ioctl hook if present.
 */
static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		/* Drivers flagged as raw-only cannot be toggled */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}
543 
/* Top-level HCI socket ioctl dispatcher.
 *
 * Device-independent queries run without the socket lock; device control
 * operations require CAP_NET_ADMIN; everything else falls through to
 * hci_sock_bound_ioctl() under lock_sock().
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}
604 
/* Bind an HCI socket to a channel (and, for RAW, optionally a device).
 *
 * RAW: may bind to a specific device (takes a reference and bumps the
 * device promisc count) or to HCI_DEV_NONE. CONTROL requires
 * CAP_NET_ADMIN; MONITOR requires CAP_NET_RAW, replays existing device
 * indices and bumps monitor_promisc. On success the socket moves to
 * BT_BOUND; re-binding returns -EALREADY.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr); shorter addresses leave the rest zero */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* Let the new monitor learn about existing adapters */
		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}


	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
693 
694 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
695 {
696 	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
697 	struct sock *sk = sock->sk;
698 	struct hci_dev *hdev = hci_pi(sk)->hdev;
699 
700 	BT_DBG("sock %p sk %p", sock, sk);
701 
702 	if (!hdev)
703 		return -EBADFD;
704 
705 	lock_sock(sk);
706 
707 	*addr_len = sizeof(*haddr);
708 	haddr->hci_family = AF_BLUETOOTH;
709 	haddr->hci_dev    = hdev->id;
710 
711 	release_sock(sk);
712 	return 0;
713 }
714 
/* Attach ancillary data (cmsg) for a received frame according to the
 * socket's cmsg_mask: packet direction (HCI_CMSG_DIR) and/or receive
 * timestamp (HCI_CMSG_TSTAMP, converted for compat callers).
 */
static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit callers need the compat timeval layout */
		if (msg->msg_flags & MSG_CMSG_COMPAT) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
748 
/* Receive one datagram from the socket queue.
 *
 * Truncates to @len (setting MSG_TRUNC), attaches per-channel ancillary
 * data, and returns the number of bytes copied or a negative errno.
 * MSG_OOB is not supported; a closed socket reads as EOF (0).
 */
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	/* No address to report for HCI sockets */
	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	/* Ancillary data depends on the channel the socket is bound to */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
794 
/* Send a frame on an HCI socket.
 *
 * RAW channel: first byte of the payload is the packet type. Commands
 * are checked against the security filter (unless CAP_NET_RAW) and
 * queued on cmd_q, or on raw_q for raw-mode devices / vendor OGF 0x3f;
 * non-command packets require CAP_NET_RAW and go to raw_q.
 * CONTROL channel frames are handed to mgmt_control(); MONITOR is
 * read-only. Returns @len on success or a negative errno.
 */
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum: type byte + smallest possible packet header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First payload byte selects the packet type; strip it off */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Unprivileged senders may only issue whitelisted opcodes */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
				!hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
					!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Raw devices and vendor commands bypass the command queue */
		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
893 
/* Set SOL_HCI socket options (RAW channel only).
 *
 * HCI_DATA_DIR / HCI_TIME_STAMP toggle cmsg delivery. HCI_FILTER
 * replaces the socket filter; the userspace structure is pre-filled
 * from the current filter so a short write only updates a prefix, and
 * unprivileged callers are clamped to the security filter.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Seed uf with the current filter so a partial copy below
		 * leaves the untouched fields at their current values.
		 */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged sockets cannot widen past the security filter */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
975 
976 static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
977 {
978 	struct hci_ufilter uf;
979 	struct sock *sk = sock->sk;
980 	int len, opt, err = 0;
981 
982 	BT_DBG("sk %p, opt %d", sk, optname);
983 
984 	if (get_user(len, optlen))
985 		return -EFAULT;
986 
987 	lock_sock(sk);
988 
989 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
990 		err = -EINVAL;
991 		goto done;
992 	}
993 
994 	switch (optname) {
995 	case HCI_DATA_DIR:
996 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
997 			opt = 1;
998 		else
999 			opt = 0;
1000 
1001 		if (put_user(opt, optval))
1002 			err = -EFAULT;
1003 		break;
1004 
1005 	case HCI_TIME_STAMP:
1006 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1007 			opt = 1;
1008 		else
1009 			opt = 0;
1010 
1011 		if (put_user(opt, optval))
1012 			err = -EFAULT;
1013 		break;
1014 
1015 	case HCI_FILTER:
1016 		{
1017 			struct hci_filter *f = &hci_pi(sk)->filter;
1018 
1019 			uf.type_mask = f->type_mask;
1020 			uf.opcode    = f->opcode;
1021 			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1022 			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1023 		}
1024 
1025 		len = min_t(unsigned int, len, sizeof(uf));
1026 		if (copy_to_user(optval, &uf, len))
1027 			err = -EFAULT;
1028 		break;
1029 
1030 	default:
1031 		err = -ENOPROTOOPT;
1032 		break;
1033 	}
1034 
1035 done:
1036 	release_sock(sk);
1037 	return err;
1038 }
1039 
1040 static const struct proto_ops hci_sock_ops = {
1041 	.family		= PF_BLUETOOTH,
1042 	.owner		= THIS_MODULE,
1043 	.release	= hci_sock_release,
1044 	.bind		= hci_sock_bind,
1045 	.getname	= hci_sock_getname,
1046 	.sendmsg	= hci_sock_sendmsg,
1047 	.recvmsg	= hci_sock_recvmsg,
1048 	.ioctl		= hci_sock_ioctl,
1049 	.poll		= datagram_poll,
1050 	.listen		= sock_no_listen,
1051 	.shutdown	= sock_no_shutdown,
1052 	.setsockopt	= hci_sock_setsockopt,
1053 	.getsockopt	= hci_sock_getsockopt,
1054 	.connect	= sock_no_connect,
1055 	.socketpair	= sock_no_socketpair,
1056 	.accept		= sock_no_accept,
1057 	.mmap		= sock_no_mmap
1058 };
1059 
/* Protocol descriptor: sk_alloc() sizes each sock to hold hci_pinfo */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1065 
/* Create a new HCI socket (SOCK_RAW only), initialize its sock state
 * and link it into the global socket list. Returns 0 or a negative errno.
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	/* Unbound until hci_sock_bind() picks a channel */
	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
1094 
/* Family hook registered with bt_sock_register() for BTPROTO_HCI */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1100 
1101 int __init hci_sock_init(void)
1102 {
1103 	int err;
1104 
1105 	err = proto_register(&hci_sk_proto, 0);
1106 	if (err < 0)
1107 		return err;
1108 
1109 	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1110 	if (err < 0)
1111 		goto error;
1112 
1113 	BT_INFO("HCI socket layer initialized");
1114 
1115 	return 0;
1116 
1117 error:
1118 	BT_ERR("HCI socket registration failed");
1119 	proto_unregister(&hci_sk_proto);
1120 	return err;
1121 }
1122 
1123 void hci_sock_cleanup(void)
1124 {
1125 	if (bt_sock_unregister(BTPROTO_HCI) < 0)
1126 		BT_ERR("HCI socket unregistration failed");
1127 
1128 	proto_unregister(&hci_sk_proto);
1129 }
1130