xref: /openbmc/linux/net/bluetooth/hci_sock.c (revision bc5aa3a0)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI sockets. */
26 
27 #include <linux/export.h>
28 #include <linux/utsname.h>
29 #include <asm/unaligned.h>
30 
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/hci_mon.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "mgmt_util.h"
37 
38 static LIST_HEAD(mgmt_chan_list);
39 static DEFINE_MUTEX(mgmt_chan_list_lock);
40 
41 static atomic_t monitor_promisc = ATOMIC_INIT(0);
42 
43 /* ----- HCI socket interface ----- */
44 
/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* Per-socket state for HCI sockets. Must begin with struct bt_sock so
 * the hci_pi() cast above is valid.
 */
struct hci_pinfo {
	struct bt_sock    bt;        /* generic Bluetooth socket base */
	struct hci_dev    *hdev;     /* bound controller, NULL when unbound */
	struct hci_filter filter;    /* raw-channel packet/event filter */
	__u32             cmsg_mask; /* enabled HCI_CMSG_* ancillary items */
	unsigned short    channel;   /* HCI_CHANNEL_* this socket bound to */
	unsigned long     flags;     /* HCI_SOCK_* / HCI_MGMT_* flag bits */
};
56 
/* Atomically set per-socket flag bit @nr in hci_pinfo->flags. */
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}
61 
/* Atomically clear per-socket flag bit @nr in hci_pinfo->flags. */
void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}
66 
/* Test per-socket flag bit @nr; returns non-zero when set. */
int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
71 
/* Return the HCI_CHANNEL_* the socket was bound to. */
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}
76 
77 static inline int hci_test_bit(int nr, const void *addr)
78 {
79 	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
80 }
81 
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

/* Bitmaps describing what unprivileged raw sockets may send/receive:
 * packet types, HCI events, and per-OGF command (OCF) bitmaps.
 */
struct hci_sec_filter {
	__u32 type_mask;                           /* allowed packet types */
	__u32 event_mask[2];                       /* allowed HCI events */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];   /* allowed OCFs per OGF */
};
90 
/* Default security filter applied to unprivileged raw sockets.
 * The bitmap values are fixed policy; indices follow the Bluetooth
 * HCI opcode group (OGF) layout.
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
111 
/* Global list of all open HCI sockets, protected by its rwlock. */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
115 
116 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
117 {
118 	struct hci_filter *flt;
119 	int flt_type, flt_event;
120 
121 	/* Apply filter */
122 	flt = &hci_pi(sk)->filter;
123 
124 	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
125 
126 	if (!test_bit(flt_type, &flt->type_mask))
127 		return true;
128 
129 	/* Extra filter for event packets only */
130 	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
131 		return false;
132 
133 	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
134 
135 	if (!hci_test_bit(flt_event, &flt->event_mask))
136 		return true;
137 
138 	/* Check filter only when opcode is set */
139 	if (!flt->opcode)
140 		return false;
141 
142 	if (flt_event == HCI_EV_CMD_COMPLETE &&
143 	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
144 		return true;
145 
146 	if (flt_event == HCI_EV_CMD_STATUS &&
147 	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
148 		return true;
149 
150 	return false;
151 }
152 
/* Send frame to RAW socket */
/* Deliver @skb to every raw and user-channel socket bound to @hdev.
 * A single private copy with the packet-type byte prepended is created
 * lazily on first match and then cloned per receiving socket.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* Raw sockets only see the four core packet types
			 * and are subject to the per-socket filter.
			 */
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* User channel sockets only see incoming traffic */
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* Drop our reference on the shared copy (clones keep theirs) */
	kfree_skb(skb_copy);
}
215 
216 /* Send frame to sockets with specific channel */
217 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
218 			 int flag, struct sock *skip_sk)
219 {
220 	struct sock *sk;
221 
222 	BT_DBG("channel %u len %d", channel, skb->len);
223 
224 	read_lock(&hci_sk_list.lock);
225 
226 	sk_for_each(sk, &hci_sk_list.head) {
227 		struct sk_buff *nskb;
228 
229 		/* Ignore socket without the flag set */
230 		if (!hci_sock_test_flag(sk, flag))
231 			continue;
232 
233 		/* Skip the original socket */
234 		if (sk == skip_sk)
235 			continue;
236 
237 		if (sk->sk_state != BT_BOUND)
238 			continue;
239 
240 		if (hci_pi(sk)->channel != channel)
241 			continue;
242 
243 		nskb = skb_clone(skb, GFP_ATOMIC);
244 		if (!nskb)
245 			continue;
246 
247 		if (sock_queue_rcv_skb(sk, nskb))
248 			kfree_skb(nskb);
249 	}
250 
251 	read_unlock(&hci_sk_list.lock);
252 }
253 
/* Send frame to monitor socket */
/* Mirror @skb to all monitor-channel sockets, wrapped in a monitor
 * header whose opcode encodes packet type and direction. No-op unless
 * at least one monitor socket is in promiscuous mode.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	/* Cheap early-out when nobody is listening */
	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and direction for data packets) to the
	 * monitor opcode; unknown types are not mirrored.
	 */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
307 
/* Build a monitor-channel notification skb for a device lifecycle
 * @event (HCI_DEV_REG/UNREG/SETUP/UP/OPEN/CLOSE). Returns a freshly
 * allocated, timestamped skb with the monitor header filled in, or
 * NULL when the event is not reported or allocation fails. The caller
 * owns (and must free) the returned skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* Fixed 8-byte name field in the monitor record */
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		/* Without a known manufacturer there is no index info
		 * worth reporting yet.
		 */
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the monitor header; len covers only the payload */
	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
386 
/* Queue a printf-formatted, NUL-terminated system note on monitor
 * socket @sk (HCI_MON_SYSTEM_NOTE). Silently drops the note if the
 * skb allocation fails or the receive queue rejects it.
 */
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	/* First pass: measure the formatted length (excluding NUL) */
	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	/* +1 for the explicit NUL terminator appended below */
	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	/* Second pass: format into the skb, then append the NUL */
	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
418 
/* Replay the current controller state to a newly bound monitor socket:
 * for each registered device, synthesize NEW_INDEX, then OPEN_INDEX if
 * running, then INDEX_INFO (UP or SETUP) as applicable, so the monitor
 * starts with a consistent picture.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Devices that are not open have nothing further */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
460 
/* Generate internal stack event */
/* Build a synthetic HCI_EV_STACK_INTERNAL event carrying @dlen bytes
 * of @data and deliver it to raw sockets via hci_send_to_sock().
 * @hdev may be NULL for device-independent events.
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev  = (void *)skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Present it to readers like a received event packet */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
487 
/* Fan out a device lifecycle @event: mirror it to monitor sockets,
 * generate a stack-internal event for raw sockets (REG/UNREG/UP/DOWN),
 * and on HCI_DEV_UNREG detach every socket still bound to @hdev.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			/* _nested: hci_sk_list.lock is already held here */
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				/* Drop the reference the bind took */
				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
533 
534 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
535 {
536 	struct hci_mgmt_chan *c;
537 
538 	list_for_each_entry(c, &mgmt_chan_list, list) {
539 		if (c->channel == channel)
540 			return c;
541 	}
542 
543 	return NULL;
544 }
545 
546 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
547 {
548 	struct hci_mgmt_chan *c;
549 
550 	mutex_lock(&mgmt_chan_list_lock);
551 	c = __hci_mgmt_chan_find(channel);
552 	mutex_unlock(&mgmt_chan_list_lock);
553 
554 	return c;
555 }
556 
557 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
558 {
559 	if (c->channel < HCI_CHANNEL_CONTROL)
560 		return -EINVAL;
561 
562 	mutex_lock(&mgmt_chan_list_lock);
563 	if (__hci_mgmt_chan_find(c->channel)) {
564 		mutex_unlock(&mgmt_chan_list_lock);
565 		return -EALREADY;
566 	}
567 
568 	list_add_tail(&c->list, &mgmt_chan_list);
569 
570 	mutex_unlock(&mgmt_chan_list_lock);
571 
572 	return 0;
573 }
574 EXPORT_SYMBOL(hci_mgmt_chan_register);
575 
/* Remove a previously registered management channel from the list. */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
583 
/* Tear down an HCI socket: drop monitor promiscuity, unlink it from
 * the global list, release any bound device (closing a user channel
 * exclusively held), and free queued skbs.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	/* NOTE(review): hdev/channel are read without lock_sock(sk);
	 * later upstream kernels take the socket lock here to close a
	 * race with hci_sock_dev_event() - confirm for this tree.
	 */
	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing an user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		/* Undo the promisc increment and reference from bind */
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
629 
630 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
631 {
632 	bdaddr_t bdaddr;
633 	int err;
634 
635 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
636 		return -EFAULT;
637 
638 	hci_dev_lock(hdev);
639 
640 	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
641 
642 	hci_dev_unlock(hdev);
643 
644 	return err;
645 }
646 
647 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
648 {
649 	bdaddr_t bdaddr;
650 	int err;
651 
652 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
653 		return -EFAULT;
654 
655 	hci_dev_lock(hdev);
656 
657 	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
658 
659 	hci_dev_unlock(hdev);
660 
661 	return err;
662 }
663 
/* Ioctls that require bound socket */
/* Handle ioctls that need a controller bound to the socket. Returns
 * -EBADFD when unbound, -EBUSY/-EOPNOTSUPP for unusable controllers,
 * -ENOIOCTLCMD for commands not handled here.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	/* NOTE(review): hdev is read here under lock_sock taken by the
	 * caller (hci_sock_ioctl); later upstream kernels hardened this
	 * path against racing device unregistration - confirm for this
	 * tree.
	 */
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is no longer supported, but keep the
		 * capability check so unprivileged callers get -EPERM.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
707 
/* Top-level ioctl handler for HCI sockets. Only raw-channel sockets
 * accept ioctls. Device-independent commands are dispatched with the
 * socket lock dropped; anything else is retried under the lock via
 * hci_sock_bound_ioctl().
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* The commands below don't touch per-socket state, so run
	 * them without holding the socket lock.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Remaining commands need the bound device; retake the lock */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
780 
/* Bind an HCI socket to a channel (and, for raw/user channels, a
 * controller). Enforces the per-channel capability requirements,
 * takes a device reference where needed, and moves the socket to
 * BT_BOUND on success.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr); shorter addresses leave the
	 * remainder zeroed by the memset above.
	 */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* HCI_DEV_NONE binds to all devices; otherwise take a
		 * reference on the specific controller.
		 */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel requires a specific device */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Exclusive access is refused while the device is being
		 * set up, configured, or already up (unless only the
		 * AUTO_OFF grace period keeps it up).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Only one user channel per device */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		/* Hide the device from the management interface while
		 * it is exclusively held.
		 */
		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening an user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				/* Roll back the exclusive claim */
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %s",
				  BT_SUBSYS_VERSION);
		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}
		break;

	default:
		/* Everything else must be a registered management
		 * channel bound without a device index.
		 */
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}


	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
975 
976 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
977 			    int *addr_len, int peer)
978 {
979 	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
980 	struct sock *sk = sock->sk;
981 	struct hci_dev *hdev;
982 	int err = 0;
983 
984 	BT_DBG("sock %p sk %p", sock, sk);
985 
986 	if (peer)
987 		return -EOPNOTSUPP;
988 
989 	lock_sock(sk);
990 
991 	hdev = hci_pi(sk)->hdev;
992 	if (!hdev) {
993 		err = -EBADFD;
994 		goto done;
995 	}
996 
997 	*addr_len = sizeof(*haddr);
998 	haddr->hci_family = AF_BLUETOOTH;
999 	haddr->hci_dev    = hdev->id;
1000 	haddr->hci_channel= hci_pi(sk)->channel;
1001 
1002 done:
1003 	release_sock(sk);
1004 	return err;
1005 }
1006 
/* Attach the ancillary data enabled in cmsg_mask (packet direction
 * and/or receive timestamp) to @msg for a packet being delivered to a
 * raw-channel socket.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit compat callers expect a compat_timeval layout */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
1043 
/* Receive one queued packet on an HCI socket. Logging-channel sockets
 * are write-only. Returns the number of bytes copied (the full packet
 * length when MSG_TRUNC is set), 0 on a closed socket, or a negative
 * errno.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	/* Truncate to the caller's buffer and flag the short read */
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	/* Ancillary data depends on the channel the socket is bound to */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	/* With MSG_TRUNC userspace asked for the real packet length */
	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
1099 
/* Parse and dispatch one management command from @msg on channel
 * @chan: validate the header, permissions, target index, and payload
 * length, then invoke the registered handler. Returns the consumed
 * length on success or a negative errno; protocol-level failures are
 * reported to the socket via mgmt_cmd_status() instead.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header length must match the actual payload exactly */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only run handlers explicitly marked
	 * as available to untrusted users.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices in setup/config or held by a user channel are
		 * not addressable through the management interface.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* The handler's hdev expectation must match what we resolved */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Variable-length commands set a minimum; fixed-length must
	 * match exactly.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1210 
/* Accept a user-supplied logging frame on the logging channel,
 * validate its layout (priority byte, ident length, NUL-terminated
 * ident and message strings), then forward it to monitor sockets as
 * HCI_MON_USER_LOGGING. Returns @len on success or a negative errno.
 */
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter are invalid packets.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	/* Only opcode 0x0000 is accepted from userspace; it is
	 * rewritten to HCI_MON_USER_LOGGING below.
	 */
	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	/* A specific index must name an existing controller; the
	 * reference is only held to validate existence.
	 */
	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}
1294 
/* Transmit a frame from userspace on an HCI socket.
 *
 * Management and logging channels are dispatched to their own
 * handlers; raw and user channel frames are copied into an skb and
 * queued towards the controller. Returns the number of bytes sent on
 * success or a negative errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Reject undersized and oversized frames up front. */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* The monitor channel is read-only. */
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		/* All remaining channels are management channels that
		 * must be looked up under the channel list lock.
		 */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* The first byte of the frame selects the HCI packet type and
	 * is stripped from the payload before queueing.
	 */
	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security filter require
		 * CAP_NET_RAW.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			/* Vendor specific commands (OGF 0x3f) bypass the
			 * command queue and go out via the raw queue.
			 */
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw channel data packets require CAP_NET_RAW. */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1433 
1434 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1435 			       char __user *optval, unsigned int len)
1436 {
1437 	struct hci_ufilter uf = { .opcode = 0 };
1438 	struct sock *sk = sock->sk;
1439 	int err = 0, opt = 0;
1440 
1441 	BT_DBG("sk %p, opt %d", sk, optname);
1442 
1443 	lock_sock(sk);
1444 
1445 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1446 		err = -EBADFD;
1447 		goto done;
1448 	}
1449 
1450 	switch (optname) {
1451 	case HCI_DATA_DIR:
1452 		if (get_user(opt, (int __user *)optval)) {
1453 			err = -EFAULT;
1454 			break;
1455 		}
1456 
1457 		if (opt)
1458 			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1459 		else
1460 			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1461 		break;
1462 
1463 	case HCI_TIME_STAMP:
1464 		if (get_user(opt, (int __user *)optval)) {
1465 			err = -EFAULT;
1466 			break;
1467 		}
1468 
1469 		if (opt)
1470 			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1471 		else
1472 			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1473 		break;
1474 
1475 	case HCI_FILTER:
1476 		{
1477 			struct hci_filter *f = &hci_pi(sk)->filter;
1478 
1479 			uf.type_mask = f->type_mask;
1480 			uf.opcode    = f->opcode;
1481 			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1482 			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1483 		}
1484 
1485 		len = min_t(unsigned int, len, sizeof(uf));
1486 		if (copy_from_user(&uf, optval, len)) {
1487 			err = -EFAULT;
1488 			break;
1489 		}
1490 
1491 		if (!capable(CAP_NET_RAW)) {
1492 			uf.type_mask &= hci_sec_filter.type_mask;
1493 			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1494 			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1495 		}
1496 
1497 		{
1498 			struct hci_filter *f = &hci_pi(sk)->filter;
1499 
1500 			f->type_mask = uf.type_mask;
1501 			f->opcode    = uf.opcode;
1502 			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1503 			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
1504 		}
1505 		break;
1506 
1507 	default:
1508 		err = -ENOPROTOOPT;
1509 		break;
1510 	}
1511 
1512 done:
1513 	release_sock(sk);
1514 	return err;
1515 }
1516 
1517 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1518 			       char __user *optval, int __user *optlen)
1519 {
1520 	struct hci_ufilter uf;
1521 	struct sock *sk = sock->sk;
1522 	int len, opt, err = 0;
1523 
1524 	BT_DBG("sk %p, opt %d", sk, optname);
1525 
1526 	if (get_user(len, optlen))
1527 		return -EFAULT;
1528 
1529 	lock_sock(sk);
1530 
1531 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1532 		err = -EBADFD;
1533 		goto done;
1534 	}
1535 
1536 	switch (optname) {
1537 	case HCI_DATA_DIR:
1538 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1539 			opt = 1;
1540 		else
1541 			opt = 0;
1542 
1543 		if (put_user(opt, optval))
1544 			err = -EFAULT;
1545 		break;
1546 
1547 	case HCI_TIME_STAMP:
1548 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1549 			opt = 1;
1550 		else
1551 			opt = 0;
1552 
1553 		if (put_user(opt, optval))
1554 			err = -EFAULT;
1555 		break;
1556 
1557 	case HCI_FILTER:
1558 		{
1559 			struct hci_filter *f = &hci_pi(sk)->filter;
1560 
1561 			memset(&uf, 0, sizeof(uf));
1562 			uf.type_mask = f->type_mask;
1563 			uf.opcode    = f->opcode;
1564 			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1565 			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1566 		}
1567 
1568 		len = min_t(unsigned int, len, sizeof(uf));
1569 		if (copy_to_user(optval, &uf, len))
1570 			err = -EFAULT;
1571 		break;
1572 
1573 	default:
1574 		err = -ENOPROTOOPT;
1575 		break;
1576 	}
1577 
1578 done:
1579 	release_sock(sk);
1580 	return err;
1581 }
1582 
/* Operations for HCI sockets. HCI sockets are connectionless, so
 * connect/listen/accept and friends map to the sock_no_* stubs.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1602 
/* Protocol definition; obj_size makes sk_alloc() reserve enough room
 * for the per-socket struct hci_pinfo.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1608 
/* Create a new HCI socket; only SOCK_RAW is supported. The socket
 * starts out unbound in BT_OPEN state and is linked into hci_sk_list
 * so it is visible via the procfs entry registered in hci_sock_init().
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
1637 
/* Family handler registered with the Bluetooth core for BTPROTO_HCI. */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1643 
/* Register the HCI socket protocol, its family handler and the procfs
 * entry. On any failure the proto registration is rolled back.
 */
int __init hci_sock_init(void)
{
	int err;

	/* The HCI socket address must fit into a generic sockaddr. */
	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		/* Undo the family registration before the common unwind. */
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
1675 
/* Tear down everything hci_sock_init() registered, in reverse order. */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}
1682