/* xref: /openbmc/linux/net/bluetooth/hci_sock.c (revision df1cb87af9f24527a8932e4d195d49ffab1168d2) */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
};

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}

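/* Cookies identify a control socket for the lifetime of its connection.
 * A value of 0 means no cookie has been assigned yet; ida_simple_get()
 * hands out values starting at 1, and 0xffffffff acts as a sentinel when
 * allocation fails or while the cookie is being torn down.
 */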
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}

static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
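
/* Worked example of how this table is consulted: the HCI Inquiry command
 * has opcode 0x0401, i.e. OGF 0x01 (OGF_LINK_CTL) and OCF 0x0001, so
 * hci_sock_sendmsg() tests bit 1 of ocf_mask[1][0] = 0xbe000006. That bit
 * is set, so the command is permitted without CAP_NET_RAW. Likewise the
 * Command Complete event (0x0e) maps to bit 14 of event_mask[0] =
 * 0x1000d9fe, meaning unprivileged sockets may be allowed to receive it.
 */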

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
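
/* Illustrative userspace sketch (using the BlueZ <bluetooth/hci.h>
 * filter helpers; error handling omitted) of installing a filter that
 * the logic above will honour:
 *
 *	struct hci_filter flt;
 *
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 *
 * Any frame whose packet type or event code is not enabled in the
 * filter is dropped by is_filtered_packet() before it is queued.
 */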

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
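
/* Note that the loop above copies the frame at most once: the first
 * matching receiver triggers creation of a single private copy (with the
 * packet type byte pushed back in front of the payload), and every
 * receiver is then handed a cheap clone of that copy.
 */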

/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore sockets without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
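
/* Every frame on the monitor channel is prefixed with a struct
 * hci_mon_hdr: a 16-bit opcode, a 16-bit controller index and a 16-bit
 * payload length, all little endian. Tools such as btmon decode exactly
 * this framing.
 */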

void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore sockets without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			memcpy(skb_put(skb, data_len), data, data_len);

		skb->tstamp = tstamp;

		hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				    HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format = 0x0002;
	u8 ver[3];
	u32 flags;

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	mgmt_fill_version_info(ver);
	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	memcpy(skb_put(skb, sizeof(ver)), ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	*skb_put(skb, 1) = TASK_COMM_LEN;
	memcpy(skb_put(skb, TASK_COMM_LEN), hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
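
/* The HCI_MON_CTRL_OPEN payload built above is laid out as
 *
 *	cookie (4) | format (2) | version (3) | flags (4) |
 *	comm length (1) | comm (TASK_COMM_LEN)
 *
 * which accounts for the 14 + TASK_COMM_LEN bytes allocated for it.
 */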

static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		memcpy(skb_put(skb, len), buf, len);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev  = (void *)skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel's exclusive access,
			 * call hci_dev_do_close directly instead of
			 * hci_dev_close to ensure the exclusive access is
			 * released and the controller is brought back down.
			 *
			 * Checking HCI_AUTO_OFF is not needed in this case
			 * since it will already have been cleared when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel while the HCI_AUTO_OFF grace
				 * period is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface requires CAP_NET_RAW and is
		 * therefore implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %s",
				  BT_SUBSYS_VERSION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted, and
		 * only events intended for untrusted users are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * may however be cleared later, in which case sending
		 * these events is disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			struct sk_buff *skb;

			hci_sock_gen_cookie(sk);

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
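
/* Illustrative userspace sketch of binding to the monitor channel
 * (constants from the userspace Bluetooth headers; error handling
 * omitted):
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_MONITOR,
 *	};
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * Binding requires CAP_NET_RAW and immediately queues the version notes
 * and the replay of existing controllers handled above.
 */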

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}

static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
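
/* Management commands parsed above start with a struct mgmt_hdr: a
 * 16-bit opcode, a 16-bit controller index and a 16-bit parameter
 * length, all little endian. For example, MGMT_OP_READ_VERSION is the
 * six bytes 01 00 ff ff 00 00 (opcode 0x0001, index MGMT_INDEX_NONE,
 * no parameters).
 */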

static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only priorities 0-7 are valid; any other value results
		 * in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length does not overflow the packet and that the ident
		 * string itself is NUL terminated. In case the ident length
		 * is zero, the zero length byte itself doubles as the NUL
		 * terminator the check looks for.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}
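
/* A valid logging frame as checked above therefore looks like
 *
 *	struct hci_mon_hdr (opcode 0x0000) | priority (1) |
 *	ident length (1) | ident string + NUL | message + NUL
 *
 * and is forwarded to the monitor channel with the opcode rewritten to
 * HCI_MON_USER_LOGGING.
 */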

static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for the user channel since
		 * permissions are enforced when binding the socket.
		 *
		 * However, check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
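
/* Illustrative userspace sketch of what arrives here on a raw socket:
 * the packet type byte followed by the HCI packet, e.g. an HCI Reset
 * command (OGF 0x03, OCF 0x0003, opcode 0x0c03, no parameters):
 *
 *	unsigned char cmd[] = { HCI_COMMAND_PKT, 0x03, 0x0c, 0x00 };
 *
 *	send(fd, cmd, sizeof(cmd), 0);
 *
 * The leading byte becomes hci_skb_pkt_type() and is pulled off before
 * the frame is queued.
 */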

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}