/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */
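
/*
 * For orientation, a sketch of how userspace typically reaches this code
 * (illustrative only, not part of this file; assumes the BlueZ
 * <bluetooth/bluetooth.h> and <bluetooth/hci.h> headers):
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,		// hci0
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * hci_sock_create() handles the socket() call and hci_sock_bind() the
 * bind() call below.
 */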
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u8              cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
	__u16             mtu;
};

static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}

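/* Assign a unique cookie to @sk on first use. The cookie identifies this
 * socket in monitor traces and control events. Returns true only when a
 * new cookie was generated; on IDA allocation failure the sentinel value
 * 0xffffffff is stored instead.
 */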
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}

static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
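
/* How the table above is consulted: hci_sock_sendmsg() tests bit
 * (ocf & HCI_FLT_OCF_BITS) of ocf_mask[ogf] via hci_test_bit(), i.e.
 * word (ocf >> 5), bit (ocf & 31). For example, HCI Inquiry (OGF 0x01,
 * OCF 0x0001) maps to ocf_mask[1] word 0 bit 1, which is set in
 * 0xbe000006, so unprivileged raw sockets may send it.
 */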

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

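/* Returns true when the per-socket filter rejects @skb, i.e. the frame
 * must not be delivered to @sk.
 */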
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

static void hci_sock_copy_creds(struct sock *sk, struct sk_buff *skb)
{
	struct scm_creds *creds;

	if (!sk || WARN_ON(!skb))
		return;

	creds = &bt_cb(skb)->creds;

	/* Check if peer credentials are set */
	if (!sk->sk_peer_pid) {
		/* Check if parent peer credentials are set */
		if (bt_sk(sk)->parent && bt_sk(sk)->parent->sk_peer_pid)
			sk = bt_sk(sk)->parent;
		else
			return;
	}

	/* Check if scm_creds are already set */
	if (creds->pid == pid_vnr(sk->sk_peer_pid))
		return;

	memset(creds, 0, sizeof(*creds));

	creds->pid = pid_vnr(sk->sk_peer_pid);
	if (sk->sk_peer_cred) {
		creds->uid = sk->sk_peer_cred->uid;
		creds->gid = sk->sk_peer_cred->gid;
	}
}

static struct sk_buff *hci_skb_clone(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (!skb)
		return NULL;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_sock_copy_creds(skb->sk, nskb);

	return nskb;
}

/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = hci_skb_clone(skb);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	hci_sock_copy_creds(skb->sk, skb_copy);

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}

void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
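		/* A manufacturer of 0xffff is presumably still the unset
		 * default, i.e. setup has not discovered it yet, so defer
		 * the index info message.
		 */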
		if (hdev->manufacturer == 0xffff)
			return NULL;
		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

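/* The open notification payload is, in order: cookie (4 bytes, LE),
 * format (2), version (3), flags (4), comm length (1) and the comm
 * string (TASK_COMM_LEN bytes), which is the 14 + TASK_COMM_LEN
 * allocated below.
 */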
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	hci_sock_copy_creds(sk, skb);

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 *
			 * Make sure to also check that we haven't already
			 * unregistered, since in that case all the cleanup
			 * will already have been completed and hdev will get
			 * released by the hci_dev_put() below.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}

static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	switch (cmd) {
	case HCIGETDEVLIST:
	case HCIGETDEVINFO:
	case HCIGETCONNLIST:
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
	case HCIINQUIRY:
	case HCISETRAW:
	case HCIGETCONNINFO:
	case HCIGETAUTHINFO:
	case HCIBLOCKADDR:
	case HCIUNBLOCKADDR:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, ensure that the
	 * monitor gets informed. Ensure that the resulting event is only
	 * sent once by checking whether the cookie exists: the socket
	 * cookie will only ever be generated once for the lifetime of a
	 * given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

#ifdef CONFIG_COMPAT
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		return hci_sock_ioctl(sock, cmd, arg);
	}

	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * an ioctl has already been issued against this
			 * unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow a clean transition to the bound state.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
	if (!hci_pi(sk)->mtu)
		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	haddr->hci_family  = AF_BLUETOOTH;
	haddr->hci_dev     = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u8 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	memset(&scm, 0, sizeof(scm));
	scm.creds = bt_cb(skb)->creds;

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	scm_recv(sock, msg, &scm, flags);

	return err ? : copied;
}

static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct sk_buff *skb)
{
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %d bytes", skb->len);

	if (skb->len < sizeof(*hdr))
		return -EINVAL;

	hdr = (void *)skb->data;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != skb->len - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *cmd;

		/* Send event to monitor */
		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
						  skb->data + sizeof(*hdr));
		if (cmd) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(cmd);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = skb->data + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = skb->len;

done:
	if (hdev)
		hci_dev_put(hdev);

	return err;
}

static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags)
{
	struct hci_mon_hdr *hdr;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (skb->len < sizeof(*hdr) + 3)
		return -EINVAL;

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
		return -EINVAL;

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid; any other value
		 * results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. If the ident
		 * length is zero, the length byte itself doubles as the
		 * NUL terminator of the ident.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
		    ident_len > skb->len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
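
	/* Worked example: the smallest valid frame carries a 3 byte
	 * payload of priority, ident_len = 0x00 (doubling as the ident
	 * terminator) and one NUL for the empty message, which is why
	 * hdr->len must be at least 3.
	 */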

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev)
			return -ENODEV;
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = skb->len;

	if (hdev)
		hci_dev_put(hdev);

	return err;
}

static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;
	const unsigned int flags = msg->msg_flags;

	BT_DBG("sock %p sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > hci_pi(sk)->mtu)
		return -EINVAL;

	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto drop;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, skb, flags);
		goto drop;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, skb);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto drop;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto drop;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

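		/* OGF 0x3f is the vendor-specific command group; such
		 * commands bypass the command queue and are sent raw.
		 */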
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
				   sockptr_t optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_sockptr(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int len)
{
	struct sock *sk = sock->sk;
	int err = 0;
	u16 opt;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_setsockopt_old(sock, level, optname, optval,
					       len);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		switch (hci_pi(sk)->channel) {
		/* Don't allow changing MTU for channels that are meant for HCI
		 * traffic only.
		 */
		case HCI_CHANNEL_RAW:
		case HCI_CHANNEL_USER:
			err = -ENOPROTOOPT;
			goto done;
		}

		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		hci_pi(sk)->mtu = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
				   char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_getsockopt_old(sock, level, optname, optval,
					       optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

static void hci_sock_destruct(struct sock *sk)
{
	mgmt_cleanup(sk);
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= hci_sock_compat_ioctl,
#endif
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = bt_sock_alloc(net, sock, &hci_sk_proto, protocol, GFP_ATOMIC,
			   kern);
	if (!sk)
		return -ENOMEM;

	sock->state = SS_UNCONNECTED;
	sk->sk_destruct = hci_sock_destruct;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}