xref: /openbmc/linux/net/bluetooth/hci_sock.c (revision dc6a81c3)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

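/* Per-socket state. struct bt_sock must stay the first member so
 * that the hci_pi() cast above remains valid for any struct sock
 * belonging to this protocol.
 */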
struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
};

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}

static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}

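/* Unlike test_bit(), which operates on arrays of unsigned long, this
 * helper indexes the mask as __u32 words. The filter masks are
 * manipulated as __u32 words everywhere else in this file (see the
 * HCI_FILTER handling in hci_sock_setsockopt()), so the word size
 * must not follow BITS_PER_LONG.
 */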
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

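/* Whitelist of the packet types, events and command opcodes that a
 * raw socket without CAP_NET_RAW may receive and send. The event and
 * OCF masks are bitmaps indexed by event code and, per OGF, by OCF.
 */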
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

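/* Returns true when the socket's filter rejects the packet and it
 * must not be queued, false when the packet passes the filter.
 */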
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

254 
255 /* Send frame to sockets with specific channel */
256 static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
257 				  int flag, struct sock *skip_sk)
258 {
259 	struct sock *sk;
260 
261 	BT_DBG("channel %u len %d", channel, skb->len);
262 
263 	sk_for_each(sk, &hci_sk_list.head) {
264 		struct sk_buff *nskb;
265 
266 		/* Ignore socket without the flag set */
267 		if (!hci_sock_test_flag(sk, flag))
268 			continue;
269 
270 		/* Skip the original socket */
271 		if (sk == skip_sk)
272 			continue;
273 
274 		if (sk->sk_state != BT_BOUND)
275 			continue;
276 
277 		if (hci_pi(sk)->channel != channel)
278 			continue;
279 
280 		nskb = skb_clone(skb, GFP_ATOMIC);
281 		if (!nskb)
282 			continue;
283 
284 		if (sock_queue_rcv_skb(sk, nskb))
285 			kfree_skb(nskb);
286 	}
287 
288 }
289 
290 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
291 			 int flag, struct sock *skip_sk)
292 {
293 	read_lock(&hci_sk_list.lock);
294 	__hci_send_to_channel(channel, skb, flag, skip_sk);
295 	read_unlock(&hci_sk_list.lock);
296 }
297 
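/* Every record on the monitor channel is prefixed with struct
 * hci_mon_hdr: a 16-bit opcode, the controller index and the payload
 * length, all little-endian. This is the wire format that btmon and
 * similar tracers consume.
 */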
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}

void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore sockets without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

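/* Build the monitor record for a device lifecycle event (register,
 * unregister, setup, open, close, up). Returns NULL for events that
 * need no record.
 */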
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

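/* The CTRL_OPEN record identifies a socket to the monitor: its
 * payload carries the socket cookie, a format identifier for the
 * channel, a version triplet, a flags field (bit 0 = trusted) and
 * the fixed-length command name of the opening task.
 */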
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}

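/* Replay synthetic NEW_INDEX, OPEN_INDEX and INDEX_INFO records for
 * all registered controllers so that a freshly bound monitor socket
 * starts out with a complete view of the system state.
 */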
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

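/* Caller must hold mgmt_chan_list_lock. */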
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	release_sock(sk);
	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}

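/* Device-global ioctls below are dispatched with the socket lock
 * dropped since they do not touch per-socket state. Anything not
 * handled directly is retried as a bound ioctl with the lock held.
 */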
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When an ioctl is called on an unbound raw socket, make sure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only sent once by checking whether the cookie exists: the
	 * socket cookie is only ever generated once for the lifetime of
	 * a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

#ifdef CONFIG_COMPAT
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		return hci_sock_ioctl(sock, cmd, arg);
	}

	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif

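/* Bind selects the channel and, for most channels, the controller.
 * An illustrative userspace sketch (error handling omitted):
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct sockaddr_hci a = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 */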
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* If a cookie has already been assigned, then an
			 * ioctl has already been issued against the unbound
			 * socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* If a cookie has already been assigned, this
			 * socket will transition from a raw socket into
			 * a user channel socket. For a clean transition,
			 * send the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and is therefore implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and is therefore implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* If a cookie has already been assigned,
				 * this socket will transition from a raw
				 * socket into a control socket. To allow
				 * for a clean transition, send the close
				 * notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}

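/* Management commands arrive as a little-endian struct mgmt_hdr
 * (opcode, controller index, parameter length) followed by the
 * command parameters. The opcode is used as a direct index into the
 * channel's handler table.
 */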
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

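/* Layout of a logging frame as validated below (sketch; the header
 * fields are little-endian):
 *
 *	struct hci_mon_hdr hdr;   opcode must be 0x0000
 *	__u8  priority;           0-7
 *	__u8  ident_len;          length of ident including its NUL
 *	char  ident[ident_len];   NUL terminated (may be empty)
 *	char  message[];          NUL terminated
 */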
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid; any other value
		 * results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte
		 * and the NUL terminated ident string. Check that the
		 * ident length does not overflow the packet and that the
		 * ident string itself is NUL terminated. In case the
		 * ident length is zero, the length byte itself doubles
		 * as the NUL terminator.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid
		 * packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}

static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			/* Vendor-specific commands (OGF 0x3f) bypass the
			 * command queue and go out via the raw queue.
			 */
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			/* Preload the current filter so that a short copy
			 * from userspace keeps the remaining fields at
			 * their present values.
			 */
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= hci_sock_compat_ioctl,
#endif
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}