xref: /openbmc/linux/net/bluetooth/hci_sock.c (revision cfbb9be8)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
};
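
/* Note: the hci_pi() cast above is safe because struct hci_pinfo starts
 * with struct bt_sock, which in turn starts with struct sock, so a
 * struct sock pointer and a struct hci_pinfo pointer refer to the same
 * address for sockets created from hci_sk_proto (whose obj_size below
 * is sizeof(struct hci_pinfo), so the trailing members always exist).
 */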

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}

static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}

static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
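
/* hci_test_bit() indexes a flat array of 32-bit words: nr >> 5 selects
 * the word and nr & 31 the bit inside it. For example, nr = 0x43 (67)
 * tests bit 3 of the third word (addr[2] & 0x00000008). Unlike
 * test_bit(), this works on __u32 arrays such as the filter masks
 * below, independent of the machine word size.
 */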

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
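
/* Sketch of how the table above is consulted (see hci_sock_sendmsg()):
 * an unprivileged sender may issue an HCI command only when the bit for
 * its OCF is set in ocf_mask[ogf]. For example, Read_Local_Version
 * (OGF 0x04, OCF 0x0001) is allowed because ocf_mask[4][0] = 0x000002be
 * has bit 1 set:
 *
 *	u16 ogf = hci_opcode_ogf(opcode), ocf = hci_opcode_ocf(opcode);
 *	bool ok = ogf <= HCI_SFLT_MAX_OGF &&
 *		  hci_test_bit(ocf & HCI_FLT_OCF_BITS,
 *			       &hci_sec_filter.ocf_mask[ogf]);
 */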

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
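
/* Note the inverted return convention: true means "drop this packet for
 * this socket". A packet passes when its type bit and (for events) its
 * event bit are set in the per-socket filter, and, when an opcode filter
 * is active, when a Command Complete/Status event carries that opcode.
 */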

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
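
/* The copy-then-clone pattern above is deliberate: one private copy is
 * made lazily (only if at least one socket wants the frame) so the
 * packet type byte can be pushed in front of the payload once, and each
 * receiving socket then gets a cheap clone that shares that data.
 */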

/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore sockets without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
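
/* Every frame delivered on the monitor channel is prefixed with a
 * 6-byte struct hci_mon_hdr: little-endian opcode, controller index
 * and payload length. This is the wire format that monitor tools such
 * as btmon parse.
 */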

void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore sockets without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
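
/* The 14 + TASK_COMM_LEN allocation above matches the payload exactly:
 * cookie (4) + format (2) + version (3) + flags (4) + comm length (1)
 * = 14 bytes, followed by TASK_COMM_LEN bytes of the task name.
 */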

static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
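
/* send_monitor_note() uses the classic two-pass vsnprintf() idiom:
 * vsnprintf(NULL, 0, ...) only computes the formatted length, the skb
 * is then sized to len + 1, and the second pass writes the string with
 * the NUL terminator appended explicitly. Note that va_start() must be
 * called again for the second pass since the first one consumed args.
 */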

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
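
/* Stack-internal events are synthesized as regular HCI events with the
 * reserved event code HCI_EV_STACK_INTERNAL and marked as incoming, so
 * raw sockets receive them through the normal hci_send_to_sock() path
 * (subject to the per-socket event filter, like any other event).
 */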

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When an ioctl is called on an unbound raw socket, make sure the
	 * monitor gets informed. Ensure that the resulting event is only
	 * sent once by checking whether the cookie exists; the socket
	 * cookie is only ever generated once for the lifetime of a given
	 * socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* If a cookie has already been assigned, an ioctl
			 * was already issued against the unbound socket,
			 * which triggered an open notification. Send a
			 * close notification first to allow a clean
			 * transition to the bound state.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and is therefore implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and is therefore implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * only untrusted events are delivered.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * may, however, be cleared later, in which case these
		 * events are no longer sent, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
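
/* Userspace sketch (not part of this file): binding a raw HCI socket to
 * hci0, the path handled by the HCI_CHANNEL_RAW case above. Error
 * handling omitted for brevity.
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,			// hci0
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 */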

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
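
/* Standard datagram truncation semantics apply above: if the caller's
 * buffer is smaller than the frame, MSG_TRUNC is set and the rest of
 * the frame is discarded with the skb; with MSG_TRUNC in the input
 * flags the full frame length is reported instead of the copied length.
 */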

static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
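
/* A management command is a 6-byte struct mgmt_hdr (little-endian
 * opcode, controller index, parameter length) followed by len bytes of
 * parameters. For example, MGMT_OP_READ_VERSION carries no parameters,
 * so its frame is just the header with len = 0 and index set to
 * MGMT_INDEX_NONE (it is one of the HCI_MGMT_NO_HDEV commands).
 */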

static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one
	 * NUL string terminator byte. Anything shorter is an invalid
	 * packet.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only priorities 0-7 are valid; any other value results
		 * in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte
		 * and the NUL terminated ident string. Check that the
		 * ident length is not overflowing the packet and also
		 * that the ident string itself is NUL terminated. In
		 * case the ident length is zero, the length value
		 * actually doubles as NUL terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid
		 * packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}
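
/* Layout of a valid logging frame as enforced above (the ident length
 * byte counts the ident string including its NUL, so a zero ident_len
 * means the length byte itself serves as the terminator):
 *
 *	struct hci_mon_hdr | priority | ident_len | "ident\0" | "message\0"
 */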

static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for the user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However, check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
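
/* Note the split in the command path above: vendor-specific commands
 * (OGF 0x3f) bypass the command queue and its serialized flow control,
 * going straight to raw_q, while everything else is queued on cmd_q as
 * a single-command request.
 */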

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
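
/* Userspace sketch (not part of this file, assuming the BlueZ
 * <bluetooth/hci_lib.h> filter helpers): installing a filter on a raw
 * socket that passes all HCI events and nothing else. For unprivileged
 * processes the kernel additionally clamps the masks against
 * hci_sec_filter above.
 *
 *	struct hci_filter flt;
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_all_events(&flt);
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 */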

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}