xref: /openbmc/linux/net/bluetooth/hci_sock.c (revision 3ddc8b84)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI sockets. */
26 #include <linux/compat.h>
27 #include <linux/export.h>
28 #include <linux/utsname.h>
29 #include <linux/sched.h>
30 #include <asm/unaligned.h>
31 
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/hci_mon.h>
35 #include <net/bluetooth/mgmt.h>
36 
37 #include "mgmt_util.h"
38 
39 static LIST_HEAD(mgmt_chan_list);
40 static DEFINE_MUTEX(mgmt_chan_list_lock);
41 
42 static DEFINE_IDA(sock_cookie_ida);
43 
44 static atomic_t monitor_promisc = ATOMIC_INIT(0);
45 
46 /* ----- HCI socket interface ----- */
47 
48 /* Socket info */
49 #define hci_pi(sk) ((struct hci_pinfo *) sk)
50 
51 struct hci_pinfo {
52 	struct bt_sock    bt;
53 	struct hci_dev    *hdev;
54 	struct hci_filter filter;
55 	__u8              cmsg_mask;
56 	unsigned short    channel;
57 	unsigned long     flags;
58 	__u32             cookie;
59 	char              comm[TASK_COMM_LEN];
60 	__u16             mtu;
61 };
62 
63 static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
64 {
65 	struct hci_dev *hdev = hci_pi(sk)->hdev;
66 
67 	if (!hdev)
68 		return ERR_PTR(-EBADFD);
69 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
70 		return ERR_PTR(-EPIPE);
71 	return hdev;
72 }
73 
74 void hci_sock_set_flag(struct sock *sk, int nr)
75 {
76 	set_bit(nr, &hci_pi(sk)->flags);
77 }
78 
79 void hci_sock_clear_flag(struct sock *sk, int nr)
80 {
81 	clear_bit(nr, &hci_pi(sk)->flags);
82 }
83 
84 int hci_sock_test_flag(struct sock *sk, int nr)
85 {
86 	return test_bit(nr, &hci_pi(sk)->flags);
87 }
88 
89 unsigned short hci_sock_get_channel(struct sock *sk)
90 {
91 	return hci_pi(sk)->channel;
92 }
93 
94 u32 hci_sock_get_cookie(struct sock *sk)
95 {
96 	return hci_pi(sk)->cookie;
97 }
98 
99 static bool hci_sock_gen_cookie(struct sock *sk)
100 {
101 	int id = hci_pi(sk)->cookie;
102 
103 	if (!id) {
104 		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
105 		if (id < 0)
106 			id = 0xffffffff;
107 
108 		hci_pi(sk)->cookie = id;
109 		get_task_comm(hci_pi(sk)->comm, current);
110 		return true;
111 	}
112 
113 	return false;
114 }
115 
116 static void hci_sock_free_cookie(struct sock *sk)
117 {
118 	int id = hci_pi(sk)->cookie;
119 
120 	if (id) {
121 		hci_pi(sk)->cookie = 0xffffffff;
122 		ida_simple_remove(&sock_cookie_ida, id);
123 	}
124 }
125 
126 static inline int hci_test_bit(int nr, const void *addr)
127 {
128 	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
129 }
130 
131 /* Security filter */
132 #define HCI_SFLT_MAX_OGF  5
133 
134 struct hci_sec_filter {
135 	__u32 type_mask;
136 	__u32 event_mask[2];
137 	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
138 };
139 
140 static const struct hci_sec_filter hci_sec_filter = {
141 	/* Packet types */
142 	0x10,
143 	/* Events */
144 	{ 0x1000d9fe, 0x0000b00c },
145 	/* Commands */
146 	{
147 		{ 0x0 },
148 		/* OGF_LINK_CTL */
149 		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
150 		/* OGF_LINK_POLICY */
151 		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
152 		/* OGF_HOST_CTL */
153 		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
154 		/* OGF_INFO_PARAM */
155 		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
156 		/* OGF_STATUS_PARAM */
157 		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
158 	}
159 };
160 
161 static struct bt_sock_list hci_sk_list = {
162 	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
163 };
164 
165 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
166 {
167 	struct hci_filter *flt;
168 	int flt_type, flt_event;
169 
170 	/* Apply filter */
171 	flt = &hci_pi(sk)->filter;
172 
173 	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
174 
175 	if (!test_bit(flt_type, &flt->type_mask))
176 		return true;
177 
178 	/* Extra filter for event packets only */
179 	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
180 		return false;
181 
182 	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
183 
184 	if (!hci_test_bit(flt_event, &flt->event_mask))
185 		return true;
186 
187 	/* Check filter only when opcode is set */
188 	if (!flt->opcode)
189 		return false;
190 
191 	if (flt_event == HCI_EV_CMD_COMPLETE &&
192 	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
193 		return true;
194 
195 	if (flt_event == HCI_EV_CMD_STATUS &&
196 	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
197 		return true;
198 
199 	return false;
200 }
201 
202 /* Send frame to RAW socket */
203 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
204 {
205 	struct sock *sk;
206 	struct sk_buff *skb_copy = NULL;
207 
208 	BT_DBG("hdev %p len %d", hdev, skb->len);
209 
210 	read_lock(&hci_sk_list.lock);
211 
212 	sk_for_each(sk, &hci_sk_list.head) {
213 		struct sk_buff *nskb;
214 
215 		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
216 			continue;
217 
218 		/* Don't send frame to the socket it came from */
219 		if (skb->sk == sk)
220 			continue;
221 
222 		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
223 			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
224 			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
225 			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
226 			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
227 			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
228 				continue;
229 			if (is_filtered_packet(sk, skb))
230 				continue;
231 		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
232 			if (!bt_cb(skb)->incoming)
233 				continue;
234 			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
235 			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
236 			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
237 			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
238 				continue;
239 		} else {
240 			/* Don't send frame to other channel types */
241 			continue;
242 		}
243 
244 		if (!skb_copy) {
245 			/* Create a private copy with headroom */
246 			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
247 			if (!skb_copy)
248 				continue;
249 
250 			/* Put type byte before the data */
251 			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
252 		}
253 
254 		nskb = skb_clone(skb_copy, GFP_ATOMIC);
255 		if (!nskb)
256 			continue;
257 
258 		if (sock_queue_rcv_skb(sk, nskb))
259 			kfree_skb(nskb);
260 	}
261 
262 	read_unlock(&hci_sk_list.lock);
263 
264 	kfree_skb(skb_copy);
265 }
266 
267 static void hci_sock_copy_creds(struct sock *sk, struct sk_buff *skb)
268 {
269 	struct scm_creds *creds;
270 
271 	if (!sk || WARN_ON(!skb))
272 		return;
273 
274 	creds = &bt_cb(skb)->creds;
275 
276 	/* Check if peer credentials is set */
277 	if (!sk->sk_peer_pid) {
278 		/* Check if parent peer credentials is set */
279 		if (bt_sk(sk)->parent && bt_sk(sk)->parent->sk_peer_pid)
280 			sk = bt_sk(sk)->parent;
281 		else
282 			return;
283 	}
284 
285 	/* Check if scm_creds already set */
286 	if (creds->pid == pid_vnr(sk->sk_peer_pid))
287 		return;
288 
289 	memset(creds, 0, sizeof(*creds));
290 
291 	creds->pid = pid_vnr(sk->sk_peer_pid);
292 	if (sk->sk_peer_cred) {
293 		creds->uid = sk->sk_peer_cred->uid;
294 		creds->gid = sk->sk_peer_cred->gid;
295 	}
296 }
297 
298 static struct sk_buff *hci_skb_clone(struct sk_buff *skb)
299 {
300 	struct sk_buff *nskb;
301 
302 	if (!skb)
303 		return NULL;
304 
305 	nskb = skb_clone(skb, GFP_ATOMIC);
306 	if (!nskb)
307 		return NULL;
308 
309 	hci_sock_copy_creds(skb->sk, nskb);
310 
311 	return nskb;
312 }
313 
314 /* Send frame to sockets with specific channel */
315 static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
316 				  int flag, struct sock *skip_sk)
317 {
318 	struct sock *sk;
319 
320 	BT_DBG("channel %u len %d", channel, skb->len);
321 
322 	sk_for_each(sk, &hci_sk_list.head) {
323 		struct sk_buff *nskb;
324 
325 		/* Ignore socket without the flag set */
326 		if (!hci_sock_test_flag(sk, flag))
327 			continue;
328 
329 		/* Skip the original socket */
330 		if (sk == skip_sk)
331 			continue;
332 
333 		if (sk->sk_state != BT_BOUND)
334 			continue;
335 
336 		if (hci_pi(sk)->channel != channel)
337 			continue;
338 
339 		nskb = hci_skb_clone(skb);
340 		if (!nskb)
341 			continue;
342 
343 		if (sock_queue_rcv_skb(sk, nskb))
344 			kfree_skb(nskb);
345 	}
346 
347 }
348 
349 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
350 			 int flag, struct sock *skip_sk)
351 {
352 	read_lock(&hci_sk_list.lock);
353 	__hci_send_to_channel(channel, skb, flag, skip_sk);
354 	read_unlock(&hci_sk_list.lock);
355 }
356 
357 /* Send frame to monitor socket */
358 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
359 {
360 	struct sk_buff *skb_copy = NULL;
361 	struct hci_mon_hdr *hdr;
362 	__le16 opcode;
363 
364 	if (!atomic_read(&monitor_promisc))
365 		return;
366 
367 	BT_DBG("hdev %p len %d", hdev, skb->len);
368 
369 	switch (hci_skb_pkt_type(skb)) {
370 	case HCI_COMMAND_PKT:
371 		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
372 		break;
373 	case HCI_EVENT_PKT:
374 		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
375 		break;
376 	case HCI_ACLDATA_PKT:
377 		if (bt_cb(skb)->incoming)
378 			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
379 		else
380 			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
381 		break;
382 	case HCI_SCODATA_PKT:
383 		if (bt_cb(skb)->incoming)
384 			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
385 		else
386 			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
387 		break;
388 	case HCI_ISODATA_PKT:
389 		if (bt_cb(skb)->incoming)
390 			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
391 		else
392 			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
393 		break;
394 	case HCI_DIAG_PKT:
395 		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
396 		break;
397 	default:
398 		return;
399 	}
400 
401 	/* Create a private copy with headroom */
402 	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
403 	if (!skb_copy)
404 		return;
405 
406 	hci_sock_copy_creds(skb->sk, skb_copy);
407 
408 	/* Put header before the data */
409 	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
410 	hdr->opcode = opcode;
411 	hdr->index = cpu_to_le16(hdev->id);
412 	hdr->len = cpu_to_le16(skb->len);
413 
414 	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
415 			    HCI_SOCK_TRUSTED, NULL);
416 	kfree_skb(skb_copy);
417 }
418 
419 void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
420 				 void *data, u16 data_len, ktime_t tstamp,
421 				 int flag, struct sock *skip_sk)
422 {
423 	struct sock *sk;
424 	__le16 index;
425 
426 	if (hdev)
427 		index = cpu_to_le16(hdev->id);
428 	else
429 		index = cpu_to_le16(MGMT_INDEX_NONE);
430 
431 	read_lock(&hci_sk_list.lock);
432 
433 	sk_for_each(sk, &hci_sk_list.head) {
434 		struct hci_mon_hdr *hdr;
435 		struct sk_buff *skb;
436 
437 		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
438 			continue;
439 
440 		/* Ignore socket without the flag set */
441 		if (!hci_sock_test_flag(sk, flag))
442 			continue;
443 
444 		/* Skip the original socket */
445 		if (sk == skip_sk)
446 			continue;
447 
448 		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
449 		if (!skb)
450 			continue;
451 
452 		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
453 		put_unaligned_le16(event, skb_put(skb, 2));
454 
455 		if (data)
456 			skb_put_data(skb, data, data_len);
457 
458 		skb->tstamp = tstamp;
459 
460 		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
461 		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
462 		hdr->index = index;
463 		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
464 
465 		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
466 				      HCI_SOCK_TRUSTED, NULL);
467 		kfree_skb(skb);
468 	}
469 
470 	read_unlock(&hci_sk_list.lock);
471 }
472 
473 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
474 {
475 	struct hci_mon_hdr *hdr;
476 	struct hci_mon_new_index *ni;
477 	struct hci_mon_index_info *ii;
478 	struct sk_buff *skb;
479 	__le16 opcode;
480 
481 	switch (event) {
482 	case HCI_DEV_REG:
483 		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
484 		if (!skb)
485 			return NULL;
486 
487 		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
488 		ni->type = hdev->dev_type;
489 		ni->bus = hdev->bus;
490 		bacpy(&ni->bdaddr, &hdev->bdaddr);
491 		memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
492 			       strnlen(hdev->name, sizeof(ni->name)), '\0');
493 
494 		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
495 		break;
496 
497 	case HCI_DEV_UNREG:
498 		skb = bt_skb_alloc(0, GFP_ATOMIC);
499 		if (!skb)
500 			return NULL;
501 
502 		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
503 		break;
504 
505 	case HCI_DEV_SETUP:
506 		if (hdev->manufacturer == 0xffff)
507 			return NULL;
508 		fallthrough;
509 
510 	case HCI_DEV_UP:
511 		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
512 		if (!skb)
513 			return NULL;
514 
515 		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
516 		bacpy(&ii->bdaddr, &hdev->bdaddr);
517 		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
518 
519 		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
520 		break;
521 
522 	case HCI_DEV_OPEN:
523 		skb = bt_skb_alloc(0, GFP_ATOMIC);
524 		if (!skb)
525 			return NULL;
526 
527 		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
528 		break;
529 
530 	case HCI_DEV_CLOSE:
531 		skb = bt_skb_alloc(0, GFP_ATOMIC);
532 		if (!skb)
533 			return NULL;
534 
535 		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
536 		break;
537 
538 	default:
539 		return NULL;
540 	}
541 
542 	__net_timestamp(skb);
543 
544 	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
545 	hdr->opcode = opcode;
546 	hdr->index = cpu_to_le16(hdev->id);
547 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
548 
549 	return skb;
550 }
551 
552 static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
553 {
554 	struct hci_mon_hdr *hdr;
555 	struct sk_buff *skb;
556 	u16 format;
557 	u8 ver[3];
558 	u32 flags;
559 
560 	/* No message needed when cookie is not present */
561 	if (!hci_pi(sk)->cookie)
562 		return NULL;
563 
564 	switch (hci_pi(sk)->channel) {
565 	case HCI_CHANNEL_RAW:
566 		format = 0x0000;
567 		ver[0] = BT_SUBSYS_VERSION;
568 		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
569 		break;
570 	case HCI_CHANNEL_USER:
571 		format = 0x0001;
572 		ver[0] = BT_SUBSYS_VERSION;
573 		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
574 		break;
575 	case HCI_CHANNEL_CONTROL:
576 		format = 0x0002;
577 		mgmt_fill_version_info(ver);
578 		break;
579 	default:
580 		/* No message for unsupported format */
581 		return NULL;
582 	}
583 
584 	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
585 	if (!skb)
586 		return NULL;
587 
588 	hci_sock_copy_creds(sk, skb);
589 
590 	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
591 
592 	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
593 	put_unaligned_le16(format, skb_put(skb, 2));
594 	skb_put_data(skb, ver, sizeof(ver));
595 	put_unaligned_le32(flags, skb_put(skb, 4));
596 	skb_put_u8(skb, TASK_COMM_LEN);
597 	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
598 
599 	__net_timestamp(skb);
600 
601 	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
602 	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
603 	if (hci_pi(sk)->hdev)
604 		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
605 	else
606 		hdr->index = cpu_to_le16(HCI_DEV_NONE);
607 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
608 
609 	return skb;
610 }
611 
612 static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
613 {
614 	struct hci_mon_hdr *hdr;
615 	struct sk_buff *skb;
616 
617 	/* No message needed when cookie is not present */
618 	if (!hci_pi(sk)->cookie)
619 		return NULL;
620 
621 	switch (hci_pi(sk)->channel) {
622 	case HCI_CHANNEL_RAW:
623 	case HCI_CHANNEL_USER:
624 	case HCI_CHANNEL_CONTROL:
625 		break;
626 	default:
627 		/* No message for unsupported format */
628 		return NULL;
629 	}
630 
631 	skb = bt_skb_alloc(4, GFP_ATOMIC);
632 	if (!skb)
633 		return NULL;
634 
635 	hci_sock_copy_creds(sk, skb);
636 
637 	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
638 
639 	__net_timestamp(skb);
640 
641 	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
642 	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
643 	if (hci_pi(sk)->hdev)
644 		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
645 	else
646 		hdr->index = cpu_to_le16(HCI_DEV_NONE);
647 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
648 
649 	return skb;
650 }
651 
652 static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
653 						   u16 opcode, u16 len,
654 						   const void *buf)
655 {
656 	struct hci_mon_hdr *hdr;
657 	struct sk_buff *skb;
658 
659 	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
660 	if (!skb)
661 		return NULL;
662 
663 	hci_sock_copy_creds(sk, skb);
664 
665 	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
666 	put_unaligned_le16(opcode, skb_put(skb, 2));
667 
668 	if (buf)
669 		skb_put_data(skb, buf, len);
670 
671 	__net_timestamp(skb);
672 
673 	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
674 	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
675 	hdr->index = cpu_to_le16(index);
676 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
677 
678 	return skb;
679 }
680 
681 static void __printf(2, 3)
682 send_monitor_note(struct sock *sk, const char *fmt, ...)
683 {
684 	size_t len;
685 	struct hci_mon_hdr *hdr;
686 	struct sk_buff *skb;
687 	va_list args;
688 
689 	va_start(args, fmt);
690 	len = vsnprintf(NULL, 0, fmt, args);
691 	va_end(args);
692 
693 	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
694 	if (!skb)
695 		return;
696 
697 	hci_sock_copy_creds(sk, skb);
698 
699 	va_start(args, fmt);
700 	vsprintf(skb_put(skb, len), fmt, args);
701 	*(u8 *)skb_put(skb, 1) = 0;
702 	va_end(args);
703 
704 	__net_timestamp(skb);
705 
706 	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
707 	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
708 	hdr->index = cpu_to_le16(HCI_DEV_NONE);
709 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
710 
711 	if (sock_queue_rcv_skb(sk, skb))
712 		kfree_skb(skb);
713 }
714 
715 static void send_monitor_replay(struct sock *sk)
716 {
717 	struct hci_dev *hdev;
718 
719 	read_lock(&hci_dev_list_lock);
720 
721 	list_for_each_entry(hdev, &hci_dev_list, list) {
722 		struct sk_buff *skb;
723 
724 		skb = create_monitor_event(hdev, HCI_DEV_REG);
725 		if (!skb)
726 			continue;
727 
728 		if (sock_queue_rcv_skb(sk, skb))
729 			kfree_skb(skb);
730 
731 		if (!test_bit(HCI_RUNNING, &hdev->flags))
732 			continue;
733 
734 		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
735 		if (!skb)
736 			continue;
737 
738 		if (sock_queue_rcv_skb(sk, skb))
739 			kfree_skb(skb);
740 
741 		if (test_bit(HCI_UP, &hdev->flags))
742 			skb = create_monitor_event(hdev, HCI_DEV_UP);
743 		else if (hci_dev_test_flag(hdev, HCI_SETUP))
744 			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
745 		else
746 			skb = NULL;
747 
748 		if (skb) {
749 			if (sock_queue_rcv_skb(sk, skb))
750 				kfree_skb(skb);
751 		}
752 	}
753 
754 	read_unlock(&hci_dev_list_lock);
755 }
756 
757 static void send_monitor_control_replay(struct sock *mon_sk)
758 {
759 	struct sock *sk;
760 
761 	read_lock(&hci_sk_list.lock);
762 
763 	sk_for_each(sk, &hci_sk_list.head) {
764 		struct sk_buff *skb;
765 
766 		skb = create_monitor_ctrl_open(sk);
767 		if (!skb)
768 			continue;
769 
770 		if (sock_queue_rcv_skb(mon_sk, skb))
771 			kfree_skb(skb);
772 	}
773 
774 	read_unlock(&hci_sk_list.lock);
775 }
776 
777 /* Generate internal stack event */
778 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
779 {
780 	struct hci_event_hdr *hdr;
781 	struct hci_ev_stack_internal *ev;
782 	struct sk_buff *skb;
783 
784 	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
785 	if (!skb)
786 		return;
787 
788 	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
789 	hdr->evt  = HCI_EV_STACK_INTERNAL;
790 	hdr->plen = sizeof(*ev) + dlen;
791 
792 	ev = skb_put(skb, sizeof(*ev) + dlen);
793 	ev->type = type;
794 	memcpy(ev->data, data, dlen);
795 
796 	bt_cb(skb)->incoming = 1;
797 	__net_timestamp(skb);
798 
799 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
800 	hci_send_to_sock(hdev, skb);
801 	kfree_skb(skb);
802 }
803 
804 void hci_sock_dev_event(struct hci_dev *hdev, int event)
805 {
806 	BT_DBG("hdev %s event %d", hdev->name, event);
807 
808 	if (atomic_read(&monitor_promisc)) {
809 		struct sk_buff *skb;
810 
811 		/* Send event to monitor */
812 		skb = create_monitor_event(hdev, event);
813 		if (skb) {
814 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
815 					    HCI_SOCK_TRUSTED, NULL);
816 			kfree_skb(skb);
817 		}
818 	}
819 
820 	if (event <= HCI_DEV_DOWN) {
821 		struct hci_ev_si_device ev;
822 
823 		/* Send event to sockets */
824 		ev.event  = event;
825 		ev.dev_id = hdev->id;
826 		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
827 	}
828 
829 	if (event == HCI_DEV_UNREG) {
830 		struct sock *sk;
831 
832 		/* Wake up sockets using this dead device */
833 		read_lock(&hci_sk_list.lock);
834 		sk_for_each(sk, &hci_sk_list.head) {
835 			if (hci_pi(sk)->hdev == hdev) {
836 				sk->sk_err = EPIPE;
837 				sk->sk_state_change(sk);
838 			}
839 		}
840 		read_unlock(&hci_sk_list.lock);
841 	}
842 }
843 
844 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
845 {
846 	struct hci_mgmt_chan *c;
847 
848 	list_for_each_entry(c, &mgmt_chan_list, list) {
849 		if (c->channel == channel)
850 			return c;
851 	}
852 
853 	return NULL;
854 }
855 
856 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
857 {
858 	struct hci_mgmt_chan *c;
859 
860 	mutex_lock(&mgmt_chan_list_lock);
861 	c = __hci_mgmt_chan_find(channel);
862 	mutex_unlock(&mgmt_chan_list_lock);
863 
864 	return c;
865 }
866 
867 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
868 {
869 	if (c->channel < HCI_CHANNEL_CONTROL)
870 		return -EINVAL;
871 
872 	mutex_lock(&mgmt_chan_list_lock);
873 	if (__hci_mgmt_chan_find(c->channel)) {
874 		mutex_unlock(&mgmt_chan_list_lock);
875 		return -EALREADY;
876 	}
877 
878 	list_add_tail(&c->list, &mgmt_chan_list);
879 
880 	mutex_unlock(&mgmt_chan_list_lock);
881 
882 	return 0;
883 }
884 EXPORT_SYMBOL(hci_mgmt_chan_register);
885 
886 void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
887 {
888 	mutex_lock(&mgmt_chan_list_lock);
889 	list_del(&c->list);
890 	mutex_unlock(&mgmt_chan_list_lock);
891 }
892 EXPORT_SYMBOL(hci_mgmt_chan_unregister);
893 
894 static int hci_sock_release(struct socket *sock)
895 {
896 	struct sock *sk = sock->sk;
897 	struct hci_dev *hdev;
898 	struct sk_buff *skb;
899 
900 	BT_DBG("sock %p sk %p", sock, sk);
901 
902 	if (!sk)
903 		return 0;
904 
905 	lock_sock(sk);
906 
907 	switch (hci_pi(sk)->channel) {
908 	case HCI_CHANNEL_MONITOR:
909 		atomic_dec(&monitor_promisc);
910 		break;
911 	case HCI_CHANNEL_RAW:
912 	case HCI_CHANNEL_USER:
913 	case HCI_CHANNEL_CONTROL:
914 		/* Send event to monitor */
915 		skb = create_monitor_ctrl_close(sk);
916 		if (skb) {
917 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
918 					    HCI_SOCK_TRUSTED, NULL);
919 			kfree_skb(skb);
920 		}
921 
922 		hci_sock_free_cookie(sk);
923 		break;
924 	}
925 
926 	bt_sock_unlink(&hci_sk_list, sk);
927 
928 	hdev = hci_pi(sk)->hdev;
929 	if (hdev) {
930 		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
931 		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
932 			/* When releasing a user channel exclusive access,
933 			 * call hci_dev_do_close directly instead of calling
934 			 * hci_dev_close to ensure the exclusive access will
935 			 * be released and the controller brought back down.
936 			 *
937 			 * The checking of HCI_AUTO_OFF is not needed in this
938 			 * case since it will have been cleared already when
939 			 * opening the user channel.
940 			 *
941 			 * Make sure to also check that we haven't already
942 			 * unregistered since all the cleanup will have already
943 			 * been complete and hdev will get released when we put
944 			 * below.
945 			 */
946 			hci_dev_do_close(hdev);
947 			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
948 			mgmt_index_added(hdev);
949 		}
950 
951 		atomic_dec(&hdev->promisc);
952 		hci_dev_put(hdev);
953 	}
954 
955 	sock_orphan(sk);
956 	release_sock(sk);
957 	sock_put(sk);
958 	return 0;
959 }
960 
961 static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
962 {
963 	bdaddr_t bdaddr;
964 	int err;
965 
966 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
967 		return -EFAULT;
968 
969 	hci_dev_lock(hdev);
970 
971 	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
972 
973 	hci_dev_unlock(hdev);
974 
975 	return err;
976 }
977 
978 static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
979 {
980 	bdaddr_t bdaddr;
981 	int err;
982 
983 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
984 		return -EFAULT;
985 
986 	hci_dev_lock(hdev);
987 
988 	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
989 
990 	hci_dev_unlock(hdev);
991 
992 	return err;
993 }
994 
995 /* Ioctls that require bound socket */
996 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
997 				unsigned long arg)
998 {
999 	struct hci_dev *hdev = hci_hdev_from_sock(sk);
1000 
1001 	if (IS_ERR(hdev))
1002 		return PTR_ERR(hdev);
1003 
1004 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1005 		return -EBUSY;
1006 
1007 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1008 		return -EOPNOTSUPP;
1009 
1010 	if (hdev->dev_type != HCI_PRIMARY)
1011 		return -EOPNOTSUPP;
1012 
1013 	switch (cmd) {
1014 	case HCISETRAW:
1015 		if (!capable(CAP_NET_ADMIN))
1016 			return -EPERM;
1017 		return -EOPNOTSUPP;
1018 
1019 	case HCIGETCONNINFO:
1020 		return hci_get_conn_info(hdev, (void __user *)arg);
1021 
1022 	case HCIGETAUTHINFO:
1023 		return hci_get_auth_info(hdev, (void __user *)arg);
1024 
1025 	case HCIBLOCKADDR:
1026 		if (!capable(CAP_NET_ADMIN))
1027 			return -EPERM;
1028 		return hci_sock_reject_list_add(hdev, (void __user *)arg);
1029 
1030 	case HCIUNBLOCKADDR:
1031 		if (!capable(CAP_NET_ADMIN))
1032 			return -EPERM;
1033 		return hci_sock_reject_list_del(hdev, (void __user *)arg);
1034 	}
1035 
1036 	return -ENOIOCTLCMD;
1037 }
1038 
1039 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
1040 			  unsigned long arg)
1041 {
1042 	void __user *argp = (void __user *)arg;
1043 	struct sock *sk = sock->sk;
1044 	int err;
1045 
1046 	BT_DBG("cmd %x arg %lx", cmd, arg);
1047 
1048 	/* Make sure the cmd is valid before doing anything */
1049 	switch (cmd) {
1050 	case HCIGETDEVLIST:
1051 	case HCIGETDEVINFO:
1052 	case HCIGETCONNLIST:
1053 	case HCIDEVUP:
1054 	case HCIDEVDOWN:
1055 	case HCIDEVRESET:
1056 	case HCIDEVRESTAT:
1057 	case HCISETSCAN:
1058 	case HCISETAUTH:
1059 	case HCISETENCRYPT:
1060 	case HCISETPTYPE:
1061 	case HCISETLINKPOL:
1062 	case HCISETLINKMODE:
1063 	case HCISETACLMTU:
1064 	case HCISETSCOMTU:
1065 	case HCIINQUIRY:
1066 	case HCISETRAW:
1067 	case HCIGETCONNINFO:
1068 	case HCIGETAUTHINFO:
1069 	case HCIBLOCKADDR:
1070 	case HCIUNBLOCKADDR:
1071 		break;
1072 	default:
1073 		return -ENOIOCTLCMD;
1074 	}
1075 
1076 	lock_sock(sk);
1077 
1078 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1079 		err = -EBADFD;
1080 		goto done;
1081 	}
1082 
1083 	/* When calling an ioctl on an unbound raw socket, then ensure
1084 	 * that the monitor gets informed. Ensure that the resulting event
1085 	 * is only send once by checking if the cookie exists or not. The
1086 	 * socket cookie will be only ever generated once for the lifetime
1087 	 * of a given socket.
1088 	 */
1089 	if (hci_sock_gen_cookie(sk)) {
1090 		struct sk_buff *skb;
1091 
1092 		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
1093 		 * flag. Make sure that not only the current task but also
1094 		 * the socket opener has the required capability, since
1095 		 * privileged programs can be tricked into making ioctl calls
1096 		 * on HCI sockets, and the socket should not be marked as
1097 		 * trusted simply because the ioctl caller is privileged.
1098 		 */
1099 		if (sk_capable(sk, CAP_NET_ADMIN))
1100 			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1101 
1102 		/* Send event to monitor */
1103 		skb = create_monitor_ctrl_open(sk);
1104 		if (skb) {
1105 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1106 					    HCI_SOCK_TRUSTED, NULL);
1107 			kfree_skb(skb);
1108 		}
1109 	}
1110 
1111 	release_sock(sk);
1112 
1113 	switch (cmd) {
1114 	case HCIGETDEVLIST:
1115 		return hci_get_dev_list(argp);
1116 
1117 	case HCIGETDEVINFO:
1118 		return hci_get_dev_info(argp);
1119 
1120 	case HCIGETCONNLIST:
1121 		return hci_get_conn_list(argp);
1122 
1123 	case HCIDEVUP:
1124 		if (!capable(CAP_NET_ADMIN))
1125 			return -EPERM;
1126 		return hci_dev_open(arg);
1127 
1128 	case HCIDEVDOWN:
1129 		if (!capable(CAP_NET_ADMIN))
1130 			return -EPERM;
1131 		return hci_dev_close(arg);
1132 
1133 	case HCIDEVRESET:
1134 		if (!capable(CAP_NET_ADMIN))
1135 			return -EPERM;
1136 		return hci_dev_reset(arg);
1137 
1138 	case HCIDEVRESTAT:
1139 		if (!capable(CAP_NET_ADMIN))
1140 			return -EPERM;
1141 		return hci_dev_reset_stat(arg);
1142 
1143 	case HCISETSCAN:
1144 	case HCISETAUTH:
1145 	case HCISETENCRYPT:
1146 	case HCISETPTYPE:
1147 	case HCISETLINKPOL:
1148 	case HCISETLINKMODE:
1149 	case HCISETACLMTU:
1150 	case HCISETSCOMTU:
1151 		if (!capable(CAP_NET_ADMIN))
1152 			return -EPERM;
1153 		return hci_dev_cmd(cmd, argp);
1154 
1155 	case HCIINQUIRY:
1156 		return hci_inquiry(argp);
1157 	}
1158 
1159 	lock_sock(sk);
1160 
1161 	err = hci_sock_bound_ioctl(sk, cmd, arg);
1162 
1163 done:
1164 	release_sock(sk);
1165 	return err;
1166 }
1167 
1168 #ifdef CONFIG_COMPAT
1169 static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
1170 				 unsigned long arg)
1171 {
1172 	switch (cmd) {
1173 	case HCIDEVUP:
1174 	case HCIDEVDOWN:
1175 	case HCIDEVRESET:
1176 	case HCIDEVRESTAT:
1177 		return hci_sock_ioctl(sock, cmd, arg);
1178 	}
1179 
1180 	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
1181 }
1182 #endif
1183 
1184 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
1185 			 int addr_len)
1186 {
1187 	struct sockaddr_hci haddr;
1188 	struct sock *sk = sock->sk;
1189 	struct hci_dev *hdev = NULL;
1190 	struct sk_buff *skb;
1191 	int len, err = 0;
1192 
1193 	BT_DBG("sock %p sk %p", sock, sk);
1194 
1195 	if (!addr)
1196 		return -EINVAL;
1197 
1198 	memset(&haddr, 0, sizeof(haddr));
1199 	len = min_t(unsigned int, sizeof(haddr), addr_len);
1200 	memcpy(&haddr, addr, len);
1201 
1202 	if (haddr.hci_family != AF_BLUETOOTH)
1203 		return -EINVAL;
1204 
1205 	lock_sock(sk);
1206 
1207 	/* Allow detaching from dead device and attaching to alive device, if
1208 	 * the caller wants to re-bind (instead of close) this socket in
1209 	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
1210 	 */
1211 	hdev = hci_pi(sk)->hdev;
1212 	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1213 		hci_pi(sk)->hdev = NULL;
1214 		sk->sk_state = BT_OPEN;
1215 		hci_dev_put(hdev);
1216 	}
1217 	hdev = NULL;
1218 
1219 	if (sk->sk_state == BT_BOUND) {
1220 		err = -EALREADY;
1221 		goto done;
1222 	}
1223 
1224 	switch (haddr.hci_channel) {
1225 	case HCI_CHANNEL_RAW:
1226 		if (hci_pi(sk)->hdev) {
1227 			err = -EALREADY;
1228 			goto done;
1229 		}
1230 
1231 		if (haddr.hci_dev != HCI_DEV_NONE) {
1232 			hdev = hci_dev_get(haddr.hci_dev);
1233 			if (!hdev) {
1234 				err = -ENODEV;
1235 				goto done;
1236 			}
1237 
1238 			atomic_inc(&hdev->promisc);
1239 		}
1240 
1241 		hci_pi(sk)->channel = haddr.hci_channel;
1242 
1243 		if (!hci_sock_gen_cookie(sk)) {
1244 			/* In the case when a cookie has already been assigned,
1245 			 * then there has been already an ioctl issued against
1246 			 * an unbound socket and with that triggered an open
1247 			 * notification. Send a close notification first to
1248 			 * allow the state transition to bounded.
1249 			 */
1250 			skb = create_monitor_ctrl_close(sk);
1251 			if (skb) {
1252 				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1253 						    HCI_SOCK_TRUSTED, NULL);
1254 				kfree_skb(skb);
1255 			}
1256 		}
1257 
1258 		if (capable(CAP_NET_ADMIN))
1259 			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1260 
1261 		hci_pi(sk)->hdev = hdev;
1262 
1263 		/* Send event to monitor */
1264 		skb = create_monitor_ctrl_open(sk);
1265 		if (skb) {
1266 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1267 					    HCI_SOCK_TRUSTED, NULL);
1268 			kfree_skb(skb);
1269 		}
1270 		break;
1271 
1272 	case HCI_CHANNEL_USER:
1273 		if (hci_pi(sk)->hdev) {
1274 			err = -EALREADY;
1275 			goto done;
1276 		}
1277 
1278 		if (haddr.hci_dev == HCI_DEV_NONE) {
1279 			err = -EINVAL;
1280 			goto done;
1281 		}
1282 
1283 		if (!capable(CAP_NET_ADMIN)) {
1284 			err = -EPERM;
1285 			goto done;
1286 		}
1287 
1288 		hdev = hci_dev_get(haddr.hci_dev);
1289 		if (!hdev) {
1290 			err = -ENODEV;
1291 			goto done;
1292 		}
1293 
1294 		if (test_bit(HCI_INIT, &hdev->flags) ||
1295 		    hci_dev_test_flag(hdev, HCI_SETUP) ||
1296 		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1297 		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1298 		     test_bit(HCI_UP, &hdev->flags))) {
1299 			err = -EBUSY;
1300 			hci_dev_put(hdev);
1301 			goto done;
1302 		}
1303 
1304 		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
1305 			err = -EUSERS;
1306 			hci_dev_put(hdev);
1307 			goto done;
1308 		}
1309 
1310 		mgmt_index_removed(hdev);
1311 
1312 		err = hci_dev_open(hdev->id);
1313 		if (err) {
1314 			if (err == -EALREADY) {
1315 				/* In case the transport is already up and
1316 				 * running, clear the error here.
1317 				 *
1318 				 * This can happen when opening a user
1319 				 * channel and HCI_AUTO_OFF grace period
1320 				 * is still active.
1321 				 */
1322 				err = 0;
1323 			} else {
1324 				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
1325 				mgmt_index_added(hdev);
1326 				hci_dev_put(hdev);
1327 				goto done;
1328 			}
1329 		}
1330 
1331 		hci_pi(sk)->channel = haddr.hci_channel;
1332 
1333 		if (!hci_sock_gen_cookie(sk)) {
1334 			/* In the case when a cookie has already been assigned,
1335 			 * this socket will transition from a raw socket into
1336 			 * a user channel socket. For a clean transition, send
1337 			 * the close notification first.
1338 			 */
1339 			skb = create_monitor_ctrl_close(sk);
1340 			if (skb) {
1341 				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1342 						    HCI_SOCK_TRUSTED, NULL);
1343 				kfree_skb(skb);
1344 			}
1345 		}
1346 
1347 		/* The user channel is restricted to CAP_NET_ADMIN
1348 		 * capabilities and with that implicitly trusted.
1349 		 */
1350 		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1351 
1352 		hci_pi(sk)->hdev = hdev;
1353 
1354 		/* Send event to monitor */
1355 		skb = create_monitor_ctrl_open(sk);
1356 		if (skb) {
1357 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1358 					    HCI_SOCK_TRUSTED, NULL);
1359 			kfree_skb(skb);
1360 		}
1361 
1362 		atomic_inc(&hdev->promisc);
1363 		break;
1364 
1365 	case HCI_CHANNEL_MONITOR:
1366 		if (haddr.hci_dev != HCI_DEV_NONE) {
1367 			err = -EINVAL;
1368 			goto done;
1369 		}
1370 
1371 		if (!capable(CAP_NET_RAW)) {
1372 			err = -EPERM;
1373 			goto done;
1374 		}
1375 
1376 		hci_pi(sk)->channel = haddr.hci_channel;
1377 
1378 		/* The monitor interface is restricted to CAP_NET_RAW
1379 		 * capabilities and with that implicitly trusted.
1380 		 */
1381 		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1382 
1383 		send_monitor_note(sk, "Linux version %s (%s)",
1384 				  init_utsname()->release,
1385 				  init_utsname()->machine);
1386 		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
1387 				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
1388 		send_monitor_replay(sk);
1389 		send_monitor_control_replay(sk);
1390 
1391 		atomic_inc(&monitor_promisc);
1392 		break;
1393 
1394 	case HCI_CHANNEL_LOGGING:
1395 		if (haddr.hci_dev != HCI_DEV_NONE) {
1396 			err = -EINVAL;
1397 			goto done;
1398 		}
1399 
1400 		if (!capable(CAP_NET_ADMIN)) {
1401 			err = -EPERM;
1402 			goto done;
1403 		}
1404 
1405 		hci_pi(sk)->channel = haddr.hci_channel;
1406 		break;
1407 
1408 	default:
1409 		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
1410 			err = -EINVAL;
1411 			goto done;
1412 		}
1413 
1414 		if (haddr.hci_dev != HCI_DEV_NONE) {
1415 			err = -EINVAL;
1416 			goto done;
1417 		}
1418 
1419 		/* Users with CAP_NET_ADMIN capabilities are allowed
1420 		 * access to all management commands and events. For
1421 		 * untrusted users the interface is restricted and
1422 		 * also only untrusted events are sent.
1423 		 */
1424 		if (capable(CAP_NET_ADMIN))
1425 			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1426 
1427 		hci_pi(sk)->channel = haddr.hci_channel;
1428 
1429 		/* At the moment the index and unconfigured index events
1430 		 * are enabled unconditionally. Setting them on each
1431 		 * socket when binding keeps this functionality. They
1432 		 * however might be cleared later and then sending of these
1433 		 * events will be disabled, but that is then intentional.
1434 		 *
1435 		 * This also enables generic events that are safe to be
1436 		 * received by untrusted users. Example for such events
1437 		 * are changes to settings, class of device, name etc.
1438 		 */
1439 		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
1440 			if (!hci_sock_gen_cookie(sk)) {
1441 				/* In the case when a cookie has already been
1442 				 * assigned, this socket will transition from
1443 				 * a raw socket into a control socket. To
1444 				 * allow for a clean transition, send the
1445 				 * close notification first.
1446 				 */
1447 				skb = create_monitor_ctrl_close(sk);
1448 				if (skb) {
1449 					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1450 							    HCI_SOCK_TRUSTED, NULL);
1451 					kfree_skb(skb);
1452 				}
1453 			}
1454 
1455 			/* Send event to monitor */
1456 			skb = create_monitor_ctrl_open(sk);
1457 			if (skb) {
1458 				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1459 						    HCI_SOCK_TRUSTED, NULL);
1460 				kfree_skb(skb);
1461 			}
1462 
1463 			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
1464 			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
1465 			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
1466 			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
1467 			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1468 			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1469 		}
1470 		break;
1471 	}
1472 
1473 	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
1474 	if (!hci_pi(sk)->mtu)
1475 		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;
1476 
1477 	sk->sk_state = BT_BOUND;
1478 
1479 done:
1480 	release_sock(sk);
1481 	return err;
1482 }
1483 
1484 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1485 			    int peer)
1486 {
1487 	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1488 	struct sock *sk = sock->sk;
1489 	struct hci_dev *hdev;
1490 	int err = 0;
1491 
1492 	BT_DBG("sock %p sk %p", sock, sk);
1493 
1494 	if (peer)
1495 		return -EOPNOTSUPP;
1496 
1497 	lock_sock(sk);
1498 
1499 	hdev = hci_hdev_from_sock(sk);
1500 	if (IS_ERR(hdev)) {
1501 		err = PTR_ERR(hdev);
1502 		goto done;
1503 	}
1504 
1505 	haddr->hci_family = AF_BLUETOOTH;
1506 	haddr->hci_dev    = hdev->id;
1507 	haddr->hci_channel= hci_pi(sk)->channel;
1508 	err = sizeof(*haddr);
1509 
1510 done:
1511 	release_sock(sk);
1512 	return err;
1513 }
1514 
1515 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1516 			  struct sk_buff *skb)
1517 {
1518 	__u8 mask = hci_pi(sk)->cmsg_mask;
1519 
1520 	if (mask & HCI_CMSG_DIR) {
1521 		int incoming = bt_cb(skb)->incoming;
1522 		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1523 			 &incoming);
1524 	}
1525 
1526 	if (mask & HCI_CMSG_TSTAMP) {
1527 #ifdef CONFIG_COMPAT
1528 		struct old_timeval32 ctv;
1529 #endif
1530 		struct __kernel_old_timeval tv;
1531 		void *data;
1532 		int len;
1533 
1534 		skb_get_timestamp(skb, &tv);
1535 
1536 		data = &tv;
1537 		len = sizeof(tv);
1538 #ifdef CONFIG_COMPAT
1539 		if (!COMPAT_USE_64BIT_TIME &&
1540 		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
1541 			ctv.tv_sec = tv.tv_sec;
1542 			ctv.tv_usec = tv.tv_usec;
1543 			data = &ctv;
1544 			len = sizeof(ctv);
1545 		}
1546 #endif
1547 
1548 		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
1549 	}
1550 }
1551 
1552 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1553 			    size_t len, int flags)
1554 {
1555 	struct scm_cookie scm;
1556 	struct sock *sk = sock->sk;
1557 	struct sk_buff *skb;
1558 	int copied, err;
1559 	unsigned int skblen;
1560 
1561 	BT_DBG("sock %p, sk %p", sock, sk);
1562 
1563 	if (flags & MSG_OOB)
1564 		return -EOPNOTSUPP;
1565 
1566 	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1567 		return -EOPNOTSUPP;
1568 
1569 	if (sk->sk_state == BT_CLOSED)
1570 		return 0;
1571 
1572 	skb = skb_recv_datagram(sk, flags, &err);
1573 	if (!skb)
1574 		return err;
1575 
1576 	skblen = skb->len;
1577 	copied = skb->len;
1578 	if (len < copied) {
1579 		msg->msg_flags |= MSG_TRUNC;
1580 		copied = len;
1581 	}
1582 
1583 	skb_reset_transport_header(skb);
1584 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
1585 
1586 	switch (hci_pi(sk)->channel) {
1587 	case HCI_CHANNEL_RAW:
1588 		hci_sock_cmsg(sk, msg, skb);
1589 		break;
1590 	case HCI_CHANNEL_USER:
1591 	case HCI_CHANNEL_MONITOR:
1592 		sock_recv_timestamp(msg, sk, skb);
1593 		break;
1594 	default:
1595 		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1596 			sock_recv_timestamp(msg, sk, skb);
1597 		break;
1598 	}
1599 
1600 	memset(&scm, 0, sizeof(scm));
1601 	scm.creds = bt_cb(skb)->creds;
1602 
1603 	skb_free_datagram(sk, skb);
1604 
1605 	if (flags & MSG_TRUNC)
1606 		copied = skblen;
1607 
1608 	scm_recv(sock, msg, &scm, flags);
1609 
1610 	return err ? : copied;
1611 }
1612 
1613 static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1614 			struct sk_buff *skb)
1615 {
1616 	u8 *cp;
1617 	struct mgmt_hdr *hdr;
1618 	u16 opcode, index, len;
1619 	struct hci_dev *hdev = NULL;
1620 	const struct hci_mgmt_handler *handler;
1621 	bool var_len, no_hdev;
1622 	int err;
1623 
1624 	BT_DBG("got %d bytes", skb->len);
1625 
1626 	if (skb->len < sizeof(*hdr))
1627 		return -EINVAL;
1628 
1629 	hdr = (void *)skb->data;
1630 	opcode = __le16_to_cpu(hdr->opcode);
1631 	index = __le16_to_cpu(hdr->index);
1632 	len = __le16_to_cpu(hdr->len);
1633 
1634 	if (len != skb->len - sizeof(*hdr)) {
1635 		err = -EINVAL;
1636 		goto done;
1637 	}
1638 
1639 	if (chan->channel == HCI_CHANNEL_CONTROL) {
1640 		struct sk_buff *cmd;
1641 
1642 		/* Send event to monitor */
1643 		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
1644 						  skb->data + sizeof(*hdr));
1645 		if (cmd) {
1646 			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
1647 					    HCI_SOCK_TRUSTED, NULL);
1648 			kfree_skb(cmd);
1649 		}
1650 	}
1651 
1652 	if (opcode >= chan->handler_count ||
1653 	    chan->handlers[opcode].func == NULL) {
1654 		BT_DBG("Unknown op %u", opcode);
1655 		err = mgmt_cmd_status(sk, index, opcode,
1656 				      MGMT_STATUS_UNKNOWN_COMMAND);
1657 		goto done;
1658 	}
1659 
1660 	handler = &chan->handlers[opcode];
1661 
1662 	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1663 	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1664 		err = mgmt_cmd_status(sk, index, opcode,
1665 				      MGMT_STATUS_PERMISSION_DENIED);
1666 		goto done;
1667 	}
1668 
1669 	if (index != MGMT_INDEX_NONE) {
1670 		hdev = hci_dev_get(index);
1671 		if (!hdev) {
1672 			err = mgmt_cmd_status(sk, index, opcode,
1673 					      MGMT_STATUS_INVALID_INDEX);
1674 			goto done;
1675 		}
1676 
1677 		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1678 		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1679 		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1680 			err = mgmt_cmd_status(sk, index, opcode,
1681 					      MGMT_STATUS_INVALID_INDEX);
1682 			goto done;
1683 		}
1684 
1685 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1686 		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1687 			err = mgmt_cmd_status(sk, index, opcode,
1688 					      MGMT_STATUS_INVALID_INDEX);
1689 			goto done;
1690 		}
1691 	}
1692 
1693 	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
1694 		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1695 		if (no_hdev != !hdev) {
1696 			err = mgmt_cmd_status(sk, index, opcode,
1697 					      MGMT_STATUS_INVALID_INDEX);
1698 			goto done;
1699 		}
1700 	}
1701 
1702 	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1703 	if ((var_len && len < handler->data_len) ||
1704 	    (!var_len && len != handler->data_len)) {
1705 		err = mgmt_cmd_status(sk, index, opcode,
1706 				      MGMT_STATUS_INVALID_PARAMS);
1707 		goto done;
1708 	}
1709 
1710 	if (hdev && chan->hdev_init)
1711 		chan->hdev_init(sk, hdev);
1712 
1713 	cp = skb->data + sizeof(*hdr);
1714 
1715 	err = handler->func(sk, hdev, cp, len);
1716 	if (err < 0)
1717 		goto done;
1718 
1719 	err = skb->len;
1720 
1721 done:
1722 	if (hdev)
1723 		hci_dev_put(hdev);
1724 
1725 	return err;
1726 }
1727 
1728 static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
1729 			     unsigned int flags)
1730 {
1731 	struct hci_mon_hdr *hdr;
1732 	struct hci_dev *hdev;
1733 	u16 index;
1734 	int err;
1735 
1736 	/* The logging frame consists at minimum of the standard header,
1737 	 * the priority byte, the ident length byte and at least one string
1738 	 * terminator NUL byte. Anything shorter are invalid packets.
1739 	 */
1740 	if (skb->len < sizeof(*hdr) + 3)
1741 		return -EINVAL;
1742 
1743 	hdr = (void *)skb->data;
1744 
1745 	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
1746 		return -EINVAL;
1747 
1748 	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1749 		__u8 priority = skb->data[sizeof(*hdr)];
1750 		__u8 ident_len = skb->data[sizeof(*hdr) + 1];
1751 
1752 		/* Only the priorities 0-7 are valid and with that any other
1753 		 * value results in an invalid packet.
1754 		 *
1755 		 * The priority byte is followed by an ident length byte and
1756 		 * the NUL terminated ident string. Check that the ident
1757 		 * length is not overflowing the packet and also that the
1758 		 * ident string itself is NUL terminated. In case the ident
1759 		 * length is zero, the length value actually doubles as NUL
1760 		 * terminator identifier.
1761 		 *
1762 		 * The message follows the ident string (if present) and
1763 		 * must be NUL terminated. Otherwise it is not a valid packet.
1764 		 */
1765 		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
1766 		    ident_len > skb->len - sizeof(*hdr) - 3 ||
1767 		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
1768 			return -EINVAL;
1769 	} else {
1770 		return -EINVAL;
1771 	}
1772 
1773 	index = __le16_to_cpu(hdr->index);
1774 
1775 	if (index != MGMT_INDEX_NONE) {
1776 		hdev = hci_dev_get(index);
1777 		if (!hdev)
1778 			return -ENODEV;
1779 	} else {
1780 		hdev = NULL;
1781 	}
1782 
1783 	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1784 
1785 	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1786 	err = skb->len;
1787 
1788 	if (hdev)
1789 		hci_dev_put(hdev);
1790 
1791 	return err;
1792 }
1793 
1794 static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1795 			    size_t len)
1796 {
1797 	struct sock *sk = sock->sk;
1798 	struct hci_mgmt_chan *chan;
1799 	struct hci_dev *hdev;
1800 	struct sk_buff *skb;
1801 	int err;
1802 	const unsigned int flags = msg->msg_flags;
1803 
1804 	BT_DBG("sock %p sk %p", sock, sk);
1805 
1806 	if (flags & MSG_OOB)
1807 		return -EOPNOTSUPP;
1808 
1809 	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
1810 		return -EINVAL;
1811 
1812 	if (len < 4 || len > hci_pi(sk)->mtu)
1813 		return -EINVAL;
1814 
1815 	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
1816 	if (IS_ERR(skb))
1817 		return PTR_ERR(skb);
1818 
1819 	lock_sock(sk);
1820 
1821 	switch (hci_pi(sk)->channel) {
1822 	case HCI_CHANNEL_RAW:
1823 	case HCI_CHANNEL_USER:
1824 		break;
1825 	case HCI_CHANNEL_MONITOR:
1826 		err = -EOPNOTSUPP;
1827 		goto drop;
1828 	case HCI_CHANNEL_LOGGING:
1829 		err = hci_logging_frame(sk, skb, flags);
1830 		goto drop;
1831 	default:
1832 		mutex_lock(&mgmt_chan_list_lock);
1833 		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1834 		if (chan)
1835 			err = hci_mgmt_cmd(chan, sk, skb);
1836 		else
1837 			err = -EINVAL;
1838 
1839 		mutex_unlock(&mgmt_chan_list_lock);
1840 		goto drop;
1841 	}
1842 
1843 	hdev = hci_hdev_from_sock(sk);
1844 	if (IS_ERR(hdev)) {
1845 		err = PTR_ERR(hdev);
1846 		goto drop;
1847 	}
1848 
1849 	if (!test_bit(HCI_UP, &hdev->flags)) {
1850 		err = -ENETDOWN;
1851 		goto drop;
1852 	}
1853 
1854 	hci_skb_pkt_type(skb) = skb->data[0];
1855 	skb_pull(skb, 1);
1856 
1857 	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1858 		/* No permission check is needed for user channel
1859 		 * since that gets enforced when binding the socket.
1860 		 *
1861 		 * However check that the packet type is valid.
1862 		 */
1863 		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1864 		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1865 		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1866 		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
1867 			err = -EINVAL;
1868 			goto drop;
1869 		}
1870 
1871 		skb_queue_tail(&hdev->raw_q, skb);
1872 		queue_work(hdev->workqueue, &hdev->tx_work);
1873 	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1874 		u16 opcode = get_unaligned_le16(skb->data);
1875 		u16 ogf = hci_opcode_ogf(opcode);
1876 		u16 ocf = hci_opcode_ocf(opcode);
1877 
1878 		if (((ogf > HCI_SFLT_MAX_OGF) ||
1879 		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1880 				   &hci_sec_filter.ocf_mask[ogf])) &&
1881 		    !capable(CAP_NET_RAW)) {
1882 			err = -EPERM;
1883 			goto drop;
1884 		}
1885 
1886 		/* Since the opcode has already been extracted here, store
1887 		 * a copy of the value for later use by the drivers.
1888 		 */
1889 		hci_skb_opcode(skb) = opcode;
1890 
1891 		if (ogf == 0x3f) {
1892 			skb_queue_tail(&hdev->raw_q, skb);
1893 			queue_work(hdev->workqueue, &hdev->tx_work);
1894 		} else {
1895 			/* Stand-alone HCI commands must be flagged as
1896 			 * single-command requests.
1897 			 */
1898 			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1899 
1900 			skb_queue_tail(&hdev->cmd_q, skb);
1901 			queue_work(hdev->workqueue, &hdev->cmd_work);
1902 		}
1903 	} else {
1904 		if (!capable(CAP_NET_RAW)) {
1905 			err = -EPERM;
1906 			goto drop;
1907 		}
1908 
1909 		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1910 		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1911 		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
1912 			err = -EINVAL;
1913 			goto drop;
1914 		}
1915 
1916 		skb_queue_tail(&hdev->raw_q, skb);
1917 		queue_work(hdev->workqueue, &hdev->tx_work);
1918 	}
1919 
1920 	err = len;
1921 
1922 done:
1923 	release_sock(sk);
1924 	return err;
1925 
1926 drop:
1927 	kfree_skb(skb);
1928 	goto done;
1929 }
1930 
1931 static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
1932 				   sockptr_t optval, unsigned int len)
1933 {
1934 	struct hci_ufilter uf = { .opcode = 0 };
1935 	struct sock *sk = sock->sk;
1936 	int err = 0, opt = 0;
1937 
1938 	BT_DBG("sk %p, opt %d", sk, optname);
1939 
1940 	lock_sock(sk);
1941 
1942 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1943 		err = -EBADFD;
1944 		goto done;
1945 	}
1946 
1947 	switch (optname) {
1948 	case HCI_DATA_DIR:
1949 		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1950 			err = -EFAULT;
1951 			break;
1952 		}
1953 
1954 		if (opt)
1955 			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1956 		else
1957 			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1958 		break;
1959 
1960 	case HCI_TIME_STAMP:
1961 		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1962 			err = -EFAULT;
1963 			break;
1964 		}
1965 
1966 		if (opt)
1967 			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1968 		else
1969 			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1970 		break;
1971 
1972 	case HCI_FILTER:
1973 		{
1974 			struct hci_filter *f = &hci_pi(sk)->filter;
1975 
1976 			uf.type_mask = f->type_mask;
1977 			uf.opcode    = f->opcode;
1978 			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1979 			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1980 		}
1981 
1982 		len = min_t(unsigned int, len, sizeof(uf));
1983 		if (copy_from_sockptr(&uf, optval, len)) {
1984 			err = -EFAULT;
1985 			break;
1986 		}
1987 
1988 		if (!capable(CAP_NET_RAW)) {
1989 			uf.type_mask &= hci_sec_filter.type_mask;
1990 			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1991 			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1992 		}
1993 
1994 		{
1995 			struct hci_filter *f = &hci_pi(sk)->filter;
1996 
1997 			f->type_mask = uf.type_mask;
1998 			f->opcode    = uf.opcode;
1999 			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
2000 			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
2001 		}
2002 		break;
2003 
2004 	default:
2005 		err = -ENOPROTOOPT;
2006 		break;
2007 	}
2008 
2009 done:
2010 	release_sock(sk);
2011 	return err;
2012 }
2013 
2014 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
2015 			       sockptr_t optval, unsigned int len)
2016 {
2017 	struct sock *sk = sock->sk;
2018 	int err = 0;
2019 	u16 opt;
2020 
2021 	BT_DBG("sk %p, opt %d", sk, optname);
2022 
2023 	if (level == SOL_HCI)
2024 		return hci_sock_setsockopt_old(sock, level, optname, optval,
2025 					       len);
2026 
2027 	if (level != SOL_BLUETOOTH)
2028 		return -ENOPROTOOPT;
2029 
2030 	lock_sock(sk);
2031 
2032 	switch (optname) {
2033 	case BT_SNDMTU:
2034 	case BT_RCVMTU:
2035 		switch (hci_pi(sk)->channel) {
2036 		/* Don't allow changing MTU for channels that are meant for HCI
2037 		 * traffic only.
2038 		 */
2039 		case HCI_CHANNEL_RAW:
2040 		case HCI_CHANNEL_USER:
2041 			err = -ENOPROTOOPT;
2042 			goto done;
2043 		}
2044 
2045 		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
2046 			err = -EFAULT;
2047 			break;
2048 		}
2049 
2050 		hci_pi(sk)->mtu = opt;
2051 		break;
2052 
2053 	default:
2054 		err = -ENOPROTOOPT;
2055 		break;
2056 	}
2057 
2058 done:
2059 	release_sock(sk);
2060 	return err;
2061 }
2062 
2063 static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
2064 				   char __user *optval, int __user *optlen)
2065 {
2066 	struct hci_ufilter uf;
2067 	struct sock *sk = sock->sk;
2068 	int len, opt, err = 0;
2069 
2070 	BT_DBG("sk %p, opt %d", sk, optname);
2071 
2072 	if (get_user(len, optlen))
2073 		return -EFAULT;
2074 
2075 	lock_sock(sk);
2076 
2077 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
2078 		err = -EBADFD;
2079 		goto done;
2080 	}
2081 
2082 	switch (optname) {
2083 	case HCI_DATA_DIR:
2084 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
2085 			opt = 1;
2086 		else
2087 			opt = 0;
2088 
2089 		if (put_user(opt, optval))
2090 			err = -EFAULT;
2091 		break;
2092 
2093 	case HCI_TIME_STAMP:
2094 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
2095 			opt = 1;
2096 		else
2097 			opt = 0;
2098 
2099 		if (put_user(opt, optval))
2100 			err = -EFAULT;
2101 		break;
2102 
2103 	case HCI_FILTER:
2104 		{
2105 			struct hci_filter *f = &hci_pi(sk)->filter;
2106 
2107 			memset(&uf, 0, sizeof(uf));
2108 			uf.type_mask = f->type_mask;
2109 			uf.opcode    = f->opcode;
2110 			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
2111 			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
2112 		}
2113 
2114 		len = min_t(unsigned int, len, sizeof(uf));
2115 		if (copy_to_user(optval, &uf, len))
2116 			err = -EFAULT;
2117 		break;
2118 
2119 	default:
2120 		err = -ENOPROTOOPT;
2121 		break;
2122 	}
2123 
2124 done:
2125 	release_sock(sk);
2126 	return err;
2127 }
2128 
2129 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
2130 			       char __user *optval, int __user *optlen)
2131 {
2132 	struct sock *sk = sock->sk;
2133 	int err = 0;
2134 
2135 	BT_DBG("sk %p, opt %d", sk, optname);
2136 
2137 	if (level == SOL_HCI)
2138 		return hci_sock_getsockopt_old(sock, level, optname, optval,
2139 					       optlen);
2140 
2141 	if (level != SOL_BLUETOOTH)
2142 		return -ENOPROTOOPT;
2143 
2144 	lock_sock(sk);
2145 
2146 	switch (optname) {
2147 	case BT_SNDMTU:
2148 	case BT_RCVMTU:
2149 		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
2150 			err = -EFAULT;
2151 		break;
2152 
2153 	default:
2154 		err = -ENOPROTOOPT;
2155 		break;
2156 	}
2157 
2158 	release_sock(sk);
2159 	return err;
2160 }
2161 
2162 static void hci_sock_destruct(struct sock *sk)
2163 {
2164 	mgmt_cleanup(sk);
2165 	skb_queue_purge(&sk->sk_receive_queue);
2166 	skb_queue_purge(&sk->sk_write_queue);
2167 }
2168 
2169 static const struct proto_ops hci_sock_ops = {
2170 	.family		= PF_BLUETOOTH,
2171 	.owner		= THIS_MODULE,
2172 	.release	= hci_sock_release,
2173 	.bind		= hci_sock_bind,
2174 	.getname	= hci_sock_getname,
2175 	.sendmsg	= hci_sock_sendmsg,
2176 	.recvmsg	= hci_sock_recvmsg,
2177 	.ioctl		= hci_sock_ioctl,
2178 #ifdef CONFIG_COMPAT
2179 	.compat_ioctl	= hci_sock_compat_ioctl,
2180 #endif
2181 	.poll		= datagram_poll,
2182 	.listen		= sock_no_listen,
2183 	.shutdown	= sock_no_shutdown,
2184 	.setsockopt	= hci_sock_setsockopt,
2185 	.getsockopt	= hci_sock_getsockopt,
2186 	.connect	= sock_no_connect,
2187 	.socketpair	= sock_no_socketpair,
2188 	.accept		= sock_no_accept,
2189 	.mmap		= sock_no_mmap
2190 };
2191 
2192 static struct proto hci_sk_proto = {
2193 	.name		= "HCI",
2194 	.owner		= THIS_MODULE,
2195 	.obj_size	= sizeof(struct hci_pinfo)
2196 };
2197 
2198 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
2199 			   int kern)
2200 {
2201 	struct sock *sk;
2202 
2203 	BT_DBG("sock %p", sock);
2204 
2205 	if (sock->type != SOCK_RAW)
2206 		return -ESOCKTNOSUPPORT;
2207 
2208 	sock->ops = &hci_sock_ops;
2209 
2210 	sk = bt_sock_alloc(net, sock, &hci_sk_proto, protocol, GFP_ATOMIC,
2211 			   kern);
2212 	if (!sk)
2213 		return -ENOMEM;
2214 
2215 	sock->state = SS_UNCONNECTED;
2216 	sk->sk_destruct = hci_sock_destruct;
2217 
2218 	bt_sock_link(&hci_sk_list, sk);
2219 	return 0;
2220 }
2221 
2222 static const struct net_proto_family hci_sock_family_ops = {
2223 	.family	= PF_BLUETOOTH,
2224 	.owner	= THIS_MODULE,
2225 	.create	= hci_sock_create,
2226 };
2227 
2228 int __init hci_sock_init(void)
2229 {
2230 	int err;
2231 
2232 	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2233 
2234 	err = proto_register(&hci_sk_proto, 0);
2235 	if (err < 0)
2236 		return err;
2237 
2238 	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2239 	if (err < 0) {
2240 		BT_ERR("HCI socket registration failed");
2241 		goto error;
2242 	}
2243 
2244 	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2245 	if (err < 0) {
2246 		BT_ERR("Failed to create HCI proc file");
2247 		bt_sock_unregister(BTPROTO_HCI);
2248 		goto error;
2249 	}
2250 
2251 	BT_INFO("HCI socket layer initialized");
2252 
2253 	return 0;
2254 
2255 error:
2256 	proto_unregister(&hci_sk_proto);
2257 	return err;
2258 }
2259 
2260 void hci_sock_cleanup(void)
2261 {
2262 	bt_procfs_cleanup(&init_net, "hci");
2263 	bt_sock_unregister(BTPROTO_HCI);
2264 	proto_unregister(&hci_sk_proto);
2265 }
2266