/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u8              cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
	__u16             mtu;
};

static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}

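/* Assign a unique cookie to the socket on first use. The cookie is
 * allocated once for the lifetime of the socket and is used to
 * correlate monitor control events with the owning process, whose
 * comm name is recorded at the same time. Returns true only when a
 * new cookie was generated.
 */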
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_alloc_min(&sock_cookie_ida, 1, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_free(&sock_cookie_ida, id);
	}
}

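/* Variant of test_bit() that operates on an array of __u32 words
 * instead of unsigned long, matching the fixed layout of the filter
 * event and OCF masks below.
 */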
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

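/* Each set bit in the masks below marks a packet type, event or
 * command opcode (indexed by OGF/OCF) that sockets without
 * CAP_NET_RAW are allowed to use or receive.
 */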
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

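/* Returns true if the socket filter rejects the packet, i.e. the
 * frame must not be queued on this socket.
 */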
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

static void hci_sock_copy_creds(struct sock *sk, struct sk_buff *skb)
{
	struct scm_creds *creds;

	if (!sk || WARN_ON(!skb))
		return;

	creds = &bt_cb(skb)->creds;

	/* Check if peer credentials are set */
	if (!sk->sk_peer_pid) {
		/* Check if parent peer credentials are set */
		if (bt_sk(sk)->parent && bt_sk(sk)->parent->sk_peer_pid)
			sk = bt_sk(sk)->parent;
		else
			return;
	}

	/* Check if scm_creds already set */
	if (creds->pid == pid_vnr(sk->sk_peer_pid))
		return;

	memset(creds, 0, sizeof(*creds));

	creds->pid = pid_vnr(sk->sk_peer_pid);
	if (sk->sk_peer_cred) {
		creds->uid = sk->sk_peer_cred->uid;
		creds->gid = sk->sk_peer_cred->gid;
	}
}

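/* Clone an skb and carry over the SCM credentials of the socket the
 * original frame came from, so receivers on the monitor channel can
 * attribute the frame.
 */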
static struct sk_buff *hci_skb_clone(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (!skb)
		return NULL;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_sock_copy_creds(skb->sk, nskb);

	return nskb;
}

/* Send frame to sockets with specific channel */
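/* __hci_send_to_channel() must be called with hci_sk_list.lock held;
 * hci_send_to_channel() below is the locking wrapper.
 */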
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = hci_skb_clone(skb);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	hci_sock_copy_creds(skb->sk, skb_copy);

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}

void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

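/* Build the monitor message that corresponds to a device lifecycle
 * event (register, unregister, open, close, setup, up). Returns NULL
 * when the event does not map to a monitor message.
 */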
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = 0x00; /* Old hdev->dev_type */
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
			       strnlen(hdev->name, sizeof(ni->name)), '\0');

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;
		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	hci_sock_copy_creds(sk, skb);

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}

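/* Replay the current state of all registered controllers to a newly
 * bound monitor socket, so the reader starts with a consistent view:
 * a register event for every device, followed by open and up/setup
 * events where applicable.
 */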
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 *
			 * Make sure to also check that we haven't already
			 * unregistered since all the cleanup will have already
			 * been completed and hdev will get released when we
			 * put below.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}

static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	switch (cmd) {
	case HCIGETDEVLIST:
	case HCIGETDEVINFO:
	case HCIGETCONNLIST:
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
	case HCIINQUIRY:
	case HCISETRAW:
	case HCIGETCONNINFO:
	case HCIGETAUTHINFO:
	case HCIBLOCKADDR:
	case HCIUNBLOCKADDR:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only sent once by checking if the cookie exists or not. The
	 * socket cookie will only ever be generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

#ifdef CONFIG_COMPAT
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		return hci_sock_ioctl(sock, cmd, arg);
	}

	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif

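/* Bind the socket to a channel and, for the raw and user channels, to
 * a controller index. The user channel requires CAP_NET_ADMIN and
 * takes exclusive access to the device, the monitor channel requires
 * CAP_NET_RAW, and the logging channel requires CAP_NET_ADMIN.
 */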
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * there has already been an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
	if (!hci_pi(sk)->mtu)
		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u8 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	memset(&scm, 0, sizeof(scm));
	scm.creds = bt_cb(skb)->creds;

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	scm_recv(sock, msg, &scm, flags);

	return err ? : copied;
}

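/* Dispatch one management command: validate the header and length,
 * mirror the command to the monitor channel, enforce the trusted and
 * index restrictions, and finally invoke the registered handler.
 */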
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct sk_buff *skb)
{
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %d bytes", skb->len);

	if (skb->len < sizeof(*hdr))
		return -EINVAL;

	hdr = (void *)skb->data;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != skb->len - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *cmd;

		/* Send event to monitor */
		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
						  skb->data + sizeof(*hdr));
		if (cmd) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(cmd);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = skb->data + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = skb->len;

done:
	if (hdev)
		hci_dev_put(hdev);

	return err;
}

static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags)
{
	struct hci_mon_hdr *hdr;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (skb->len < sizeof(*hdr) + 3)
		return -EINVAL;

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
		return -EINVAL;

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
		    ident_len > skb->len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev)
			return -ENODEV;
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = skb->len;

	if (hdev)
		hci_dev_put(hdev);

	return err;
}

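/* Transmit path. The first byte of every frame sent on the raw and
 * user channels is the HCI packet type; management channels carry a
 * mgmt_hdr instead and are dispatched to hci_mgmt_cmd().
 */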
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;
	const unsigned int flags = msg->msg_flags;

	BT_DBG("sock %p sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > hci_pi(sk)->mtu)
		return -EINVAL;

	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto drop;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, skb, flags);
		goto drop;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, skb);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto drop;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto drop;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
				   sockptr_t optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
		if (err)
			break;

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
		if (err)
			break;

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		err = bt_copy_from_sockptr(&uf, sizeof(uf), optval, len);
		if (err)
			break;

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int len)
{
	struct sock *sk = sock->sk;
	int err = 0;
	u16 opt;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_setsockopt_old(sock, level, optname, optval,
					       len);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		switch (hci_pi(sk)->channel) {
		/* Don't allow changing MTU for channels that are meant for HCI
		 * traffic only.
		 */
		case HCI_CHANNEL_RAW:
		case HCI_CHANNEL_USER:
			err = -ENOPROTOOPT;
			goto done;
		}

		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
		if (err)
			break;

		hci_pi(sk)->mtu = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
				   char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_getsockopt_old(sock, level, optname, optval,
					       optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

static void hci_sock_destruct(struct sock *sk)
{
	mgmt_cleanup(sk);
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= hci_sock_compat_ioctl,
#endif
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = bt_sock_alloc(net, sock, &hci_sk_proto, protocol, GFP_ATOMIC,
			   kern);
	if (!sk)
		return -ENOMEM;

	sock->state = SS_UNCONNECTED;
	sk->sk_destruct = hci_sock_destruct;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}