/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
};
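
/* The hci_pi() cast above works because struct bt_sock embeds struct sock
 * as its first member, so the struct sock of every HCI socket is also the
 * start of its struct hci_pinfo.
 */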

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
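
/* hci_test_bit() indexes a flat array of 32-bit words: nr >> 5 selects the
 * word and nr & 31 the bit within it. For example, event code
 * HCI_EV_CMD_COMPLETE (0x0e == 14) is word 0, bit 14 of an event mask.
 */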

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
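
/* Reading the table: each field is a bitmap indexed as in hci_test_bit().
 * type_mask 0x10 sets only bit 4, i.e. HCI_EVENT_PKT (0x04), so an
 * unprivileged raw socket may only receive event packets by default.
 * Likewise, event_mask word 0 (0x1000d9fe) has bits 14 and 15 set, which
 * whitelists HCI_EV_CMD_COMPLETE (0x0e) and HCI_EV_CMD_STATUS (0x0f).
 */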

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

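	/* Event packet layout: byte 0 is the event code and byte 1 the
	 * parameter length. For Command Complete the opcode follows the
	 * Num_HCI_Command_Packets byte (offset 3); for Command Status it
	 * follows the Status and Num_HCI_Command_Packets bytes (offset 4).
	 * That is where the +3/+4 offsets below come from.
	 */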
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
			    bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
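
/* Illustrative user-space sketch (not compiled here): a raw-channel reader
 * sees each frame with the packet-type byte prepended, matching the
 * skb_push() above; handle_event is a hypothetical callback.
 *
 *	unsigned char buf[HCI_MAX_FRAME_SIZE];
 *	int n = read(fd, buf, sizeof(buf));
 *	if (n > 2 && buf[0] == HCI_EVENT_PKT)
 *		handle_event(buf[1], buf + 3, buf[2]);	// event code, params, plen
 */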

/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore sockets without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
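
/* Each monitor frame is prefixed with struct hci_mon_hdr: three
 * little-endian 16-bit fields (opcode, controller index, payload length).
 * This is the framing that tools such as btmon consume.
 */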

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
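
/* Stack-internal events are synthesized as ordinary HCI event packets
 * (event code HCI_EV_STACK_INTERNAL, 0xfd), so raw sockets receive them
 * through the same filter machinery as real controller events.
 */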

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
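
/* Illustrative sketch of how a management channel such as mgmt.c would
 * register itself; the field names follow the accesses in this file,
 * while the handler table and init callback names are hypothetical.
 *
 *	static struct hci_mgmt_chan example_chan = {
 *		.channel       = HCI_CHANNEL_CONTROL,
 *		.handler_count = ARRAY_SIZE(example_handlers),
 *		.handlers      = example_handlers,
 *		.hdev_init     = example_init_hdev,
 *	};
 *
 *	err = hci_mgmt_chan_register(&example_chan);
 */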

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel's exclusive access,
			 * call hci_dev_do_close directly instead of
			 * hci_dev_close to ensure the exclusive access is
			 * released and the controller is brought back down.
			 *
			 * Checking HCI_AUTO_OFF is not needed in this case,
			 * since it will already have been cleared when the
			 * user channel was opened.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
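
/* Illustrative user-space sketch (not compiled here): driving the ioctl
 * path above to bring hci0 up; HCIDEVUP requires CAP_NET_ADMIN.
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	if (ioctl(fd, HCIDEVUP, 0) < 0)		// 0 == device index of hci0
 *		perror("HCIDEVUP");
 */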

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel while the HCI_AUTO_OFF grace
				 * period is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted, and
		 * only events marked as safe for untrusted users are
		 * sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * may, however, be cleared later, in which case these
		 * events are intentionally no longer sent.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name, etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
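
/* Illustrative user-space sketch (not compiled here): binding a raw
 * channel socket to hci0, matching the HCI_CHANNEL_RAW case above.
 *
 *	struct sockaddr_hci a = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,			// hci0
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
 *		perror("bind");
 */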

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
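
/* Management commands are framed by struct mgmt_hdr: three little-endian
 * 16-bit fields (opcode, controller index, parameter length) followed by
 * len bytes of parameters, which is exactly what the length check above
 * enforces against msglen.
 */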

static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

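		/* Vendor-specific commands (OGF 0x3f) bypass the command
		 * queue and its serialization and go straight to the
		 * driver via raw_q.
		 */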
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
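
/* Illustrative user-space sketch (not compiled here, assuming the filter
 * helpers from BlueZ's <bluetooth/hci_lib.h>): installing a filter that
 * passes all event packets, matching the HCI_FILTER case above. Without
 * CAP_NET_RAW the kernel further intersects the request with
 * hci_sec_filter.
 *
 *	struct hci_filter flt;
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_all_events(&flt);
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 */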

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}