/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

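/* Initiate an outgoing ACL connection (HCI Create Connection), reusing
 * cached inquiry data such as the page scan mode and clock offset when
 * the remote device is still present in the inquiry cache */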
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			cp.clock_offset   = ie->data.clock_offset |
							cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		conn->ssp_mode = ie->data.ssp_mode;
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

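/* Cancel an outgoing ACL connection attempt (Create Connection Cancel
 * is only available on Bluetooth 1.2 and later controllers) */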
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("%p", conn);

	if (conn->hdev->hci_ver < 2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

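/* Tear down an established link by sending HCI Disconnect with the
 * given reason code */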
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("%p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

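/* Request a SCO link on top of an existing ACL connection using the
 * legacy Add SCO Connection command */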
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

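/* Request a synchronous (SCO/eSCO) link on top of an existing ACL
 * connection using Setup Synchronous Connection */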
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.max_latency    = cpu_to_le16(0xffff);
	cp.voice_setting  = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

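/* Disconnect timer: fires once a connection has no more users and
 * either cancels the pending connect or disconnects the link,
 * depending on the current connection state */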
static void hci_conn_timeout(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;
	__u8 reason;

	BT_DBG("conn %p state %d", conn, conn->state);

	if (atomic_read(&conn->refcnt))
		return;

	hci_dev_lock(hdev);

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->type == ACL_LINK)
			hci_acl_connect_cancel(conn);
		else
			hci_acl_disconn(conn, 0x13);
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		reason = hci_proto_disconn_ind(conn);
		hci_acl_disconn(conn, reason);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_dev_unlock(hdev);
}

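/* Idle timer: put an inactive connection into sniff mode */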
static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}

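/* Allocate a new connection object for the given link type and remote
 * address, pick the packet types supported by the controller and
 * register it with the device's connection hash */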
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;

	conn->power_save = 1;
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	hci_conn_init_sysfs(conn);

	tasklet_enable(&hdev->tx_task);

	return conn;
}

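/* Unlink a connection from its device: return unacknowledged ACL
 * credits, remove it from the connection hash and drop its sysfs
 * entry */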
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	del_timer(&conn->disc_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	tasklet_enable(&hdev->tx_task);

	skb_queue_purge(&conn->data_q);

	hci_conn_del_sysfs(conn);

	return 0;
}

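/* Pick the local HCI device to use for a connection from src to dst:
 * with a source address, match it exactly; otherwise use the first
 * usable device whose address differs from the destination */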
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock_bh(&hci_dev_list_lock);

	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);

		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock_bh(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO or ACL connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	if (!(acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst))) {
		if (!(acl = hci_conn_add(hdev, ACL_LINK, dst)))
			return NULL;
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	}

	if (type == ACL_LINK)
		return acl;

	if (!(sco = hci_conn_hash_lookup_ba(hdev, type, dst))) {
		if (!(sco = hci_conn_add(hdev, type, dst))) {
			hci_conn_put(acl);
			return NULL;
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		if (lmp_esco_capable(hdev))
			hci_setup_sync(sco, acl->handle);
		else
			hci_add_sco(sco, acl->handle);
	}

	return sco;
}
EXPORT_SYMBOL(hci_connect);

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
					!(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (sec_level > conn->sec_level)
		conn->sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
							sizeof(cp), &cp);
	}

	return 0;
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (sec_level == BT_SECURITY_SDP)
		return 1;

	if (sec_level == BT_SECURITY_LOW &&
				(!conn->ssp_mode || !conn->hdev->ssp_mode))
		return 1;

	if (conn->link_mode & HCI_LM_ENCRYPT)
		return hci_conn_auth(conn, sec_level, auth_type);

	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return 0;

	if (hci_conn_auth(conn, sec_level, auth_type)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 1;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("conn %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;

	BT_DBG("hdev %s", hdev->name);

	p = h->list.next;
	while (p != &h->list) {
		struct hci_conn *c;

		c = list_entry(p, struct hci_conn, list);
		p = p->next;

		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, 0x16);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}

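/* ioctl helper: copy the list of active connections on a device to
 * user space */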
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	struct list_head *p;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	if (!(cl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	if (!(hdev = hci_dev_get(req.dev_id))) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock_bh(hdev);
	list_for_each(p, &hdev->conn_hash.list) {
		register struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock_bh(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

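/* ioctl helper: copy information about a single connection to user
 * space */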
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

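/* ioctl helper: report the authentication type of an ACL connection
 * to user space */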
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}