/* xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision b627b4ed) */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

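/* Initiate an outgoing ACL connection: mark the connection as an outgoing,
 * master-role BT_CONNECT attempt and send HCI_Create_Connection, seeding the
 * page scan parameters and clock offset from the inquiry cache when a fresh
 * entry for the destination address is available. */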
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			cp.clock_offset   = ie->data.clock_offset |
							cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		conn->ssp_mode = ie->data.ssp_mode;
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

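/* Abort a pending outgoing ACL connection.  Create_Connection_Cancel was
 * introduced with Bluetooth 1.2, so controllers reporting hci_ver < 2 are
 * skipped. */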
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("%p", conn);

	if (conn->hdev->hci_ver < 2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

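/* Request disconnection of an established link with the given HCI reason
 * code and move the connection to BT_DISCONN until the controller confirms. */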
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("%p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

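/* Set up a legacy SCO link on top of the ACL connection identified by
 * @handle using HCI_Add_SCO_Connection. */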
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

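/* Set up a synchronous (eSCO) link on top of the ACL connection identified
 * by @handle.  The fixed 0x1f40 (8000 byte/s) bandwidth corresponds to
 * 64 kbit/s voice; max_latency and retrans_effort use the "don't care"
 * values, leaving them to the controller. */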
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.max_latency    = cpu_to_le16(0xffff);
	cp.voice_setting  = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

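/* Disconnect timer: fires when the connection refcount has dropped to zero
 * and the link has been unused for too long.  Cancel connections that are
 * still being set up, cleanly disconnect established ones, and otherwise
 * just mark the connection closed. */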
static void hci_conn_timeout(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;
	__u8 reason;

	BT_DBG("conn %p state %d", conn, conn->state);

	if (atomic_read(&conn->refcnt))
		return;

	hci_dev_lock(hdev);

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->type == ACL_LINK)
			hci_acl_connect_cancel(conn);
		else
			hci_acl_disconn(conn, 0x13);
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		reason = hci_proto_disconn_ind(conn);
		hci_acl_disconn(conn, reason);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_dev_unlock(hdev);
}

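/* Idle timer: no traffic for hdev->idle_timeout ms, so try to put the link
 * into sniff mode to save power. */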
static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}

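/* Allocate and register a new connection object for @dst on @hdev, picking
 * the allowed packet types for the link type and initializing the disconnect
 * and idle timers.  Returns NULL if allocation fails. */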
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;

	conn->power_save = 1;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	tasklet_enable(&hdev->tx_task);

	return conn;
}

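/* Tear down a connection object: stop its timers, detach any linked SCO/ACL
 * peer, return unacknowledged packet credits to the device, and remove the
 * connection from the hash and from sysfs. */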
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	del_timer(&conn->disc_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	tasklet_enable(&hdev->tx_task);

	skb_queue_purge(&conn->data_q);

	hci_conn_del_sysfs(conn);

	return 0;
}

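/* Pick the local adapter to use for a connection to @dst: the adapter whose
 * address matches @src, or, when no source address is given, the first
 * adapter that is up and whose own address differs from @dst. */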
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock_bh(&hci_dev_list_lock);

	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);

		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock_bh(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO or ACL connection.
 * Device _must_ be locked */
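/* Illustrative caller pattern (a sketch, not code from this file): a
 * protocol layer such as L2CAP would typically do something like
 *
 *	hci_dev_lock_bh(hdev);
 *	conn = hci_connect(hdev, ACL_LINK, dst, sec_level, auth_type);
 *	if (conn)
 *		... use conn, then hci_conn_put(conn) when done ...
 *	hci_dev_unlock_bh(hdev);
 */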
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	if (!(acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst))) {
		if (!(acl = hci_conn_add(hdev, ACL_LINK, dst)))
			return NULL;
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	}

	if (type == ACL_LINK)
		return acl;

	if (!(sco = hci_conn_hash_lookup_ba(hdev, type, dst))) {
		if (!(sco = hci_conn_add(hdev, type, dst))) {
			hci_conn_put(acl);
			return NULL;
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		if (lmp_esco_capable(hdev))
			hci_setup_sync(sco, acl->handle);
		else
			hci_add_sco(sco, acl->handle);
	}

	return sco;
}
EXPORT_SYMBOL(hci_connect);

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
					!(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (sec_level > conn->sec_level)
		conn->sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
							sizeof(cp), &cp);
	}

	return 0;
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (sec_level == BT_SECURITY_SDP)
		return 1;

	if (sec_level == BT_SECURITY_LOW) {
		if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0)
			return hci_conn_auth(conn, sec_level, auth_type);
		else
			return 1;
	}

	if (conn->link_mode & HCI_LM_ENCRYPT)
		return hci_conn_auth(conn, sec_level, auth_type);

	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return 0;

	if (hci_conn_auth(conn, sec_level, auth_type)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 1;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("conn %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;

	BT_DBG("hdev %s", hdev->name);

	p = h->list.next;
	while (p != &h->list) {
		struct hci_conn *c;

		c = list_entry(p, struct hci_conn, list);
		p = p->next;

		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, 0x16);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}

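/* ioctl helper (HCIGETCONNLIST): copy up to req.conn_num connection info
 * entries for the requested device back to user space. */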
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	struct list_head *p;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	if (!(cl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	if (!(hdev = hci_dev_get(req.dev_id))) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock_bh(hdev);
	list_for_each(p, &hdev->conn_hash.list) {
		register struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock_bh(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

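/* ioctl helper (HCIGETCONNINFO): look up a single connection by type and
 * address and copy its info to user space. */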
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

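/* ioctl helper (HCIGETAUTHINFO): report the authentication requirement in
 * use for the ACL connection to the given address. */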
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}
687