xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision 5a134faeef82b46ff4ad244d11d8c6be41679834)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI connection handling. */
26 
27 #include <linux/export.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 
32 #include "smp.h"
33 #include "a2mp.h"
34 
35 struct sco_param {
36 	u16 pkt_type;
37 	u16 max_latency;
38 };
39 
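/* Parameter tables for synchronous (SCO/eSCO) connection setup. The
 * entries correspond to the parameter sets named in the comments
 * (S1-S3, D0-D1 for CVSD; T1-T2 for transparent/wideband audio).
 * hci_setup_sync() below indexes each table with (conn->attempt - 1),
 * so every retry falls back to the next, more widely supported entry
 * until the table is exhausted and the setup attempt is abandoned.
 */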
40 static const struct sco_param sco_param_cvsd[] = {
41 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */
42 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */
43 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007 }, /* S1 */
44 	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff }, /* D1 */
45 	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff }, /* D0 */
46 };
47 
48 static const struct sco_param sco_param_wideband[] = {
49 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */
50 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008 }, /* T1 */
51 };
52 
53 static void hci_le_create_connection_cancel(struct hci_conn *conn)
54 {
55 	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
56 }
57 
58 static void hci_acl_create_connection(struct hci_conn *conn)
59 {
60 	struct hci_dev *hdev = conn->hdev;
61 	struct inquiry_entry *ie;
62 	struct hci_cp_create_conn cp;
63 
64 	BT_DBG("hcon %p", conn);
65 
66 	conn->state = BT_CONNECT;
67 	conn->out = true;
68 
69 	conn->link_mode = HCI_LM_MASTER;
70 
71 	conn->attempt++;
72 
73 	conn->link_policy = hdev->link_policy;
74 
75 	memset(&cp, 0, sizeof(cp));
76 	bacpy(&cp.bdaddr, &conn->dst);
77 	cp.pscan_rep_mode = 0x02;
78 
79 	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
80 	if (ie) {
81 		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
82 			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
83 			cp.pscan_mode     = ie->data.pscan_mode;
84 			cp.clock_offset   = ie->data.clock_offset |
85 					    cpu_to_le16(0x8000);
86 		}
87 
88 		memcpy(conn->dev_class, ie->data.dev_class, 3);
89 		if (ie->data.ssp_mode > 0)
90 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
91 	}
92 
93 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
94 	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
95 		cp.role_switch = 0x01;
96 	else
97 		cp.role_switch = 0x00;
98 
99 	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
100 }
101 
102 static void hci_acl_create_connection_cancel(struct hci_conn *conn)
103 {
104 	struct hci_cp_create_conn_cancel cp;
105 
106 	BT_DBG("hcon %p", conn);
107 
108 	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
109 		return;
110 
111 	bacpy(&cp.bdaddr, &conn->dst);
112 	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
113 }
114 
115 static void hci_reject_sco(struct hci_conn *conn)
116 {
117 	struct hci_cp_reject_sync_conn_req cp;
118 
119 	cp.reason = HCI_ERROR_REMOTE_USER_TERM;
120 	bacpy(&cp.bdaddr, &conn->dst);
121 
122 	hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
123 }
124 
125 void hci_disconnect(struct hci_conn *conn, __u8 reason)
126 {
127 	struct hci_cp_disconnect cp;
128 
129 	BT_DBG("hcon %p", conn);
130 
131 	conn->state = BT_DISCONN;
132 
133 	cp.handle = cpu_to_le16(conn->handle);
134 	cp.reason = reason;
135 	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
136 }
137 
138 static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
139 {
140 	struct hci_cp_disconn_phy_link cp;
141 
142 	BT_DBG("hcon %p", conn);
143 
144 	conn->state = BT_DISCONN;
145 
146 	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
147 	cp.reason = reason;
148 	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
149 		     sizeof(cp), &cp);
150 }
151 
152 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
153 {
154 	struct hci_dev *hdev = conn->hdev;
155 	struct hci_cp_add_sco cp;
156 
157 	BT_DBG("hcon %p", conn);
158 
159 	conn->state = BT_CONNECT;
160 	conn->out = true;
161 
162 	conn->attempt++;
163 
164 	cp.handle   = cpu_to_le16(handle);
165 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
166 
167 	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
168 }
169 
170 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
171 {
172 	struct hci_dev *hdev = conn->hdev;
173 	struct hci_cp_setup_sync_conn cp;
174 	const struct sco_param *param;
175 
176 	BT_DBG("hcon %p", conn);
177 
178 	conn->state = BT_CONNECT;
179 	conn->out = true;
180 
181 	conn->attempt++;
182 
183 	cp.handle   = cpu_to_le16(handle);
184 
185 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
186 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
187 	cp.voice_setting  = cpu_to_le16(conn->setting);
188 
189 	switch (conn->setting & SCO_AIRMODE_MASK) {
190 	case SCO_AIRMODE_TRANSP:
191 		if (conn->attempt > ARRAY_SIZE(sco_param_wideband))
192 			return false;
193 		cp.retrans_effort = 0x02;
194 		param = &sco_param_wideband[conn->attempt - 1];
195 		break;
196 	case SCO_AIRMODE_CVSD:
197 		if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
198 			return false;
199 		cp.retrans_effort = 0x01;
200 		param = &sco_param_cvsd[conn->attempt - 1];
201 		break;
202 	default:
203 		return false;
204 	}
205 
206 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
207 	cp.max_latency = __cpu_to_le16(param->max_latency);
208 
209 	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
210 		return false;
211 
212 	return true;
213 }
214 
215 void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
216 			u16 latency, u16 to_multiplier)
217 {
218 	struct hci_cp_le_conn_update cp;
219 	struct hci_dev *hdev = conn->hdev;
220 
221 	memset(&cp, 0, sizeof(cp));
222 
223 	cp.handle		= cpu_to_le16(conn->handle);
224 	cp.conn_interval_min	= cpu_to_le16(min);
225 	cp.conn_interval_max	= cpu_to_le16(max);
226 	cp.conn_latency		= cpu_to_le16(latency);
227 	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
228 	cp.min_ce_len		= cpu_to_le16(0x0000);
229 	cp.max_ce_len		= cpu_to_le16(0x0000);
230 
231 	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
232 }
233 
234 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
235 		      __u8 ltk[16])
236 {
237 	struct hci_dev *hdev = conn->hdev;
238 	struct hci_cp_le_start_enc cp;
239 
240 	BT_DBG("hcon %p", conn);
241 
242 	memset(&cp, 0, sizeof(cp));
243 
244 	cp.handle = cpu_to_le16(conn->handle);
245 	cp.rand = rand;
246 	cp.ediv = ediv;
247 	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
248 
249 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
250 }
251 
252 /* Device _must_ be locked */
253 void hci_sco_setup(struct hci_conn *conn, __u8 status)
254 {
255 	struct hci_conn *sco = conn->link;
256 
257 	if (!sco)
258 		return;
259 
260 	BT_DBG("hcon %p", conn);
261 
262 	if (!status) {
263 		if (lmp_esco_capable(conn->hdev))
264 			hci_setup_sync(sco, conn->handle);
265 		else
266 			hci_add_sco(sco, conn->handle);
267 	} else {
268 		hci_proto_connect_cfm(sco, status);
269 		hci_conn_del(sco);
270 	}
271 }
272 
273 static void hci_conn_disconnect(struct hci_conn *conn)
274 {
275 	__u8 reason = hci_proto_disconn_ind(conn);
276 
277 	switch (conn->type) {
278 	case AMP_LINK:
279 		hci_amp_disconn(conn, reason);
280 		break;
281 	default:
282 		hci_disconnect(conn, reason);
283 		break;
284 	}
285 }
286 
287 static void hci_conn_timeout(struct work_struct *work)
288 {
289 	struct hci_conn *conn = container_of(work, struct hci_conn,
290 					     disc_work.work);
291 
292 	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
293 
294 	if (atomic_read(&conn->refcnt))
295 		return;
296 
297 	switch (conn->state) {
298 	case BT_CONNECT:
299 	case BT_CONNECT2:
300 		if (conn->out) {
301 			if (conn->type == ACL_LINK)
302 				hci_acl_create_connection_cancel(conn);
303 			else if (conn->type == LE_LINK)
304 				hci_le_create_connection_cancel(conn);
305 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
306 			hci_reject_sco(conn);
307 		}
308 		break;
309 	case BT_CONFIG:
310 	case BT_CONNECTED:
311 		hci_conn_disconnect(conn);
312 		break;
313 	default:
314 		conn->state = BT_CLOSED;
315 		break;
316 	}
317 }
318 
319 /* Enter sniff mode */
320 static void hci_conn_idle(struct work_struct *work)
321 {
322 	struct hci_conn *conn = container_of(work, struct hci_conn,
323 					     idle_work.work);
324 	struct hci_dev *hdev = conn->hdev;
325 
326 	BT_DBG("hcon %p mode %d", conn, conn->mode);
327 
328 	if (test_bit(HCI_RAW, &hdev->flags))
329 		return;
330 
331 	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
332 		return;
333 
334 	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
335 		return;
336 
337 	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
338 		struct hci_cp_sniff_subrate cp;
339 		cp.handle             = cpu_to_le16(conn->handle);
340 		cp.max_latency        = cpu_to_le16(0);
341 		cp.min_remote_timeout = cpu_to_le16(0);
342 		cp.min_local_timeout  = cpu_to_le16(0);
343 		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
344 	}
345 
346 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
347 		struct hci_cp_sniff_mode cp;
348 		cp.handle       = cpu_to_le16(conn->handle);
349 		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
350 		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
351 		cp.attempt      = cpu_to_le16(4);
352 		cp.timeout      = cpu_to_le16(1);
353 		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
354 	}
355 }
356 
357 static void hci_conn_auto_accept(struct work_struct *work)
358 {
359 	struct hci_conn *conn = container_of(work, struct hci_conn,
360 					     auto_accept_work.work);
361 
362 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
363 		     &conn->dst);
364 }
365 
366 static void le_conn_timeout(struct work_struct *work)
367 {
368 	struct hci_conn *conn = container_of(work, struct hci_conn,
369 					     le_conn_timeout.work);
370 	struct hci_dev *hdev = conn->hdev;
371 
372 	BT_DBG("");
373 
374 	/* We could end up here due to having done directed advertising,
375 	 * so clean up the state if necessary. This should however only
376 	 * happen with broken hardware or if low duty cycle was used
377 	 * (which doesn't have a timeout of its own).
378 	 */
379 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
380 		u8 enable = 0x00;
381 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
382 			     &enable);
383 		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
384 		return;
385 	}
386 
387 	hci_le_create_connection_cancel(conn);
388 }
389 
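/* Connection object lifetime (a sketch, based on the helpers used in
 * this file): hci_conn_add() allocates the object with a zero refcount
 * and registers it in the device's connection hash. Users take
 * references with hci_conn_hold() and release them with
 * hci_conn_drop(); once the refcount reaches zero, the deferred
 * disc_work (hci_conn_timeout above) is expected to cancel or tear
 * down the link, and hci_conn_del() finally removes the connection
 * from the hash and frees it.
 */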
390 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
391 {
392 	struct hci_conn *conn;
393 
394 	BT_DBG("%s dst %pMR", hdev->name, dst);
395 
396 	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
397 	if (!conn)
398 		return NULL;
399 
400 	bacpy(&conn->dst, dst);
401 	bacpy(&conn->src, &hdev->bdaddr);
402 	conn->hdev  = hdev;
403 	conn->type  = type;
404 	conn->mode  = HCI_CM_ACTIVE;
405 	conn->state = BT_OPEN;
406 	conn->auth_type = HCI_AT_GENERAL_BONDING;
407 	conn->io_capability = hdev->io_capability;
408 	conn->remote_auth = 0xff;
409 	conn->key_type = 0xff;
410 	conn->tx_power = HCI_TX_POWER_INVALID;
411 
412 	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
413 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
414 
415 	switch (type) {
416 	case ACL_LINK:
417 		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
418 		break;
419 	case LE_LINK:
420 		/* conn->src should reflect the local identity address */
421 		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
422 		break;
423 	case SCO_LINK:
424 		if (lmp_esco_capable(hdev))
425 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
426 					(hdev->esco_type & EDR_ESCO_MASK);
427 		else
428 			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
429 		break;
430 	case ESCO_LINK:
431 		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
432 		break;
433 	}
434 
435 	skb_queue_head_init(&conn->data_q);
436 
437 	INIT_LIST_HEAD(&conn->chan_list);
438 
439 	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
440 	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
441 	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
442 	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
443 
444 	atomic_set(&conn->refcnt, 0);
445 
446 	hci_dev_hold(hdev);
447 
448 	hci_conn_hash_add(hdev, conn);
449 	if (hdev->notify)
450 		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
451 
452 	hci_conn_init_sysfs(conn);
453 
454 	return conn;
455 }
456 
457 int hci_conn_del(struct hci_conn *conn)
458 {
459 	struct hci_dev *hdev = conn->hdev;
460 
461 	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
462 
463 	cancel_delayed_work_sync(&conn->disc_work);
464 	cancel_delayed_work_sync(&conn->auto_accept_work);
465 	cancel_delayed_work_sync(&conn->idle_work);
466 
467 	if (conn->type == ACL_LINK) {
468 		struct hci_conn *sco = conn->link;
469 		if (sco)
470 			sco->link = NULL;
471 
472 		/* Unacked frames */
473 		hdev->acl_cnt += conn->sent;
474 	} else if (conn->type == LE_LINK) {
475 		cancel_delayed_work_sync(&conn->le_conn_timeout);
476 
477 		if (hdev->le_pkts)
478 			hdev->le_cnt += conn->sent;
479 		else
480 			hdev->acl_cnt += conn->sent;
481 	} else {
482 		struct hci_conn *acl = conn->link;
483 		if (acl) {
484 			acl->link = NULL;
485 			hci_conn_drop(acl);
486 		}
487 	}
488 
489 	hci_chan_list_flush(conn);
490 
491 	if (conn->amp_mgr)
492 		amp_mgr_put(conn->amp_mgr);
493 
494 	hci_conn_hash_del(hdev, conn);
495 	if (hdev->notify)
496 		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
497 
498 	skb_queue_purge(&conn->data_q);
499 
500 	hci_conn_del_sysfs(conn);
501 
502 	hci_dev_put(hdev);
503 
504 	hci_conn_put(conn);
505 
506 	return 0;
507 }
508 
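/* Pick a BR/EDR controller for a route from src to dst. Note for
 * callers: the returned hci_dev has had its reference count raised via
 * hci_dev_hold(), so it must be released with hci_dev_put() when no
 * longer needed.
 */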
509 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
510 {
511 	int use_src = bacmp(src, BDADDR_ANY);
512 	struct hci_dev *hdev = NULL, *d;
513 
514 	BT_DBG("%pMR -> %pMR", src, dst);
515 
516 	read_lock(&hci_dev_list_lock);
517 
518 	list_for_each_entry(d, &hci_dev_list, list) {
519 		if (!test_bit(HCI_UP, &d->flags) ||
520 		    test_bit(HCI_RAW, &d->flags) ||
521 		    test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
522 		    d->dev_type != HCI_BREDR)
523 			continue;
524 
525 		/* Simple routing:
526 		 *   No source address - find interface with bdaddr != dst
527 		 *   Source address    - find interface with bdaddr == src
528 		 */
529 
530 		if (use_src) {
531 			if (!bacmp(&d->bdaddr, src)) {
532 				hdev = d; break;
533 			}
534 		} else {
535 			if (bacmp(&d->bdaddr, dst)) {
536 				hdev = d; break;
537 			}
538 		}
539 	}
540 
541 	if (hdev)
542 		hdev = hci_dev_hold(hdev);
543 
544 	read_unlock(&hci_dev_list_lock);
545 	return hdev;
546 }
547 EXPORT_SYMBOL(hci_get_route);
548 
549 /* This function requires the caller holds hdev->lock */
550 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
551 {
552 	struct hci_dev *hdev = conn->hdev;
553 
554 	conn->state = BT_CLOSED;
555 
556 	mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
557 			    status);
558 
559 	hci_proto_connect_cfm(conn, status);
560 
561 	hci_conn_del(conn);
562 
563 	/* Since we may have temporarily stopped the background scanning in
564 	 * favor of connection establishment, we should restart it.
565 	 */
566 	hci_update_background_scan(hdev);
567 
568 	/* Re-enable advertising in case this was a failed connection
569 	 * attempt as a peripheral.
570 	 */
571 	mgmt_reenable_advertising(hdev);
572 }
573 
574 static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
575 {
576 	struct hci_conn *conn;
577 
578 	if (status == 0)
579 		return;
580 
581 	BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
582 	       status);
583 
584 	hci_dev_lock(hdev);
585 
586 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
587 	if (!conn)
588 		goto done;
589 
590 	hci_le_conn_failed(conn, status);
591 
592 done:
593 	hci_dev_unlock(hdev);
594 }
595 
596 static void hci_req_add_le_create_conn(struct hci_request *req,
597 				       struct hci_conn *conn)
598 {
599 	struct hci_cp_le_create_conn cp;
600 	struct hci_dev *hdev = conn->hdev;
601 	u8 own_addr_type;
602 
603 	memset(&cp, 0, sizeof(cp));
604 
605 	/* Update random address, but set require_privacy to false so
606 	 * that we never connect with an unresolvable address.
607 	 */
608 	if (hci_update_random_address(req, false, &own_addr_type))
609 		return;
610 
611 	/* Save the address type used for this connection attempt so we
612 	 * are able to retrieve this information if we need it.
613 	 */
614 	conn->src_type = own_addr_type;
615 
616 	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
617 	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
618 	bacpy(&cp.peer_addr, &conn->dst);
619 	cp.peer_addr_type = conn->dst_type;
620 	cp.own_address_type = own_addr_type;
621 	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
622 	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
623 	cp.supervision_timeout = cpu_to_le16(0x002a);
624 	cp.min_ce_len = cpu_to_le16(0x0000);
625 	cp.max_ce_len = cpu_to_le16(0x0000);
626 
627 	hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
628 
629 	conn->state = BT_CONNECT;
630 }
631 
632 static void hci_req_directed_advertising(struct hci_request *req,
633 					 struct hci_conn *conn)
634 {
635 	struct hci_dev *hdev = req->hdev;
636 	struct hci_cp_le_set_adv_param cp;
637 	u8 own_addr_type;
638 	u8 enable;
639 
640 	enable = 0x00;
641 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
642 
643 	/* Clear the HCI_ADVERTISING bit temporarily so that
644 	 * hci_update_random_address knows that it's safe to go ahead
645 	 * and write a new random address. The flag will be set back on
646 	 * as soon as the SET_ADV_ENABLE HCI command completes.
647 	 */
648 	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
649 
650 	/* Set require_privacy to false so that the remote device has a
651 	 * chance of identifying us.
652 	 */
653 	if (hci_update_random_address(req, false, &own_addr_type) < 0)
654 		return;
655 
656 	memset(&cp, 0, sizeof(cp));
657 	cp.type = LE_ADV_DIRECT_IND;
658 	cp.own_address_type = own_addr_type;
659 	cp.direct_addr_type = conn->dst_type;
660 	bacpy(&cp.direct_addr, &conn->dst);
661 	cp.channel_map = hdev->le_adv_channel_map;
662 
663 	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
664 
665 	enable = 0x01;
666 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
667 
668 	conn->state = BT_CONNECT;
669 }
670 
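/* Create (or reuse) an LE connection to dst. Rough flow, as implemented
 * below: if a hci_conn for dst already exists it is reused; if another
 * LE connection attempt is already running, ERR_PTR(-EBUSY) is
 * returned; otherwise either directed advertising is started (when the
 * HCI_ADVERTISING flag is set) or an LE Create Connection request is
 * issued. On success the returned connection has been hci_conn_hold()'d,
 * so the caller owns a reference.
 */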
671 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
672 				u8 dst_type, u8 sec_level, u8 auth_type)
673 {
674 	struct hci_conn_params *params;
675 	struct hci_conn *conn;
676 	struct smp_irk *irk;
677 	struct hci_request req;
678 	int err;
679 
680 	/* Some devices send ATT messages as soon as the physical link is
681 	 * established. To be able to handle these ATT messages, user
682 	 * space first establishes the connection and then starts the pairing
683 	 * process.
684 	 *
685 	 * So if a hci_conn object already exists for the following connection
686 	 * attempt, we simply update pending_sec_level and auth_type fields
687 	 * and return the object found.
688 	 */
689 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
690 	if (conn) {
691 		conn->pending_sec_level = sec_level;
692 		conn->auth_type = auth_type;
693 		goto done;
694 	}
695 
696 	/* Since the controller supports only one LE connection attempt at a
697 	 * time, we return -EBUSY if there is any connection attempt running.
698 	 */
699 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
700 	if (conn)
701 		return ERR_PTR(-EBUSY);
702 
703 	/* When given an identity address with existing identity
704 	 * resolving key, the connection needs to be established
705 	 * to a resolvable random address.
706 	 *
707 	 * This uses the cached random resolvable address from
708 	 * a previous scan. When no cached address is available,
709 	 * try connecting to the identity address instead.
710 	 *
711 	 * Storing the resolvable random address is required here
712 	 * to handle connection failures. The address will later
713 	 * be resolved back into the original identity address
714 	 * from the connect request.
715 	 */
716 	irk = hci_find_irk_by_addr(hdev, dst, dst_type);
717 	if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
718 		dst = &irk->rpa;
719 		dst_type = ADDR_LE_DEV_RANDOM;
720 	}
721 
722 	conn = hci_conn_add(hdev, LE_LINK, dst);
723 	if (!conn)
724 		return ERR_PTR(-ENOMEM);
725 
726 	conn->dst_type = dst_type;
727 	conn->sec_level = BT_SECURITY_LOW;
728 	conn->pending_sec_level = sec_level;
729 	conn->auth_type = auth_type;
730 
731 	hci_req_init(&req, hdev);
732 
733 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
734 		hci_req_directed_advertising(&req, conn);
735 		goto create_conn;
736 	}
737 
738 	conn->out = true;
739 	conn->link_mode |= HCI_LM_MASTER;
740 
741 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
742 	if (params) {
743 		conn->le_conn_min_interval = params->conn_min_interval;
744 		conn->le_conn_max_interval = params->conn_max_interval;
745 	} else {
746 		conn->le_conn_min_interval = hdev->le_conn_min_interval;
747 		conn->le_conn_max_interval = hdev->le_conn_max_interval;
748 	}
749 
750 	/* If the controller is scanning, we stop it since some controllers are
751 	 * not able to scan and connect at the same time. Also set the
752 	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
753 	 * handler for scan disabling knows to set the correct discovery
754 	 * state.
755 	 */
756 	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
757 		hci_req_add_le_scan_disable(&req);
758 		set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
759 	}
760 
761 	hci_req_add_le_create_conn(&req, conn);
762 
763 create_conn:
764 	err = hci_req_run(&req, create_le_conn_complete);
765 	if (err) {
766 		hci_conn_del(conn);
767 		return ERR_PTR(err);
768 	}
769 
770 done:
771 	hci_conn_hold(conn);
772 	return conn;
773 }
774 
775 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
776 				 u8 sec_level, u8 auth_type)
777 {
778 	struct hci_conn *acl;
779 
780 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
781 		return ERR_PTR(-ENOTSUPP);
782 
783 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
784 	if (!acl) {
785 		acl = hci_conn_add(hdev, ACL_LINK, dst);
786 		if (!acl)
787 			return ERR_PTR(-ENOMEM);
788 	}
789 
790 	hci_conn_hold(acl);
791 
792 	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
793 		acl->sec_level = BT_SECURITY_LOW;
794 		acl->pending_sec_level = sec_level;
795 		acl->auth_type = auth_type;
796 		hci_acl_create_connection(acl);
797 	}
798 
799 	return acl;
800 }
801 
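/* Set up a SCO or eSCO link on top of an ACL link to dst. The ACL
 * connection is created (or looked up) first; the synchronous setup
 * itself is deferred via HCI_CONN_SCO_SETUP_PEND while a mode change on
 * the ACL link is still pending, and completed later from
 * hci_sco_setup().
 */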
802 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
803 				 __u16 setting)
804 {
805 	struct hci_conn *acl;
806 	struct hci_conn *sco;
807 
808 	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
809 	if (IS_ERR(acl))
810 		return acl;
811 
812 	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
813 	if (!sco) {
814 		sco = hci_conn_add(hdev, type, dst);
815 		if (!sco) {
816 			hci_conn_drop(acl);
817 			return ERR_PTR(-ENOMEM);
818 		}
819 	}
820 
821 	acl->link = sco;
822 	sco->link = acl;
823 
824 	hci_conn_hold(sco);
825 
826 	sco->setting = setting;
827 
828 	if (acl->state == BT_CONNECTED &&
829 	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
830 		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
831 		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
832 
833 		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
834 			/* defer SCO setup until mode change completed */
835 			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
836 			return sco;
837 		}
838 
839 		hci_sco_setup(acl, 0x00);
840 	}
841 
842 	return sco;
843 }
844 
845 /* Check link security requirement */
846 int hci_conn_check_link_mode(struct hci_conn *conn)
847 {
848 	BT_DBG("hcon %p", conn);
849 
850 	/* In Secure Connections Only mode, it is required that Secure
851 	 * Connections is used and the link is encrypted with AES-CCM
852 	 * using a P-256 authenticated combination key.
853 	 */
854 	if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
855 		if (!hci_conn_sc_enabled(conn) ||
856 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
857 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
858 			return 0;
859 	}
860 
861 	if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
862 		return 0;
863 
864 	return 1;
865 }
866 
867 /* Authenticate remote device */
868 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
869 {
870 	BT_DBG("hcon %p", conn);
871 
872 	if (conn->pending_sec_level > sec_level)
873 		sec_level = conn->pending_sec_level;
874 
875 	if (sec_level > conn->sec_level)
876 		conn->pending_sec_level = sec_level;
877 	else if (conn->link_mode & HCI_LM_AUTH)
878 		return 1;
879 
880 	/* Make sure we preserve an existing MITM requirement */
881 	auth_type |= (conn->auth_type & 0x01);
882 
883 	conn->auth_type = auth_type;
884 
885 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
886 		struct hci_cp_auth_requested cp;
887 
888 		/* encrypt must be pending if auth is also pending */
889 		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
890 
891 		cp.handle = cpu_to_le16(conn->handle);
892 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
893 			     sizeof(cp), &cp);
894 		if (conn->key_type != 0xff)
895 			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
896 	}
897 
898 	return 0;
899 }
900 
901 /* Encrypt the link */
902 static void hci_conn_encrypt(struct hci_conn *conn)
903 {
904 	BT_DBG("hcon %p", conn);
905 
906 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
907 		struct hci_cp_set_conn_encrypt cp;
908 		cp.handle  = cpu_to_le16(conn->handle);
909 		cp.encrypt = 0x01;
910 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
911 			     &cp);
912 	}
913 }
914 
915 /* Enable security */
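/* For LE links, security is delegated to smp_conn_security(). For
 * BR/EDR links this returns 1 when the requested security level is
 * already satisfied and 0 when an authentication and/or encryption
 * request has been issued instead; completion is then signalled
 * asynchronously once the controller responds.
 */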
916 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
917 {
918 	BT_DBG("hcon %p", conn);
919 
920 	if (conn->type == LE_LINK)
921 		return smp_conn_security(conn, sec_level);
922 
923 	/* For sdp we don't need the link key. */
924 	if (sec_level == BT_SECURITY_SDP)
925 		return 1;
926 
927 	/* For non-2.1 devices and low security levels we don't need the
928 	   link key. */
929 	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
930 		return 1;
931 
932 	/* For other security levels we need the link key. */
933 	if (!(conn->link_mode & HCI_LM_AUTH))
934 		goto auth;
935 
936 	/* An authenticated FIPS-approved combination key has sufficient
937 	 * security for security level 4. */
938 	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
939 	    sec_level == BT_SECURITY_FIPS)
940 		goto encrypt;
941 
942 	/* An authenticated combination key has sufficient security for
943 	   security level 3. */
944 	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
945 	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
946 	    sec_level == BT_SECURITY_HIGH)
947 		goto encrypt;
948 
949 	/* An unauthenticated combination key has sufficient security for
950 	   security level 1 and 2. */
951 	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
952 	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
953 	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
954 		goto encrypt;
955 
956 	/* A combination key always has sufficient security for security
957 	   levels 1 and 2. High security level requires that the combination
958 	   key is generated using the maximum PIN code length (16).
959 	   For pre-2.1 units. */
960 	if (conn->key_type == HCI_LK_COMBINATION &&
961 	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
962 	     conn->pin_length == 16))
963 		goto encrypt;
964 
965 auth:
966 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
967 		return 0;
968 
969 	if (!hci_conn_auth(conn, sec_level, auth_type))
970 		return 0;
971 
972 encrypt:
973 	if (conn->link_mode & HCI_LM_ENCRYPT)
974 		return 1;
975 
976 	hci_conn_encrypt(conn);
977 	return 0;
978 }
979 EXPORT_SYMBOL(hci_conn_security);
980 
981 /* Check secure link requirement */
982 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
983 {
984 	BT_DBG("hcon %p", conn);
985 
986 	/* Accept if non-secure or higher security level is required */
987 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
988 		return 1;
989 
990 	/* Accept if secure or higher security level is already present */
991 	if (conn->sec_level == BT_SECURITY_HIGH ||
992 	    conn->sec_level == BT_SECURITY_FIPS)
993 		return 1;
994 
995 	/* Reject not secure link */
996 	return 0;
997 }
998 EXPORT_SYMBOL(hci_conn_check_secure);
999 
1000 /* Change link key */
1001 int hci_conn_change_link_key(struct hci_conn *conn)
1002 {
1003 	BT_DBG("hcon %p", conn);
1004 
1005 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1006 		struct hci_cp_change_conn_link_key cp;
1007 		cp.handle = cpu_to_le16(conn->handle);
1008 		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
1009 			     sizeof(cp), &cp);
1010 	}
1011 
1012 	return 0;
1013 }
1014 
1015 /* Switch role */
1016 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1017 {
1018 	BT_DBG("hcon %p", conn);
1019 
1020 	if (!role && conn->link_mode & HCI_LM_MASTER)
1021 		return 1;
1022 
1023 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1024 		struct hci_cp_switch_role cp;
1025 		bacpy(&cp.bdaddr, &conn->dst);
1026 		cp.role = role;
1027 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1028 	}
1029 
1030 	return 0;
1031 }
1032 EXPORT_SYMBOL(hci_conn_switch_role);
1033 
1034 /* Enter active mode */
1035 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1036 {
1037 	struct hci_dev *hdev = conn->hdev;
1038 
1039 	BT_DBG("hcon %p mode %d", conn, conn->mode);
1040 
1041 	if (test_bit(HCI_RAW, &hdev->flags))
1042 		return;
1043 
1044 	if (conn->mode != HCI_CM_SNIFF)
1045 		goto timer;
1046 
1047 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1048 		goto timer;
1049 
1050 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1051 		struct hci_cp_exit_sniff_mode cp;
1052 		cp.handle = cpu_to_le16(conn->handle);
1053 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1054 	}
1055 
1056 timer:
1057 	if (hdev->idle_timeout > 0)
1058 		queue_delayed_work(hdev->workqueue, &conn->idle_work,
1059 				   msecs_to_jiffies(hdev->idle_timeout));
1060 }
1061 
1062 /* Drop all connections on the device */
1063 void hci_conn_hash_flush(struct hci_dev *hdev)
1064 {
1065 	struct hci_conn_hash *h = &hdev->conn_hash;
1066 	struct hci_conn *c, *n;
1067 
1068 	BT_DBG("hdev %s", hdev->name);
1069 
1070 	list_for_each_entry_safe(c, n, &h->list, list) {
1071 		c->state = BT_CLOSED;
1072 
1073 		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1074 		hci_conn_del(c);
1075 	}
1076 }
1077 
1078 /* Check pending connect attempts */
1079 void hci_conn_check_pending(struct hci_dev *hdev)
1080 {
1081 	struct hci_conn *conn;
1082 
1083 	BT_DBG("hdev %s", hdev->name);
1084 
1085 	hci_dev_lock(hdev);
1086 
1087 	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1088 	if (conn)
1089 		hci_acl_create_connection(conn);
1090 
1091 	hci_dev_unlock(hdev);
1092 }
1093 
1094 int hci_get_conn_list(void __user *arg)
1095 {
1096 	struct hci_conn *c;
1097 	struct hci_conn_list_req req, *cl;
1098 	struct hci_conn_info *ci;
1099 	struct hci_dev *hdev;
1100 	int n = 0, size, err;
1101 
1102 	if (copy_from_user(&req, arg, sizeof(req)))
1103 		return -EFAULT;
1104 
1105 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1106 		return -EINVAL;
1107 
1108 	size = sizeof(req) + req.conn_num * sizeof(*ci);
1109 
1110 	cl = kmalloc(size, GFP_KERNEL);
1111 	if (!cl)
1112 		return -ENOMEM;
1113 
1114 	hdev = hci_dev_get(req.dev_id);
1115 	if (!hdev) {
1116 		kfree(cl);
1117 		return -ENODEV;
1118 	}
1119 
1120 	ci = cl->conn_info;
1121 
1122 	hci_dev_lock(hdev);
1123 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
1124 		bacpy(&(ci + n)->bdaddr, &c->dst);
1125 		(ci + n)->handle = c->handle;
1126 		(ci + n)->type  = c->type;
1127 		(ci + n)->out   = c->out;
1128 		(ci + n)->state = c->state;
1129 		(ci + n)->link_mode = c->link_mode;
1130 		if (++n >= req.conn_num)
1131 			break;
1132 	}
1133 	hci_dev_unlock(hdev);
1134 
1135 	cl->dev_id = hdev->id;
1136 	cl->conn_num = n;
1137 	size = sizeof(req) + n * sizeof(*ci);
1138 
1139 	hci_dev_put(hdev);
1140 
1141 	err = copy_to_user(arg, cl, size);
1142 	kfree(cl);
1143 
1144 	return err ? -EFAULT : 0;
1145 }
1146 
1147 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1148 {
1149 	struct hci_conn_info_req req;
1150 	struct hci_conn_info ci;
1151 	struct hci_conn *conn;
1152 	char __user *ptr = arg + sizeof(req);
1153 
1154 	if (copy_from_user(&req, arg, sizeof(req)))
1155 		return -EFAULT;
1156 
1157 	hci_dev_lock(hdev);
1158 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1159 	if (conn) {
1160 		bacpy(&ci.bdaddr, &conn->dst);
1161 		ci.handle = conn->handle;
1162 		ci.type  = conn->type;
1163 		ci.out   = conn->out;
1164 		ci.state = conn->state;
1165 		ci.link_mode = conn->link_mode;
1166 	}
1167 	hci_dev_unlock(hdev);
1168 
1169 	if (!conn)
1170 		return -ENOENT;
1171 
1172 	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1173 }
1174 
1175 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1176 {
1177 	struct hci_auth_info_req req;
1178 	struct hci_conn *conn;
1179 
1180 	if (copy_from_user(&req, arg, sizeof(req)))
1181 		return -EFAULT;
1182 
1183 	hci_dev_lock(hdev);
1184 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1185 	if (conn)
1186 		req.type = conn->auth_type;
1187 	hci_dev_unlock(hdev);
1188 
1189 	if (!conn)
1190 		return -ENOENT;
1191 
1192 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1193 }
1194 
1195 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1196 {
1197 	struct hci_dev *hdev = conn->hdev;
1198 	struct hci_chan *chan;
1199 
1200 	BT_DBG("%s hcon %p", hdev->name, conn);
1201 
1202 	chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
1203 	if (!chan)
1204 		return NULL;
1205 
1206 	chan->conn = conn;
1207 	skb_queue_head_init(&chan->data_q);
1208 	chan->state = BT_CONNECTED;
1209 
1210 	list_add_rcu(&chan->list, &conn->chan_list);
1211 
1212 	return chan;
1213 }
1214 
1215 void hci_chan_del(struct hci_chan *chan)
1216 {
1217 	struct hci_conn *conn = chan->conn;
1218 	struct hci_dev *hdev = conn->hdev;
1219 
1220 	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1221 
1222 	list_del_rcu(&chan->list);
1223 
1224 	synchronize_rcu();
1225 
1226 	hci_conn_drop(conn);
1227 
1228 	skb_queue_purge(&chan->data_q);
1229 	kfree(chan);
1230 }
1231 
1232 void hci_chan_list_flush(struct hci_conn *conn)
1233 {
1234 	struct hci_chan *chan, *n;
1235 
1236 	BT_DBG("hcon %p", conn);
1237 
1238 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1239 		hci_chan_del(chan);
1240 }
1241 
1242 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1243 						 __u16 handle)
1244 {
1245 	struct hci_chan *hchan;
1246 
1247 	list_for_each_entry(hchan, &hcon->chan_list, list) {
1248 		if (hchan->handle == handle)
1249 			return hchan;
1250 	}
1251 
1252 	return NULL;
1253 }
1254 
1255 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1256 {
1257 	struct hci_conn_hash *h = &hdev->conn_hash;
1258 	struct hci_conn *hcon;
1259 	struct hci_chan *hchan = NULL;
1260 
1261 	rcu_read_lock();
1262 
1263 	list_for_each_entry_rcu(hcon, &h->list, list) {
1264 		hchan = __hci_chan_lookup_handle(hcon, handle);
1265 		if (hchan)
1266 			break;
1267 	}
1268 
1269 	rcu_read_unlock();
1270 
1271 	return hchan;
1272 }
1273