xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision d2999e1b)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI connection handling. */
26 
27 #include <linux/export.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/l2cap.h>
32 
33 #include "smp.h"
34 #include "a2mp.h"
35 
36 struct sco_param {
37 	u16 pkt_type;
38 	u16 max_latency;
39 };
40 
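/* eSCO parameter sets tried by hci_setup_sync(). conn->attempt is used
 * as a 1-based index into these tables, so each retry falls back to the
 * next entry: S3 -> S2 -> S1 -> D1 -> D0 for CVSD air mode and
 * T2 -> T1 for transparent (wideband) air mode.
 */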
41 static const struct sco_param sco_param_cvsd[] = {
42 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */
43 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */
44 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007 }, /* S1 */
45 	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff }, /* D1 */
46 	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff }, /* D0 */
47 };
48 
49 static const struct sco_param sco_param_wideband[] = {
50 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */
51 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008 }, /* T1 */
52 };
53 
54 static void hci_le_create_connection_cancel(struct hci_conn *conn)
55 {
56 	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
57 }
58 
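/* Initiate an outgoing BR/EDR ACL connection. Page scan parameters and
 * the clock offset are taken from the inquiry cache when a reasonably
 * fresh entry exists, and a role switch is requested unless the local
 * link mode requires us to stay master.
 */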
59 static void hci_acl_create_connection(struct hci_conn *conn)
60 {
61 	struct hci_dev *hdev = conn->hdev;
62 	struct inquiry_entry *ie;
63 	struct hci_cp_create_conn cp;
64 
65 	BT_DBG("hcon %p", conn);
66 
67 	conn->state = BT_CONNECT;
68 	conn->out = true;
69 
70 	conn->link_mode = HCI_LM_MASTER;
71 
72 	conn->attempt++;
73 
74 	conn->link_policy = hdev->link_policy;
75 
76 	memset(&cp, 0, sizeof(cp));
77 	bacpy(&cp.bdaddr, &conn->dst);
78 	cp.pscan_rep_mode = 0x02;
79 
80 	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
81 	if (ie) {
82 		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
83 			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
84 			cp.pscan_mode     = ie->data.pscan_mode;
85 			cp.clock_offset   = ie->data.clock_offset |
86 					    cpu_to_le16(0x8000);
87 		}
88 
89 		memcpy(conn->dev_class, ie->data.dev_class, 3);
90 		if (ie->data.ssp_mode > 0)
91 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
92 	}
93 
94 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
95 	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
96 		cp.role_switch = 0x01;
97 	else
98 		cp.role_switch = 0x00;
99 
100 	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
101 }
102 
103 static void hci_acl_create_connection_cancel(struct hci_conn *conn)
104 {
105 	struct hci_cp_create_conn_cancel cp;
106 
107 	BT_DBG("hcon %p", conn);
108 
109 	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
110 		return;
111 
112 	bacpy(&cp.bdaddr, &conn->dst);
113 	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
114 }
115 
116 static void hci_reject_sco(struct hci_conn *conn)
117 {
118 	struct hci_cp_reject_sync_conn_req cp;
119 
120 	cp.reason = HCI_ERROR_REMOTE_USER_TERM;
121 	bacpy(&cp.bdaddr, &conn->dst);
122 
123 	hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
124 }
125 
126 void hci_disconnect(struct hci_conn *conn, __u8 reason)
127 {
128 	struct hci_cp_disconnect cp;
129 
130 	BT_DBG("hcon %p", conn);
131 
132 	conn->state = BT_DISCONN;
133 
134 	cp.handle = cpu_to_le16(conn->handle);
135 	cp.reason = reason;
136 	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
137 }
138 
139 static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
140 {
141 	struct hci_cp_disconn_phy_link cp;
142 
143 	BT_DBG("hcon %p", conn);
144 
145 	conn->state = BT_DISCONN;
146 
147 	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
148 	cp.reason = reason;
149 	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
150 		     sizeof(cp), &cp);
151 }
152 
153 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
154 {
155 	struct hci_dev *hdev = conn->hdev;
156 	struct hci_cp_add_sco cp;
157 
158 	BT_DBG("hcon %p", conn);
159 
160 	conn->state = BT_CONNECT;
161 	conn->out = true;
162 
163 	conn->attempt++;
164 
165 	cp.handle   = cpu_to_le16(handle);
166 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
167 
168 	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
169 }
170 
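/* Set up an (e)SCO link on top of an existing ACL connection. Packet
 * type and max latency come from the sco_param tables above, indexed by
 * conn->attempt, so each retry falls back to the next parameter set.
 * Returns false when the parameter sets for the requested air mode are
 * exhausted or the command could not be sent.
 */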
171 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
172 {
173 	struct hci_dev *hdev = conn->hdev;
174 	struct hci_cp_setup_sync_conn cp;
175 	const struct sco_param *param;
176 
177 	BT_DBG("hcon %p", conn);
178 
179 	conn->state = BT_CONNECT;
180 	conn->out = true;
181 
182 	conn->attempt++;
183 
184 	cp.handle   = cpu_to_le16(handle);
185 
186 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
187 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
188 	cp.voice_setting  = cpu_to_le16(conn->setting);
189 
190 	switch (conn->setting & SCO_AIRMODE_MASK) {
191 	case SCO_AIRMODE_TRANSP:
192 		if (conn->attempt > ARRAY_SIZE(sco_param_wideband))
193 			return false;
194 		cp.retrans_effort = 0x02;
195 		param = &sco_param_wideband[conn->attempt - 1];
196 		break;
197 	case SCO_AIRMODE_CVSD:
198 		if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
199 			return false;
200 		cp.retrans_effort = 0x01;
201 		param = &sco_param_cvsd[conn->attempt - 1];
202 		break;
203 	default:
204 		return false;
205 	}
206 
207 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
208 	cp.max_latency = __cpu_to_le16(param->max_latency);
209 
210 	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
211 		return false;
212 
213 	return true;
214 }
215 
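/* Request new LE connection parameters for an established connection.
 * min/max are the connection interval bounds and to_multiplier is the
 * supervision timeout, passed through to the controller in the units
 * defined by the HCI specification (1.25 ms steps for the interval,
 * 10 ms steps for the timeout).
 */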
216 void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
217 			u16 latency, u16 to_multiplier)
218 {
219 	struct hci_cp_le_conn_update cp;
220 	struct hci_dev *hdev = conn->hdev;
221 
222 	memset(&cp, 0, sizeof(cp));
223 
224 	cp.handle		= cpu_to_le16(conn->handle);
225 	cp.conn_interval_min	= cpu_to_le16(min);
226 	cp.conn_interval_max	= cpu_to_le16(max);
227 	cp.conn_latency		= cpu_to_le16(latency);
228 	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
229 	cp.min_ce_len		= cpu_to_le16(0x0000);
230 	cp.max_ce_len		= cpu_to_le16(0x0000);
231 
232 	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
233 }
234 
235 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
236 		      __u8 ltk[16])
237 {
238 	struct hci_dev *hdev = conn->hdev;
239 	struct hci_cp_le_start_enc cp;
240 
241 	BT_DBG("hcon %p", conn);
242 
243 	memset(&cp, 0, sizeof(cp));
244 
245 	cp.handle = cpu_to_le16(conn->handle);
246 	cp.rand = rand;
247 	cp.ediv = ediv;
248 	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
249 
250 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
251 }
252 
253 /* Device _must_ be locked (the caller holds hdev->lock) */
254 void hci_sco_setup(struct hci_conn *conn, __u8 status)
255 {
256 	struct hci_conn *sco = conn->link;
257 
258 	if (!sco)
259 		return;
260 
261 	BT_DBG("hcon %p", conn);
262 
263 	if (!status) {
264 		if (lmp_esco_capable(conn->hdev))
265 			hci_setup_sync(sco, conn->handle);
266 		else
267 			hci_add_sco(sco, conn->handle);
268 	} else {
269 		hci_proto_connect_cfm(sco, status);
270 		hci_conn_del(sco);
271 	}
272 }
273 
274 static void hci_conn_disconnect(struct hci_conn *conn)
275 {
276 	__u8 reason = hci_proto_disconn_ind(conn);
277 
278 	switch (conn->type) {
279 	case AMP_LINK:
280 		hci_amp_disconn(conn, reason);
281 		break;
282 	default:
283 		hci_disconnect(conn, reason);
284 		break;
285 	}
286 }
287 
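/* Deferred disconnect work for a connection whose reference count has
 * dropped to zero (hci_conn_drop() arms this timer). Depending on the
 * connection state this cancels a pending outgoing connect, rejects an
 * incoming SCO request or disconnects an established link; a connection
 * that regained users in the meantime (refcnt > 0) is left alone.
 */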
288 static void hci_conn_timeout(struct work_struct *work)
289 {
290 	struct hci_conn *conn = container_of(work, struct hci_conn,
291 					     disc_work.work);
292 
293 	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
294 
295 	if (atomic_read(&conn->refcnt))
296 		return;
297 
298 	switch (conn->state) {
299 	case BT_CONNECT:
300 	case BT_CONNECT2:
301 		if (conn->out) {
302 			if (conn->type == ACL_LINK)
303 				hci_acl_create_connection_cancel(conn);
304 			else if (conn->type == LE_LINK)
305 				hci_le_create_connection_cancel(conn);
306 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
307 			hci_reject_sco(conn);
308 		}
309 		break;
310 	case BT_CONFIG:
311 	case BT_CONNECTED:
312 		hci_conn_disconnect(conn);
313 		break;
314 	default:
315 		conn->state = BT_CLOSED;
316 		break;
317 	}
318 }
319 
320 /* Enter sniff mode */
321 static void hci_conn_idle(struct work_struct *work)
322 {
323 	struct hci_conn *conn = container_of(work, struct hci_conn,
324 					     idle_work.work);
325 	struct hci_dev *hdev = conn->hdev;
326 
327 	BT_DBG("hcon %p mode %d", conn, conn->mode);
328 
329 	if (test_bit(HCI_RAW, &hdev->flags))
330 		return;
331 
332 	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
333 		return;
334 
335 	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
336 		return;
337 
338 	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
339 		struct hci_cp_sniff_subrate cp;
340 		cp.handle             = cpu_to_le16(conn->handle);
341 		cp.max_latency        = cpu_to_le16(0);
342 		cp.min_remote_timeout = cpu_to_le16(0);
343 		cp.min_local_timeout  = cpu_to_le16(0);
344 		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
345 	}
346 
347 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
348 		struct hci_cp_sniff_mode cp;
349 		cp.handle       = cpu_to_le16(conn->handle);
350 		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
351 		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
352 		cp.attempt      = cpu_to_le16(4);
353 		cp.timeout      = cpu_to_le16(1);
354 		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
355 	}
356 }
357 
358 static void hci_conn_auto_accept(struct work_struct *work)
359 {
360 	struct hci_conn *conn = container_of(work, struct hci_conn,
361 					     auto_accept_work.work);
362 
363 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
364 		     &conn->dst);
365 }
366 
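/* An outgoing LE connection attempt timed out. If the attempt was made
 * via directed advertising, turn advertising back off and fail the
 * connection; otherwise cancel the pending LE Create Connection.
 */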
367 static void le_conn_timeout(struct work_struct *work)
368 {
369 	struct hci_conn *conn = container_of(work, struct hci_conn,
370 					     le_conn_timeout.work);
371 	struct hci_dev *hdev = conn->hdev;
372 
373 	BT_DBG("");
374 
375 	/* We could end up here due to having done directed advertising,
376 	 * so clean up the state if necessary. This should however only
377 	 * happen with broken hardware or if low duty cycle was used
378 	 * (which doesn't have a timeout of its own).
379 	 */
380 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
381 		u8 enable = 0x00;
382 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
383 			     &enable);
384 		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
385 		return;
386 	}
387 
388 	hci_le_create_connection_cancel(conn);
389 }
390 
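/* Allocate and initialise a new hci_conn object, pick the default
 * packet types for the link type, add it to the connection hash and
 * take a reference on the hci_dev. The connection starts in BT_OPEN
 * with a reference count of zero; callers take their own reference
 * with hci_conn_hold().
 */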
391 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
392 {
393 	struct hci_conn *conn;
394 
395 	BT_DBG("%s dst %pMR", hdev->name, dst);
396 
397 	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
398 	if (!conn)
399 		return NULL;
400 
401 	bacpy(&conn->dst, dst);
402 	bacpy(&conn->src, &hdev->bdaddr);
403 	conn->hdev  = hdev;
404 	conn->type  = type;
405 	conn->mode  = HCI_CM_ACTIVE;
406 	conn->state = BT_OPEN;
407 	conn->auth_type = HCI_AT_GENERAL_BONDING;
408 	conn->io_capability = hdev->io_capability;
409 	conn->remote_auth = 0xff;
410 	conn->key_type = 0xff;
411 	conn->tx_power = HCI_TX_POWER_INVALID;
412 	conn->max_tx_power = HCI_TX_POWER_INVALID;
413 
414 	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
415 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
416 
417 	switch (type) {
418 	case ACL_LINK:
419 		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
420 		break;
421 	case LE_LINK:
422 		/* conn->src should reflect the local identity address */
423 		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
424 		break;
425 	case SCO_LINK:
426 		if (lmp_esco_capable(hdev))
427 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
428 					(hdev->esco_type & EDR_ESCO_MASK);
429 		else
430 			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
431 		break;
432 	case ESCO_LINK:
433 		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
434 		break;
435 	}
436 
437 	skb_queue_head_init(&conn->data_q);
438 
439 	INIT_LIST_HEAD(&conn->chan_list);
440 
441 	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
442 	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
443 	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
444 	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
445 
446 	atomic_set(&conn->refcnt, 0);
447 
448 	hci_dev_hold(hdev);
449 
450 	hci_conn_hash_add(hdev, conn);
451 	if (hdev->notify)
452 		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
453 
454 	hci_conn_init_sysfs(conn);
455 
456 	return conn;
457 }
458 
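/* Tear down a connection object: cancel all deferred work, return any
 * unacknowledged packet credits to the device counters, unlink a paired
 * SCO/ACL connection, flush the channel list and drop the device and
 * connection references taken in hci_conn_add().
 */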
459 int hci_conn_del(struct hci_conn *conn)
460 {
461 	struct hci_dev *hdev = conn->hdev;
462 
463 	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
464 
465 	cancel_delayed_work_sync(&conn->disc_work);
466 	cancel_delayed_work_sync(&conn->auto_accept_work);
467 	cancel_delayed_work_sync(&conn->idle_work);
468 
469 	if (conn->type == ACL_LINK) {
470 		struct hci_conn *sco = conn->link;
471 		if (sco)
472 			sco->link = NULL;
473 
474 		/* Unacked frames */
475 		hdev->acl_cnt += conn->sent;
476 	} else if (conn->type == LE_LINK) {
477 		cancel_delayed_work_sync(&conn->le_conn_timeout);
478 
479 		if (hdev->le_pkts)
480 			hdev->le_cnt += conn->sent;
481 		else
482 			hdev->acl_cnt += conn->sent;
483 	} else {
484 		struct hci_conn *acl = conn->link;
485 		if (acl) {
486 			acl->link = NULL;
487 			hci_conn_drop(acl);
488 		}
489 	}
490 
491 	hci_chan_list_flush(conn);
492 
493 	if (conn->amp_mgr)
494 		amp_mgr_put(conn->amp_mgr);
495 
496 	hci_conn_hash_del(hdev, conn);
497 	if (hdev->notify)
498 		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
499 
500 	skb_queue_purge(&conn->data_q);
501 
502 	hci_conn_del_sysfs(conn);
503 
504 	hci_dev_put(hdev);
505 
506 	hci_conn_put(conn);
507 
508 	return 0;
509 }
510 
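/* Find a powered-up BR/EDR controller suitable for reaching dst, either
 * by exact source address match or, when src is BDADDR_ANY, any
 * controller whose own address differs from dst. A reference is taken
 * on the returned device. Typical caller pattern (a sketch only, not
 * taken from a specific caller):
 *
 *	hdev = hci_get_route(&dst, &src);
 *	if (!hdev)
 *		return -EHOSTUNREACH;
 *	...
 *	hci_dev_put(hdev);
 */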
511 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
512 {
513 	int use_src = bacmp(src, BDADDR_ANY);
514 	struct hci_dev *hdev = NULL, *d;
515 
516 	BT_DBG("%pMR -> %pMR", src, dst);
517 
518 	read_lock(&hci_dev_list_lock);
519 
520 	list_for_each_entry(d, &hci_dev_list, list) {
521 		if (!test_bit(HCI_UP, &d->flags) ||
522 		    test_bit(HCI_RAW, &d->flags) ||
523 		    test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
524 		    d->dev_type != HCI_BREDR)
525 			continue;
526 
527 		/* Simple routing:
528 		 *   No source address - find interface with bdaddr != dst
529 		 *   Source address    - find interface with bdaddr == src
530 		 */
531 
532 		if (use_src) {
533 			if (!bacmp(&d->bdaddr, src)) {
534 				hdev = d; break;
535 			}
536 		} else {
537 			if (bacmp(&d->bdaddr, dst)) {
538 				hdev = d; break;
539 			}
540 		}
541 	}
542 
543 	if (hdev)
544 		hdev = hci_dev_hold(hdev);
545 
546 	read_unlock(&hci_dev_list_lock);
547 	return hdev;
548 }
549 EXPORT_SYMBOL(hci_get_route);
550 
551 /* This function requires the caller to hold hdev->lock */
552 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
553 {
554 	struct hci_dev *hdev = conn->hdev;
555 
556 	conn->state = BT_CLOSED;
557 
558 	mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
559 			    status);
560 
561 	hci_proto_connect_cfm(conn, status);
562 
563 	hci_conn_del(conn);
564 
565 	/* Since we may have temporarily stopped the background scanning in
566 	 * favor of connection establishment, we should restart it.
567 	 */
568 	hci_update_background_scan(hdev);
569 
570 	/* Re-enable advertising in case this was a failed connection
571 	 * attempt as a peripheral.
572 	 */
573 	mgmt_reenable_advertising(hdev);
574 }
575 
576 static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
577 {
578 	struct hci_conn *conn;
579 
580 	if (status == 0)
581 		return;
582 
583 	BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
584 	       status);
585 
586 	hci_dev_lock(hdev);
587 
588 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
589 	if (!conn)
590 		goto done;
591 
592 	hci_le_conn_failed(conn, status);
593 
594 done:
595 	hci_dev_unlock(hdev);
596 }
597 
598 static void hci_req_add_le_create_conn(struct hci_request *req,
599 				       struct hci_conn *conn)
600 {
601 	struct hci_cp_le_create_conn cp;
602 	struct hci_dev *hdev = conn->hdev;
603 	u8 own_addr_type;
604 
605 	memset(&cp, 0, sizeof(cp));
606 
607 	/* Update random address, but set require_privacy to false so
608 	 * that we never connect with an unresolvable address.
609 	 */
610 	if (hci_update_random_address(req, false, &own_addr_type))
611 		return;
612 
613 	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
614 	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
615 	bacpy(&cp.peer_addr, &conn->dst);
616 	cp.peer_addr_type = conn->dst_type;
617 	cp.own_address_type = own_addr_type;
618 	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
619 	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
620 	cp.supervision_timeout = cpu_to_le16(0x002a);
621 	cp.min_ce_len = cpu_to_le16(0x0000);
622 	cp.max_ce_len = cpu_to_le16(0x0000);
623 
624 	hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
625 
626 	conn->state = BT_CONNECT;
627 }
628 
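/* Instead of initiating, connect by advertising directly to the peer:
 * queue commands that disable advertising, reprogram it as
 * ADV_DIRECT_IND towards the connection's destination and enable it
 * again. Used by hci_connect_le() when the controller is currently in
 * an advertising (peripheral) role.
 */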
629 static void hci_req_directed_advertising(struct hci_request *req,
630 					 struct hci_conn *conn)
631 {
632 	struct hci_dev *hdev = req->hdev;
633 	struct hci_cp_le_set_adv_param cp;
634 	u8 own_addr_type;
635 	u8 enable;
636 
637 	enable = 0x00;
638 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
639 
640 	/* Clear the HCI_ADVERTISING bit temporarily so that the
641 	 * hci_update_random_address knows that it's safe to go ahead
642 	 * and write a new random address. The flag will be set back on
643 	 * as soon as the SET_ADV_ENABLE HCI command completes.
644 	 */
645 	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
646 
647 	/* Set require_privacy to false so that the remote device has a
648 	 * chance of identifying us.
649 	 */
650 	if (hci_update_random_address(req, false, &own_addr_type) < 0)
651 		return;
652 
653 	memset(&cp, 0, sizeof(cp));
654 	cp.type = LE_ADV_DIRECT_IND;
655 	cp.own_address_type = own_addr_type;
656 	cp.direct_addr_type = conn->dst_type;
657 	bacpy(&cp.direct_addr, &conn->dst);
658 	cp.channel_map = hdev->le_adv_channel_map;
659 
660 	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
661 
662 	enable = 0x01;
663 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
664 
665 	conn->state = BT_CONNECT;
666 }
667 
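/* Create (or reuse) an LE connection to dst. An existing connection is
 * returned with its pending security level updated, a second concurrent
 * connection attempt yields ERR_PTR(-EBUSY), and known identity
 * addresses are translated to the cached resolvable random address
 * before connecting. The returned connection has a reference held, so
 * callers eventually balance it with hci_conn_drop(). Hypothetical
 * usage sketch (callers are expected to hold hdev->lock):
 *
 *	conn = hci_connect_le(hdev, &dst, ADDR_LE_DEV_PUBLIC,
 *			      BT_SECURITY_MEDIUM, HCI_AT_NO_BONDING);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 */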
668 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
669 				u8 dst_type, u8 sec_level, u8 auth_type)
670 {
671 	struct hci_conn_params *params;
672 	struct hci_conn *conn;
673 	struct smp_irk *irk;
674 	struct hci_request req;
675 	int err;
676 
677 	/* Some devices send ATT messages as soon as the physical link is
678 	 * established. To be able to handle these ATT messages, the user-
679 	 * space first establishes the connection and then starts the pairing
680 	 * process.
681 	 *
682 	 * So if a hci_conn object already exists for the following connection
683 	 * attempt, we simply update pending_sec_level and auth_type fields
684 	 * and return the object found.
685 	 */
686 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
687 	if (conn) {
688 		conn->pending_sec_level = sec_level;
689 		conn->auth_type = auth_type;
690 		goto done;
691 	}
692 
693 	/* Since the controller supports only one LE connection attempt at a
694 	 * time, we return -EBUSY if there is any connection attempt running.
695 	 */
696 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
697 	if (conn)
698 		return ERR_PTR(-EBUSY);
699 
700 	/* When given an identity address with existing identity
701 	 * resolving key, the connection needs to be established
702 	 * to a resolvable random address.
703 	 *
704 	 * This uses the cached random resolvable address from
705 	 * a previous scan. When no cached address is available,
706 	 * try connecting to the identity address instead.
707 	 *
708 	 * Storing the resolvable random address is required here
709 	 * to handle connection failures. The address will later
710 	 * be resolved back into the original identity address
711 	 * from the connect request.
712 	 */
713 	irk = hci_find_irk_by_addr(hdev, dst, dst_type);
714 	if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
715 		dst = &irk->rpa;
716 		dst_type = ADDR_LE_DEV_RANDOM;
717 	}
718 
719 	conn = hci_conn_add(hdev, LE_LINK, dst);
720 	if (!conn)
721 		return ERR_PTR(-ENOMEM);
722 
723 	conn->dst_type = dst_type;
724 	conn->sec_level = BT_SECURITY_LOW;
725 	conn->pending_sec_level = sec_level;
726 	conn->auth_type = auth_type;
727 
728 	hci_req_init(&req, hdev);
729 
730 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
731 		hci_req_directed_advertising(&req, conn);
732 		goto create_conn;
733 	}
734 
735 	conn->out = true;
736 	conn->link_mode |= HCI_LM_MASTER;
737 
738 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
739 	if (params) {
740 		conn->le_conn_min_interval = params->conn_min_interval;
741 		conn->le_conn_max_interval = params->conn_max_interval;
742 	} else {
743 		conn->le_conn_min_interval = hdev->le_conn_min_interval;
744 		conn->le_conn_max_interval = hdev->le_conn_max_interval;
745 	}
746 
747 	/* If controller is scanning, we stop it since some controllers are
748 	 * not able to scan and connect at the same time. Also set the
749 	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
750 	 * handler for scan disabling knows to set the correct discovery
751 	 * state.
752 	 */
753 	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
754 		hci_req_add_le_scan_disable(&req);
755 		set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
756 	}
757 
758 	hci_req_add_le_create_conn(&req, conn);
759 
760 create_conn:
761 	err = hci_req_run(&req, create_le_conn_complete);
762 	if (err) {
763 		hci_conn_del(conn);
764 		return ERR_PTR(err);
765 	}
766 
767 done:
768 	hci_conn_hold(conn);
769 	return conn;
770 }
771 
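/* Look up or create the ACL connection for dst and take a reference on
 * it. If the link is not already being set up, the security parameters
 * are (re)initialised and an HCI Create Connection is issued. Returns
 * ERR_PTR(-ENOTSUPP) when BR/EDR is disabled on the controller.
 */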
772 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
773 				 u8 sec_level, u8 auth_type)
774 {
775 	struct hci_conn *acl;
776 
777 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
778 		return ERR_PTR(-ENOTSUPP);
779 
780 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
781 	if (!acl) {
782 		acl = hci_conn_add(hdev, ACL_LINK, dst);
783 		if (!acl)
784 			return ERR_PTR(-ENOMEM);
785 	}
786 
787 	hci_conn_hold(acl);
788 
789 	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
790 		acl->sec_level = BT_SECURITY_LOW;
791 		acl->pending_sec_level = sec_level;
792 		acl->auth_type = auth_type;
793 		hci_acl_create_connection(acl);
794 	}
795 
796 	return acl;
797 }
798 
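/* Establish a SCO or eSCO link towards dst. The underlying ACL
 * connection is created (or reused) first and the two hci_conn objects
 * are cross-linked; the actual synchronous connection setup is started
 * once the ACL is connected, or deferred until a pending sniff mode
 * change completes.
 */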
799 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
800 				 __u16 setting)
801 {
802 	struct hci_conn *acl;
803 	struct hci_conn *sco;
804 
805 	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
806 	if (IS_ERR(acl))
807 		return acl;
808 
809 	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
810 	if (!sco) {
811 		sco = hci_conn_add(hdev, type, dst);
812 		if (!sco) {
813 			hci_conn_drop(acl);
814 			return ERR_PTR(-ENOMEM);
815 		}
816 	}
817 
818 	acl->link = sco;
819 	sco->link = acl;
820 
821 	hci_conn_hold(sco);
822 
823 	sco->setting = setting;
824 
825 	if (acl->state == BT_CONNECTED &&
826 	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
827 		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
828 		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
829 
830 		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
831 			/* defer SCO setup until mode change completed */
832 			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
833 			return sco;
834 		}
835 
836 		hci_sco_setup(acl, 0x00);
837 	}
838 
839 	return sco;
840 }
841 
842 /* Check link security requirement */
843 int hci_conn_check_link_mode(struct hci_conn *conn)
844 {
845 	BT_DBG("hcon %p", conn);
846 
847 	/* In Secure Connections Only mode, it is required that Secure
848 	 * Connections is used and the link is encrypted with AES-CCM
849 	 * using a P-256 authenticated combination key.
850 	 */
851 	if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
852 		if (!hci_conn_sc_enabled(conn) ||
853 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
854 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
855 			return 0;
856 	}
857 
858 	if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
859 		return 0;
860 
861 	return 1;
862 }
863 
864 /* Authenticate remote device */
865 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
866 {
867 	BT_DBG("hcon %p", conn);
868 
869 	if (conn->pending_sec_level > sec_level)
870 		sec_level = conn->pending_sec_level;
871 
872 	if (sec_level > conn->sec_level)
873 		conn->pending_sec_level = sec_level;
874 	else if (conn->link_mode & HCI_LM_AUTH)
875 		return 1;
876 
877 	/* Make sure we preserve an existing MITM requirement */
878 	auth_type |= (conn->auth_type & 0x01);
879 
880 	conn->auth_type = auth_type;
881 
882 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
883 		struct hci_cp_auth_requested cp;
884 
885 		cp.handle = cpu_to_le16(conn->handle);
886 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
887 			     sizeof(cp), &cp);
888 
889 		/* If we're already encrypted set the REAUTH_PEND flag,
890 		 * otherwise set the ENCRYPT_PEND.
891 		 */
892 		if (conn->link_mode & HCI_LM_ENCRYPT)
893 			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
894 		else
895 			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
896 	}
897 
898 	return 0;
899 }
900 
901 /* Encrypt the link */
902 static void hci_conn_encrypt(struct hci_conn *conn)
903 {
904 	BT_DBG("hcon %p", conn);
905 
906 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
907 		struct hci_cp_set_conn_encrypt cp;
908 		cp.handle  = cpu_to_le16(conn->handle);
909 		cp.encrypt = 0x01;
910 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
911 			     &cp);
912 	}
913 }
914 
915 /* Enable security */
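/*
 * LE links are handed off to SMP via smp_conn_security(). For BR/EDR,
 * return 1 when the link already satisfies the requested security level
 * and 0 when authentication and/or encryption has been started (or is
 * already pending) and the caller should wait for the corresponding HCI
 * events. Which stored link key types are acceptable depends on the
 * requested level, as laid out in the checks below.
 */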
916 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
917 {
918 	BT_DBG("hcon %p", conn);
919 
920 	if (conn->type == LE_LINK)
921 		return smp_conn_security(conn, sec_level);
922 
923 	/* For SDP we don't need the link key. */
924 	if (sec_level == BT_SECURITY_SDP)
925 		return 1;
926 
927 	/* For non-2.1 devices and low security level we don't need the link
928 	   key. */
929 	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
930 		return 1;
931 
932 	/* For other security levels we need the link key. */
933 	if (!(conn->link_mode & HCI_LM_AUTH))
934 		goto auth;
935 
936 	/* An authenticated FIPS-approved combination key has sufficient
937 	 * security for security level 4. */
938 	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
939 	    sec_level == BT_SECURITY_FIPS)
940 		goto encrypt;
941 
942 	/* An authenticated combination key has sufficient security for
943 	   security level 3. */
944 	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
945 	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
946 	    sec_level == BT_SECURITY_HIGH)
947 		goto encrypt;
948 
949 	/* An unauthenticated combination key has sufficient security for
950 	   security levels 1 and 2. */
951 	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
952 	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
953 	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
954 		goto encrypt;
955 
956 	/* A combination key always has sufficient security for security
957 	   levels 1 and 2. A high security level additionally requires that
958 	   the combination key was generated using the maximum PIN code
959 	   length (16). This applies to pre-2.1 units. */
960 	if (conn->key_type == HCI_LK_COMBINATION &&
961 	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
962 	     conn->pin_length == 16))
963 		goto encrypt;
964 
965 auth:
966 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
967 		return 0;
968 
969 	if (!hci_conn_auth(conn, sec_level, auth_type))
970 		return 0;
971 
972 encrypt:
973 	if (conn->link_mode & HCI_LM_ENCRYPT)
974 		return 1;
975 
976 	hci_conn_encrypt(conn);
977 	return 0;
978 }
979 EXPORT_SYMBOL(hci_conn_security);
980 
981 /* Check secure link requirement */
982 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
983 {
984 	BT_DBG("hcon %p", conn);
985 
986 	/* Accept if non-secure or higher security level is required */
987 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
988 		return 1;
989 
990 	/* Accept if secure or higher security level is already present */
991 	if (conn->sec_level == BT_SECURITY_HIGH ||
992 	    conn->sec_level == BT_SECURITY_FIPS)
993 		return 1;
994 
995 	/* Reject not secure link */
996 	return 0;
997 }
998 EXPORT_SYMBOL(hci_conn_check_secure);
999 
1000 /* Change link key */
1001 int hci_conn_change_link_key(struct hci_conn *conn)
1002 {
1003 	BT_DBG("hcon %p", conn);
1004 
1005 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1006 		struct hci_cp_change_conn_link_key cp;
1007 		cp.handle = cpu_to_le16(conn->handle);
1008 		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
1009 			     sizeof(cp), &cp);
1010 	}
1011 
1012 	return 0;
1013 }
1014 
1015 /* Switch role */
1016 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1017 {
1018 	BT_DBG("hcon %p", conn);
1019 
1020 	if (!role && conn->link_mode & HCI_LM_MASTER)
1021 		return 1;
1022 
1023 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1024 		struct hci_cp_switch_role cp;
1025 		bacpy(&cp.bdaddr, &conn->dst);
1026 		cp.role = role;
1027 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1028 	}
1029 
1030 	return 0;
1031 }
1032 EXPORT_SYMBOL(hci_conn_switch_role);
1033 
1034 /* Enter active mode */
1035 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1036 {
1037 	struct hci_dev *hdev = conn->hdev;
1038 
1039 	BT_DBG("hcon %p mode %d", conn, conn->mode);
1040 
1041 	if (test_bit(HCI_RAW, &hdev->flags))
1042 		return;
1043 
1044 	if (conn->mode != HCI_CM_SNIFF)
1045 		goto timer;
1046 
1047 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1048 		goto timer;
1049 
1050 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1051 		struct hci_cp_exit_sniff_mode cp;
1052 		cp.handle = cpu_to_le16(conn->handle);
1053 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1054 	}
1055 
1056 timer:
1057 	if (hdev->idle_timeout > 0)
1058 		queue_delayed_work(hdev->workqueue, &conn->idle_work,
1059 				   msecs_to_jiffies(hdev->idle_timeout));
1060 }
1061 
1062 /* Drop all connections on the device */
1063 void hci_conn_hash_flush(struct hci_dev *hdev)
1064 {
1065 	struct hci_conn_hash *h = &hdev->conn_hash;
1066 	struct hci_conn *c, *n;
1067 
1068 	BT_DBG("hdev %s", hdev->name);
1069 
1070 	list_for_each_entry_safe(c, n, &h->list, list) {
1071 		c->state = BT_CLOSED;
1072 
1073 		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1074 		hci_conn_del(c);
1075 	}
1076 }
1077 
1078 /* Check pending connect attempts */
1079 void hci_conn_check_pending(struct hci_dev *hdev)
1080 {
1081 	struct hci_conn *conn;
1082 
1083 	BT_DBG("hdev %s", hdev->name);
1084 
1085 	hci_dev_lock(hdev);
1086 
1087 	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1088 	if (conn)
1089 		hci_acl_create_connection(conn);
1090 
1091 	hci_dev_unlock(hdev);
1092 }
1093 
1094 int hci_get_conn_list(void __user *arg)
1095 {
1096 	struct hci_conn *c;
1097 	struct hci_conn_list_req req, *cl;
1098 	struct hci_conn_info *ci;
1099 	struct hci_dev *hdev;
1100 	int n = 0, size, err;
1101 
1102 	if (copy_from_user(&req, arg, sizeof(req)))
1103 		return -EFAULT;
1104 
1105 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1106 		return -EINVAL;
1107 
1108 	size = sizeof(req) + req.conn_num * sizeof(*ci);
1109 
1110 	cl = kmalloc(size, GFP_KERNEL);
1111 	if (!cl)
1112 		return -ENOMEM;
1113 
1114 	hdev = hci_dev_get(req.dev_id);
1115 	if (!hdev) {
1116 		kfree(cl);
1117 		return -ENODEV;
1118 	}
1119 
1120 	ci = cl->conn_info;
1121 
1122 	hci_dev_lock(hdev);
1123 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
1124 		bacpy(&(ci + n)->bdaddr, &c->dst);
1125 		(ci + n)->handle = c->handle;
1126 		(ci + n)->type  = c->type;
1127 		(ci + n)->out   = c->out;
1128 		(ci + n)->state = c->state;
1129 		(ci + n)->link_mode = c->link_mode;
1130 		if (++n >= req.conn_num)
1131 			break;
1132 	}
1133 	hci_dev_unlock(hdev);
1134 
1135 	cl->dev_id = hdev->id;
1136 	cl->conn_num = n;
1137 	size = sizeof(req) + n * sizeof(*ci);
1138 
1139 	hci_dev_put(hdev);
1140 
1141 	err = copy_to_user(arg, cl, size);
1142 	kfree(cl);
1143 
1144 	return err ? -EFAULT : 0;
1145 }
1146 
1147 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1148 {
1149 	struct hci_conn_info_req req;
1150 	struct hci_conn_info ci;
1151 	struct hci_conn *conn;
1152 	char __user *ptr = arg + sizeof(req);
1153 
1154 	if (copy_from_user(&req, arg, sizeof(req)))
1155 		return -EFAULT;
1156 
1157 	hci_dev_lock(hdev);
1158 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1159 	if (conn) {
1160 		bacpy(&ci.bdaddr, &conn->dst);
1161 		ci.handle = conn->handle;
1162 		ci.type  = conn->type;
1163 		ci.out   = conn->out;
1164 		ci.state = conn->state;
1165 		ci.link_mode = conn->link_mode;
1166 	}
1167 	hci_dev_unlock(hdev);
1168 
1169 	if (!conn)
1170 		return -ENOENT;
1171 
1172 	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1173 }
1174 
1175 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1176 {
1177 	struct hci_auth_info_req req;
1178 	struct hci_conn *conn;
1179 
1180 	if (copy_from_user(&req, arg, sizeof(req)))
1181 		return -EFAULT;
1182 
1183 	hci_dev_lock(hdev);
1184 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1185 	if (conn)
1186 		req.type = conn->auth_type;
1187 	hci_dev_unlock(hdev);
1188 
1189 	if (!conn)
1190 		return -ENOENT;
1191 
1192 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1193 }
1194 
1195 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1196 {
1197 	struct hci_dev *hdev = conn->hdev;
1198 	struct hci_chan *chan;
1199 
1200 	BT_DBG("%s hcon %p", hdev->name, conn);
1201 
1202 	chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
1203 	if (!chan)
1204 		return NULL;
1205 
1206 	chan->conn = conn;
1207 	skb_queue_head_init(&chan->data_q);
1208 	chan->state = BT_CONNECTED;
1209 
1210 	list_add_rcu(&chan->list, &conn->chan_list);
1211 
1212 	return chan;
1213 }
1214 
1215 void hci_chan_del(struct hci_chan *chan)
1216 {
1217 	struct hci_conn *conn = chan->conn;
1218 	struct hci_dev *hdev = conn->hdev;
1219 
1220 	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1221 
1222 	list_del_rcu(&chan->list);
1223 
1224 	synchronize_rcu();
1225 
1226 	hci_conn_drop(conn);
1227 
1228 	skb_queue_purge(&chan->data_q);
1229 	kfree(chan);
1230 }
1231 
1232 void hci_chan_list_flush(struct hci_conn *conn)
1233 {
1234 	struct hci_chan *chan, *n;
1235 
1236 	BT_DBG("hcon %p", conn);
1237 
1238 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1239 		hci_chan_del(chan);
1240 }
1241 
1242 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1243 						 __u16 handle)
1244 {
1245 	struct hci_chan *hchan;
1246 
1247 	list_for_each_entry(hchan, &hcon->chan_list, list) {
1248 		if (hchan->handle == handle)
1249 			return hchan;
1250 	}
1251 
1252 	return NULL;
1253 }
1254 
1255 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1256 {
1257 	struct hci_conn_hash *h = &hdev->conn_hash;
1258 	struct hci_conn *hcon;
1259 	struct hci_chan *hchan = NULL;
1260 
1261 	rcu_read_lock();
1262 
1263 	list_for_each_entry_rcu(hcon, &h->list, list) {
1264 		hchan = __hci_chan_lookup_handle(hcon, handle);
1265 		if (hchan)
1266 			break;
1267 	}
1268 
1269 	rcu_read_unlock();
1270 
1271 	return hchan;
1272 }
1273