xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision 206a81c1)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI connection handling. */
26 
27 #include <linux/export.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/l2cap.h>
32 
33 #include "smp.h"
34 #include "a2mp.h"
35 
36 struct sco_param {
37 	u16 pkt_type;
38 	u16 max_latency;
39 };
40 
41 static const struct sco_param sco_param_cvsd[] = {
42 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */
43 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */
44 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007 }, /* S1 */
45 	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff }, /* D1 */
46 	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff }, /* D0 */
47 };
48 
49 static const struct sco_param sco_param_wideband[] = {
50 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */
51 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008 }, /* T1 */
52 };
53 
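/*
 * hci_setup_sync() below walks these tables using conn->attempt as a
 * 1-based index, so the entries are ordered from the most preferred
 * parameter set down to the least preferred fallback. Roughly:
 *
 *	param = &sco_param_cvsd[conn->attempt - 1];	(S3, then S2, ...)
 *	cp.pkt_type    = __cpu_to_le16(param->pkt_type);
 *	cp.max_latency = __cpu_to_le16(param->max_latency);
 */
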
54 static void hci_le_create_connection_cancel(struct hci_conn *conn)
55 {
56 	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
57 }
58 
59 static void hci_acl_create_connection(struct hci_conn *conn)
60 {
61 	struct hci_dev *hdev = conn->hdev;
62 	struct inquiry_entry *ie;
63 	struct hci_cp_create_conn cp;
64 
65 	BT_DBG("hcon %p", conn);
66 
67 	conn->state = BT_CONNECT;
68 	conn->out = true;
69 
70 	conn->link_mode = HCI_LM_MASTER;
71 
72 	conn->attempt++;
73 
74 	conn->link_policy = hdev->link_policy;
75 
76 	memset(&cp, 0, sizeof(cp));
77 	bacpy(&cp.bdaddr, &conn->dst);
78 	cp.pscan_rep_mode = 0x02;
79 
80 	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
81 	if (ie) {
82 		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
83 			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
84 			cp.pscan_mode     = ie->data.pscan_mode;
85 			cp.clock_offset   = ie->data.clock_offset |
86 					    cpu_to_le16(0x8000);
87 		}
88 
89 		memcpy(conn->dev_class, ie->data.dev_class, 3);
90 		if (ie->data.ssp_mode > 0)
91 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
92 	}
93 
94 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
95 	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
96 		cp.role_switch = 0x01;
97 	else
98 		cp.role_switch = 0x00;
99 
100 	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
101 }
102 
103 static void hci_acl_create_connection_cancel(struct hci_conn *conn)
104 {
105 	struct hci_cp_create_conn_cancel cp;
106 
107 	BT_DBG("hcon %p", conn);
108 
109 	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
110 		return;
111 
112 	bacpy(&cp.bdaddr, &conn->dst);
113 	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
114 }
115 
116 static void hci_reject_sco(struct hci_conn *conn)
117 {
118 	struct hci_cp_reject_sync_conn_req cp;
119 
120 	cp.reason = HCI_ERROR_REMOTE_USER_TERM;
121 	bacpy(&cp.bdaddr, &conn->dst);
122 
123 	hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
124 }
125 
126 void hci_disconnect(struct hci_conn *conn, __u8 reason)
127 {
128 	struct hci_cp_disconnect cp;
129 
130 	BT_DBG("hcon %p", conn);
131 
132 	conn->state = BT_DISCONN;
133 
134 	cp.handle = cpu_to_le16(conn->handle);
135 	cp.reason = reason;
136 	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
137 }
138 
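/*
 * Illustrative use: callers normally pass one of the HCI error codes
 * as the reason, for example:
 *
 *	hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
 *
 * The link is actually torn down asynchronously once the controller
 * reports the Disconnection Complete event.
 */
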
139 static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
140 {
141 	struct hci_cp_disconn_phy_link cp;
142 
143 	BT_DBG("hcon %p", conn);
144 
145 	conn->state = BT_DISCONN;
146 
147 	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
148 	cp.reason = reason;
149 	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
150 		     sizeof(cp), &cp);
151 }
152 
153 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
154 {
155 	struct hci_dev *hdev = conn->hdev;
156 	struct hci_cp_add_sco cp;
157 
158 	BT_DBG("hcon %p", conn);
159 
160 	conn->state = BT_CONNECT;
161 	conn->out = true;
162 
163 	conn->attempt++;
164 
165 	cp.handle   = cpu_to_le16(handle);
166 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
167 
168 	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
169 }
170 
171 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
172 {
173 	struct hci_dev *hdev = conn->hdev;
174 	struct hci_cp_setup_sync_conn cp;
175 	const struct sco_param *param;
176 
177 	BT_DBG("hcon %p", conn);
178 
179 	conn->state = BT_CONNECT;
180 	conn->out = true;
181 
182 	conn->attempt++;
183 
184 	cp.handle   = cpu_to_le16(handle);
185 
186 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
187 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
188 	cp.voice_setting  = cpu_to_le16(conn->setting);
189 
190 	switch (conn->setting & SCO_AIRMODE_MASK) {
191 	case SCO_AIRMODE_TRANSP:
192 		if (conn->attempt > ARRAY_SIZE(sco_param_wideband))
193 			return false;
194 		cp.retrans_effort = 0x02;
195 		param = &sco_param_wideband[conn->attempt - 1];
196 		break;
197 	case SCO_AIRMODE_CVSD:
198 		if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
199 			return false;
200 		cp.retrans_effort = 0x01;
201 		param = &sco_param_cvsd[conn->attempt - 1];
202 		break;
203 	default:
204 		return false;
205 	}
206 
207 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
208 	cp.max_latency = __cpu_to_le16(param->max_latency);
209 
210 	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
211 		return false;
212 
213 	return true;
214 }
215 
216 void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
217 			u16 latency, u16 to_multiplier)
218 {
219 	struct hci_cp_le_conn_update cp;
220 	struct hci_dev *hdev = conn->hdev;
221 
222 	memset(&cp, 0, sizeof(cp));
223 
224 	cp.handle		= cpu_to_le16(conn->handle);
225 	cp.conn_interval_min	= cpu_to_le16(min);
226 	cp.conn_interval_max	= cpu_to_le16(max);
227 	cp.conn_latency		= cpu_to_le16(latency);
228 	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
229 	cp.min_ce_len		= cpu_to_le16(0x0000);
230 	cp.max_ce_len		= cpu_to_le16(0x0000);
231 
232 	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
233 }
234 
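/*
 * Illustrative use (parameter values are examples only): L2CAP invokes
 * this when the remote requests new connection parameters, passing the
 * interval bounds, slave latency and supervision timeout in the units
 * defined by the HCI specification:
 *
 *	hci_le_conn_update(conn, 0x0018, 0x0028, 0x0000, 0x01f4);
 */
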
235 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
236 		      __u8 ltk[16])
237 {
238 	struct hci_dev *hdev = conn->hdev;
239 	struct hci_cp_le_start_enc cp;
240 
241 	BT_DBG("hcon %p", conn);
242 
243 	memset(&cp, 0, sizeof(cp));
244 
245 	cp.handle = cpu_to_le16(conn->handle);
246 	cp.rand = rand;
247 	cp.ediv = ediv;
248 	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
249 
250 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
251 }
252 
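/*
 * Illustrative use: the SMP code starts link-layer encryption with a
 * stored long term key, roughly like this (assuming a key entry that
 * carries ediv, rand and val members):
 *
 *	hci_le_start_enc(conn, key->ediv, key->rand, key->val);
 */
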
253 /* Device _must_ be locked */
254 void hci_sco_setup(struct hci_conn *conn, __u8 status)
255 {
256 	struct hci_conn *sco = conn->link;
257 
258 	if (!sco)
259 		return;
260 
261 	BT_DBG("hcon %p", conn);
262 
263 	if (!status) {
264 		if (lmp_esco_capable(conn->hdev))
265 			hci_setup_sync(sco, conn->handle);
266 		else
267 			hci_add_sco(sco, conn->handle);
268 	} else {
269 		hci_proto_connect_cfm(sco, status);
270 		hci_conn_del(sco);
271 	}
272 }
273 
274 static void hci_conn_disconnect(struct hci_conn *conn)
275 {
276 	__u8 reason = hci_proto_disconn_ind(conn);
277 
278 	switch (conn->type) {
279 	case AMP_LINK:
280 		hci_amp_disconn(conn, reason);
281 		break;
282 	default:
283 		hci_disconnect(conn, reason);
284 		break;
285 	}
286 }
287 
288 static void hci_conn_timeout(struct work_struct *work)
289 {
290 	struct hci_conn *conn = container_of(work, struct hci_conn,
291 					     disc_work.work);
292 
293 	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
294 
295 	if (atomic_read(&conn->refcnt))
296 		return;
297 
298 	switch (conn->state) {
299 	case BT_CONNECT:
300 	case BT_CONNECT2:
301 		if (conn->out) {
302 			if (conn->type == ACL_LINK)
303 				hci_acl_create_connection_cancel(conn);
304 			else if (conn->type == LE_LINK)
305 				hci_le_create_connection_cancel(conn);
306 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
307 			hci_reject_sco(conn);
308 		}
309 		break;
310 	case BT_CONFIG:
311 	case BT_CONNECTED:
312 		hci_conn_disconnect(conn);
313 		break;
314 	default:
315 		conn->state = BT_CLOSED;
316 		break;
317 	}
318 }
319 
320 /* Enter sniff mode */
321 static void hci_conn_idle(struct work_struct *work)
322 {
323 	struct hci_conn *conn = container_of(work, struct hci_conn,
324 					     idle_work.work);
325 	struct hci_dev *hdev = conn->hdev;
326 
327 	BT_DBG("hcon %p mode %d", conn, conn->mode);
328 
329 	if (test_bit(HCI_RAW, &hdev->flags))
330 		return;
331 
332 	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
333 		return;
334 
335 	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
336 		return;
337 
338 	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
339 		struct hci_cp_sniff_subrate cp;
340 		cp.handle             = cpu_to_le16(conn->handle);
341 		cp.max_latency        = cpu_to_le16(0);
342 		cp.min_remote_timeout = cpu_to_le16(0);
343 		cp.min_local_timeout  = cpu_to_le16(0);
344 		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
345 	}
346 
347 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
348 		struct hci_cp_sniff_mode cp;
349 		cp.handle       = cpu_to_le16(conn->handle);
350 		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
351 		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
352 		cp.attempt      = cpu_to_le16(4);
353 		cp.timeout      = cpu_to_le16(1);
354 		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
355 	}
356 }
357 
358 static void hci_conn_auto_accept(struct work_struct *work)
359 {
360 	struct hci_conn *conn = container_of(work, struct hci_conn,
361 					     auto_accept_work.work);
362 
363 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
364 		     &conn->dst);
365 }
366 
367 static void le_conn_timeout(struct work_struct *work)
368 {
369 	struct hci_conn *conn = container_of(work, struct hci_conn,
370 					     le_conn_timeout.work);
371 	struct hci_dev *hdev = conn->hdev;
372 
373 	BT_DBG("");
374 
375 	/* We could end up here due to having done directed advertising,
376 	 * so clean up the state if necessary. This should however only
377 	 * happen with broken hardware or if low duty cycle was used
378 	 * (which doesn't have a timeout of its own).
379 	 */
380 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
381 		u8 enable = 0x00;
382 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
383 			     &enable);
384 		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
385 		return;
386 	}
387 
388 	hci_le_create_connection_cancel(conn);
389 }
390 
391 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
392 {
393 	struct hci_conn *conn;
394 
395 	BT_DBG("%s dst %pMR", hdev->name, dst);
396 
397 	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
398 	if (!conn)
399 		return NULL;
400 
401 	bacpy(&conn->dst, dst);
402 	bacpy(&conn->src, &hdev->bdaddr);
403 	conn->hdev  = hdev;
404 	conn->type  = type;
405 	conn->mode  = HCI_CM_ACTIVE;
406 	conn->state = BT_OPEN;
407 	conn->auth_type = HCI_AT_GENERAL_BONDING;
408 	conn->io_capability = hdev->io_capability;
409 	conn->remote_auth = 0xff;
410 	conn->key_type = 0xff;
411 	conn->tx_power = HCI_TX_POWER_INVALID;
412 	conn->max_tx_power = HCI_TX_POWER_INVALID;
413 
414 	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
415 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
416 
417 	switch (type) {
418 	case ACL_LINK:
419 		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
420 		break;
421 	case LE_LINK:
422 		/* conn->src should reflect the local identity address */
423 		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
424 		break;
425 	case SCO_LINK:
426 		if (lmp_esco_capable(hdev))
427 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
428 					(hdev->esco_type & EDR_ESCO_MASK);
429 		else
430 			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
431 		break;
432 	case ESCO_LINK:
433 		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
434 		break;
435 	}
436 
437 	skb_queue_head_init(&conn->data_q);
438 
439 	INIT_LIST_HEAD(&conn->chan_list);
440 
441 	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
442 	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
443 	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
444 	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
445 
446 	atomic_set(&conn->refcnt, 0);
447 
448 	hci_dev_hold(hdev);
449 
450 	hci_conn_hash_add(hdev, conn);
451 	if (hdev->notify)
452 		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
453 
454 	hci_conn_init_sysfs(conn);
455 
456 	return conn;
457 }
458 
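/*
 * Illustrative use: callers allocate the connection object before
 * issuing the corresponding HCI command and must handle allocation
 * failure themselves:
 *
 *	conn = hci_conn_add(hdev, ACL_LINK, dst);
 *	if (!conn)
 *		return ERR_PTR(-ENOMEM);
 */
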
459 int hci_conn_del(struct hci_conn *conn)
460 {
461 	struct hci_dev *hdev = conn->hdev;
462 
463 	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
464 
465 	cancel_delayed_work_sync(&conn->disc_work);
466 	cancel_delayed_work_sync(&conn->auto_accept_work);
467 	cancel_delayed_work_sync(&conn->idle_work);
468 
469 	if (conn->type == ACL_LINK) {
470 		struct hci_conn *sco = conn->link;
471 		if (sco)
472 			sco->link = NULL;
473 
474 		/* Unacked frames */
475 		hdev->acl_cnt += conn->sent;
476 	} else if (conn->type == LE_LINK) {
477 		cancel_delayed_work_sync(&conn->le_conn_timeout);
478 
479 		if (hdev->le_pkts)
480 			hdev->le_cnt += conn->sent;
481 		else
482 			hdev->acl_cnt += conn->sent;
483 	} else {
484 		struct hci_conn *acl = conn->link;
485 		if (acl) {
486 			acl->link = NULL;
487 			hci_conn_drop(acl);
488 		}
489 	}
490 
491 	hci_chan_list_flush(conn);
492 
493 	if (conn->amp_mgr)
494 		amp_mgr_put(conn->amp_mgr);
495 
496 	hci_conn_hash_del(hdev, conn);
497 	if (hdev->notify)
498 		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
499 
500 	skb_queue_purge(&conn->data_q);
501 
502 	hci_conn_del_sysfs(conn);
503 
504 	hci_dev_put(hdev);
505 
506 	hci_conn_put(conn);
507 
508 	return 0;
509 }
510 
511 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
512 {
513 	int use_src = bacmp(src, BDADDR_ANY);
514 	struct hci_dev *hdev = NULL, *d;
515 
516 	BT_DBG("%pMR -> %pMR", src, dst);
517 
518 	read_lock(&hci_dev_list_lock);
519 
520 	list_for_each_entry(d, &hci_dev_list, list) {
521 		if (!test_bit(HCI_UP, &d->flags) ||
522 		    test_bit(HCI_RAW, &d->flags) ||
523 		    test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
524 		    d->dev_type != HCI_BREDR)
525 			continue;
526 
527 		/* Simple routing:
528 		 *   No source address - find interface with bdaddr != dst
529 		 *   Source address    - find interface with bdaddr == src
530 		 */
531 
532 		if (use_src) {
533 			if (!bacmp(&d->bdaddr, src)) {
534 				hdev = d; break;
535 			}
536 		} else {
537 			if (bacmp(&d->bdaddr, dst)) {
538 				hdev = d; break;
539 			}
540 		}
541 	}
542 
543 	if (hdev)
544 		hdev = hci_dev_hold(hdev);
545 
546 	read_unlock(&hci_dev_list_lock);
547 	return hdev;
548 }
549 EXPORT_SYMBOL(hci_get_route);
550 
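/*
 * Illustrative use: protocol code looks up the local adapter for an
 * outgoing connection and drops the reference once it is done:
 *
 *	hdev = hci_get_route(dst, src);
 *	if (!hdev)
 *		return -EHOSTUNREACH;
 *	...
 *	hci_dev_put(hdev);
 */
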
551 /* This function requires the caller holds hdev->lock */
552 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
553 {
554 	struct hci_dev *hdev = conn->hdev;
555 
556 	conn->state = BT_CLOSED;
557 
558 	mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
559 			    status);
560 
561 	hci_proto_connect_cfm(conn, status);
562 
563 	hci_conn_del(conn);
564 
565 	/* Since we may have temporarily stopped the background scanning in
566 	 * favor of connection establishment, we should restart it.
567 	 */
568 	hci_update_background_scan(hdev);
569 
570 	/* Re-enable advertising in case this was a failed connection
571 	 * attempt as a peripheral.
572 	 */
573 	mgmt_reenable_advertising(hdev);
574 }
575 
576 static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
577 {
578 	struct hci_conn *conn;
579 
580 	if (status == 0)
581 		return;
582 
583 	BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
584 	       status);
585 
586 	hci_dev_lock(hdev);
587 
588 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
589 	if (!conn)
590 		goto done;
591 
592 	hci_le_conn_failed(conn, status);
593 
594 done:
595 	hci_dev_unlock(hdev);
596 }
597 
598 static void hci_req_add_le_create_conn(struct hci_request *req,
599 				       struct hci_conn *conn)
600 {
601 	struct hci_cp_le_create_conn cp;
602 	struct hci_dev *hdev = conn->hdev;
603 	u8 own_addr_type;
604 
605 	memset(&cp, 0, sizeof(cp));
606 
607 	/* Update random address, but set require_privacy to false so
608 	 * that we never connect with an unresolvable address.
609 	 */
610 	if (hci_update_random_address(req, false, &own_addr_type))
611 		return;
612 
613 	/* Save the address type used for this connection attempt so we are
614 	 * able to retrieve this information if we need it.
615 	 */
616 	conn->src_type = own_addr_type;
617 
618 	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
619 	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
620 	bacpy(&cp.peer_addr, &conn->dst);
621 	cp.peer_addr_type = conn->dst_type;
622 	cp.own_address_type = own_addr_type;
623 	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
624 	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
625 	cp.supervision_timeout = cpu_to_le16(0x002a);
626 	cp.min_ce_len = cpu_to_le16(0x0000);
627 	cp.max_ce_len = cpu_to_le16(0x0000);
628 
629 	hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
630 
631 	conn->state = BT_CONNECT;
632 }
633 
634 static void hci_req_directed_advertising(struct hci_request *req,
635 					 struct hci_conn *conn)
636 {
637 	struct hci_dev *hdev = req->hdev;
638 	struct hci_cp_le_set_adv_param cp;
639 	u8 own_addr_type;
640 	u8 enable;
641 
642 	enable = 0x00;
643 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
644 
645 	/* Clear the HCI_ADVERTISING bit temporarily so that the
646 	 * hci_update_random_address knows that it's safe to go ahead
647 	 * and write a new random address. The flag will be set back on
648 	 * as soon as the SET_ADV_ENABLE HCI command completes.
649 	 */
650 	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
651 
652 	/* Set require_privacy to false so that the remote device has a
653 	 * chance of identifying us.
654 	 */
655 	if (hci_update_random_address(req, false, &own_addr_type) < 0)
656 		return;
657 
658 	memset(&cp, 0, sizeof(cp));
659 	cp.type = LE_ADV_DIRECT_IND;
660 	cp.own_address_type = own_addr_type;
661 	cp.direct_addr_type = conn->dst_type;
662 	bacpy(&cp.direct_addr, &conn->dst);
663 	cp.channel_map = hdev->le_adv_channel_map;
664 
665 	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
666 
667 	enable = 0x01;
668 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
669 
670 	conn->state = BT_CONNECT;
671 }
672 
673 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
674 				u8 dst_type, u8 sec_level, u8 auth_type)
675 {
676 	struct hci_conn_params *params;
677 	struct hci_conn *conn;
678 	struct smp_irk *irk;
679 	struct hci_request req;
680 	int err;
681 
682 	/* Some devices send ATT messages as soon as the physical link is
683 	 * established. To be able to handle these ATT messages, the user-
684 	 * space first establishes the connection and then starts the pairing
685 	 * process.
686 	 *
687 	 * So if a hci_conn object already exists for the following connection
688 	 * attempt, we simply update pending_sec_level and auth_type fields
689 	 * and return the object found.
690 	 */
691 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
692 	if (conn) {
693 		conn->pending_sec_level = sec_level;
694 		conn->auth_type = auth_type;
695 		goto done;
696 	}
697 
698 	/* Since the controller supports only one LE connection attempt at a
699 	 * time, we return -EBUSY if there is any connection attempt running.
700 	 */
701 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
702 	if (conn)
703 		return ERR_PTR(-EBUSY);
704 
705 	/* When given an identity address with existing identity
706 	 * resolving key, the connection needs to be established
707 	 * to a resolvable random address.
708 	 *
709 	 * This uses the cached random resolvable address from
710 	 * a previous scan. When no cached address is available,
711 	 * try connecting to the identity address instead.
712 	 *
713 	 * Storing the resolvable random address is required here
714 	 * to handle connection failures. The address will later
715 	 * be resolved back into the original identity address
716 	 * from the connect request.
717 	 */
718 	irk = hci_find_irk_by_addr(hdev, dst, dst_type);
719 	if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
720 		dst = &irk->rpa;
721 		dst_type = ADDR_LE_DEV_RANDOM;
722 	}
723 
724 	conn = hci_conn_add(hdev, LE_LINK, dst);
725 	if (!conn)
726 		return ERR_PTR(-ENOMEM);
727 
728 	conn->dst_type = dst_type;
729 	conn->sec_level = BT_SECURITY_LOW;
730 	conn->pending_sec_level = sec_level;
731 	conn->auth_type = auth_type;
732 
733 	hci_req_init(&req, hdev);
734 
735 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
736 		hci_req_directed_advertising(&req, conn);
737 		goto create_conn;
738 	}
739 
740 	conn->out = true;
741 	conn->link_mode |= HCI_LM_MASTER;
742 
743 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
744 	if (params) {
745 		conn->le_conn_min_interval = params->conn_min_interval;
746 		conn->le_conn_max_interval = params->conn_max_interval;
747 	} else {
748 		conn->le_conn_min_interval = hdev->le_conn_min_interval;
749 		conn->le_conn_max_interval = hdev->le_conn_max_interval;
750 	}
751 
752 	/* If controller is scanning, we stop it since some controllers are
753 	 * not able to scan and connect at the same time. Also set the
754 	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
755 	 * handler for scan disabling knows to set the correct discovery
756 	 * state.
757 	 */
758 	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
759 		hci_req_add_le_scan_disable(&req);
760 		set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
761 	}
762 
763 	hci_req_add_le_create_conn(&req, conn);
764 
765 create_conn:
766 	err = hci_req_run(&req, create_le_conn_complete);
767 	if (err) {
768 		hci_conn_del(conn);
769 		return ERR_PTR(err);
770 	}
771 
772 done:
773 	hci_conn_hold(conn);
774 	return conn;
775 }
776 
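/*
 * Illustrative use (address type and security level are examples
 * only); the caller is expected to hold hdev->lock:
 *
 *	hci_dev_lock(hdev);
 *	conn = hci_connect_le(hdev, dst, ADDR_LE_DEV_PUBLIC,
 *			      BT_SECURITY_MEDIUM, HCI_AT_NO_BONDING);
 *	hci_dev_unlock(hdev);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 *
 * The returned connection carries an extra reference that the caller
 * releases with hci_conn_drop() when it no longer needs it.
 */
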
777 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
778 				 u8 sec_level, u8 auth_type)
779 {
780 	struct hci_conn *acl;
781 
782 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
783 		return ERR_PTR(-ENOTSUPP);
784 
785 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
786 	if (!acl) {
787 		acl = hci_conn_add(hdev, ACL_LINK, dst);
788 		if (!acl)
789 			return ERR_PTR(-ENOMEM);
790 	}
791 
792 	hci_conn_hold(acl);
793 
794 	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
795 		acl->sec_level = BT_SECURITY_LOW;
796 		acl->pending_sec_level = sec_level;
797 		acl->auth_type = auth_type;
798 		hci_acl_create_connection(acl);
799 	}
800 
801 	return acl;
802 }
803 
804 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
805 				 __u16 setting)
806 {
807 	struct hci_conn *acl;
808 	struct hci_conn *sco;
809 
810 	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
811 	if (IS_ERR(acl))
812 		return acl;
813 
814 	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
815 	if (!sco) {
816 		sco = hci_conn_add(hdev, type, dst);
817 		if (!sco) {
818 			hci_conn_drop(acl);
819 			return ERR_PTR(-ENOMEM);
820 		}
821 	}
822 
823 	acl->link = sco;
824 	sco->link = acl;
825 
826 	hci_conn_hold(sco);
827 
828 	sco->setting = setting;
829 
830 	if (acl->state == BT_CONNECTED &&
831 	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
832 		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
833 		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
834 
835 		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
836 			/* defer SCO setup until the mode change has completed */
837 			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
838 			return sco;
839 		}
840 
841 		hci_sco_setup(acl, 0x00);
842 	}
843 
844 	return sco;
845 }
846 
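/*
 * Illustrative sketch of how the SCO socket layer uses this, picking
 * SCO_LINK or ESCO_LINK based on controller support:
 *
 *	type = lmp_esco_capable(hdev) ? ESCO_LINK : SCO_LINK;
 *	sco = hci_connect_sco(hdev, type, dst, setting);
 *	if (IS_ERR(sco))
 *		return PTR_ERR(sco);
 */
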
847 /* Check link security requirement */
848 int hci_conn_check_link_mode(struct hci_conn *conn)
849 {
850 	BT_DBG("hcon %p", conn);
851 
852 	/* In Secure Connections Only mode, it is required that Secure
853 	 * Connections is used and the link is encrypted with AES-CCM
854 	 * using a P-256 authenticated combination key.
855 	 */
856 	if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
857 		if (!hci_conn_sc_enabled(conn) ||
858 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
859 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
860 			return 0;
861 	}
862 
863 	if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
864 		return 0;
865 
866 	return 1;
867 }
868 
869 /* Authenticate remote device */
870 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
871 {
872 	BT_DBG("hcon %p", conn);
873 
874 	if (conn->pending_sec_level > sec_level)
875 		sec_level = conn->pending_sec_level;
876 
877 	if (sec_level > conn->sec_level)
878 		conn->pending_sec_level = sec_level;
879 	else if (conn->link_mode & HCI_LM_AUTH)
880 		return 1;
881 
882 	/* Make sure we preserve an existing MITM requirement */
883 	auth_type |= (conn->auth_type & 0x01);
884 
885 	conn->auth_type = auth_type;
886 
887 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
888 		struct hci_cp_auth_requested cp;
889 
890 		cp.handle = cpu_to_le16(conn->handle);
891 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
892 			     sizeof(cp), &cp);
893 
894 		/* If we're already encrypted set the REAUTH_PEND flag,
895 		 * otherwise set the ENCRYPT_PEND.
896 		 */
897 		if (conn->key_type != 0xff)
898 			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
899 		else
900 			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
901 	}
902 
903 	return 0;
904 }
905 
906 /* Encrypt the link */
907 static void hci_conn_encrypt(struct hci_conn *conn)
908 {
909 	BT_DBG("hcon %p", conn);
910 
911 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
912 		struct hci_cp_set_conn_encrypt cp;
913 		cp.handle  = cpu_to_le16(conn->handle);
914 		cp.encrypt = 0x01;
915 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
916 			     &cp);
917 	}
918 }
919 
920 /* Enable security */
921 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
922 {
923 	BT_DBG("hcon %p", conn);
924 
925 	if (conn->type == LE_LINK)
926 		return smp_conn_security(conn, sec_level);
927 
928 	/* For SDP we don't need the link key. */
929 	if (sec_level == BT_SECURITY_SDP)
930 		return 1;
931 
932 	/* For non-2.1 devices and low security levels we don't need the
933 	   link key. */
934 	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
935 		return 1;
936 
937 	/* For other security levels we need the link key. */
938 	if (!(conn->link_mode & HCI_LM_AUTH))
939 		goto auth;
940 
941 	/* An authenticated FIPS approved combination key has sufficient
942 	 * security for security level 4. */
943 	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
944 	    sec_level == BT_SECURITY_FIPS)
945 		goto encrypt;
946 
947 	/* An authenticated combination key has sufficient security for
948 	   security level 3. */
949 	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
950 	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
951 	    sec_level == BT_SECURITY_HIGH)
952 		goto encrypt;
953 
954 	/* An unauthenticated combination key has sufficient security for
955 	   security levels 1 and 2. */
956 	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
957 	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
958 	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
959 		goto encrypt;
960 
961 	/* For pre-2.1 units, a combination key always has sufficient
962 	   security for security levels 1 and 2. Higher security levels
963 	   additionally require that the key was generated using the
964 	   maximum PIN code length (16). */
965 	if (conn->key_type == HCI_LK_COMBINATION &&
966 	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
967 	     conn->pin_length == 16))
968 		goto encrypt;
969 
970 auth:
971 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
972 		return 0;
973 
974 	if (!hci_conn_auth(conn, sec_level, auth_type))
975 		return 0;
976 
977 encrypt:
978 	if (conn->link_mode & HCI_LM_ENCRYPT)
979 		return 1;
980 
981 	hci_conn_encrypt(conn);
982 	return 0;
983 }
984 EXPORT_SYMBOL(hci_conn_security);
985 
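/*
 * Illustrative use: a return value of 1 means the link already meets
 * the requested security level, while 0 means authentication and/or
 * encryption has been requested and the caller must wait for the
 * corresponding HCI event before proceeding:
 *
 *	if (!hci_conn_security(conn, BT_SECURITY_MEDIUM,
 *			       HCI_AT_GENERAL_BONDING))
 *		return 0;
 */
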
986 /* Check secure link requirement */
987 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
988 {
989 	BT_DBG("hcon %p", conn);
990 
991 	/* Accept if non-secure or higher security level is required */
992 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
993 		return 1;
994 
995 	/* Accept if secure or higher security level is already present */
996 	if (conn->sec_level == BT_SECURITY_HIGH ||
997 	    conn->sec_level == BT_SECURITY_FIPS)
998 		return 1;
999 
1000 	/* Reject not secure link */
1001 	return 0;
1002 }
1003 EXPORT_SYMBOL(hci_conn_check_secure);
1004 
1005 /* Change link key */
1006 int hci_conn_change_link_key(struct hci_conn *conn)
1007 {
1008 	BT_DBG("hcon %p", conn);
1009 
1010 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1011 		struct hci_cp_change_conn_link_key cp;
1012 		cp.handle = cpu_to_le16(conn->handle);
1013 		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
1014 			     sizeof(cp), &cp);
1015 	}
1016 
1017 	return 0;
1018 }
1019 
1020 /* Switch role */
1021 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1022 {
1023 	BT_DBG("hcon %p", conn);
1024 
1025 	if (!role && conn->link_mode & HCI_LM_MASTER)
1026 		return 1;
1027 
1028 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1029 		struct hci_cp_switch_role cp;
1030 		bacpy(&cp.bdaddr, &conn->dst);
1031 		cp.role = role;
1032 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1033 	}
1034 
1035 	return 0;
1036 }
1037 EXPORT_SYMBOL(hci_conn_switch_role);
1038 
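/*
 * Illustrative use: role 0x00 requests the master role and 0x01 the
 * slave role; a return value of 1 means the connection already has the
 * requested (master) role and no command was sent:
 *
 *	hci_conn_switch_role(conn, 0x00);
 */
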
1039 /* Enter active mode */
1040 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1041 {
1042 	struct hci_dev *hdev = conn->hdev;
1043 
1044 	BT_DBG("hcon %p mode %d", conn, conn->mode);
1045 
1046 	if (test_bit(HCI_RAW, &hdev->flags))
1047 		return;
1048 
1049 	if (conn->mode != HCI_CM_SNIFF)
1050 		goto timer;
1051 
1052 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1053 		goto timer;
1054 
1055 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1056 		struct hci_cp_exit_sniff_mode cp;
1057 		cp.handle = cpu_to_le16(conn->handle);
1058 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1059 	}
1060 
1061 timer:
1062 	if (hdev->idle_timeout > 0)
1063 		queue_delayed_work(hdev->workqueue, &conn->idle_work,
1064 				   msecs_to_jiffies(hdev->idle_timeout));
1065 }
1066 
1067 /* Drop all connections on the device */
1068 void hci_conn_hash_flush(struct hci_dev *hdev)
1069 {
1070 	struct hci_conn_hash *h = &hdev->conn_hash;
1071 	struct hci_conn *c, *n;
1072 
1073 	BT_DBG("hdev %s", hdev->name);
1074 
1075 	list_for_each_entry_safe(c, n, &h->list, list) {
1076 		c->state = BT_CLOSED;
1077 
1078 		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1079 		hci_conn_del(c);
1080 	}
1081 }
1082 
1083 /* Check pending connect attempts */
1084 void hci_conn_check_pending(struct hci_dev *hdev)
1085 {
1086 	struct hci_conn *conn;
1087 
1088 	BT_DBG("hdev %s", hdev->name);
1089 
1090 	hci_dev_lock(hdev);
1091 
1092 	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1093 	if (conn)
1094 		hci_acl_create_connection(conn);
1095 
1096 	hci_dev_unlock(hdev);
1097 }
1098 
1099 int hci_get_conn_list(void __user *arg)
1100 {
1101 	struct hci_conn *c;
1102 	struct hci_conn_list_req req, *cl;
1103 	struct hci_conn_info *ci;
1104 	struct hci_dev *hdev;
1105 	int n = 0, size, err;
1106 
1107 	if (copy_from_user(&req, arg, sizeof(req)))
1108 		return -EFAULT;
1109 
1110 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1111 		return -EINVAL;
1112 
1113 	size = sizeof(req) + req.conn_num * sizeof(*ci);
1114 
1115 	cl = kmalloc(size, GFP_KERNEL);
1116 	if (!cl)
1117 		return -ENOMEM;
1118 
1119 	hdev = hci_dev_get(req.dev_id);
1120 	if (!hdev) {
1121 		kfree(cl);
1122 		return -ENODEV;
1123 	}
1124 
1125 	ci = cl->conn_info;
1126 
1127 	hci_dev_lock(hdev);
1128 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
1129 		bacpy(&(ci + n)->bdaddr, &c->dst);
1130 		(ci + n)->handle = c->handle;
1131 		(ci + n)->type  = c->type;
1132 		(ci + n)->out   = c->out;
1133 		(ci + n)->state = c->state;
1134 		(ci + n)->link_mode = c->link_mode;
1135 		if (++n >= req.conn_num)
1136 			break;
1137 	}
1138 	hci_dev_unlock(hdev);
1139 
1140 	cl->dev_id = hdev->id;
1141 	cl->conn_num = n;
1142 	size = sizeof(req) + n * sizeof(*ci);
1143 
1144 	hci_dev_put(hdev);
1145 
1146 	err = copy_to_user(arg, cl, size);
1147 	kfree(cl);
1148 
1149 	return err ? -EFAULT : 0;
1150 }
1151 
1152 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1153 {
1154 	struct hci_conn_info_req req;
1155 	struct hci_conn_info ci;
1156 	struct hci_conn *conn;
1157 	char __user *ptr = arg + sizeof(req);
1158 
1159 	if (copy_from_user(&req, arg, sizeof(req)))
1160 		return -EFAULT;
1161 
1162 	hci_dev_lock(hdev);
1163 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1164 	if (conn) {
1165 		bacpy(&ci.bdaddr, &conn->dst);
1166 		ci.handle = conn->handle;
1167 		ci.type  = conn->type;
1168 		ci.out   = conn->out;
1169 		ci.state = conn->state;
1170 		ci.link_mode = conn->link_mode;
1171 	}
1172 	hci_dev_unlock(hdev);
1173 
1174 	if (!conn)
1175 		return -ENOENT;
1176 
1177 	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1178 }
1179 
1180 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1181 {
1182 	struct hci_auth_info_req req;
1183 	struct hci_conn *conn;
1184 
1185 	if (copy_from_user(&req, arg, sizeof(req)))
1186 		return -EFAULT;
1187 
1188 	hci_dev_lock(hdev);
1189 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1190 	if (conn)
1191 		req.type = conn->auth_type;
1192 	hci_dev_unlock(hdev);
1193 
1194 	if (!conn)
1195 		return -ENOENT;
1196 
1197 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1198 }
1199 
1200 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1201 {
1202 	struct hci_dev *hdev = conn->hdev;
1203 	struct hci_chan *chan;
1204 
1205 	BT_DBG("%s hcon %p", hdev->name, conn);
1206 
1207 	chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
1208 	if (!chan)
1209 		return NULL;
1210 
1211 	chan->conn = conn;
1212 	skb_queue_head_init(&chan->data_q);
1213 	chan->state = BT_CONNECTED;
1214 
1215 	list_add_rcu(&chan->list, &conn->chan_list);
1216 
1217 	return chan;
1218 }
1219 
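/*
 * Illustrative use: a caller (e.g. the L2CAP layer) allocates a channel
 * on top of an existing connection and later releases it again with
 * hci_chan_del():
 *
 *	chan = hci_chan_create(conn);
 *	if (!chan)
 *		return NULL;
 */
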
1220 void hci_chan_del(struct hci_chan *chan)
1221 {
1222 	struct hci_conn *conn = chan->conn;
1223 	struct hci_dev *hdev = conn->hdev;
1224 
1225 	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1226 
1227 	list_del_rcu(&chan->list);
1228 
1229 	synchronize_rcu();
1230 
1231 	hci_conn_drop(conn);
1232 
1233 	skb_queue_purge(&chan->data_q);
1234 	kfree(chan);
1235 }
1236 
1237 void hci_chan_list_flush(struct hci_conn *conn)
1238 {
1239 	struct hci_chan *chan, *n;
1240 
1241 	BT_DBG("hcon %p", conn);
1242 
1243 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1244 		hci_chan_del(chan);
1245 }
1246 
1247 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1248 						 __u16 handle)
1249 {
1250 	struct hci_chan *hchan;
1251 
1252 	list_for_each_entry(hchan, &hcon->chan_list, list) {
1253 		if (hchan->handle == handle)
1254 			return hchan;
1255 	}
1256 
1257 	return NULL;
1258 }
1259 
1260 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1261 {
1262 	struct hci_conn_hash *h = &hdev->conn_hash;
1263 	struct hci_conn *hcon;
1264 	struct hci_chan *hchan = NULL;
1265 
1266 	rcu_read_lock();
1267 
1268 	list_for_each_entry_rcu(hcon, &h->list, list) {
1269 		hchan = __hci_chan_lookup_handle(hcon, handle);
1270 		if (hchan)
1271 			break;
1272 	}
1273 
1274 	rcu_read_unlock();
1275 
1276 	return hchan;
1277 }
1278