xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision 3932b9ca)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI connection handling. */
26 
27 #include <linux/export.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/l2cap.h>
32 
33 #include "smp.h"
34 #include "a2mp.h"
35 
/* One row of the SCO/eSCO parameter tables below; each row describes a
 * single negotiation attempt for hci_setup_sync().
 */
struct sco_param {
	u16 pkt_type;	/* allowed (e)SCO packet types for this attempt */
	u16 max_latency;	/* max latency value passed to the controller;
				 * 0xffff appears to mean "don't care" — per
				 * HCI Setup Synchronous Connection spec */
};
40 
/* CVSD (narrowband) eSCO parameter sets, ordered from most- to
 * least-preferred. hci_setup_sync() indexes this table with
 * conn->attempt - 1, so each retry falls back to the next entry.
 */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff }, /* D0 */
};
48 
/* Transparent (wideband speech) eSCO parameter sets, again ordered by
 * preference and indexed by conn->attempt - 1 in hci_setup_sync().
 */
static const struct sco_param sco_param_wideband[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008 }, /* T1 */
};
53 
/* Cancel an outstanding LE connection attempt. The LE Create
 * Connection Cancel command carries no parameters.
 */
static void hci_le_create_connection_cancel(struct hci_conn *conn)
{
	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}
58 
/* Issue HCI_OP_CREATE_CONN to establish an outgoing BR/EDR ACL link to
 * conn->dst. Marks the connection as outgoing in the master role and
 * bumps the attempt counter.
 */
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	/* If the peer is still in the inquiry cache, reuse the page scan
	 * parameters and clock offset learned during inquiry to speed up
	 * paging — but only if the entry is fresh enough to be trusted.
	 */
	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			/* Bit 15 flags the clock offset as valid */
			cp.clock_offset   = ie->data.clock_offset |
					    cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		if (ie->data.ssp_mode > 0)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	/* Allow the remote side to request a role switch unless we are
	 * configured to remain master on all links.
	 */
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
101 
/* Cancel an outstanding BR/EDR connection attempt towards conn->dst.
 * The Create Connection Cancel command only exists from Bluetooth 1.2
 * onwards, so silently do nothing on older controllers.
 */
static void hci_acl_create_connection_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("hcon %p", conn);

	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}
114 
115 static void hci_reject_sco(struct hci_conn *conn)
116 {
117 	struct hci_cp_reject_sync_conn_req cp;
118 
119 	cp.reason = HCI_ERROR_REMOTE_USER_TERM;
120 	bacpy(&cp.bdaddr, &conn->dst);
121 
122 	hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
123 }
124 
125 void hci_disconnect(struct hci_conn *conn, __u8 reason)
126 {
127 	struct hci_cp_disconnect cp;
128 
129 	BT_DBG("hcon %p", conn);
130 
131 	conn->state = BT_DISCONN;
132 
133 	cp.handle = cpu_to_le16(conn->handle);
134 	cp.reason = reason;
135 	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
136 }
137 
/* Tear down the physical link of an AMP connection. The disconnect
 * reason is obtained from the protocol layer via
 * hci_proto_disconn_ind().
 */
static void hci_amp_disconn(struct hci_conn *conn)
{
	struct hci_cp_disconn_phy_link cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
	cp.reason = hci_proto_disconn_ind(conn);
	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
		     sizeof(cp), &cp);
}
151 
/* Set up a legacy SCO link on top of the ACL connection identified by
 * handle, using the Add SCO Connection command (used when the
 * controller is not eSCO capable — see hci_sco_setup()).
 */
static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
169 
/* Set up a synchronous (eSCO) connection on top of the ACL link
 * identified by handle.
 *
 * Each retry (conn->attempt) picks the next, less demanding entry from
 * the sco_param table matching the requested air mode. Returns false
 * when the parameter table is exhausted, the air mode is unknown, or
 * sending the HCI command fails.
 */
bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	/* 0x1f40 = 8000 bytes/s, i.e. 64 kbit/s in each direction */
	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (conn->attempt > ARRAY_SIZE(sco_param_wideband))
			return false;
		cp.retrans_effort = 0x02;
		param = &sco_param_wideband[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
			return false;
		cp.retrans_effort = 0x01;
		param = &sco_param_cvsd[conn->attempt - 1];
		break;
	default:
		return false;
	}

	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
214 
/* Request an update of the LE connection parameters.
 *
 * If stored connection parameters exist for the destination they are
 * updated to the new values as well. Returns 0x01 when such stored
 * parameters were found, 0x00 otherwise.
 */
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}
250 
/* Start link-layer encryption on an LE connection using the provided
 * EDIV, Rand and Long Term Key values (LE Start Encryption command).
 */
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16])
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.rand = rand;
	cp.ediv = ediv;
	memcpy(cp.ltk, ltk, sizeof(cp.ltk));

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
268 
/* Set up the SCO/eSCO link attached to an ACL connection once the ACL
 * is ready, or tear the SCO connection down when the ACL setup failed
 * (status != 0).
 *
 * Device _must_ be locked.
 */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	if (!sco)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		/* Prefer eSCO setup when the controller supports it */
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}
289 
/* Work callback fired when the disconnect timer of an unused
 * connection expires. Depending on the connection state this cancels
 * a pending connection attempt, rejects an incoming SCO request, or
 * disconnects an established link.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		/* Connection still being set up: cancel the outgoing
		 * attempt, or reject an incoming SCO request.
		 */
		if (conn->out) {
			if (conn->type == ACL_LINK)
				hci_acl_create_connection_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_create_connection_cancel(conn);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			hci_reject_sco(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		if (conn->type == AMP_LINK) {
			hci_amp_disconn(conn);
		} else {
			__u8 reason = hci_proto_disconn_ind(conn);

			/* When we are master of an established connection
			 * and it enters the disconnect timeout, then go
			 * ahead and try to read the current clock offset.
			 *
			 * Processing of the result is done within the
			 * event handling and hci_clock_offset_evt function.
			 */
			if (conn->type == ACL_LINK &&
			    conn->role == HCI_ROLE_MASTER) {
				struct hci_dev *hdev = conn->hdev;
				struct hci_cp_read_clock_offset cp;

				cp.handle = cpu_to_le16(conn->handle);

				hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET,
					     sizeof(cp), &cp);
			}

			hci_disconnect(conn, reason);
		}
		break;
	default:
		/* Any other state: just mark the connection closed */
		conn->state = BT_CLOSED;
		break;
	}
}
355 
/* Idle-timer work callback: put an inactive ACL link into sniff mode.
 *
 * If both sides support sniff subrating, configure it first; then
 * request sniff mode unless a mode change is already pending.
 */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	/* Both controller and remote must support sniff mode */
	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	/* Only enter sniff from active mode, and only when the link
	 * policy allows it.
	 */
	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
390 
/* Work callback that automatically confirms a pending user
 * confirmation request once the auto-accept delay has elapsed.
 */
static void hci_conn_auto_accept(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     auto_accept_work.work);

	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}
399 
400 static void le_conn_timeout(struct work_struct *work)
401 {
402 	struct hci_conn *conn = container_of(work, struct hci_conn,
403 					     le_conn_timeout.work);
404 	struct hci_dev *hdev = conn->hdev;
405 
406 	BT_DBG("");
407 
408 	/* We could end up here due to having done directed advertising,
409 	 * so clean up the state if necessary. This should however only
410 	 * happen with broken hardware or if low duty cycle was used
411 	 * (which doesn't have a timeout of its own).
412 	 */
413 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
414 		u8 enable = 0x00;
415 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
416 			     &enable);
417 		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
418 		return;
419 	}
420 
421 	hci_le_create_connection_cancel(conn);
422 }
423 
/* Allocate and initialise a new hci_conn object for the given link
 * type and destination, add it to the connection hash and register it
 * with sysfs. Returns the new connection or NULL on allocation
 * failure.
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;	/* unknown until remote replies */
	conn->key_type = 0xff;		/* no link key yet */
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	/* Pick the initial packet types based on the link type */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);

	atomic_set(&conn->refcnt, 0);

	/* The connection holds a reference on the controller */
	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	hci_conn_init_sysfs(conn);

	return conn;
}
496 
/* Remove a connection object: cancel pending work, return unacked
 * packet credits to the controller counters, unlink any associated
 * SCO/ACL connection and drop all references held by the object.
 * Always returns 0.
 */
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work_sync(&conn->le_conn_timeout);

		/* Controllers without a dedicated LE buffer pool share
		 * the ACL credits.
		 */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* SCO/eSCO: detach from and release the parent ACL */
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_drop(acl);
		}
	}

	hci_chan_list_flush(conn);

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	skb_queue_purge(&conn->data_q);

	hci_conn_del_sysfs(conn);

	hci_dev_put(hdev);

	hci_conn_put(conn);

	return 0;
}
548 
549 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
550 {
551 	int use_src = bacmp(src, BDADDR_ANY);
552 	struct hci_dev *hdev = NULL, *d;
553 
554 	BT_DBG("%pMR -> %pMR", src, dst);
555 
556 	read_lock(&hci_dev_list_lock);
557 
558 	list_for_each_entry(d, &hci_dev_list, list) {
559 		if (!test_bit(HCI_UP, &d->flags) ||
560 		    test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
561 		    d->dev_type != HCI_BREDR)
562 			continue;
563 
564 		/* Simple routing:
565 		 *   No source address - find interface with bdaddr != dst
566 		 *   Source address    - find interface with bdaddr == src
567 		 */
568 
569 		if (use_src) {
570 			if (!bacmp(&d->bdaddr, src)) {
571 				hdev = d; break;
572 			}
573 		} else {
574 			if (bacmp(&d->bdaddr, dst)) {
575 				hdev = d; break;
576 			}
577 		}
578 	}
579 
580 	if (hdev)
581 		hdev = hci_dev_hold(hdev);
582 
583 	read_unlock(&hci_dev_list_lock);
584 	return hdev;
585 }
586 EXPORT_SYMBOL(hci_get_route);
587 
/* This function requires the caller holds hdev->lock.
 *
 * Handle a failed LE connection attempt: release the reference held by
 * any pending connection parameters, notify mgmt and the protocol
 * layer, delete the connection and restore background scanning and
 * advertising.
 */
void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params && params->conn) {
		hci_conn_drop(params->conn);
		params->conn = NULL;
	}

	conn->state = BT_CLOSED;

	mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
			    status);

	hci_proto_connect_cfm(conn, status);

	hci_conn_del(conn);

	/* Since we may have temporarily stopped the background scanning in
	 * favor of connection establishment, we should restart it.
	 */
	hci_update_background_scan(hdev);

	/* Re-enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	mgmt_reenable_advertising(hdev);
}
620 
621 static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
622 {
623 	struct hci_conn *conn;
624 
625 	if (status == 0)
626 		return;
627 
628 	BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
629 	       status);
630 
631 	hci_dev_lock(hdev);
632 
633 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
634 	if (!conn)
635 		goto done;
636 
637 	hci_le_conn_failed(conn, status);
638 
639 done:
640 	hci_dev_unlock(hdev);
641 }
642 
/* Append an LE Create Connection command for conn to the request and
 * move the connection into BT_CONNECT state. Silently does nothing
 * when no suitable own address can be set up.
 */
static void hci_req_add_le_create_conn(struct hci_request *req,
				       struct hci_conn *conn)
{
	struct hci_cp_le_create_conn cp;
	struct hci_dev *hdev = conn->hdev;
	u8 own_addr_type;

	memset(&cp, 0, sizeof(cp));

	/* Update random address, but set require_privacy to false so
	 * that we never connect with an unresolvable address.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.own_address_type = own_addr_type;
	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);

	conn->state = BT_CONNECT;
}
674 
/* Append commands to req that start directed advertising towards
 * conn->dst, used to establish the connection in the slave role, and
 * move the connection into BT_CONNECT state.
 */
static void hci_req_directed_advertising(struct hci_request *req,
					 struct hci_conn *conn)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type;
	u8 enable;

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.type = LE_ADV_DIRECT_IND;
	cp.own_address_type = own_addr_type;
	cp.direct_addr_type = conn->dst_type;
	bacpy(&cp.direct_addr, &conn->dst);
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	enable = 0x01;
	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);

	conn->state = BT_CONNECT;
}
710 
/* Create (or reuse) an LE connection to dst.
 *
 * Returns the connection object with a held reference, or an ERR_PTR:
 * -EBUSY when another LE connection attempt is already running (or
 * directed advertising is impossible while actively scanning),
 * -ENOMEM when no connection object can be allocated.
 */
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, u8 sec_level, u16 conn_timeout,
				u8 role)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	struct hci_request req;
	int err;

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
	if (conn) {
		conn->pending_sec_level = sec_level;
		goto done;
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		return ERR_PTR(-EBUSY);

	/* When given an identity address with existing identity
	 * resolving key, the connection needs to be established
	 * to a resolvable random address.
	 *
	 * This uses the cached random resolvable address from
	 * a previous scan. When no cached address is available,
	 * try connecting to the identity address instead.
	 *
	 * Storing the resolvable random address is required here
	 * to handle connection failures. The address will later
	 * be resolved back into the original identity address
	 * from the connect request.
	 */
	irk = hci_find_irk_by_addr(hdev, dst, dst_type);
	if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
		dst = &irk->rpa;
		dst_type = ADDR_LE_DEV_RANDOM;
	}

	conn = hci_conn_add(hdev, LE_LINK, dst, role);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;

	hci_req_init(&req, hdev);

	/* Disable advertising if we're active. For master role
	 * connections most controllers will refuse to connect if
	 * advertising is enabled, and for slave role connections we
	 * anyway have to disable it in order to start directed
	 * advertising.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
		u8 enable = 0x00;
		hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			    &enable);
	}

	/* If requested to connect as slave use directed advertising */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* If we're active scanning most controllers are unable
		 * to initiate advertising. Simply reject the attempt.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
		    hdev->le_scan_type == LE_SCAN_ACTIVE) {
			skb_queue_purge(&req.cmd_q);
			hci_conn_del(conn);
			return ERR_PTR(-EBUSY);
		}

		hci_req_directed_advertising(&req, conn);
		goto create_conn;
	}

	/* Use stored per-device connection parameters when available,
	 * otherwise fall back to the controller defaults.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		conn->le_conn_min_interval = params->conn_min_interval;
		conn->le_conn_max_interval = params->conn_max_interval;
		conn->le_conn_latency = params->conn_latency;
		conn->le_supv_timeout = params->supervision_timeout;
	} else {
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
		conn->le_conn_latency = hdev->le_conn_latency;
		conn->le_supv_timeout = hdev->le_supv_timeout;
	}

	/* If controller is scanning, we stop it since some controllers are
	 * not able to scan and connect at the same time. Also set the
	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
	 * handler for scan disabling knows to set the correct discovery
	 * state.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
		hci_req_add_le_scan_disable(&req);
		set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
	}

	hci_req_add_le_create_conn(&req, conn);

create_conn:
	err = hci_req_run(&req, create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

done:
	hci_conn_hold(conn);
	return conn;
}
838 
/* Create (or reuse) an outgoing BR/EDR ACL connection to dst.
 *
 * Returns the connection with a held reference, or an ERR_PTR:
 * -EOPNOTSUPP when BR/EDR is disabled, -ENOMEM on allocation failure.
 * The connection attempt is only started when the link is not already
 * being set up or established.
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type)
{
	struct hci_conn *acl;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return ERR_PTR(-EOPNOTSUPP);

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}
865 
/* Create a SCO or eSCO connection to dst, first making sure a
 * (low-security) ACL link exists. The actual SCO setup is deferred
 * until the ACL is connected and any pending mode change has
 * completed. Returns the SCO connection or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* Link the SCO and its parent ACL to each other */
	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	sco->setting = setting;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
908 
909 /* Check link security requirement */
910 int hci_conn_check_link_mode(struct hci_conn *conn)
911 {
912 	BT_DBG("hcon %p", conn);
913 
914 	/* In Secure Connections Only mode, it is required that Secure
915 	 * Connections is used and the link is encrypted with AES-CCM
916 	 * using a P-256 authenticated combination key.
917 	 */
918 	if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
919 		if (!hci_conn_sc_enabled(conn) ||
920 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
921 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
922 			return 0;
923 	}
924 
925 	if (hci_conn_ssp_enabled(conn) &&
926 	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
927 		return 0;
928 
929 	return 1;
930 }
931 
/* Authenticate remote device. Returns 1 when the link is already
 * authenticated at a sufficient security level, 0 when authentication
 * has been requested (or was already pending).
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
968 
/* Encrypt the link: request link-level encryption unless an
 * encryption change is already pending.
 */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}
982 
/* Enable security: elevate the connection to the requested security
 * level, authenticating and/or encrypting the link as needed.
 * Returns 1 when the requirement is already satisfied, 0 when further
 * (asynchronous) authentication or encryption has been initiated.
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	/* LE links are handled entirely by SMP */
	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
1051 EXPORT_SYMBOL(hci_conn_security);
1052 
1053 /* Check secure link requirement */
1054 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1055 {
1056 	BT_DBG("hcon %p", conn);
1057 
1058 	/* Accept if non-secure or higher security level is required */
1059 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1060 		return 1;
1061 
1062 	/* Accept if secure or higher security level is already present */
1063 	if (conn->sec_level == BT_SECURITY_HIGH ||
1064 	    conn->sec_level == BT_SECURITY_FIPS)
1065 		return 1;
1066 
1067 	/* Reject not secure link */
1068 	return 0;
1069 }
1070 EXPORT_SYMBOL(hci_conn_check_secure);
1071 
1072 /* Change link key */
1073 int hci_conn_change_link_key(struct hci_conn *conn)
1074 {
1075 	BT_DBG("hcon %p", conn);
1076 
1077 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1078 		struct hci_cp_change_conn_link_key cp;
1079 		cp.handle = cpu_to_le16(conn->handle);
1080 		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
1081 			     sizeof(cp), &cp);
1082 	}
1083 
1084 	return 0;
1085 }
1086 
1087 /* Switch role */
1088 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1089 {
1090 	BT_DBG("hcon %p", conn);
1091 
1092 	if (role == conn->role)
1093 		return 1;
1094 
1095 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1096 		struct hci_cp_switch_role cp;
1097 		bacpy(&cp.bdaddr, &conn->dst);
1098 		cp.role = role;
1099 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1100 	}
1101 
1102 	return 0;
1103 }
1104 EXPORT_SYMBOL(hci_conn_switch_role);
1105 
1106 /* Enter active mode */
1107 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1108 {
1109 	struct hci_dev *hdev = conn->hdev;
1110 
1111 	BT_DBG("hcon %p mode %d", conn, conn->mode);
1112 
1113 	if (conn->mode != HCI_CM_SNIFF)
1114 		goto timer;
1115 
1116 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1117 		goto timer;
1118 
1119 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1120 		struct hci_cp_exit_sniff_mode cp;
1121 		cp.handle = cpu_to_le16(conn->handle);
1122 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1123 	}
1124 
1125 timer:
1126 	if (hdev->idle_timeout > 0)
1127 		queue_delayed_work(hdev->workqueue, &conn->idle_work,
1128 				   msecs_to_jiffies(hdev->idle_timeout));
1129 }
1130 
1131 /* Drop all connection on the device */
1132 void hci_conn_hash_flush(struct hci_dev *hdev)
1133 {
1134 	struct hci_conn_hash *h = &hdev->conn_hash;
1135 	struct hci_conn *c, *n;
1136 
1137 	BT_DBG("hdev %s", hdev->name);
1138 
1139 	list_for_each_entry_safe(c, n, &h->list, list) {
1140 		c->state = BT_CLOSED;
1141 
1142 		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1143 		hci_conn_del(c);
1144 	}
1145 }
1146 
1147 /* Check pending connect attempts */
1148 void hci_conn_check_pending(struct hci_dev *hdev)
1149 {
1150 	struct hci_conn *conn;
1151 
1152 	BT_DBG("hdev %s", hdev->name);
1153 
1154 	hci_dev_lock(hdev);
1155 
1156 	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1157 	if (conn)
1158 		hci_acl_create_connection(conn);
1159 
1160 	hci_dev_unlock(hdev);
1161 }
1162 
1163 static u32 get_link_mode(struct hci_conn *conn)
1164 {
1165 	u32 link_mode = 0;
1166 
1167 	if (conn->role == HCI_ROLE_MASTER)
1168 		link_mode |= HCI_LM_MASTER;
1169 
1170 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1171 		link_mode |= HCI_LM_ENCRYPT;
1172 
1173 	if (test_bit(HCI_CONN_AUTH, &conn->flags))
1174 		link_mode |= HCI_LM_AUTH;
1175 
1176 	if (test_bit(HCI_CONN_SECURE, &conn->flags))
1177 		link_mode |= HCI_LM_SECURE;
1178 
1179 	if (test_bit(HCI_CONN_FIPS, &conn->flags))
1180 		link_mode |= HCI_LM_FIPS;
1181 
1182 	return link_mode;
1183 }
1184 
1185 int hci_get_conn_list(void __user *arg)
1186 {
1187 	struct hci_conn *c;
1188 	struct hci_conn_list_req req, *cl;
1189 	struct hci_conn_info *ci;
1190 	struct hci_dev *hdev;
1191 	int n = 0, size, err;
1192 
1193 	if (copy_from_user(&req, arg, sizeof(req)))
1194 		return -EFAULT;
1195 
1196 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1197 		return -EINVAL;
1198 
1199 	size = sizeof(req) + req.conn_num * sizeof(*ci);
1200 
1201 	cl = kmalloc(size, GFP_KERNEL);
1202 	if (!cl)
1203 		return -ENOMEM;
1204 
1205 	hdev = hci_dev_get(req.dev_id);
1206 	if (!hdev) {
1207 		kfree(cl);
1208 		return -ENODEV;
1209 	}
1210 
1211 	ci = cl->conn_info;
1212 
1213 	hci_dev_lock(hdev);
1214 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
1215 		bacpy(&(ci + n)->bdaddr, &c->dst);
1216 		(ci + n)->handle = c->handle;
1217 		(ci + n)->type  = c->type;
1218 		(ci + n)->out   = c->out;
1219 		(ci + n)->state = c->state;
1220 		(ci + n)->link_mode = get_link_mode(c);
1221 		if (++n >= req.conn_num)
1222 			break;
1223 	}
1224 	hci_dev_unlock(hdev);
1225 
1226 	cl->dev_id = hdev->id;
1227 	cl->conn_num = n;
1228 	size = sizeof(req) + n * sizeof(*ci);
1229 
1230 	hci_dev_put(hdev);
1231 
1232 	err = copy_to_user(arg, cl, size);
1233 	kfree(cl);
1234 
1235 	return err ? -EFAULT : 0;
1236 }
1237 
1238 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1239 {
1240 	struct hci_conn_info_req req;
1241 	struct hci_conn_info ci;
1242 	struct hci_conn *conn;
1243 	char __user *ptr = arg + sizeof(req);
1244 
1245 	if (copy_from_user(&req, arg, sizeof(req)))
1246 		return -EFAULT;
1247 
1248 	hci_dev_lock(hdev);
1249 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1250 	if (conn) {
1251 		bacpy(&ci.bdaddr, &conn->dst);
1252 		ci.handle = conn->handle;
1253 		ci.type  = conn->type;
1254 		ci.out   = conn->out;
1255 		ci.state = conn->state;
1256 		ci.link_mode = get_link_mode(conn);
1257 	}
1258 	hci_dev_unlock(hdev);
1259 
1260 	if (!conn)
1261 		return -ENOENT;
1262 
1263 	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1264 }
1265 
1266 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1267 {
1268 	struct hci_auth_info_req req;
1269 	struct hci_conn *conn;
1270 
1271 	if (copy_from_user(&req, arg, sizeof(req)))
1272 		return -EFAULT;
1273 
1274 	hci_dev_lock(hdev);
1275 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1276 	if (conn)
1277 		req.type = conn->auth_type;
1278 	hci_dev_unlock(hdev);
1279 
1280 	if (!conn)
1281 		return -ENOENT;
1282 
1283 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1284 }
1285 
1286 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1287 {
1288 	struct hci_dev *hdev = conn->hdev;
1289 	struct hci_chan *chan;
1290 
1291 	BT_DBG("%s hcon %p", hdev->name, conn);
1292 
1293 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1294 	if (!chan)
1295 		return NULL;
1296 
1297 	chan->conn = conn;
1298 	skb_queue_head_init(&chan->data_q);
1299 	chan->state = BT_CONNECTED;
1300 
1301 	list_add_rcu(&chan->list, &conn->chan_list);
1302 
1303 	return chan;
1304 }
1305 
/* Detach @chan from its connection and free it.
 *
 * May sleep: synchronize_rcu() blocks until all current RCU readers
 * finish, so this must not be called from atomic context.
 */
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	/* Unlink first so new RCU lookups can no longer find the channel */
	list_del_rcu(&chan->list);

	/* Wait out readers (e.g. hci_chan_lookup_handle()) that may still
	 * hold a pointer obtained before the unlink above.
	 */
	synchronize_rcu();

	/* Drop a connection reference; presumably balances a
	 * hci_conn_hold() taken by the channel's owner — the hold side is
	 * not visible in this file chunk (NOTE(review): confirm).
	 */
	hci_conn_drop(conn);

	/* No one can reach the channel now; discard queued data and free */
	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
1322 
1323 void hci_chan_list_flush(struct hci_conn *conn)
1324 {
1325 	struct hci_chan *chan, *n;
1326 
1327 	BT_DBG("hcon %p", conn);
1328 
1329 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1330 		hci_chan_del(chan);
1331 }
1332 
1333 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1334 						 __u16 handle)
1335 {
1336 	struct hci_chan *hchan;
1337 
1338 	list_for_each_entry(hchan, &hcon->chan_list, list) {
1339 		if (hchan->handle == handle)
1340 			return hchan;
1341 	}
1342 
1343 	return NULL;
1344 }
1345 
1346 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1347 {
1348 	struct hci_conn_hash *h = &hdev->conn_hash;
1349 	struct hci_conn *hcon;
1350 	struct hci_chan *hchan = NULL;
1351 
1352 	rcu_read_lock();
1353 
1354 	list_for_each_entry_rcu(hcon, &h->list, list) {
1355 		hchan = __hci_chan_lookup_handle(hcon, handle);
1356 		if (hchan)
1357 			break;
1358 	}
1359 
1360 	rcu_read_unlock();
1361 
1362 	return hchan;
1363 }
1364