xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision f7777dcc)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI connection handling. */
26 
27 #include <linux/export.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/a2mp.h>
32 #include <net/bluetooth/smp.h>
33 
34 struct sco_param {
35 	u16 pkt_type;
36 	u16 max_latency;
37 };
38 
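/* eSCO parameter tables, tried in order: hci_setup_sync() uses entry
 * (conn->attempt - 1), so every retry falls back to the next parameter set.
 */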
39 static const struct sco_param sco_param_cvsd[] = {
40 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */
41 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */
42 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007 }, /* S1 */
43 	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff }, /* D1 */
44 	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff }, /* D0 */
45 };
46 
47 static const struct sco_param sco_param_wideband[] = {
48 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */
49 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008 }, /* T1 */
50 };
51 
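/* Kick off an outgoing LE connection with the LE Create Connection command;
 * the connection is marked as an outgoing master link with low security
 * until the controller reports completion.
 */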
52 static void hci_le_create_connection(struct hci_conn *conn)
53 {
54 	struct hci_dev *hdev = conn->hdev;
55 	struct hci_cp_le_create_conn cp;
56 
57 	conn->state = BT_CONNECT;
58 	conn->out = true;
59 	conn->link_mode |= HCI_LM_MASTER;
60 	conn->sec_level = BT_SECURITY_LOW;
61 
62 	memset(&cp, 0, sizeof(cp));
63 	cp.scan_interval = __constant_cpu_to_le16(0x0060);
64 	cp.scan_window = __constant_cpu_to_le16(0x0030);
65 	bacpy(&cp.peer_addr, &conn->dst);
66 	cp.peer_addr_type = conn->dst_type;
67 	cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
68 	cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
69 	cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
70 	cp.min_ce_len = __constant_cpu_to_le16(0x0000);
71 	cp.max_ce_len = __constant_cpu_to_le16(0x0000);
72 
73 	hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
74 }
75 
76 static void hci_le_create_connection_cancel(struct hci_conn *conn)
77 {
78 	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
79 }
80 
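/* Start an outgoing BR/EDR ACL connection, seeding page scan mode and clock
 * offset from the inquiry cache when a sufficiently recent entry exists.
 */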
81 static void hci_acl_create_connection(struct hci_conn *conn)
82 {
83 	struct hci_dev *hdev = conn->hdev;
84 	struct inquiry_entry *ie;
85 	struct hci_cp_create_conn cp;
86 
87 	BT_DBG("hcon %p", conn);
88 
89 	conn->state = BT_CONNECT;
90 	conn->out = true;
91 
92 	conn->link_mode = HCI_LM_MASTER;
93 
94 	conn->attempt++;
95 
96 	conn->link_policy = hdev->link_policy;
97 
98 	memset(&cp, 0, sizeof(cp));
99 	bacpy(&cp.bdaddr, &conn->dst);
100 	cp.pscan_rep_mode = 0x02;
101 
102 	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
103 	if (ie) {
104 		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
105 			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
106 			cp.pscan_mode     = ie->data.pscan_mode;
107 			cp.clock_offset   = ie->data.clock_offset |
108 					    __constant_cpu_to_le16(0x8000);
109 		}
110 
111 		memcpy(conn->dev_class, ie->data.dev_class, 3);
112 		if (ie->data.ssp_mode > 0)
113 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
114 	}
115 
116 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
117 	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
118 		cp.role_switch = 0x01;
119 	else
120 		cp.role_switch = 0x00;
121 
122 	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
123 }
124 
125 static void hci_acl_create_connection_cancel(struct hci_conn *conn)
126 {
127 	struct hci_cp_create_conn_cancel cp;
128 
129 	BT_DBG("hcon %p", conn);
130 
131 	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
132 		return;
133 
134 	bacpy(&cp.bdaddr, &conn->dst);
135 	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
136 }
137 
138 static void hci_reject_sco(struct hci_conn *conn)
139 {
140 	struct hci_cp_reject_sync_conn_req cp;
141 
142 	cp.reason = HCI_ERROR_REMOTE_USER_TERM;
143 	bacpy(&cp.bdaddr, &conn->dst);
144 
145 	hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
146 }
147 
148 void hci_disconnect(struct hci_conn *conn, __u8 reason)
149 {
150 	struct hci_cp_disconnect cp;
151 
152 	BT_DBG("hcon %p", conn);
153 
154 	conn->state = BT_DISCONN;
155 
156 	cp.handle = cpu_to_le16(conn->handle);
157 	cp.reason = reason;
158 	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
159 }
160 
161 static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
162 {
163 	struct hci_cp_disconn_phy_link cp;
164 
165 	BT_DBG("hcon %p", conn);
166 
167 	conn->state = BT_DISCONN;
168 
169 	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
170 	cp.reason = reason;
171 	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
172 		     sizeof(cp), &cp);
173 }
174 
175 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
176 {
177 	struct hci_dev *hdev = conn->hdev;
178 	struct hci_cp_add_sco cp;
179 
180 	BT_DBG("hcon %p", conn);
181 
182 	conn->state = BT_CONNECT;
183 	conn->out = true;
184 
185 	conn->attempt++;
186 
187 	cp.handle   = cpu_to_le16(handle);
188 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
189 
190 	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
191 }
192 
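/* Set up a synchronous (eSCO) connection on top of an existing ACL link.
 * The air mode in conn->setting selects the transparent (wideband) or CVSD
 * parameter table; returns false when the air mode is not handled, when all
 * parameter sets have been tried, or when the command cannot be sent.
 */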
193 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
194 {
195 	struct hci_dev *hdev = conn->hdev;
196 	struct hci_cp_setup_sync_conn cp;
197 	const struct sco_param *param;
198 
199 	BT_DBG("hcon %p", conn);
200 
201 	conn->state = BT_CONNECT;
202 	conn->out = true;
203 
204 	conn->attempt++;
205 
206 	cp.handle   = cpu_to_le16(handle);
207 
208 	cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
209 	cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
210 	cp.voice_setting  = cpu_to_le16(conn->setting);
211 
212 	switch (conn->setting & SCO_AIRMODE_MASK) {
213 	case SCO_AIRMODE_TRANSP:
214 		if (conn->attempt > ARRAY_SIZE(sco_param_wideband))
215 			return false;
216 		cp.retrans_effort = 0x02;
217 		param = &sco_param_wideband[conn->attempt - 1];
218 		break;
219 	case SCO_AIRMODE_CVSD:
220 		if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
221 			return false;
222 		cp.retrans_effort = 0x01;
223 		param = &sco_param_cvsd[conn->attempt - 1];
224 		break;
225 	default:
226 		return false;
227 	}
228 
229 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
230 	cp.max_latency = __cpu_to_le16(param->max_latency);
231 
232 	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
233 		return false;
234 
235 	return true;
236 }
237 
238 void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
239 			u16 latency, u16 to_multiplier)
240 {
241 	struct hci_cp_le_conn_update cp;
242 	struct hci_dev *hdev = conn->hdev;
243 
244 	memset(&cp, 0, sizeof(cp));
245 
246 	cp.handle		= cpu_to_le16(conn->handle);
247 	cp.conn_interval_min	= cpu_to_le16(min);
248 	cp.conn_interval_max	= cpu_to_le16(max);
249 	cp.conn_latency		= cpu_to_le16(latency);
250 	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
251 	cp.min_ce_len		= __constant_cpu_to_le16(0x0001);
252 	cp.max_ce_len		= __constant_cpu_to_le16(0x0001);
253 
254 	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
255 }
256 
257 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
258 		      __u8 ltk[16])
259 {
260 	struct hci_dev *hdev = conn->hdev;
261 	struct hci_cp_le_start_enc cp;
262 
263 	BT_DBG("hcon %p", conn);
264 
265 	memset(&cp, 0, sizeof(cp));
266 
267 	cp.handle = cpu_to_le16(conn->handle);
268 	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
269 	cp.ediv = ediv;
270 	memcpy(cp.rand, rand, sizeof(cp.rand));
271 
272 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
273 }
274 
275 /* Device _must_ be locked */
276 void hci_sco_setup(struct hci_conn *conn, __u8 status)
277 {
278 	struct hci_conn *sco = conn->link;
279 
280 	if (!sco)
281 		return;
282 
283 	BT_DBG("hcon %p", conn);
284 
285 	if (!status) {
286 		if (lmp_esco_capable(conn->hdev))
287 			hci_setup_sync(sco, conn->handle);
288 		else
289 			hci_add_sco(sco, conn->handle);
290 	} else {
291 		hci_proto_connect_cfm(sco, status);
292 		hci_conn_del(sco);
293 	}
294 }
295 
296 static void hci_conn_disconnect(struct hci_conn *conn)
297 {
298 	__u8 reason = hci_proto_disconn_ind(conn);
299 
300 	switch (conn->type) {
301 	case AMP_LINK:
302 		hci_amp_disconn(conn, reason);
303 		break;
304 	default:
305 		hci_disconnect(conn, reason);
306 		break;
307 	}
308 }
309 
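/* Disconnect timeout work: bails out while the connection still has users,
 * otherwise cancels a pending connection attempt, rejects an incoming
 * (e)SCO request, or disconnects an established link depending on state.
 */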
310 static void hci_conn_timeout(struct work_struct *work)
311 {
312 	struct hci_conn *conn = container_of(work, struct hci_conn,
313 					     disc_work.work);
314 
315 	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
316 
317 	if (atomic_read(&conn->refcnt))
318 		return;
319 
320 	switch (conn->state) {
321 	case BT_CONNECT:
322 	case BT_CONNECT2:
323 		if (conn->out) {
324 			if (conn->type == ACL_LINK)
325 				hci_acl_create_connection_cancel(conn);
326 			else if (conn->type == LE_LINK)
327 				hci_le_create_connection_cancel(conn);
328 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
329 			hci_reject_sco(conn);
330 		}
331 		break;
332 	case BT_CONFIG:
333 	case BT_CONNECTED:
334 		hci_conn_disconnect(conn);
335 		break;
336 	default:
337 		conn->state = BT_CLOSED;
338 		break;
339 	}
340 }
341 
342 /* Enter sniff mode */
343 static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
344 {
345 	struct hci_dev *hdev = conn->hdev;
346 
347 	BT_DBG("hcon %p mode %d", conn, conn->mode);
348 
349 	if (test_bit(HCI_RAW, &hdev->flags))
350 		return;
351 
352 	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
353 		return;
354 
355 	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
356 		return;
357 
358 	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
359 		struct hci_cp_sniff_subrate cp;
360 		cp.handle             = cpu_to_le16(conn->handle);
361 		cp.max_latency        = __constant_cpu_to_le16(0);
362 		cp.min_remote_timeout = __constant_cpu_to_le16(0);
363 		cp.min_local_timeout  = __constant_cpu_to_le16(0);
364 		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
365 	}
366 
367 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
368 		struct hci_cp_sniff_mode cp;
369 		cp.handle       = cpu_to_le16(conn->handle);
370 		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
371 		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
372 		cp.attempt      = __constant_cpu_to_le16(4);
373 		cp.timeout      = __constant_cpu_to_le16(1);
374 		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
375 	}
376 }
377 
378 static void hci_conn_idle(unsigned long arg)
379 {
380 	struct hci_conn *conn = (void *) arg;
381 
382 	BT_DBG("hcon %p mode %d", conn, conn->mode);
383 
384 	hci_conn_enter_sniff_mode(conn);
385 }
386 
387 static void hci_conn_auto_accept(unsigned long arg)
388 {
389 	struct hci_conn *conn = (void *) arg;
390 	struct hci_dev *hdev = conn->hdev;
391 
392 	hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
393 		     &conn->dst);
394 }
395 
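/* Allocate and initialise a new hci_conn of the given type, choose default
 * packet types from the adapter's capabilities, set up its timers and work
 * items, and register it in the connection hash.
 */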
396 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
397 {
398 	struct hci_conn *conn;
399 
400 	BT_DBG("%s dst %pMR", hdev->name, dst);
401 
402 	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
403 	if (!conn)
404 		return NULL;
405 
406 	bacpy(&conn->dst, dst);
407 	conn->hdev  = hdev;
408 	conn->type  = type;
409 	conn->mode  = HCI_CM_ACTIVE;
410 	conn->state = BT_OPEN;
411 	conn->auth_type = HCI_AT_GENERAL_BONDING;
412 	conn->io_capability = hdev->io_capability;
413 	conn->remote_auth = 0xff;
414 	conn->key_type = 0xff;
415 
416 	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
417 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
418 
419 	switch (type) {
420 	case ACL_LINK:
421 		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
422 		break;
423 	case SCO_LINK:
424 		if (lmp_esco_capable(hdev))
425 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
426 					(hdev->esco_type & EDR_ESCO_MASK);
427 		else
428 			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
429 		break;
430 	case ESCO_LINK:
431 		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
432 		break;
433 	}
434 
435 	skb_queue_head_init(&conn->data_q);
436 
437 	INIT_LIST_HEAD(&conn->chan_list);
438 
439 	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
440 	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
441 	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
442 		    (unsigned long) conn);
443 
444 	atomic_set(&conn->refcnt, 0);
445 
446 	hci_dev_hold(hdev);
447 
448 	hci_conn_hash_add(hdev, conn);
449 	if (hdev->notify)
450 		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
451 
452 	hci_conn_init_sysfs(conn);
453 
454 	return conn;
455 }
456 
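/* Remove a connection: stop its timers and disconnect work, return any
 * unacknowledged packet credits to the adapter, detach a linked SCO/ACL
 * peer, flush queued data and drop the references held on the adapter
 * and the connection.
 */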
457 int hci_conn_del(struct hci_conn *conn)
458 {
459 	struct hci_dev *hdev = conn->hdev;
460 
461 	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
462 
463 	del_timer(&conn->idle_timer);
464 
465 	cancel_delayed_work_sync(&conn->disc_work);
466 
467 	del_timer(&conn->auto_accept_timer);
468 
469 	if (conn->type == ACL_LINK) {
470 		struct hci_conn *sco = conn->link;
471 		if (sco)
472 			sco->link = NULL;
473 
474 		/* Unacked frames */
475 		hdev->acl_cnt += conn->sent;
476 	} else if (conn->type == LE_LINK) {
477 		if (hdev->le_pkts)
478 			hdev->le_cnt += conn->sent;
479 		else
480 			hdev->acl_cnt += conn->sent;
481 	} else {
482 		struct hci_conn *acl = conn->link;
483 		if (acl) {
484 			acl->link = NULL;
485 			hci_conn_drop(acl);
486 		}
487 	}
488 
489 	hci_chan_list_flush(conn);
490 
491 	if (conn->amp_mgr)
492 		amp_mgr_put(conn->amp_mgr);
493 
494 	hci_conn_hash_del(hdev, conn);
495 	if (hdev->notify)
496 		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
497 
498 	skb_queue_purge(&conn->data_q);
499 
500 	hci_conn_del_sysfs(conn);
501 
502 	hci_dev_put(hdev);
503 
504 	hci_conn_put(conn);
505 
506 	return 0;
507 }
508 
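/* Select the local adapter to originate a connection: with a specific
 * source address, pick the adapter owning that address; otherwise pick the
 * first usable BR/EDR adapter whose address differs from the destination.
 * Returns a held hci_dev or NULL.
 */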
509 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
510 {
511 	int use_src = bacmp(src, BDADDR_ANY);
512 	struct hci_dev *hdev = NULL, *d;
513 
514 	BT_DBG("%pMR -> %pMR", src, dst);
515 
516 	read_lock(&hci_dev_list_lock);
517 
518 	list_for_each_entry(d, &hci_dev_list, list) {
519 		if (!test_bit(HCI_UP, &d->flags) ||
520 		    test_bit(HCI_RAW, &d->flags) ||
521 		    d->dev_type != HCI_BREDR)
522 			continue;
523 
524 		/* Simple routing:
525 		 *   No source address - find interface with bdaddr != dst
526 		 *   Source address    - find interface with bdaddr == src
527 		 */
528 
529 		if (use_src) {
530 			if (!bacmp(&d->bdaddr, src)) {
531 				hdev = d; break;
532 			}
533 		} else {
534 			if (bacmp(&d->bdaddr, dst)) {
535 				hdev = d; break;
536 			}
537 		}
538 	}
539 
540 	if (hdev)
541 		hdev = hci_dev_hold(hdev);
542 
543 	read_unlock(&hci_dev_list_lock);
544 	return hdev;
545 }
546 EXPORT_SYMBOL(hci_get_route);
547 
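/* Look up or create an LE connection to the destination. The request is
 * rejected while the adapter is in the LE peripheral role, and only one
 * outgoing LE connection attempt may be pending at a time.
 */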
548 static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
549 				    u8 dst_type, u8 sec_level, u8 auth_type)
550 {
551 	struct hci_conn *le;
552 
553 	if (test_bit(HCI_LE_PERIPHERAL, &hdev->flags))
554 		return ERR_PTR(-ENOTSUPP);
555 
556 	le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
557 	if (!le) {
558 		le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
559 		if (le)
560 			return ERR_PTR(-EBUSY);
561 
562 		le = hci_conn_add(hdev, LE_LINK, dst);
563 		if (!le)
564 			return ERR_PTR(-ENOMEM);
565 
566 		le->dst_type = bdaddr_to_le(dst_type);
567 		hci_le_create_connection(le);
568 	}
569 
570 	le->pending_sec_level = sec_level;
571 	le->auth_type = auth_type;
572 
573 	hci_conn_hold(le);
574 
575 	return le;
576 }
577 
578 static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
579 						u8 sec_level, u8 auth_type)
580 {
581 	struct hci_conn *acl;
582 
583 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
584 	if (!acl) {
585 		acl = hci_conn_add(hdev, ACL_LINK, dst);
586 		if (!acl)
587 			return ERR_PTR(-ENOMEM);
588 	}
589 
590 	hci_conn_hold(acl);
591 
592 	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
593 		acl->sec_level = BT_SECURITY_LOW;
594 		acl->pending_sec_level = sec_level;
595 		acl->auth_type = auth_type;
596 		hci_acl_create_connection(acl);
597 	}
598 
599 	return acl;
600 }
601 
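/* Create a SCO/eSCO connection: first obtain (or reuse) the underlying ACL
 * link, then start synchronous connection setup once the ACL is connected,
 * deferring it while a mode change is still pending.
 */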
602 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
603 				 __u16 setting)
604 {
605 	struct hci_conn *acl;
606 	struct hci_conn *sco;
607 
608 	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
609 	if (IS_ERR(acl))
610 		return acl;
611 
612 	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
613 	if (!sco) {
614 		sco = hci_conn_add(hdev, type, dst);
615 		if (!sco) {
616 			hci_conn_drop(acl);
617 			return ERR_PTR(-ENOMEM);
618 		}
619 	}
620 
621 	acl->link = sco;
622 	sco->link = acl;
623 
624 	hci_conn_hold(sco);
625 
626 	sco->setting = setting;
627 
628 	if (acl->state == BT_CONNECTED &&
629 	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
630 		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
631 		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
632 
633 		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
634 			/* defer SCO setup until mode change completed */
635 			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
636 			return sco;
637 		}
638 
639 		hci_sco_setup(acl, 0x00);
640 	}
641 
642 	return sco;
643 }
644 
645 /* Create SCO, ACL or LE connection. */
646 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
647 			     __u8 dst_type, __u8 sec_level, __u8 auth_type)
648 {
649 	BT_DBG("%s dst %pMR type 0x%x", hdev->name, dst, type);
650 
651 	switch (type) {
652 	case LE_LINK:
653 		return hci_connect_le(hdev, dst, dst_type, sec_level, auth_type);
654 	case ACL_LINK:
655 		return hci_connect_acl(hdev, dst, sec_level, auth_type);
656 	}
657 
658 	return ERR_PTR(-EINVAL);
659 }
660 
661 /* Check link security requirement */
662 int hci_conn_check_link_mode(struct hci_conn *conn)
663 {
664 	BT_DBG("hcon %p", conn);
665 
666 	if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
667 		return 0;
668 
669 	return 1;
670 }
671 
672 /* Authenticate remote device */
673 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
674 {
675 	BT_DBG("hcon %p", conn);
676 
677 	if (conn->pending_sec_level > sec_level)
678 		sec_level = conn->pending_sec_level;
679 
680 	if (sec_level > conn->sec_level)
681 		conn->pending_sec_level = sec_level;
682 	else if (conn->link_mode & HCI_LM_AUTH)
683 		return 1;
684 
685 	/* Make sure we preserve an existing MITM requirement */
686 	auth_type |= (conn->auth_type & 0x01);
687 
688 	conn->auth_type = auth_type;
689 
690 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
691 		struct hci_cp_auth_requested cp;
692 
693 		/* encrypt must be pending if auth is also pending */
694 		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
695 
696 		cp.handle = cpu_to_le16(conn->handle);
697 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
698 			     sizeof(cp), &cp);
699 		if (conn->key_type != 0xff)
700 			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
701 	}
702 
703 	return 0;
704 }
705 
706 /* Encrypt the link */
707 static void hci_conn_encrypt(struct hci_conn *conn)
708 {
709 	BT_DBG("hcon %p", conn);
710 
711 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
712 		struct hci_cp_set_conn_encrypt cp;
713 		cp.handle  = cpu_to_le16(conn->handle);
714 		cp.encrypt = 0x01;
715 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
716 			     &cp);
717 	}
718 }
719 
720 /* Enable security */
721 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
722 {
723 	BT_DBG("hcon %p", conn);
724 
725 	if (conn->type == LE_LINK)
726 		return smp_conn_security(conn, sec_level);
727 
728 	/* For sdp we don't need the link key. */
729 	if (sec_level == BT_SECURITY_SDP)
730 		return 1;
731 
732 	/* For non-2.1 devices and a low security level we don't need the
733 	   link key. */
734 	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
735 		return 1;
736 
737 	/* For other security levels we need the link key. */
738 	if (!(conn->link_mode & HCI_LM_AUTH))
739 		goto auth;
740 
741 	/* An authenticated combination key has sufficient security for any
742 	   security level. */
743 	if (conn->key_type == HCI_LK_AUTH_COMBINATION)
744 		goto encrypt;
745 
746 	/* An unauthenticated combination key has sufficient security for
747 	   security levels 1 and 2. */
748 	if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
749 	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
750 		goto encrypt;
751 
752 	/* A combination key always has sufficient security for security
753 	   levels 1 and 2. A high security level requires that the combination
754 	   key was generated using the maximum PIN code length (16).
755 	   This applies to pre-2.1 units. */
756 	if (conn->key_type == HCI_LK_COMBINATION &&
757 	    (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
758 		goto encrypt;
759 
760 auth:
761 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
762 		return 0;
763 
764 	if (!hci_conn_auth(conn, sec_level, auth_type))
765 		return 0;
766 
767 encrypt:
768 	if (conn->link_mode & HCI_LM_ENCRYPT)
769 		return 1;
770 
771 	hci_conn_encrypt(conn);
772 	return 0;
773 }
774 EXPORT_SYMBOL(hci_conn_security);
775 
776 /* Check secure link requirement */
777 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
778 {
779 	BT_DBG("hcon %p", conn);
780 
781 	if (sec_level != BT_SECURITY_HIGH)
782 		return 1; /* Accept if non-secure is required */
783 
784 	if (conn->sec_level == BT_SECURITY_HIGH)
785 		return 1;
786 
787 	return 0; /* Reject not secure link */
788 }
789 EXPORT_SYMBOL(hci_conn_check_secure);
790 
791 /* Change link key */
792 int hci_conn_change_link_key(struct hci_conn *conn)
793 {
794 	BT_DBG("hcon %p", conn);
795 
796 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
797 		struct hci_cp_change_conn_link_key cp;
798 		cp.handle = cpu_to_le16(conn->handle);
799 		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
800 			     sizeof(cp), &cp);
801 	}
802 
803 	return 0;
804 }
805 
806 /* Switch role */
807 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
808 {
809 	BT_DBG("hcon %p", conn);
810 
811 	if (!role && conn->link_mode & HCI_LM_MASTER)
812 		return 1;
813 
814 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
815 		struct hci_cp_switch_role cp;
816 		bacpy(&cp.bdaddr, &conn->dst);
817 		cp.role = role;
818 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
819 	}
820 
821 	return 0;
822 }
823 EXPORT_SYMBOL(hci_conn_switch_role);
824 
825 /* Enter active mode */
826 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
827 {
828 	struct hci_dev *hdev = conn->hdev;
829 
830 	BT_DBG("hcon %p mode %d", conn, conn->mode);
831 
832 	if (test_bit(HCI_RAW, &hdev->flags))
833 		return;
834 
835 	if (conn->mode != HCI_CM_SNIFF)
836 		goto timer;
837 
838 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
839 		goto timer;
840 
841 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
842 		struct hci_cp_exit_sniff_mode cp;
843 		cp.handle = cpu_to_le16(conn->handle);
844 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
845 	}
846 
847 timer:
848 	if (hdev->idle_timeout > 0)
849 		mod_timer(&conn->idle_timer,
850 			  jiffies + msecs_to_jiffies(hdev->idle_timeout));
851 }
852 
853 /* Drop all connections on the device */
854 void hci_conn_hash_flush(struct hci_dev *hdev)
855 {
856 	struct hci_conn_hash *h = &hdev->conn_hash;
857 	struct hci_conn *c, *n;
858 
859 	BT_DBG("hdev %s", hdev->name);
860 
861 	list_for_each_entry_safe(c, n, &h->list, list) {
862 		c->state = BT_CLOSED;
863 
864 		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
865 		hci_conn_del(c);
866 	}
867 }
868 
869 /* Check pending connect attempts */
870 void hci_conn_check_pending(struct hci_dev *hdev)
871 {
872 	struct hci_conn *conn;
873 
874 	BT_DBG("hdev %s", hdev->name);
875 
876 	hci_dev_lock(hdev);
877 
878 	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
879 	if (conn)
880 		hci_acl_create_connection(conn);
881 
882 	hci_dev_unlock(hdev);
883 }
884 
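/* Userspace helper: copy a snapshot of up to req.conn_num active
 * connections into the caller's buffer.
 */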
885 int hci_get_conn_list(void __user *arg)
886 {
887 	struct hci_conn *c;
888 	struct hci_conn_list_req req, *cl;
889 	struct hci_conn_info *ci;
890 	struct hci_dev *hdev;
891 	int n = 0, size, err;
892 
893 	if (copy_from_user(&req, arg, sizeof(req)))
894 		return -EFAULT;
895 
896 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
897 		return -EINVAL;
898 
899 	size = sizeof(req) + req.conn_num * sizeof(*ci);
900 
901 	cl = kmalloc(size, GFP_KERNEL);
902 	if (!cl)
903 		return -ENOMEM;
904 
905 	hdev = hci_dev_get(req.dev_id);
906 	if (!hdev) {
907 		kfree(cl);
908 		return -ENODEV;
909 	}
910 
911 	ci = cl->conn_info;
912 
913 	hci_dev_lock(hdev);
914 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
915 		bacpy(&(ci + n)->bdaddr, &c->dst);
916 		(ci + n)->handle = c->handle;
917 		(ci + n)->type  = c->type;
918 		(ci + n)->out   = c->out;
919 		(ci + n)->state = c->state;
920 		(ci + n)->link_mode = c->link_mode;
921 		if (++n >= req.conn_num)
922 			break;
923 	}
924 	hci_dev_unlock(hdev);
925 
926 	cl->dev_id = hdev->id;
927 	cl->conn_num = n;
928 	size = sizeof(req) + n * sizeof(*ci);
929 
930 	hci_dev_put(hdev);
931 
932 	err = copy_to_user(arg, cl, size);
933 	kfree(cl);
934 
935 	return err ? -EFAULT : 0;
936 }
937 
938 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
939 {
940 	struct hci_conn_info_req req;
941 	struct hci_conn_info ci;
942 	struct hci_conn *conn;
943 	char __user *ptr = arg + sizeof(req);
944 
945 	if (copy_from_user(&req, arg, sizeof(req)))
946 		return -EFAULT;
947 
948 	hci_dev_lock(hdev);
949 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
950 	if (conn) {
951 		bacpy(&ci.bdaddr, &conn->dst);
952 		ci.handle = conn->handle;
953 		ci.type  = conn->type;
954 		ci.out   = conn->out;
955 		ci.state = conn->state;
956 		ci.link_mode = conn->link_mode;
957 	}
958 	hci_dev_unlock(hdev);
959 
960 	if (!conn)
961 		return -ENOENT;
962 
963 	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
964 }
965 
966 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
967 {
968 	struct hci_auth_info_req req;
969 	struct hci_conn *conn;
970 
971 	if (copy_from_user(&req, arg, sizeof(req)))
972 		return -EFAULT;
973 
974 	hci_dev_lock(hdev);
975 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
976 	if (conn)
977 		req.type = conn->auth_type;
978 	hci_dev_unlock(hdev);
979 
980 	if (!conn)
981 		return -ENOENT;
982 
983 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
984 }
985 
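/* Allocate a new channel on the given connection and add it to the
 * connection's channel list.
 */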
986 struct hci_chan *hci_chan_create(struct hci_conn *conn)
987 {
988 	struct hci_dev *hdev = conn->hdev;
989 	struct hci_chan *chan;
990 
991 	BT_DBG("%s hcon %p", hdev->name, conn);
992 
993 	chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
994 	if (!chan)
995 		return NULL;
996 
997 	chan->conn = conn;
998 	skb_queue_head_init(&chan->data_q);
999 	chan->state = BT_CONNECTED;
1000 
1001 	list_add_rcu(&chan->list, &conn->chan_list);
1002 
1003 	return chan;
1004 }
1005 
1006 void hci_chan_del(struct hci_chan *chan)
1007 {
1008 	struct hci_conn *conn = chan->conn;
1009 	struct hci_dev *hdev = conn->hdev;
1010 
1011 	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1012 
1013 	list_del_rcu(&chan->list);
1014 
1015 	synchronize_rcu();
1016 
1017 	hci_conn_drop(conn);
1018 
1019 	skb_queue_purge(&chan->data_q);
1020 	kfree(chan);
1021 }
1022 
1023 void hci_chan_list_flush(struct hci_conn *conn)
1024 {
1025 	struct hci_chan *chan, *n;
1026 
1027 	BT_DBG("hcon %p", conn);
1028 
1029 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1030 		hci_chan_del(chan);
1031 }
1032 
1033 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1034 						 __u16 handle)
1035 {
1036 	struct hci_chan *hchan;
1037 
1038 	list_for_each_entry(hchan, &hcon->chan_list, list) {
1039 		if (hchan->handle == handle)
1040 			return hchan;
1041 	}
1042 
1043 	return NULL;
1044 }
1045 
1046 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1047 {
1048 	struct hci_conn_hash *h = &hdev->conn_hash;
1049 	struct hci_conn *hcon;
1050 	struct hci_chan *hchan = NULL;
1051 
1052 	rcu_read_lock();
1053 
1054 	list_for_each_entry_rcu(hcon, &h->list, list) {
1055 		hchan = __hci_chan_lookup_handle(hcon, handle);
1056 		if (hchan)
1057 			break;
1058 	}
1059 
1060 	rcu_read_unlock();
1061 
1062 	return hchan;
1063 }
1064