xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision 7b5a9241b780ea2f77e71647bc0d3c9708c18ef1)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI connection handling. */
26 
27 #include <linux/export.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 
32 #include "smp.h"
33 #include "a2mp.h"
34 
35 struct sco_param {
36 	u16 pkt_type;
37 	u16 max_latency;
38 };
39 
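/* eSCO packet type and maximum latency parameter sets.  hci_setup_sync()
 * walks these tables on successive connection attempts (indexed by
 * conn->attempt), falling back from the preferred set (S3 for CVSD, T2
 * for transparent data) to the more conservative ones.
 */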
40 static const struct sco_param sco_param_cvsd[] = {
41 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */
42 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */
43 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007 }, /* S1 */
44 	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff }, /* D1 */
45 	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff }, /* D0 */
46 };
47 
48 static const struct sco_param sco_param_wideband[] = {
49 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */
50 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008 }, /* T1 */
51 };
52 
53 static void hci_le_create_connection_cancel(struct hci_conn *conn)
54 {
55 	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
56 }
57 
58 static void hci_acl_create_connection(struct hci_conn *conn)
59 {
60 	struct hci_dev *hdev = conn->hdev;
61 	struct inquiry_entry *ie;
62 	struct hci_cp_create_conn cp;
63 
64 	BT_DBG("hcon %p", conn);
65 
66 	conn->state = BT_CONNECT;
67 	conn->out = true;
68 
69 	conn->link_mode = HCI_LM_MASTER;
70 
71 	conn->attempt++;
72 
73 	conn->link_policy = hdev->link_policy;
74 
75 	memset(&cp, 0, sizeof(cp));
76 	bacpy(&cp.bdaddr, &conn->dst);
77 	cp.pscan_rep_mode = 0x02;
78 
79 	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
80 	if (ie) {
81 		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
82 			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
83 			cp.pscan_mode     = ie->data.pscan_mode;
84 			cp.clock_offset   = ie->data.clock_offset |
85 					    __constant_cpu_to_le16(0x8000);
86 		}
87 
88 		memcpy(conn->dev_class, ie->data.dev_class, 3);
89 		if (ie->data.ssp_mode > 0)
90 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
91 	}
92 
93 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
94 	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
95 		cp.role_switch = 0x01;
96 	else
97 		cp.role_switch = 0x00;
98 
99 	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
100 }
101 
102 static void hci_acl_create_connection_cancel(struct hci_conn *conn)
103 {
104 	struct hci_cp_create_conn_cancel cp;
105 
106 	BT_DBG("hcon %p", conn);
107 
108 	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
109 		return;
110 
111 	bacpy(&cp.bdaddr, &conn->dst);
112 	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
113 }
114 
115 static void hci_reject_sco(struct hci_conn *conn)
116 {
117 	struct hci_cp_reject_sync_conn_req cp;
118 
119 	cp.reason = HCI_ERROR_REMOTE_USER_TERM;
120 	bacpy(&cp.bdaddr, &conn->dst);
121 
122 	hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
123 }
124 
125 void hci_disconnect(struct hci_conn *conn, __u8 reason)
126 {
127 	struct hci_cp_disconnect cp;
128 
129 	BT_DBG("hcon %p", conn);
130 
131 	conn->state = BT_DISCONN;
132 
133 	cp.handle = cpu_to_le16(conn->handle);
134 	cp.reason = reason;
135 	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
136 }
137 
138 static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
139 {
140 	struct hci_cp_disconn_phy_link cp;
141 
142 	BT_DBG("hcon %p", conn);
143 
144 	conn->state = BT_DISCONN;
145 
146 	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
147 	cp.reason = reason;
148 	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
149 		     sizeof(cp), &cp);
150 }
151 
152 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
153 {
154 	struct hci_dev *hdev = conn->hdev;
155 	struct hci_cp_add_sco cp;
156 
157 	BT_DBG("hcon %p", conn);
158 
159 	conn->state = BT_CONNECT;
160 	conn->out = true;
161 
162 	conn->attempt++;
163 
164 	cp.handle   = cpu_to_le16(handle);
165 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
166 
167 	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
168 }
169 
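/* Set up a synchronous (eSCO) connection on top of the given ACL handle.
 * The packet type and maximum latency are chosen from the parameter
 * tables above according to the requested air mode and the current
 * attempt count.  Returns false once the parameter sets are exhausted or
 * the HCI command cannot be sent.
 */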
170 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
171 {
172 	struct hci_dev *hdev = conn->hdev;
173 	struct hci_cp_setup_sync_conn cp;
174 	const struct sco_param *param;
175 
176 	BT_DBG("hcon %p", conn);
177 
178 	conn->state = BT_CONNECT;
179 	conn->out = true;
180 
181 	conn->attempt++;
182 
183 	cp.handle   = cpu_to_le16(handle);
184 
185 	cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
186 	cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
187 	cp.voice_setting  = cpu_to_le16(conn->setting);
188 
189 	switch (conn->setting & SCO_AIRMODE_MASK) {
190 	case SCO_AIRMODE_TRANSP:
191 		if (conn->attempt > ARRAY_SIZE(sco_param_wideband))
192 			return false;
193 		cp.retrans_effort = 0x02;
194 		param = &sco_param_wideband[conn->attempt - 1];
195 		break;
196 	case SCO_AIRMODE_CVSD:
197 		if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
198 			return false;
199 		cp.retrans_effort = 0x01;
200 		param = &sco_param_cvsd[conn->attempt - 1];
201 		break;
202 	default:
203 		return false;
204 	}
205 
206 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
207 	cp.max_latency = __cpu_to_le16(param->max_latency);
208 
209 	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
210 		return false;
211 
212 	return true;
213 }
214 
215 void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
216 			u16 latency, u16 to_multiplier)
217 {
218 	struct hci_cp_le_conn_update cp;
219 	struct hci_dev *hdev = conn->hdev;
220 
221 	memset(&cp, 0, sizeof(cp));
222 
223 	cp.handle		= cpu_to_le16(conn->handle);
224 	cp.conn_interval_min	= cpu_to_le16(min);
225 	cp.conn_interval_max	= cpu_to_le16(max);
226 	cp.conn_latency		= cpu_to_le16(latency);
227 	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
228 	cp.min_ce_len		= __constant_cpu_to_le16(0x0001);
229 	cp.max_ce_len		= __constant_cpu_to_le16(0x0001);
230 
231 	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
232 }
233 
234 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
235 		      __u8 ltk[16])
236 {
237 	struct hci_dev *hdev = conn->hdev;
238 	struct hci_cp_le_start_enc cp;
239 
240 	BT_DBG("hcon %p", conn);
241 
242 	memset(&cp, 0, sizeof(cp));
243 
244 	cp.handle = cpu_to_le16(conn->handle);
245 	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
246 	cp.ediv = ediv;
247 	memcpy(cp.rand, rand, sizeof(cp.rand));
248 
249 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
250 }
251 
252 /* Device _must_ be locked */
253 void hci_sco_setup(struct hci_conn *conn, __u8 status)
254 {
255 	struct hci_conn *sco = conn->link;
256 
257 	if (!sco)
258 		return;
259 
260 	BT_DBG("hcon %p", conn);
261 
262 	if (!status) {
263 		if (lmp_esco_capable(conn->hdev))
264 			hci_setup_sync(sco, conn->handle);
265 		else
266 			hci_add_sco(sco, conn->handle);
267 	} else {
268 		hci_proto_connect_cfm(sco, status);
269 		hci_conn_del(sco);
270 	}
271 }
272 
273 static void hci_conn_disconnect(struct hci_conn *conn)
274 {
275 	__u8 reason = hci_proto_disconn_ind(conn);
276 
277 	switch (conn->type) {
278 	case AMP_LINK:
279 		hci_amp_disconn(conn, reason);
280 		break;
281 	default:
282 		hci_disconnect(conn, reason);
283 		break;
284 	}
285 }
286 
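/* Disconnect timer worker.  Once the connection's reference count has
 * dropped to zero this cancels a pending outgoing connection attempt,
 * rejects an incoming SCO/eSCO request, or disconnects an established
 * link, depending on the current state.
 */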
287 static void hci_conn_timeout(struct work_struct *work)
288 {
289 	struct hci_conn *conn = container_of(work, struct hci_conn,
290 					     disc_work.work);
291 
292 	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
293 
294 	if (atomic_read(&conn->refcnt))
295 		return;
296 
297 	switch (conn->state) {
298 	case BT_CONNECT:
299 	case BT_CONNECT2:
300 		if (conn->out) {
301 			if (conn->type == ACL_LINK)
302 				hci_acl_create_connection_cancel(conn);
303 			else if (conn->type == LE_LINK)
304 				hci_le_create_connection_cancel(conn);
305 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
306 			hci_reject_sco(conn);
307 		}
308 		break;
309 	case BT_CONFIG:
310 	case BT_CONNECTED:
311 		hci_conn_disconnect(conn);
312 		break;
313 	default:
314 		conn->state = BT_CLOSED;
315 		break;
316 	}
317 }
318 
319 /* Enter sniff mode */
320 static void hci_conn_idle(struct work_struct *work)
321 {
322 	struct hci_conn *conn = container_of(work, struct hci_conn,
323 					     idle_work.work);
324 	struct hci_dev *hdev = conn->hdev;
325 
326 	BT_DBG("hcon %p mode %d", conn, conn->mode);
327 
328 	if (test_bit(HCI_RAW, &hdev->flags))
329 		return;
330 
331 	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
332 		return;
333 
334 	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
335 		return;
336 
337 	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
338 		struct hci_cp_sniff_subrate cp;
339 		cp.handle             = cpu_to_le16(conn->handle);
340 		cp.max_latency        = __constant_cpu_to_le16(0);
341 		cp.min_remote_timeout = __constant_cpu_to_le16(0);
342 		cp.min_local_timeout  = __constant_cpu_to_le16(0);
343 		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
344 	}
345 
346 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
347 		struct hci_cp_sniff_mode cp;
348 		cp.handle       = cpu_to_le16(conn->handle);
349 		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
350 		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
351 		cp.attempt      = __constant_cpu_to_le16(4);
352 		cp.timeout      = __constant_cpu_to_le16(1);
353 		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
354 	}
355 }
356 
357 static void hci_conn_auto_accept(struct work_struct *work)
358 {
359 	struct hci_conn *conn = container_of(work, struct hci_conn,
360 					     auto_accept_work.work);
361 
362 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
363 		     &conn->dst);
364 }
365 
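/* Allocate and initialise a connection object for the given link type and
 * destination address: pick the default packet types, set up the delayed
 * work items, add the connection to the device's connection hash and to
 * sysfs, and take a reference on the hci_dev.
 */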
366 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
367 {
368 	struct hci_conn *conn;
369 
370 	BT_DBG("%s dst %pMR", hdev->name, dst);
371 
372 	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
373 	if (!conn)
374 		return NULL;
375 
376 	bacpy(&conn->dst, dst);
377 	bacpy(&conn->src, &hdev->bdaddr);
378 	conn->hdev  = hdev;
379 	conn->type  = type;
380 	conn->mode  = HCI_CM_ACTIVE;
381 	conn->state = BT_OPEN;
382 	conn->auth_type = HCI_AT_GENERAL_BONDING;
383 	conn->io_capability = hdev->io_capability;
384 	conn->remote_auth = 0xff;
385 	conn->key_type = 0xff;
386 
387 	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
388 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
389 
390 	switch (type) {
391 	case ACL_LINK:
392 		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
393 		break;
394 	case SCO_LINK:
395 		if (lmp_esco_capable(hdev))
396 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
397 					(hdev->esco_type & EDR_ESCO_MASK);
398 		else
399 			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
400 		break;
401 	case ESCO_LINK:
402 		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
403 		break;
404 	}
405 
406 	skb_queue_head_init(&conn->data_q);
407 
408 	INIT_LIST_HEAD(&conn->chan_list);
409 
410 	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
411 	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
412 	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
413 
414 	atomic_set(&conn->refcnt, 0);
415 
416 	hci_dev_hold(hdev);
417 
418 	hci_conn_hash_add(hdev, conn);
419 	if (hdev->notify)
420 		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
421 
422 	hci_conn_init_sysfs(conn);
423 
424 	return conn;
425 }
426 
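/* Tear down a connection object: cancel its delayed work, return any
 * unacknowledged packet credits to the controller counters, detach a
 * linked SCO/ACL connection, flush its channels and queued data, and
 * release the references taken in hci_conn_add().
 */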
427 int hci_conn_del(struct hci_conn *conn)
428 {
429 	struct hci_dev *hdev = conn->hdev;
430 
431 	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
432 
433 	cancel_delayed_work_sync(&conn->disc_work);
434 	cancel_delayed_work_sync(&conn->auto_accept_work);
435 	cancel_delayed_work_sync(&conn->idle_work);
436 
437 	if (conn->type == ACL_LINK) {
438 		struct hci_conn *sco = conn->link;
439 		if (sco)
440 			sco->link = NULL;
441 
442 		/* Unacked frames */
443 		hdev->acl_cnt += conn->sent;
444 	} else if (conn->type == LE_LINK) {
445 		if (hdev->le_pkts)
446 			hdev->le_cnt += conn->sent;
447 		else
448 			hdev->acl_cnt += conn->sent;
449 	} else {
450 		struct hci_conn *acl = conn->link;
451 		if (acl) {
452 			acl->link = NULL;
453 			hci_conn_drop(acl);
454 		}
455 	}
456 
457 	hci_chan_list_flush(conn);
458 
459 	if (conn->amp_mgr)
460 		amp_mgr_put(conn->amp_mgr);
461 
462 	hci_conn_hash_del(hdev, conn);
463 	if (hdev->notify)
464 		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
465 
466 	skb_queue_purge(&conn->data_q);
467 
468 	hci_conn_del_sysfs(conn);
469 
470 	hci_dev_put(hdev);
471 
472 	hci_conn_put(conn);
473 
474 	return 0;
475 }
476 
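/* Select the BR/EDR controller to use for an outgoing connection.  With a
 * source address given, the device owning that address is chosen;
 * otherwise the first usable BR/EDR device whose address differs from the
 * destination is used.  A reference is taken on the returned hci_dev.
 */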
477 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
478 {
479 	int use_src = bacmp(src, BDADDR_ANY);
480 	struct hci_dev *hdev = NULL, *d;
481 
482 	BT_DBG("%pMR -> %pMR", src, dst);
483 
484 	read_lock(&hci_dev_list_lock);
485 
486 	list_for_each_entry(d, &hci_dev_list, list) {
487 		if (!test_bit(HCI_UP, &d->flags) ||
488 		    test_bit(HCI_RAW, &d->flags) ||
489 		    test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
490 		    d->dev_type != HCI_BREDR)
491 			continue;
492 
493 		/* Simple routing:
494 		 *   No source address - find interface with bdaddr != dst
495 		 *   Source address    - find interface with bdaddr == src
496 		 */
497 
498 		if (use_src) {
499 			if (!bacmp(&d->bdaddr, src)) {
500 				hdev = d; break;
501 			}
502 		} else {
503 			if (bacmp(&d->bdaddr, dst)) {
504 				hdev = d; break;
505 			}
506 		}
507 	}
508 
509 	if (hdev)
510 		hdev = hci_dev_hold(hdev);
511 
512 	read_unlock(&hci_dev_list_lock);
513 	return hdev;
514 }
515 EXPORT_SYMBOL(hci_get_route);
516 
517 static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
518 {
519 	struct hci_conn *conn;
520 
521 	if (status == 0)
522 		return;
523 
524 	BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
525 	       status);
526 
527 	hci_dev_lock(hdev);
528 
529 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
530 	if (!conn)
531 		goto done;
532 
533 	conn->state = BT_CLOSED;
534 
535 	mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
536 			    status);
537 
538 	hci_proto_connect_cfm(conn, status);
539 
540 	hci_conn_del(conn);
541 
542 done:
543 	hci_dev_unlock(hdev);
544 }
545 
546 static int hci_create_le_conn(struct hci_conn *conn)
547 {
548 	struct hci_dev *hdev = conn->hdev;
549 	struct hci_cp_le_create_conn cp;
550 	struct hci_request req;
551 	int err;
552 
553 	hci_req_init(&req, hdev);
554 
555 	memset(&cp, 0, sizeof(cp));
556 	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
557 	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
558 	bacpy(&cp.peer_addr, &conn->dst);
559 	cp.peer_addr_type = conn->dst_type;
560 	cp.own_address_type = conn->src_type;
561 	cp.conn_interval_min = cpu_to_le16(hdev->le_conn_min_interval);
562 	cp.conn_interval_max = cpu_to_le16(hdev->le_conn_max_interval);
563 	cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
564 	cp.min_ce_len = __constant_cpu_to_le16(0x0000);
565 	cp.max_ce_len = __constant_cpu_to_le16(0x0000);
566 
567 	hci_req_add(&req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
568 
569 	err = hci_req_run(&req, create_le_conn_complete);
570 	if (err) {
571 		hci_conn_del(conn);
572 		return err;
573 	}
574 
575 	return 0;
576 }
577 
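/* Create (or reuse) an LE connection to the given address.  An existing
 * hci_conn is reused with updated security parameters, since user space
 * may connect first and pair afterwards.  Only one LE connection attempt
 * can be outstanding at a time, so -EBUSY is returned while another
 * attempt is in progress.  The returned connection has a reference held.
 */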
578 static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
579 				    u8 dst_type, u8 sec_level, u8 auth_type)
580 {
581 	struct hci_conn *conn;
582 	int err;
583 
584 	if (test_bit(HCI_ADVERTISING, &hdev->flags))
585 		return ERR_PTR(-ENOTSUPP);
586 
587 	/* Some devices send ATT messages as soon as the physical link is
588 	 * established. To be able to handle these ATT messages, the user-
589 	 * space first establishes the connection and then starts the pairing
590 	 * process.
591 	 *
592 	 * So if a hci_conn object already exists for the following connection
593 	 * attempt, we simply update pending_sec_level and auth_type fields
594 	 * and return the object found.
595 	 */
596 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
597 	if (conn) {
598 		conn->pending_sec_level = sec_level;
599 		conn->auth_type = auth_type;
600 		goto done;
601 	}
602 
603 	/* Since the controller supports only one LE connection attempt at a
604 	 * time, we return -EBUSY if there is any connection attempt running.
605 	 */
606 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
607 	if (conn)
608 		return ERR_PTR(-EBUSY);
609 
610 	conn = hci_conn_add(hdev, LE_LINK, dst);
611 	if (!conn)
612 		return ERR_PTR(-ENOMEM);
613 
614 	if (dst_type == BDADDR_LE_PUBLIC)
615 		conn->dst_type = ADDR_LE_DEV_PUBLIC;
616 	else
617 		conn->dst_type = ADDR_LE_DEV_RANDOM;
618 
619 	conn->src_type = hdev->own_addr_type;
620 
621 	conn->state = BT_CONNECT;
622 	conn->out = true;
623 	conn->link_mode |= HCI_LM_MASTER;
624 	conn->sec_level = BT_SECURITY_LOW;
625 	conn->pending_sec_level = sec_level;
626 	conn->auth_type = auth_type;
627 
628 	err = hci_create_le_conn(conn);
629 	if (err)
630 		return ERR_PTR(err);
631 
632 done:
633 	hci_conn_hold(conn);
634 	return conn;
635 }
636 
637 static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
638 						u8 sec_level, u8 auth_type)
639 {
640 	struct hci_conn *acl;
641 
642 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
643 		return ERR_PTR(-ENOTSUPP);
644 
645 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
646 	if (!acl) {
647 		acl = hci_conn_add(hdev, ACL_LINK, dst);
648 		if (!acl)
649 			return ERR_PTR(-ENOMEM);
650 	}
651 
652 	hci_conn_hold(acl);
653 
654 	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
655 		acl->sec_level = BT_SECURITY_LOW;
656 		acl->pending_sec_level = sec_level;
657 		acl->auth_type = auth_type;
658 		hci_acl_create_connection(acl);
659 	}
660 
661 	return acl;
662 }
663 
664 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
665 				 __u16 setting)
666 {
667 	struct hci_conn *acl;
668 	struct hci_conn *sco;
669 
670 	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
671 	if (IS_ERR(acl))
672 		return acl;
673 
674 	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
675 	if (!sco) {
676 		sco = hci_conn_add(hdev, type, dst);
677 		if (!sco) {
678 			hci_conn_drop(acl);
679 			return ERR_PTR(-ENOMEM);
680 		}
681 	}
682 
683 	acl->link = sco;
684 	sco->link = acl;
685 
686 	hci_conn_hold(sco);
687 
688 	sco->setting = setting;
689 
690 	if (acl->state == BT_CONNECTED &&
691 	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
692 		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
693 		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
694 
695 		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
696 			/* defer SCO setup until mode change completed */
697 			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
698 			return sco;
699 		}
700 
701 		hci_sco_setup(acl, 0x00);
702 	}
703 
704 	return sco;
705 }
706 
707 /* Create an ACL or LE connection. */
708 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
709 			     __u8 dst_type, __u8 sec_level, __u8 auth_type)
710 {
711 	BT_DBG("%s dst %pMR type 0x%x", hdev->name, dst, type);
712 
713 	switch (type) {
714 	case LE_LINK:
715 		return hci_connect_le(hdev, dst, dst_type, sec_level, auth_type);
716 	case ACL_LINK:
717 		return hci_connect_acl(hdev, dst, sec_level, auth_type);
718 	}
719 
720 	return ERR_PTR(-EINVAL);
721 }
722 
723 /* Check link security requirement */
724 int hci_conn_check_link_mode(struct hci_conn *conn)
725 {
726 	BT_DBG("hcon %p", conn);
727 
728 	if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
729 		return 0;
730 
731 	return 1;
732 }
733 
734 /* Authenticate remote device */
735 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
736 {
737 	BT_DBG("hcon %p", conn);
738 
739 	if (conn->pending_sec_level > sec_level)
740 		sec_level = conn->pending_sec_level;
741 
742 	if (sec_level > conn->sec_level)
743 		conn->pending_sec_level = sec_level;
744 	else if (conn->link_mode & HCI_LM_AUTH)
745 		return 1;
746 
747 	/* Make sure we preserve an existing MITM requirement */
748 	auth_type |= (conn->auth_type & 0x01);
749 
750 	conn->auth_type = auth_type;
751 
752 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
753 		struct hci_cp_auth_requested cp;
754 
755 		/* encrypt must be pending if auth is also pending */
756 		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
757 
758 		cp.handle = cpu_to_le16(conn->handle);
759 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
760 			     sizeof(cp), &cp);
761 		if (conn->key_type != 0xff)
762 			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
763 	}
764 
765 	return 0;
766 }
767 
768 /* Encrypt the link */
769 static void hci_conn_encrypt(struct hci_conn *conn)
770 {
771 	BT_DBG("hcon %p", conn);
772 
773 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
774 		struct hci_cp_set_conn_encrypt cp;
775 		cp.handle  = cpu_to_le16(conn->handle);
776 		cp.encrypt = 0x01;
777 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
778 			     &cp);
779 	}
780 }
781 
782 /* Enable security */
783 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
784 {
785 	BT_DBG("hcon %p", conn);
786 
787 	if (conn->type == LE_LINK)
788 		return smp_conn_security(conn, sec_level);
789 
790 	/* For sdp we don't need the link key. */
791 	if (sec_level == BT_SECURITY_SDP)
792 		return 1;
793 
794 	/* For non-2.1 devices and a low security level we don't need the
795 	   link key. */
796 	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
797 		return 1;
798 
799 	/* For other security levels we need the link key. */
800 	if (!(conn->link_mode & HCI_LM_AUTH))
801 		goto auth;
802 
803 	/* An authenticated FIPS approved combination key has sufficient
804 	 * security for security level 4. */
805 	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
806 	    sec_level == BT_SECURITY_FIPS)
807 		goto encrypt;
808 
809 	/* An authenticated combination key has sufficient security for
810 	   security level 3. */
811 	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
812 	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
813 	    sec_level == BT_SECURITY_HIGH)
814 		goto encrypt;
815 
816 	/* An unauthenticated combination key has sufficient security for
817 	   security level 1 and 2. */
818 	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
819 	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
820 	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
821 		goto encrypt;
822 
823 	/* A combination key always has sufficient security for security
824 	   levels 1 and 2. The high security level additionally requires that
825 	   the combination key was generated using the maximum PIN code
826 	   length (16). This applies to pre-2.1 units. */
827 	if (conn->key_type == HCI_LK_COMBINATION &&
828 	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
829 	     conn->pin_length == 16))
830 		goto encrypt;
831 
832 auth:
833 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
834 		return 0;
835 
836 	if (!hci_conn_auth(conn, sec_level, auth_type))
837 		return 0;
838 
839 encrypt:
840 	if (conn->link_mode & HCI_LM_ENCRYPT)
841 		return 1;
842 
843 	hci_conn_encrypt(conn);
844 	return 0;
845 }
846 EXPORT_SYMBOL(hci_conn_security);
847 
848 /* Check secure link requirement */
849 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
850 {
851 	BT_DBG("hcon %p", conn);
852 
853 	if (sec_level != BT_SECURITY_HIGH)
854 		return 1; /* Accept if non-secure is required */
855 
856 	if (conn->sec_level == BT_SECURITY_HIGH)
857 		return 1;
858 
859 	return 0; /* Reject the insufficiently secure link */
860 }
861 EXPORT_SYMBOL(hci_conn_check_secure);
862 
863 /* Change link key */
864 int hci_conn_change_link_key(struct hci_conn *conn)
865 {
866 	BT_DBG("hcon %p", conn);
867 
868 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
869 		struct hci_cp_change_conn_link_key cp;
870 		cp.handle = cpu_to_le16(conn->handle);
871 		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
872 			     sizeof(cp), &cp);
873 	}
874 
875 	return 0;
876 }
877 
878 /* Switch role */
879 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
880 {
881 	BT_DBG("hcon %p", conn);
882 
883 	if (!role && conn->link_mode & HCI_LM_MASTER)
884 		return 1;
885 
886 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
887 		struct hci_cp_switch_role cp;
888 		bacpy(&cp.bdaddr, &conn->dst);
889 		cp.role = role;
890 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
891 	}
892 
893 	return 0;
894 }
895 EXPORT_SYMBOL(hci_conn_switch_role);
896 
897 /* Enter active mode */
898 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
899 {
900 	struct hci_dev *hdev = conn->hdev;
901 
902 	BT_DBG("hcon %p mode %d", conn, conn->mode);
903 
904 	if (test_bit(HCI_RAW, &hdev->flags))
905 		return;
906 
907 	if (conn->mode != HCI_CM_SNIFF)
908 		goto timer;
909 
910 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
911 		goto timer;
912 
913 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
914 		struct hci_cp_exit_sniff_mode cp;
915 		cp.handle = cpu_to_le16(conn->handle);
916 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
917 	}
918 
919 timer:
920 	if (hdev->idle_timeout > 0)
921 		queue_delayed_work(hdev->workqueue, &conn->idle_work,
922 				   msecs_to_jiffies(hdev->idle_timeout));
923 }
924 
925 /* Drop all connections on the device */
926 void hci_conn_hash_flush(struct hci_dev *hdev)
927 {
928 	struct hci_conn_hash *h = &hdev->conn_hash;
929 	struct hci_conn *c, *n;
930 
931 	BT_DBG("hdev %s", hdev->name);
932 
933 	list_for_each_entry_safe(c, n, &h->list, list) {
934 		c->state = BT_CLOSED;
935 
936 		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
937 		hci_conn_del(c);
938 	}
939 }
940 
941 /* Check pending connect attempts */
942 void hci_conn_check_pending(struct hci_dev *hdev)
943 {
944 	struct hci_conn *conn;
945 
946 	BT_DBG("hdev %s", hdev->name);
947 
948 	hci_dev_lock(hdev);
949 
950 	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
951 	if (conn)
952 		hci_acl_create_connection(conn);
953 
954 	hci_dev_unlock(hdev);
955 }
956 
957 int hci_get_conn_list(void __user *arg)
958 {
959 	struct hci_conn *c;
960 	struct hci_conn_list_req req, *cl;
961 	struct hci_conn_info *ci;
962 	struct hci_dev *hdev;
963 	int n = 0, size, err;
964 
965 	if (copy_from_user(&req, arg, sizeof(req)))
966 		return -EFAULT;
967 
968 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
969 		return -EINVAL;
970 
971 	size = sizeof(req) + req.conn_num * sizeof(*ci);
972 
973 	cl = kmalloc(size, GFP_KERNEL);
974 	if (!cl)
975 		return -ENOMEM;
976 
977 	hdev = hci_dev_get(req.dev_id);
978 	if (!hdev) {
979 		kfree(cl);
980 		return -ENODEV;
981 	}
982 
983 	ci = cl->conn_info;
984 
985 	hci_dev_lock(hdev);
986 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
987 		bacpy(&(ci + n)->bdaddr, &c->dst);
988 		(ci + n)->handle = c->handle;
989 		(ci + n)->type  = c->type;
990 		(ci + n)->out   = c->out;
991 		(ci + n)->state = c->state;
992 		(ci + n)->link_mode = c->link_mode;
993 		if (++n >= req.conn_num)
994 			break;
995 	}
996 	hci_dev_unlock(hdev);
997 
998 	cl->dev_id = hdev->id;
999 	cl->conn_num = n;
1000 	size = sizeof(req) + n * sizeof(*ci);
1001 
1002 	hci_dev_put(hdev);
1003 
1004 	err = copy_to_user(arg, cl, size);
1005 	kfree(cl);
1006 
1007 	return err ? -EFAULT : 0;
1008 }
1009 
1010 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1011 {
1012 	struct hci_conn_info_req req;
1013 	struct hci_conn_info ci;
1014 	struct hci_conn *conn;
1015 	char __user *ptr = arg + sizeof(req);
1016 
1017 	if (copy_from_user(&req, arg, sizeof(req)))
1018 		return -EFAULT;
1019 
1020 	hci_dev_lock(hdev);
1021 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1022 	if (conn) {
1023 		bacpy(&ci.bdaddr, &conn->dst);
1024 		ci.handle = conn->handle;
1025 		ci.type  = conn->type;
1026 		ci.out   = conn->out;
1027 		ci.state = conn->state;
1028 		ci.link_mode = conn->link_mode;
1029 	}
1030 	hci_dev_unlock(hdev);
1031 
1032 	if (!conn)
1033 		return -ENOENT;
1034 
1035 	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1036 }
1037 
1038 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1039 {
1040 	struct hci_auth_info_req req;
1041 	struct hci_conn *conn;
1042 
1043 	if (copy_from_user(&req, arg, sizeof(req)))
1044 		return -EFAULT;
1045 
1046 	hci_dev_lock(hdev);
1047 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1048 	if (conn)
1049 		req.type = conn->auth_type;
1050 	hci_dev_unlock(hdev);
1051 
1052 	if (!conn)
1053 		return -ENOENT;
1054 
1055 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1056 }
1057 
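/* Allocate an HCI channel on top of an existing connection and add it to
 * the connection's channel list.
 */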
1058 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1059 {
1060 	struct hci_dev *hdev = conn->hdev;
1061 	struct hci_chan *chan;
1062 
1063 	BT_DBG("%s hcon %p", hdev->name, conn);
1064 
1065 	chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
1066 	if (!chan)
1067 		return NULL;
1068 
1069 	chan->conn = conn;
1070 	skb_queue_head_init(&chan->data_q);
1071 	chan->state = BT_CONNECTED;
1072 
1073 	list_add_rcu(&chan->list, &conn->chan_list);
1074 
1075 	return chan;
1076 }
1077 
1078 void hci_chan_del(struct hci_chan *chan)
1079 {
1080 	struct hci_conn *conn = chan->conn;
1081 	struct hci_dev *hdev = conn->hdev;
1082 
1083 	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1084 
1085 	list_del_rcu(&chan->list);
1086 
1087 	synchronize_rcu();
1088 
1089 	hci_conn_drop(conn);
1090 
1091 	skb_queue_purge(&chan->data_q);
1092 	kfree(chan);
1093 }
1094 
1095 void hci_chan_list_flush(struct hci_conn *conn)
1096 {
1097 	struct hci_chan *chan, *n;
1098 
1099 	BT_DBG("hcon %p", conn);
1100 
1101 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1102 		hci_chan_del(chan);
1103 }
1104 
1105 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1106 						 __u16 handle)
1107 {
1108 	struct hci_chan *hchan;
1109 
1110 	list_for_each_entry(hchan, &hcon->chan_list, list) {
1111 		if (hchan->handle == handle)
1112 			return hchan;
1113 	}
1114 
1115 	return NULL;
1116 }
1117 
1118 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1119 {
1120 	struct hci_conn_hash *h = &hdev->conn_hash;
1121 	struct hci_conn *hcon;
1122 	struct hci_chan *hchan = NULL;
1123 
1124 	rcu_read_lock();
1125 
1126 	list_for_each_entry_rcu(hcon, &h->list, list) {
1127 		hchan = __hci_chan_lookup_handle(hcon, handle);
1128 		if (hchan)
1129 			break;
1130 	}
1131 
1132 	rcu_read_unlock();
1133 
1134 	return hchan;
1135 }
1136